QP/C++ Real-Time Embedded Framework (RTEF)
This model is used to generate the whole QP/C++ source code.

Copyright (c) 2005 Quantum Leaps, LLC. All rights reserved.

           Q u a n t u m  L e a P s
           ------------------------
           Modern Embedded Software

SPDX-License-Identifier: GPL-3.0-or-later OR LicenseRef-QL-commercial

The QP/C++ software is dual-licensed under the terms of the open-source
GNU General Public License (GPL) or under the terms of one of the
closed-source Quantum Leaps commercial licenses.

Redistributions in source code must retain this top-level comment block.
Plagiarizing this software to sidestep the license obligations is illegal.

NOTE: The GPL (see <www.gnu.org/licenses/gpl-3.0>) does NOT permit the
incorporation of the QP/C++ software into proprietary programs. Please
contact Quantum Leaps for commercial licensing options, which expressly
supersede the GPL and are designed explicitly for licensees interested in
using QP/C++ in closed-source proprietary applications.

Quantum Leaps contact information:
<www.state-machine.com/licensing>
<info@state-machine.com>

public qpcpp 2025-12-31
#48B37CF39D4FD9DE279250B31FD388AFD0BE9B40

= int;
= int;
= float;
= double;

//! the current QP version number string in ROM, based on #QP_VERSION_STR
{QP_VERSION_STR};

= std::uint8_t;
= std::uint16_t;
= std::uint32_t;

: std::uint8_t { DYNAMIC };

noexcept
  : sig(s), evtTag_(0x01U), refCtr_(0x0EU)

= delete

noexcept
// no event parameters to initialize

noexcept
static_cast<void>(dummy); // no event parameters to initialize

const noexcept
std::uint8_t rc = refCtr_;
return (rc <= 2U*QF_MAX_ACTIVE)
    && (((evtTag_ ^ rc) & 0x0FU) == 0x0FU);

const noexcept
return static_cast<std::uint8_t>(evtTag_ >> 4U);

= std::uint_fast8_t;
= QState (*)(void * const me, QEvt const * const e);
= QState (*)(void * const me);
// forward declaration
= void (*)(QXThread * const me);

{
    QMState const *superstate;
    QStateHandler const stateHandler;
    QActionHandler const entryAction;
    QActionHandler const exitAction;
    QActionHandler const initAction;
};

{
    QMState const *target;
    QActionHandler const act[1];
};

{
    QStateHandler fun;
    QActionHandler act;
    QXThreadHandler thr;
    QMState const *obj;
    QMTranActTable const *tatbl;
#ifndef Q_UNSAFE
    std::uintptr_t uint;
#endif
    constexpr QAsmAttr() : fun(nullptr) {}
};

{4};

Abstract State Machine

//! All possible return values from state-handlers
//! @note
//! The order is important for algorithmic correctness.
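//!
//! A state-handler reports one of these statuses back to the dispatcher
//! through the helper macros Q_HANDLED(), Q_UNHANDLED(), Q_TRAN(),
//! Q_SUPER(), etc. For example (illustrative sketch of an application
//! state-handler; 'Blinky', TIMEOUT_SIG and BSP_ledOff() are assumed
//! application code, not part of the framework):
//!
//!     Q_STATE_DEF(Blinky, off) {
//!         QP::QState status_;
//!         switch (e->sig) {
//!             case Q_ENTRY_SIG: {
//!                 BSP_ledOff();
//!                 status_ = Q_HANDLED();   // -> Q_RET_HANDLED
//!                 break;
//!             }
//!             case TIMEOUT_SIG: {
//!                 status_ = Q_TRAN(&on);   // -> Q_RET_TRAN
//!                 break;
//!             }
//!             default: {
//!                 status_ = Q_SUPER(&top); // -> Q_RET_SUPER
//!                 break;
//!             }
//!         }
//!         return status_;
//!     }
//!
//! Because the transition-related statuses are grouped at the end of the
//! enumeration, the dispatchers below can detect "any kind of transition"
//! with a single comparison, e.g. (r >= Q_RET_TRAN).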
: QState { // unhandled and need to "bubble up" Q_RET_SUPER, //!< event passed to superstate to handle Q_RET_UNHANDLED, //!< event unhandled due to a guard // handled and do not need to "bubble up" Q_RET_HANDLED, //!< event handled (internal transition) Q_RET_IGNORED, //!< event silently ignored (bubbled up to top) // entry/exit Q_RET_ENTRY, //!< state entry action executed Q_RET_EXIT, //!< state exit action executed // no side effects Q_RET_NULL, //!< return value without any effect // transitions need to execute transition-action table in QP::QMsm Q_RET_TRAN, //!< regular transition Q_RET_TRAN_INIT, //!< initial transition in a state // transitions that additionally clobber QHsm.m_state Q_RET_TRAN_HIST, //!< transition to history of a given state }; //! Reserved signals by the QP-framework. : QSignal { Q_EMPTY_SIG, //!< signal to execute the default case Q_ENTRY_SIG, //!< signal for entry actions Q_EXIT_SIG, //!< signal for exit actions Q_INIT_SIG //!< signal for nested initial transitions }; noexcept : m_state(), m_temp () noexcept // empty = 0 this->init(nullptr, qsId); = 0 noexcept static_cast<void>(state); return false; const noexcept return m_state.fun; const noexcept return m_state.obj; noexcept return m_state.fun; noexcept static_cast<void>(me); static_cast<void>(e); return Q_RET_IGNORED; // the top state ignores all events noexcept m_temp.fun = target; return Q_RET_TRAN; noexcept m_temp.fun = hist; return Q_RET_TRAN_HIST; noexcept m_temp.fun = superstate; return Q_RET_SUPER; noexcept m_temp.tatbl = static_cast<QP::QMTranActTable const *>(tatbl); return Q_RET_TRAN; noexcept m_temp.tatbl = static_cast<QP::QMTranActTable const *>(tatbl); return Q_RET_TRAN_INIT; noexcept m_state.obj = hist; m_temp.tatbl = static_cast<QP::QMTranActTable const *>(tatbl); return Q_RET_TRAN_HIST; noexcept m_temp.obj = s; return Q_RET_ENTRY; noexcept static_cast<void>(s); // unused parameter return Q_RET_ENTRY; noexcept m_temp.obj = s; return Q_RET_EXIT; noexcept static_cast<void>(s); // unused parameter return Q_RET_EXIT; Human-generated State Machine noexcept : QAsm() m_state.fun = Q_STATE_CAST(&top); m_temp.fun = initial; override QF_CRIT_STAT QState r; // produce QS dictionary for QP::QHsm::top() #ifdef Q_SPY QS_CRIT_ENTRY(); QS_MEM_SYS(); if ((QS::priv_.flags & 0x01U) == 0U) { QS::priv_.flags |= 0x01U; r = Q_RET_HANDLED; } else { r = Q_RET_IGNORED; } QS_MEM_APP(); QS_CRIT_EXIT(); if (r == Q_RET_HANDLED) { QS_FUN_DICTIONARY(&QP::QHsm::top); } #else Q_UNUSED_PAR(qsId); #endif // def Q_SPY QStateHandler t = m_state.fun; QF_CRIT_ENTRY(); Q_REQUIRE_INCRIT(200, (m_temp.fun != nullptr) && (t == Q_STATE_CAST(&top))); QF_CRIT_EXIT(); // execute the top-most initial tran. r = (*m_temp.fun)(this, Q_EVT_CAST(QEvt)); QF_CRIT_ENTRY(); // the top-most initial tran. must be taken Q_ASSERT_INCRIT(210, r == Q_RET_TRAN); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_STATE_INIT, qsId) QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(t); // the source state QS_FUN_PRE(m_temp.fun); // the target of the initial tran. QS_END_PRE() QS_MEM_APP(); QF_CRIT_EXIT(); // drill down into the state hierarchy with initial transitions... do { QStateHandler path[QHSM_MAX_NEST_DEPTH_]; // tran. entry path array std::int_fast8_t ip = 0; // tran. 
entry path index path[0] = m_temp.fun; static_cast<void>(QHSM_RESERVED_EVT_(m_temp.fun, Q_EMPTY_SIG)); // note: ip is here the fixed upper loop bound while ((m_temp.fun != t) && (ip < (QHSM_MAX_NEST_DEPTH_ - 1))) { ++ip; path[ip] = m_temp.fun; static_cast<void>(QHSM_RESERVED_EVT_(m_temp.fun, Q_EMPTY_SIG)); } QF_CRIT_ENTRY(); // too many state nesting levels or "malformed" HSM Q_ENSURE_INCRIT(220, ip < QHSM_MAX_NEST_DEPTH_); QF_CRIT_EXIT(); m_temp.fun = path[0]; // retrace the entry path in reverse (desired) order... // note: ip is the fixed upper loop bound do { // enter path[ip] if (QHSM_RESERVED_EVT_(path[ip], Q_ENTRY_SIG) == Q_RET_HANDLED) { QS_STATE_ENTRY_(path[ip], qsId); } --ip; } while (ip >= 0); t = path[0]; // current state becomes the new source r = QHSM_RESERVED_EVT_(t, Q_INIT_SIG); // execute initial tran. #ifdef Q_SPY if (r == Q_RET_TRAN) { QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_STATE_INIT, qsId) QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(t); // the source state QS_FUN_PRE(m_temp.fun); // the target of the initial tran. QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); } #endif // Q_SPY } while (r == Q_RET_TRAN); QF_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_INIT_TRAN, qsId) QS_TIME_PRE(); // time stamp QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(t); // the new active state QS_END_PRE() QS_MEM_APP(); QF_CRIT_EXIT(); m_state.fun = t; // change the current active state #ifndef Q_UNSAFE m_temp.uint = ~m_state.uint; #endif override this->init(nullptr, qsId); override #ifndef Q_SPY Q_UNUSED_PAR(qsId); #endif QStateHandler s = m_state.fun; QStateHandler t = s; QF_CRIT_STAT QF_CRIT_ENTRY(); Q_REQUIRE_INCRIT(300, (e != nullptr) && (s != nullptr)); Q_INVARIANT_INCRIT(301, e->verify_() && (m_state.uint == static_cast<std::uintptr_t>(~m_temp.uint))); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_DISPATCH, qsId) QS_TIME_PRE(); // time stamp QS_SIG_PRE(e->sig); // the signal of the event QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(s); // the current state QS_END_PRE() QS_MEM_APP(); QF_CRIT_EXIT(); // process the event hierarchically... QState r; m_temp.fun = s; std::int_fast8_t ip = QHSM_MAX_NEST_DEPTH_; // fixed upper loop bound do { s = m_temp.fun; r = (*s)(this, e); // invoke state handler s if (r == Q_RET_UNHANDLED) { // unhandled due to a guard? QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_UNHANDLED, qsId) QS_SIG_PRE(e->sig); // the signal of the event QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(s); // the current state QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); r = QHSM_RESERVED_EVT_(s, Q_EMPTY_SIG); // superstate of s } --ip; } while ((r == Q_RET_SUPER) && (ip > 0)); QF_CRIT_ENTRY(); Q_ENSURE_INCRIT(310, ip > 0); QF_CRIT_EXIT(); if (r >= Q_RET_TRAN) { // tran. (regular or history) taken? #ifdef Q_SPY if (r == Q_RET_TRAN_HIST) { // tran. to history? QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_TRAN_HIST, qsId) QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(s); // tran. to history source QS_FUN_PRE(m_temp.fun); // tran. to history target QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); } #endif // Q_SPY QStateHandler path[QHSM_MAX_NEST_DEPTH_]; path[0] = m_temp.fun; // tran. target path[1] = t; // current state path[2] = s; // tran. source // exit current state to tran. source s... 
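// NOTE: the taken transition is executed in the following steps
// (an overview of the code below):
//   1. exit the current state configuration up to the tran. source 's'
//      (the loop that follows);
//   2. hsm_tran() finds the least-common-ancestor (LCA) of the source
//      and target and fills path[] with the states that must be entered;
//   3. the entry path[] is retraced in reverse (correct) order;
//   4. initial transitions, if any, drill into the target's sub-states.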
ip = QHSM_MAX_NEST_DEPTH_; // fixed upper loop bound for (; (t != s) && (ip > 0); t = m_temp.fun) { // exit from t if (QHSM_RESERVED_EVT_(t, Q_EXIT_SIG) == Q_RET_HANDLED) { QS_STATE_EXIT_(t, qsId); // find superstate of t static_cast<void>(QHSM_RESERVED_EVT_(t, Q_EMPTY_SIG)); } --ip; } QF_CRIT_ENTRY(); Q_ENSURE_INCRIT(320, ip > 0); QF_CRIT_EXIT(); ip = hsm_tran(&path[0], qsId); // take the tran. // execute state entry actions in the desired order... // note: ip is the fixed upper loop bound for (; ip >= 0; --ip) { // enter path[ip] if (QHSM_RESERVED_EVT_(path[ip], Q_ENTRY_SIG) == Q_RET_HANDLED) { QS_STATE_ENTRY_(path[ip], qsId); } } t = path[0]; // stick the target into register m_temp.fun = t; // update the next state // drill into the target hierarchy... while (QHSM_RESERVED_EVT_(t, Q_INIT_SIG) == Q_RET_TRAN) { QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_STATE_INIT, qsId) QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(t); // the source (pseudo)state QS_FUN_PRE(m_temp.fun); // the target of the tran. QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); ip = 0; path[0] = m_temp.fun; // find superstate static_cast<void>(QHSM_RESERVED_EVT_(m_temp.fun, Q_EMPTY_SIG)); // note: ip is the fixed upper loop bound while ((m_temp.fun != t) && (ip < (QHSM_MAX_NEST_DEPTH_ - 1))) { ++ip; path[ip] = m_temp.fun; // find superstate static_cast<void>( QHSM_RESERVED_EVT_(m_temp.fun, Q_EMPTY_SIG)); } QF_CRIT_ENTRY(); // too many state nesting levels or "malformed" HSM Q_ENSURE_INCRIT(330, ip < QHSM_MAX_NEST_DEPTH_); QF_CRIT_EXIT(); m_temp.fun = path[0]; // retrace the entry path in reverse (correct) order... // note: ip is the fixed upper loop bound do { // enter path[ip] if (QHSM_RESERVED_EVT_(path[ip], Q_ENTRY_SIG) == Q_RET_HANDLED) { QS_STATE_ENTRY_(path[ip], qsId); } --ip; } while (ip >= 0); t = path[0]; // current state becomes the new source } QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_TRAN, qsId) QS_TIME_PRE(); // time stamp QS_SIG_PRE(e->sig); // the signal of the event QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(s); // the source of the tran. QS_FUN_PRE(t); // the new active state QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); } #ifdef Q_SPY else if (r == Q_RET_HANDLED) { QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_INTERN_TRAN, qsId) QS_TIME_PRE(); // time stamp QS_SIG_PRE(e->sig); // the signal of the event QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(s); // the source state QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); } else { QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_IGNORED, qsId) QS_TIME_PRE(); // time stamp QS_SIG_PRE(e->sig); // the signal of the event QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(m_state.fun); // the current state QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); } #endif // Q_SPY m_state.fun = t; // change the current active state #ifndef Q_UNSAFE m_temp.uint = ~m_state.uint; #endif noexcept override QF_CRIT_STAT QF_CRIT_ENTRY(); Q_INVARIANT_INCRIT(602, m_state.uint == static_cast<std::uintptr_t>(~m_temp.uint)); QF_CRIT_EXIT(); bool inState = false; // assume that this HSM is not in 'state' // scan the state hierarchy bottom-up QStateHandler s = m_state.fun; std::int_fast8_t lbound = QHSM_MAX_NEST_DEPTH_ + 1; // fixed upper loop bound QState r = Q_RET_SUPER; for (; (r != Q_RET_IGNORED) && (lbound > 0); --lbound) { if (s == state) { // do the states match? 
inState = true; // 'true' means that match found break; // break out of the for-loop } else { r = QHSM_RESERVED_EVT_(s, Q_EMPTY_SIG); s = m_temp.fun; } } QF_CRIT_ENTRY(); Q_ENSURE_INCRIT(690, lbound > 0); QF_CRIT_EXIT(); #ifndef Q_UNSAFE m_temp.uint = ~m_state.uint; #endif return inState; // return the status noexcept QStateHandler child = m_state.fun; // start with current state bool isFound = false; // start with the child not found // establish stable state configuration m_temp.fun = child; QState r; std::int_fast8_t lbound = QHSM_MAX_NEST_DEPTH_; // fixed upper loop bound do { // is this the parent of the current child? if (m_temp.fun == parent) { isFound = true; // child is found r = Q_RET_IGNORED; // break out of the loop } else { child = m_temp.fun; r = QHSM_RESERVED_EVT_(m_temp.fun, Q_EMPTY_SIG); } --lbound; } while ((r != Q_RET_IGNORED) // the top state not reached && (lbound > 0)); #ifndef Q_UNSAFE m_temp.uint = ~m_state.uint; #else Q_UNUSED_PAR(isFound); #endif QF_CRIT_STAT QF_CRIT_ENTRY(); // NOTE: the following postcondition can only succeed when // (lbound > 0), so no extra check is necessary. Q_ENSURE_INCRIT(890, isFound); QF_CRIT_EXIT(); return child; noexcept override return m_state.fun; #ifndef Q_SPY Q_UNUSED_PAR(qsId); #endif std::int_fast8_t ip = -1; // tran. entry path index QStateHandler t = path[0]; QStateHandler const s = path[2]; QF_CRIT_STAT // (a) check source==target (tran. to self)... if (s == t) { // exit source s if (QHSM_RESERVED_EVT_(s, Q_EXIT_SIG) == Q_RET_HANDLED) { QS_STATE_EXIT_(s, qsId); } ip = 0; // enter the target } else { // find superstate of target static_cast<void>(QHSM_RESERVED_EVT_(t, Q_EMPTY_SIG)); t = m_temp.fun; // (b) check source==target->super... if (s == t) { ip = 0; // enter the target } else { // find superstate of src static_cast<void>(QHSM_RESERVED_EVT_(s, Q_EMPTY_SIG)); // (c) check source->super==target->super... if (m_temp.fun == t) { // exit source s if (QHSM_RESERVED_EVT_(s, Q_EXIT_SIG) == Q_RET_HANDLED) { QS_STATE_EXIT_(s, qsId); } ip = 0; // enter the target } else { // (d) check source->super==target... if (m_temp.fun == path[0]) { // exit source s if (QHSM_RESERVED_EVT_(s, Q_EXIT_SIG) == Q_RET_HANDLED) { QS_STATE_EXIT_(s, qsId); } } else { // (e) check rest of source==target->super->super.. // and store the entry path along the way std::int_fast8_t iq = 0; // indicate that LCA was found ip = 1; // enter target and its superstate path[1] = t; // save the superstate of target t = m_temp.fun; // save source->super // find target->super->super... // note: ip is the fixed upper loop bound QState r = QHSM_RESERVED_EVT_(path[1], Q_EMPTY_SIG); while ((r == Q_RET_SUPER) && (ip < (QHSM_MAX_NEST_DEPTH_ - 1))) { ++ip; path[ip] = m_temp.fun; // store the entry path if (m_temp.fun == s) { // is it the source? iq = 1; // indicate that the LCA found --ip; // do not enter the source r = Q_RET_HANDLED; // terminate the loop } else { // it is not the source, keep going up r = QHSM_RESERVED_EVT_(m_temp.fun, Q_EMPTY_SIG); } } QF_CRIT_ENTRY(); // NOTE: The following postcondition succeeds only when // ip < QHSM_MAX_NEST_DEPTH, so no additional check is necessary // too many state nesting levels or "malformed" HSM. Q_ENSURE_INCRIT(510, r != Q_RET_SUPER); QF_CRIT_EXIT(); // the LCA not found yet? if (iq == 0) { // exit source s if (QHSM_RESERVED_EVT_(s, Q_EXIT_SIG) == Q_RET_HANDLED) { QS_STATE_EXIT_(s, qsId); } // (f) check the rest of source->super // == target->super->super... 
iq = ip; r = Q_RET_IGNORED; // indicate that the LCA NOT found // note: iq is the fixed upper loop bound do { if (t == path[iq]) { // is this the LCA? r = Q_RET_HANDLED; // indicate the LCA found ip = iq - 1; // do not enter the LCA iq = -1; // cause termination of the loop } else { --iq; // try lower superstate of target } } while (iq >= 0); // the LCA not found yet? if (r != Q_RET_HANDLED) { // (g) check each source->super->... // for each target->super... r = Q_RET_IGNORED; // keep looping std::int_fast8_t lbound = QHSM_MAX_NEST_DEPTH_; do { // exit from t if (QHSM_RESERVED_EVT_(t, Q_EXIT_SIG) == Q_RET_HANDLED) { QS_STATE_EXIT_(t, qsId); // find superstate of t static_cast<void>( QHSM_RESERVED_EVT_(t, Q_EMPTY_SIG)); } t = m_temp.fun; // set to super of t iq = ip; do { // is this the LCA? if (t == path[iq]) { ip = iq - 1; // do not enter the LCA iq = -1; // break out of inner loop r = Q_RET_HANDLED; // break outer loop } else { --iq; } } while (iq >= 0); --lbound; } while ((r != Q_RET_HANDLED) && (lbound > 0)); QF_CRIT_ENTRY(); Q_ENSURE_INCRIT(530, lbound > 0); QF_CRIT_EXIT(); } } } } } } QF_CRIT_ENTRY(); Q_ENSURE_INCRIT(590, ip < QHSM_MAX_NEST_DEPTH_); QF_CRIT_EXIT(); return ip; Machine-generated State Machine noexcept : QAsm() m_state.obj = &l_msm_top_s; // the current state (top) m_temp.fun = initial; // the initial tran. handler override #ifndef Q_SPY Q_UNUSED_PAR(qsId); #endif QF_CRIT_STAT QF_CRIT_ENTRY(); Q_REQUIRE_INCRIT(200, (m_temp.fun != nullptr) && (m_state.obj == &l_msm_top_s)); QF_CRIT_EXIT(); // execute the top-most initial tran. QState r = (*m_temp.fun)(this, Q_EVT_CAST(QEvt)); QF_CRIT_ENTRY(); // the top-most initial tran. must be taken Q_ASSERT_INCRIT(210, r == Q_RET_TRAN_INIT); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_STATE_INIT, qsId) QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(m_state.obj->stateHandler); // source state QS_FUN_PRE(m_temp.tatbl->target->stateHandler); // target state QS_END_PRE() QS_MEM_APP(); QF_CRIT_EXIT(); // set state to the last tran. target m_state.obj = m_temp.tatbl->target; // drill down into the state hierarchy with initial transitions... std::int_fast8_t lbound = QMSM_MAX_NEST_DEPTH_; // fixed upper loop bound do { // execute the tran. table r = execTatbl_(m_temp.tatbl, qsId); --lbound; } while ((r >= Q_RET_TRAN_INIT) && (lbound > 0)); QF_CRIT_ENTRY(); Q_ENSURE_INCRIT(290, lbound > 0); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_INIT_TRAN, qsId) QS_TIME_PRE(); // time stamp QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(m_state.obj->stateHandler); // the new current state QS_END_PRE() QS_MEM_APP(); QF_CRIT_EXIT(); #ifndef Q_UNSAFE m_temp.uint = ~m_state.uint; #endif override this->init(nullptr, qsId); override #ifndef Q_SPY Q_UNUSED_PAR(qsId); #endif QMState const *s = m_state.obj; // store the current state QMState const *t = s; QF_CRIT_STAT QF_CRIT_ENTRY(); Q_REQUIRE_INCRIT(300, (e != nullptr) && (s != nullptr)); Q_INVARIANT_INCRIT(301, e->verify_() && (m_state.uint == static_cast<std::uintptr_t>(~m_temp.uint))); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_DISPATCH, qsId) QS_TIME_PRE(); // time stamp QS_SIG_PRE(e->sig); // the signal of the event QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(s->stateHandler); // the current state handler QS_END_PRE() QS_MEM_APP(); QF_CRIT_EXIT(); // scan the state hierarchy up to the top state... QState r; std::int_fast8_t lbound = QMSM_MAX_NEST_DEPTH_; // fixed upper loop bound do { r = (*t->stateHandler)(this, e); // call state handler function // event handled? 
(the most frequent case) if (r >= Q_RET_HANDLED) { break; // done scanning the state hierarchy } // event unhandled and passed to the superstate? else if (r == Q_RET_SUPER) { t = t->superstate; // advance to the superstate } else { // event unhandled due to a guard QF_CRIT_ENTRY(); // event must be unhandled due to a guard evaluating to 'false' Q_ASSERT_INCRIT(310, r == Q_RET_UNHANDLED); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_UNHANDLED, qsId) QS_SIG_PRE(e->sig); // the signal of the event QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(t->stateHandler); // the current state QS_END_PRE() QS_MEM_APP(); QF_CRIT_EXIT(); t = t->superstate; // advance to the superstate } --lbound; } while ((t != nullptr) && (lbound > 0)); QF_CRIT_ENTRY(); Q_ENSURE_INCRIT(320, lbound > 0); QF_CRIT_EXIT(); if (r >= Q_RET_TRAN) { // any kind of tran. taken? QF_CRIT_ENTRY(); // the tran. source state must not be nullptr Q_ASSERT_INCRIT(330, t != nullptr); QF_CRIT_EXIT(); #ifdef Q_SPY QMState const * const ts = t; // for saving tran. table #endif // Q_SPY QMTranActTable const *tatbl; if (r == Q_RET_TRAN_HIST) { // was it tran. to history? QMState const * const hist = m_state.obj; // save history m_state.obj = s; // restore the original state QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_TRAN_HIST, qsId) QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(t->stateHandler); // source state handler QS_FUN_PRE(hist->stateHandler); // target state handler QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); // save the tran-action table before it gets clobbered tatbl = m_temp.tatbl; exitToTranSource_(s, t, qsId); static_cast<void>(execTatbl_(tatbl, qsId)); r = enterHistory_(hist, qsId); s = m_state.obj; t = s; // set target to the current state } lbound = QMSM_MAX_NEST_DEPTH_; // fixed upper loop bound while ((r >= Q_RET_TRAN) && (lbound > 0)) { // save the tran-action table before it gets clobbered tatbl = m_temp.tatbl; m_temp.obj = nullptr; // clear exitToTranSource_(s, t, qsId); r = execTatbl_(tatbl, qsId); s = m_state.obj; t = s; // set target to the current state --lbound; } QF_CRIT_ENTRY(); Q_ENSURE_INCRIT(360, lbound > 0); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_TRAN, qsId) QS_TIME_PRE(); // time stamp QS_SIG_PRE(e->sig); // the signal of the event QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(ts->stateHandler); // the tran. source QS_FUN_PRE(s->stateHandler); // the new active state QS_END_PRE() QS_MEM_APP(); QF_CRIT_EXIT(); } #ifdef Q_SPY // was the event handled? else if (r == Q_RET_HANDLED) { QF_CRIT_ENTRY(); // internal tran. source can't be nullptr Q_ASSERT_INCRIT(380, t != nullptr); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_INTERN_TRAN, qsId) QS_TIME_PRE(); // time stamp QS_SIG_PRE(e->sig); // the signal of the event QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(t->stateHandler); // the source state QS_END_PRE() QS_MEM_APP(); QF_CRIT_EXIT(); } // event bubbled to the 'top' state? 
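// NOTE: the state hierarchy of a QMsm terminates at a nullptr superstate,
// so an event that bubbles past the top-most application state leaves the
// loop above with (t == nullptr) and is silently discarded; this case is
// traced as QS_QEP_IGNORED: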
else if (t == nullptr) { QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_IGNORED, qsId) QS_TIME_PRE(); // time stamp QS_SIG_PRE(e->sig); // the signal of the event QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(s->stateHandler); // the current state QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); } #endif // Q_SPY else { // empty } #ifndef Q_UNSAFE m_temp.uint = ~m_state.uint; #endif noexcept override return m_state.obj->stateHandler; noexcept override bool inState = false; // assume that this SM is not in 'state' QMState const *s = m_state.obj; std::int_fast8_t lbound = QMSM_MAX_NEST_DEPTH_; // fixed upper loop bound for (; (s != nullptr) && (lbound > 0); --lbound) { if (s->stateHandler == state) { // match found? inState = true; break; } else { s = s->superstate; // advance to the superstate } } QF_CRIT_STAT QF_CRIT_ENTRY(); Q_ENSURE_INCRIT(490, lbound > 0); QF_CRIT_EXIT(); return inState; const noexcept QMState const *child = m_state.obj; bool isFound = false; // start with the child not found QMState const *s; std::int_fast8_t lbound = QMSM_MAX_NEST_DEPTH_; // fixed upper loop bound for (s = m_state.obj; (s != nullptr) && (lbound > 0); s = s->superstate) { if (s == parent) { isFound = true; // child is found break; } else { child = s; } --lbound; } QF_CRIT_STAT QF_CRIT_ENTRY(); Q_ENSURE_INCRIT(680, lbound > 0); QF_CRIT_EXIT(); if (!isFound) { // still not found? lbound = QMSM_MAX_NEST_DEPTH_; // fixed upper loop bound for (s = m_temp.obj; (s != nullptr) && (lbound > 0); s = s->superstate) { if (s == parent) { isFound = true; // child is found break; } else { child = s; } --lbound; } } QF_CRIT_ENTRY(); // NOTE: the following postcondition can only succeed when // (lbound > 0), so no extra check is necessary. Q_ENSURE_INCRIT(690, isFound); QF_CRIT_EXIT(); return child; // return the child #ifndef Q_SPY Q_UNUSED_PAR(qsId); #endif QF_CRIT_STAT QF_CRIT_ENTRY(); // precondition: // - the tran-action table pointer must not be NULL Q_REQUIRE_INCRIT(700, tatbl != nullptr); QF_CRIT_EXIT(); QState r = Q_RET_NULL; std::int_fast8_t lbound = QMSM_MAX_TRAN_LENGTH_; // fixed upper loop bound QActionHandler const *a = &tatbl->act[0]; for (; (*a != nullptr) && (lbound > 0); ++a) { r = (*(*a))(this); // call the action through the 'a' pointer --lbound; #ifdef Q_SPY QS_CRIT_ENTRY(); QS_MEM_SYS(); if (r == Q_RET_ENTRY) { QS_BEGIN_PRE(QS_QEP_STATE_ENTRY, qsId) QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(m_temp.obj->stateHandler); // entered state QS_END_PRE() } else if (r == Q_RET_EXIT) { QS_BEGIN_PRE(QS_QEP_STATE_EXIT, qsId) QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(m_temp.obj->stateHandler); // exited state QS_END_PRE() } else if (r == Q_RET_TRAN_INIT) { QS_BEGIN_PRE(QS_QEP_STATE_INIT, qsId) QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(tatbl->target->stateHandler); // source QS_FUN_PRE(m_temp.tatbl->target->stateHandler); // target QS_END_PRE() } else { // empty } QS_MEM_APP(); QS_CRIT_EXIT(); #endif // Q_SPY } QF_CRIT_ENTRY(); // NOTE: the following postcondition can only succeed when // (lbound > 0), so no extra check is necessary. Q_ENSURE_INCRIT(790, *a == nullptr); QF_CRIT_EXIT(); m_state.obj = (r >= Q_RET_TRAN) ? m_temp.tatbl->target : tatbl->target; return r; #ifndef Q_SPY Q_UNUSED_PAR(qsId); #endif QF_CRIT_STAT // exit states from the current state to the tran. 
source state QMState const *s = cs; std::int_fast8_t lbound = QMSM_MAX_NEST_DEPTH_; // fixed upper loop bound for (; (s != ts) && (lbound > 0); --lbound) { // exit action provided in state 's'? if (s->exitAction != nullptr) { // execute the exit action static_cast<void>((*s->exitAction)(this)); QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_STATE_EXIT, qsId) QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(s->stateHandler); // the exited state handler QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); } s = s->superstate; // advance to the superstate } QF_CRIT_ENTRY(); Q_ENSURE_INCRIT(890, lbound > 0); QF_CRIT_EXIT(); #ifndef Q_SPY Q_UNUSED_PAR(qsId); #endif // record the entry path from current state to history QMState const *epath[QMSM_MAX_ENTRY_DEPTH_]; QMState const *s = hist; std::int_fast8_t i = 0; // tran. entry path index while ((s != m_state.obj) && (i < (QMSM_MAX_ENTRY_DEPTH_ - 1))) { if (s->entryAction != nullptr) { epath[i] = s; ++i; } s = s->superstate; } QF_CRIT_STAT QF_CRIT_ENTRY(); Q_ASSERT_INCRIT(910, s == m_state.obj); QF_CRIT_EXIT(); // retrace the entry path in reverse (desired) order... while (i > 0) { --i; (*epath[i]->entryAction)(this); // run entry action in epath[i] QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_STATE_ENTRY, qsId) QS_OBJ_PRE(this); QS_FUN_PRE(epath[i]->stateHandler); // entered state handler QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); } m_state.obj = hist; // set current state to the tran. target // initial tran. present? QState r; if (hist->initAction != nullptr) { r = (*hist->initAction)(this); // execute the tran. action QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QEP_STATE_INIT, qsId) QS_OBJ_PRE(this); // this state machine object QS_FUN_PRE(hist->stateHandler); // source QS_FUN_PRE(m_temp.tatbl->target->stateHandler); // target QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); } else { r = Q_RET_NULL; } return r; const noexcept return &l_msm_top_s; \ QP::QState state_ ## _h(QP::QEvt const * const e); \ static QP::QState state_(void * const me, QP::QEvt const * const e) \ QP::QState subclass_::state_(void * const me, QP::QEvt const * const e) { \ return static_cast<subclass_ *>(me)->state_ ## _h(e); } \ QP::QState subclass_::state_ ## _h(QP::QEvt const * const e) (Q_RET_HANDLED) (Q_RET_UNHANDLED) (static_cast<subclass_ const *>(e)) \ (reinterpret_cast<QP::QStateHandler>(handler_)) \ QP::QState state_ ## _h(QP::QEvt const * const e); \ static QP::QState state_(void * const me, QP::QEvt const * const e); \ static QP::QMState const state_ ## _s \ QP::QState action_ ## _h(); \ static QP::QState action_(void * const me) \ QP::QState subclass_::state_(void * const me, QP::QEvt const * const e) {\ return static_cast<subclass_ *>(me)->state_ ## _h(e); } \ QP::QState subclass_::state_ ## _h(QP::QEvt const * const e) \ QP::QState subclass_::action_(void * const me) { \ return static_cast<subclass_ *>(me)->action_ ## _h(); } \ QP::QState subclass_::action_ ## _h() (Q_RET_HANDLED) (Q_RET_HANDLED) (Q_RET_SUPER) (nullptr) (nullptr) (static_cast<void>(par_)) (sizeof(array_) / sizeof((array_)[0U])) (reinterpret_cast<type_ *>(uint_)) init((qsId_)) init(0U) dispatch((e_), (qsId_)) dispatch((e_), 0U) = std::uint16_t; = QEvt const *; = std::uint8_t; = std::uint16_t; = std::uint32_t; = std::uint8_t; = std::uint16_t; = std::uint32_t; noexcept static std::uint8_t const log2LUT[16] = { 0U, 1U, 2U, 2U, 3U, 3U, 3U, 3U, 4U, 4U, 4U, 4U, 4U, 4U, 4U, 4U }; std::uint_fast8_t n = 0U; QP::QPSetBits t; #if (QF_MAX_ACTIVE > 16U) t = static_cast<QP::QPSetBits>(x >> 16U); if 
(t != 0U) { n += 16U; x = t; } #endif #if (QF_MAX_ACTIVE > 8U) t = (x >> 8U); if (t != 0U) { n += 8U; x = t; } #endif t = (x >> 4U); if (t != 0U) { n += 4U; x = t; } return n + log2LUT[x]; noexcept m_bits[0] = 0U; #if (QF_MAX_ACTIVE > 32) m_bits[1] = 0U; #endif const noexcept #if (QF_MAX_ACTIVE <= 32U) return (m_bits[0] == 0U); #else return (m_bits[0] == 0U) ? (m_bits[1] == 0U) : false; #endif const noexcept #if (QF_MAX_ACTIVE <= 32U) return (m_bits[0] != 0U); #else return (m_bits[0] != 0U) ? true : (m_bits[1] != 0U); #endif const noexcept #if (QF_MAX_ACTIVE <= 32U) return (m_bits[0] & (static_cast<QPSetBits>(1U) << (n - 1U))) != 0U; #else return (n <= 32U) ? ((m_bits[0] & (static_cast<QPSetBits>(1U) << (n - 1U))) != 0U) : ((m_bits[1] & (static_cast<QPSetBits>(1U) << (n - 33U))) != 0U); #endif noexcept #if (QF_MAX_ACTIVE <= 32U) m_bits[0] = (m_bits[0] | (static_cast<QPSetBits>(1U) << (n - 1U))); #else if (n <= 32U) { m_bits[0] = (m_bits[0] | (static_cast<QPSetBits>(1U) << (n - 1U))); } else { m_bits[1] = (m_bits[1] | (static_cast<QPSetBits>(1U) << (n - 33U))); } #endif noexcept #if (QF_MAX_ACTIVE <= 32U) m_bits[0] = (m_bits[0] & static_cast<QPSetBits>(~(1U << (n - 1U)))); #else if (n <= 32U) { (m_bits[0] = (m_bits[0] & ~(static_cast<QPSetBits>(1U) << (n - 1U)))); } else { (m_bits[1] = (m_bits[1] & ~(static_cast<QPSetBits>(1U) << (n - 33U)))); } #endif const noexcept #if (QF_MAX_ACTIVE <= 32U) return QF_LOG2(m_bits[0]); #else return (m_bits[1] != 0U) ? (QF_LOG2(m_bits[1]) + 32U) : (QF_LOG2(m_bits[0])); #endif const noexcept dis->m_bits[0] = ~m_bits[0]; #if (QF_MAX_ACTIVE > 32U) dis->m_bits[1] = ~m_bits[1]; #endif const noexcept #if (QF_MAX_ACTIVE <= 32U) return m_bits[0] == static_cast<QPSetBits>(~dis->m_bits[0]); #else return (m_bits[0] == static_cast<QPSetBits>(~dis->m_bits[0])) && (m_bits[1] == static_cast<QPSetBits>(~dis->m_bits[1])); #endif // friends... // friends... // friends... noexcept : m_ptr_dis(static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(ptr))) // friends... noexcept : QAsm(), m_prio(0U), m_pthre(0U) m_state.fun = Q_STATE_CAST(&top); m_temp.fun = initial; #ifndef Q_UNSAFE m_prio_dis = static_cast<std::uint8_t>(~m_prio); m_pthre_dis = static_cast<std::uint8_t>(~m_pthre); #endif override reinterpret_cast<QHsm *>(this)->QHsm::init(e, qsId); override this->init(nullptr, qsId); override reinterpret_cast<QHsm *>(this)->QHsm::dispatch(e, qsId); noexcept override return reinterpret_cast<QHsm *>(this)->QHsm::isIn(state); noexcept return reinterpret_cast<QHsm *>(this)->QHsm::childState(parent); this->start(prioSpec, qSto, qLen, stkSto, stkSize, nullptr); noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); if (m_pthre == 0U) { // preemption-threshold not defined? m_pthre = m_prio; // apply the default } #ifndef Q_UNSAFE Q_REQUIRE_INCRIT(100, (0U < m_prio) && (m_prio <= QF_MAX_ACTIVE) && (registry_[m_prio] == nullptr) && (m_prio <= m_pthre)); std::uint8_t prev_thre = m_pthre; std::uint8_t next_thre = m_pthre; std::uint_fast8_t p; for (p = static_cast<std::uint_fast8_t>(m_prio) - 1U; p > 0U; --p) { if (registry_[p] != nullptr) { prev_thre = registry_[p]->m_pthre; break; } } for (p = static_cast<std::uint_fast8_t>(m_prio) + 1U; p <= QF_MAX_ACTIVE; ++p) { if (registry_[p] != nullptr) { next_thre = registry_[p]->m_pthre; break; } } Q_ASSERT_INCRIT(190, (prev_thre <= m_pthre) && (m_pthre <= next_thre)); m_prio_dis = static_cast<std::uint8_t>(~m_prio); m_pthre_dis = static_cast<std::uint8_t>(~m_pthre); #endif // Q_UNSAFE // register the AO at the QF-prio. 
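// NOTE: the 'prioSpec' parameter of QActive::start()/setPrio() packs both
// the QF-priority (low byte) and the optional preemption-threshold (high
// byte), as unpacked in setPrio() later in this file. For example
// (illustrative sketch; 'ao' and 'qSto' are assumed application objects):
//     ao->start(3U | (5U << 8U),      // QF-prio. 3, preemption-threshold 5
//               qSto, Q_DIM(qSto),    // event queue storage
//               nullptr, 0U);         // no private stack (built-in kernels)
// When the upper byte is zero, the preemption-threshold defaults to the
// AO's own priority, as applied at the top of this function.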
registry_[m_prio] = this; QF_MEM_APP(); QF_CRIT_EXIT(); noexcept std::uint_fast8_t const p = static_cast<std::uint_fast8_t>(m_prio); QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(200, (0U < p) && (p <= QF_MAX_ACTIVE) && (registry_[p] == this)); registry_[p] = nullptr; // free-up the priority level m_state.fun = nullptr; // invalidate the state QF_MEM_APP(); QF_CRIT_EXIT(); noexcept #ifndef Q_SPY Q_UNUSED_PAR(sender); #endif #ifdef Q_UTEST // test? #if (Q_UTEST != 0) // testing QP-stub? if (m_temp.fun == Q_STATE_CAST(0)) { // QActiveDummy? return static_cast<QActiveDummy *>(this)->fakePost(e, margin, sender); } #endif // (Q_UTEST != 0) #endif // def Q_UTEST QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(200, e != nullptr); QEQueueCtr tmp = m_eQueue.m_nFree; // get volatile into temporary #ifndef Q_UNSAFE QEQueueCtr dis = static_cast<QEQueueCtr>(~m_eQueue.m_nFree_dis); Q_INVARIANT_INCRIT(201, e->verify_() && (tmp == dis)); #endif // ndef Q_UNSAFE // test-probe#1 for faking queue overflow QS_TEST_PROBE_DEF(&QActive::post_) QS_TEST_PROBE_ID(1, tmp = 0U; // fake no free events ) // required margin available? bool status; if (margin == QF::NO_MARGIN) { if (tmp > 0U) { // free entries available in the queue? status = true; // can post } else { // no free entries available status = false; // cannot post // The queue overflows, but QF_NO_MARGIN indicates that // the "event delivery guarantee" is required. Q_ERROR_INCRIT(210); // must be able to post the event } } else if (tmp > static_cast<QEQueueCtr>(margin)) { status = true; // can post } else { // the # free entries below the requested margin status = false; // cannot post, but don't assert } // is it a mutable event? if (e->getPoolNum_() != 0U) { QEvt_refCtr_inc_(e); // increment the reference counter } if (status) { // can post the event? --tmp; // one free entry just used up m_eQueue.m_nFree = tmp; // update the original #ifndef Q_UNSAFE m_eQueue.m_nFree_dis = static_cast<QEQueueCtr>(~tmp); #endif // ndef Q_UNSAFE if (m_eQueue.m_nMin > tmp) { m_eQueue.m_nMin = tmp; // update minimum so far } QS_BEGIN_PRE(QS_QF_ACTIVE_POST, m_prio) QS_TIME_PRE(); // timestamp QS_OBJ_PRE(sender); // the sender object QS_SIG_PRE(e->sig); // the signal of the event QS_OBJ_PRE(this); // this active object QS_2U8_PRE(e->getPoolNum_(), e->refCtr_); QS_EQC_PRE(tmp); // # free entries QS_EQC_PRE(m_eQueue.m_nMin); // min # free entries QS_END_PRE() #ifdef Q_UTEST // callback to examine the posted event under the same conditions // as producing the #QS_QF_ACTIVE_POST trace record, which are: // the local filter for this AO ('m_prio') is set if (QS_LOC_CHECK_(m_prio)) { QF_MEM_APP(); QF_CRIT_EXIT(); QS::onTestPost(sender, this, e, status); QF_CRIT_ENTRY(); QF_MEM_SYS(); } #endif // def Q_UTEST if (m_eQueue.m_frontEvt == nullptr) { // is the queue empty? m_eQueue.m_frontEvt = e; // deliver event directly #ifndef Q_UNSAFE Q_INVARIANT_INCRIT(211, m_eQueue.m_frontEvt_dis == static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(nullptr))); m_eQueue.m_frontEvt_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(e)); #endif // ndef Q_UNSAFE #ifdef QXK_HPP_ if (m_state.act == nullptr) { // eXtended thread? 
QXTHREAD_EQUEUE_SIGNAL_(this); // signal eXtended Thread } else { QACTIVE_EQUEUE_SIGNAL_(this); // signal the Active Object } #else QACTIVE_EQUEUE_SIGNAL_(this); // signal the Active Object #endif // def QXK_HPP_ } else { // queue was not empty, insert event into the ring-buffer tmp = m_eQueue.m_head; // get volatile into temporary #ifndef Q_UNSAFE dis = static_cast<QEQueueCtr>(~m_eQueue.m_head_dis); Q_INVARIANT_INCRIT(212, tmp == dis); #endif // ndef Q_UNSAFE m_eQueue.m_ring[tmp] = e; // insert e into buffer if (tmp == 0U) { // need to wrap the head? tmp = m_eQueue.m_end; } --tmp; // advance the head (counter-clockwise) m_eQueue.m_head = tmp; // update the original #ifndef Q_UNSAFE m_eQueue.m_head_dis = static_cast<QEQueueCtr>(~tmp); #endif // ndef Q_UNSAFE } QF_MEM_APP(); QF_CRIT_EXIT(); } else { // event cannot be posted QS_BEGIN_PRE(QS_QF_ACTIVE_POST_ATTEMPT, m_prio) QS_TIME_PRE(); // timestamp QS_OBJ_PRE(sender); // the sender object QS_SIG_PRE(e->sig); // the signal of the event QS_OBJ_PRE(this); // this active object QS_2U8_PRE(e->getPoolNum_(), e->refCtr_); QS_EQC_PRE(tmp); // # free entries QS_EQC_PRE(margin); // margin requested QS_END_PRE() #ifdef Q_UTEST // callback to examine the posted event under the same conditions // as producing the #QS_QF_ACTIVE_POST trace record, which are: // the local filter for this AO ('m_prio') is set if (QS_LOC_CHECK_(m_prio)) { QF_MEM_APP(); QF_CRIT_EXIT(); QS::onTestPost(sender, this, e, status); QF_CRIT_ENTRY(); QF_MEM_SYS(); } #endif // def Q_USTEST QF_MEM_APP(); QF_CRIT_EXIT(); #if (QF_MAX_EPOOL > 0U) QF::gc(e); // recycle the event to avoid a leak #endif // (QF_MAX_EPOOL > 0U) } return status; noexcept #ifdef Q_UTEST // test? #if (Q_UTEST != 0) // testing QP-stub? if (m_temp.fun == Q_STATE_CAST(0)) { // QActiveDummy? static_cast<QActiveDummy *>(this)->QActiveDummy::fakePostLIFO(e); return; } #endif // (Q_UTEST != 0) #endif // def Q_UTEST QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(300, e != nullptr); QEQueueCtr tmp = m_eQueue.m_nFree; // get volatile into temporary #ifndef Q_UNSAFE QEQueueCtr dis = static_cast<QEQueueCtr>(~m_eQueue.m_nFree_dis); Q_INVARIANT_INCRIT(301, e->verify_() && (tmp == dis)); #endif // ndef Q_UNSAFE // test-probe#1 for faking queue overflow QS_TEST_PROBE_DEF(&QActive::postLIFO) QS_TEST_PROBE_ID(1, tmp = 0U; // fake no free events ) // The queue must NOT overflow for the LIFO posting policy. 
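// NOTE: unlike post_(), the LIFO policy takes no 'margin' parameter and
// cannot report failure to the caller, so queue overflow is treated as an
// unrecoverable error. postLIFO() is intended for posting an event back to
// the *front* of the AO's own queue (e.g., in QActive::recall() or in
// "reminder"-style self-posting), hence the hard assertion that the queue
// still has room: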
Q_REQUIRE_INCRIT(310, tmp != 0U); if (e->getPoolNum_() != 0U) { QEvt_refCtr_inc_(e); // increment the reference counter } --tmp; // one free entry just used up m_eQueue.m_nFree = tmp; // update the original #ifndef Q_UNSAFE m_eQueue.m_nFree_dis = static_cast<QEQueueCtr>(~tmp); #endif // ndef Q_UNSAFE if (m_eQueue.m_nMin > tmp) { m_eQueue.m_nMin = tmp; // update minimum so far } QS_BEGIN_PRE(QS_QF_ACTIVE_POST_LIFO, m_prio) QS_TIME_PRE(); // timestamp QS_SIG_PRE(e->sig); // the signal of this event QS_OBJ_PRE(this); // this active object QS_2U8_PRE(e->getPoolNum_(), e->refCtr_); QS_EQC_PRE(tmp); // # free entries QS_EQC_PRE(m_eQueue.m_nMin); // min # free entries QS_END_PRE() #ifdef Q_UTEST // callback to examine the posted event under the same conditions // as producing the #QS_QF_ACTIVE_POST trace record, which are: // the local filter for this AO ('m_prio') is set if (QS_LOC_CHECK_(m_prio)) { QF_MEM_APP(); QF_CRIT_EXIT(); QS::onTestPost(nullptr, this, e, true); QF_CRIT_ENTRY(); QF_MEM_SYS(); } #endif // def Q_UTEST QEvt const * const frontEvt = m_eQueue.m_frontEvt; m_eQueue.m_frontEvt = e; // deliver the event directly to the front #ifndef Q_UNSAFE m_eQueue.m_frontEvt_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(e)); #endif // ndef Q_UNSAFE if (frontEvt != nullptr) { // was the queue NOT empty? tmp = m_eQueue.m_tail; // get volatile into temporary; #ifndef Q_UNSAFE dis = static_cast<QEQueueCtr>(~m_eQueue.m_tail_dis); Q_INVARIANT_INCRIT(311, tmp == dis); #endif // ndef Q_UNSAFE ++tmp; if (tmp == m_eQueue.m_end) { // need to wrap the tail? tmp = 0U; // wrap around } m_eQueue.m_tail = tmp; #ifndef Q_UNSAFE m_eQueue.m_tail_dis = static_cast<QEQueueCtr>(~tmp); #endif // ndef Q_UNSAFE m_eQueue.m_ring[tmp] = frontEvt; } else { // queue was empty QACTIVE_EQUEUE_SIGNAL_(this); // signal the event queue } QF_MEM_APP(); QF_CRIT_EXIT(); noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); // wait for event to arrive directly (depends on QP port) // NOTE: might use assertion-IDs 400-409 QACTIVE_EQUEUE_WAIT_(this); // wait for event to arrive directly // always remove evt from the front QEvt const * const e = m_eQueue.m_frontEvt; QEQueueCtr tmp = m_eQueue.m_nFree; // get volatile into tmp #ifndef Q_UNSAFE Q_INVARIANT_INCRIT(410, e != nullptr); // queue must NOT be empty Q_INVARIANT_INCRIT(411, Q_PTR2UINT_CAST_(e) == static_cast<std::uintptr_t>(~m_eQueue.m_frontEvt_dis)); QEQueueCtr dis = static_cast<QEQueueCtr>(~m_eQueue.m_nFree_dis); Q_INVARIANT_INCRIT(412, tmp == dis); #endif // ndef Q_UNSAFE ++tmp; // one more free event in the queue m_eQueue.m_nFree = tmp; // update the # free #ifndef Q_UNSAFE m_eQueue.m_nFree_dis = static_cast<QEQueueCtr>(~tmp); #endif // ndef Q_UNSAFE if (tmp <= m_eQueue.m_end) { // any events in the ring buffer? 
QS_BEGIN_PRE(QS_QF_ACTIVE_GET, m_prio) QS_TIME_PRE(); // timestamp QS_SIG_PRE(e->sig); // the signal of this event QS_OBJ_PRE(this); // this active object QS_2U8_PRE(e->getPoolNum_(), e->refCtr_); QS_EQC_PRE(tmp); // # free entries QS_END_PRE() // remove event from the tail tmp = m_eQueue.m_tail; // get volatile into temporary #ifndef Q_UNSAFE dis = static_cast<QEQueueCtr>(~m_eQueue.m_tail_dis); Q_INVARIANT_INCRIT(420, tmp == dis); #endif // ndef Q_UNSAFE QEvt const * const frontEvt = m_eQueue.m_ring[tmp]; #ifndef Q_UNSAFE Q_ASSERT_INCRIT(421, frontEvt != nullptr); m_eQueue.m_frontEvt_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(frontEvt)); #endif // ndef Q_UNSAFE m_eQueue.m_frontEvt = frontEvt; // update the original if (tmp == 0U) { // need to wrap the tail? tmp = m_eQueue.m_end; } --tmp; // advance the tail (counter-clockwise) m_eQueue.m_tail = tmp; // update the original #ifndef Q_UNSAFE m_eQueue.m_tail_dis = static_cast<QEQueueCtr>(~tmp); #endif // ndef Q_UNSAFE } else { m_eQueue.m_frontEvt = nullptr; // the queue becomes empty #ifndef Q_UNSAFE m_eQueue.m_frontEvt_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(nullptr)); #endif // ndef Q_UNSAFE // all entries in the queue must be free (+1 for fronEvt) Q_ASSERT_INCRIT(310, tmp == (m_eQueue.m_end + 1U)); QS_BEGIN_PRE(QS_QF_ACTIVE_GET_LAST, m_prio) QS_TIME_PRE(); // timestamp QS_SIG_PRE(e->sig); // the signal of this event QS_OBJ_PRE(this); // this active object QS_2U8_PRE(e->getPoolNum_(), e->refCtr_); QS_END_PRE() } QF_MEM_APP(); QF_CRIT_EXIT(); return e; noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); Q_REQUIRE_INCRIT(400, (prio <= QF_MAX_ACTIVE) && (QActive::registry_[prio] != nullptr)); std::uint_fast16_t const min = static_cast<std::uint_fast16_t>( QActive::registry_[prio]->m_eQueue.getNMin()); QF_CRIT_EXIT(); return min; noexcept subscrList_ = subscrSto; maxPubSignal_ = maxSignal; // initialize the subscriber list for (enum_t sig = 0; sig < maxSignal; ++sig) { subscrSto[sig].m_set.setEmpty(); #ifndef Q_UNSAFE subscrSto[sig].m_set.update_(&subscrSto[sig].m_set_dis); #endif } noexcept #ifndef Q_SPY Q_UNUSED_PAR(sender); Q_UNUSED_PAR(qsId); #endif QSignal const sig = e->sig; QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(200, sig < static_cast<QSignal>(maxPubSignal_)); Q_INVARIANT_INCRIT(202, subscrList_[sig].m_set.verify_(&subscrList_[sig].m_set_dis)); QS_BEGIN_PRE(QS_QF_PUBLISH, qsId) QS_TIME_PRE(); // the timestamp QS_OBJ_PRE(sender); // the sender object QS_SIG_PRE(e->sig); // the signal of the event QS_2U8_PRE(e->getPoolNum_(), e->refCtr_); QS_END_PRE() // is it a mutable event? if (e->getPoolNum_() != 0U) { // NOTE: The reference counter of a mutable event is incremented to // prevent premature recycling of the event while the multicasting // is still in progress. At the end of the function, the garbage // collector step (QF::gc()) decrements the reference counter and // recycles the event if the counter drops to zero. This covers the // case when the event was published without any subscribers. QEvt_refCtr_inc_(e); } // make a local, modifiable copy of the subscriber set QPSet subscrSet = subscrList_[sig].m_set; QF_MEM_APP(); QF_CRIT_EXIT(); if (subscrSet.notEmpty()) { // any subscribers? 
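// NOTE: publish-subscribe requires one-time initialization of the
// subscriber-list table and explicit subscriptions by the interested AOs.
// A minimal sketch (MAX_PUB_SIG and TIMEOUT_SIG are assumed application
// definitions, and Q_NEW() assumes an initialized event pool):
//     static QP::QSubscrList subscrSto[MAX_PUB_SIG];
//     QP::QActive::psInit(subscrSto, Q_DIM(subscrSto)); // once, in main()
//     subscribe(TIMEOUT_SIG);              // in the subscriber AO
//     QP::QActive::publish_(Q_NEW(QP::QEvt, TIMEOUT_SIG),
//                           nullptr, 0U);  // producer (typically via a macro)
// The event is now multicast to the subscribers in order of decreasing
// QF-priority, starting from the highest-priority subscriber: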
// highest-prio subscriber std::uint_fast8_t p = subscrSet.findMax(); QF_CRIT_ENTRY(); QF_MEM_SYS(); QActive *a = registry_[p]; // the AO must be registered with the framework Q_ASSERT_INCRIT(210, a != nullptr); QF_MEM_APP(); QF_CRIT_EXIT(); QF_SCHED_STAT_ QF_SCHED_LOCK_(p); // lock the scheduler up to AO's prio std::uint_fast8_t lbound = QF_MAX_ACTIVE + 1U; do { // loop over all subscribers --lbound; // POST() asserts internally if the queue overflows a->POST(e, sender); subscrSet.remove(p); // remove the handled subscriber if (subscrSet.notEmpty()) { // still more subscribers? p = subscrSet.findMax(); // highest-prio subscriber QF_CRIT_ENTRY(); QF_MEM_SYS(); a = registry_[p]; // the AO must be registered with the framework Q_ASSERT_INCRIT(220, a != nullptr); QF_MEM_APP(); QF_CRIT_EXIT(); } else { p = 0U; // no more subscribers } } while ((p != 0U) && (lbound > 0U)); QF_CRIT_ENTRY(); Q_ENSURE_INCRIT(290, p == 0U); QF_CRIT_EXIT(); QF_SCHED_UNLOCK_(); // unlock the scheduler } // The following garbage collection step decrements the reference counter // and recycles the event if the counter drops to zero. This covers both // cases when the event was published with or without any subscribers. #if (QF_MAX_EPOOL > 0U) QF::gc(e); #endif const noexcept std::uint_fast8_t const p = static_cast<std::uint_fast8_t>(m_prio); QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(300, (Q_USER_SIG <= sig) && (sig < maxPubSignal_) && (0U < p) && (p <= QF_MAX_ACTIVE) && (registry_[p] == this)); Q_INVARIANT_INCRIT(302, subscrList_[sig].m_set.verify_(&subscrList_[sig].m_set_dis)); QS_BEGIN_PRE(QS_QF_ACTIVE_SUBSCRIBE, m_prio) QS_TIME_PRE(); // timestamp QS_SIG_PRE(sig); // the signal of this event QS_OBJ_PRE(this); // this active object QS_END_PRE() // insert the prio. into the subscriber set subscrList_[sig].m_set.insert(p); #ifndef Q_UNSAFE subscrList_[sig].m_set.update_(&subscrList_[sig].m_set_dis); #endif QF_MEM_APP(); QF_CRIT_EXIT(); const noexcept std::uint_fast8_t const p = static_cast<std::uint_fast8_t>(m_prio); QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(400, (Q_USER_SIG <= sig) && (sig < maxPubSignal_) && (0U < p) && (p <= QF_MAX_ACTIVE) && (registry_[p] == this)); Q_INVARIANT_INCRIT(402, subscrList_[sig].m_set.verify_(&subscrList_[sig].m_set_dis)); QS_BEGIN_PRE(QS_QF_ACTIVE_UNSUBSCRIBE, m_prio) QS_TIME_PRE(); // timestamp QS_SIG_PRE(sig); // the signal of this event QS_OBJ_PRE(this); // this active object QS_END_PRE() // remove the prio. 
from the subscriber set subscrList_[sig].m_set.remove(p); #ifndef Q_UNSAFE subscrList_[sig].m_set.update_(&subscrList_[sig].m_set_dis); #endif QF_MEM_APP(); QF_CRIT_EXIT(); const noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); std::uint_fast8_t const p = static_cast<std::uint_fast8_t>(m_prio); Q_REQUIRE_INCRIT(500, (0U < p) && (p <= QF_MAX_ACTIVE) && (registry_[p] == this)); enum_t const maxPubSig = maxPubSignal_; QF_MEM_APP(); QF_CRIT_EXIT(); for (enum_t sig = Q_USER_SIG; sig < maxPubSig; ++sig) { QF_CRIT_ENTRY(); QF_MEM_SYS(); if (subscrList_[sig].m_set.hasElement(p)) { subscrList_[sig].m_set.remove(p); #ifndef Q_UNSAFE subscrList_[sig].m_set.update_(&subscrList_[sig].m_set_dis); #endif QS_BEGIN_PRE(QS_QF_ACTIVE_UNSUBSCRIBE, m_prio) QS_TIME_PRE(); // timestamp QS_SIG_PRE(sig); // the signal of this event QS_OBJ_PRE(this); // this active object QS_END_PRE() } QF_MEM_APP(); QF_CRIT_EXIT(); QF_CRIT_EXIT_NOP(); // prevent merging critical sections } const noexcept bool const status = eq->post(e, 0U, m_prio); QS_CRIT_STAT QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QF_ACTIVE_DEFER, m_prio) QS_TIME_PRE(); // time stamp QS_OBJ_PRE(this); // this active object QS_OBJ_PRE(eq); // the deferred queue QS_SIG_PRE(e->sig); // the signal of the event QS_2U8_PRE(e->getPoolNum_(), e->refCtr_); QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); return status; noexcept QEvt const * const e = eq->get(m_prio); // get evt from deferred queue QF_CRIT_STAT bool recalled; if (e != nullptr) { // event available? postLIFO(e); // post it to the _front_ of the AO's queue QF_CRIT_ENTRY(); QF_MEM_SYS(); if (e->getPoolNum_() != 0U) { // is it a mutable event? // after posting to the AO's queue the event must be referenced // at least twice: once in the deferred event queue (eq->get() // did NOT decrement the reference counter) and once in the // AO's event queue. Q_ASSERT_INCRIT(210, e->refCtr_ >= 2U); // we need to decrement the reference counter once, to account // for removing the event from the deferred event queue. 
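// NOTE: defer()/recall() support the "Deferred Event" pattern. A minimal
// sketch (m_requestQueue is an assumed QP::QEQueue member of this AO and
// NEW_REQUEST_SIG an assumed application signal):
//     case NEW_REQUEST_SIG: {            // busy, cannot handle it now
//         defer(&m_requestQueue, e);     // stash the event
//         break;
//     }
//     // ...later, in a state that can handle the request:
//     case Q_ENTRY_SIG: {
//         recall(&m_requestQueue);       // re-posts LIFO to own queue
//         break;
//     }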
QEvt_refCtr_dec_(e); // decrement the reference counter } QS_BEGIN_PRE(QS_QF_ACTIVE_RECALL, m_prio) QS_TIME_PRE(); // time stamp QS_OBJ_PRE(this); // this active object QS_OBJ_PRE(eq); // the deferred queue QS_SIG_PRE(e->sig); // the signal of the event QS_2U8_PRE(e->getPoolNum_(), e->refCtr_); QS_END_PRE() QF_MEM_APP(); QF_CRIT_EXIT(); recalled = true; } else { QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QF_ACTIVE_RECALL_ATTEMPT, m_prio) QS_TIME_PRE(); // time stamp QS_OBJ_PRE(this); // this active object QS_OBJ_PRE(eq); // the deferred queue QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); recalled = false; } return recalled; const noexcept std::uint_fast16_t n = 0U; while (n < num) { QEvt const * const e = eq->get(m_prio); if (e != nullptr) { ++n; // count one more flushed event #if (QF_MAX_EPOOL > 0U) QF::gc(e); // garbage collect #endif } else { break; } } return n; const noexcept return static_cast<std::uint_fast8_t>(m_prio); noexcept m_prio = static_cast<std::uint8_t>(prio & 0xFFU); m_pthre = static_cast<std::uint8_t>(prio >> 8U); const noexcept return static_cast<std::uint_fast8_t>(m_pthre); const noexcept return m_eQueue; const noexcept return m_osObject; const noexcept return m_thread; m_thread = thr; noexcept noexcept noexcept : QActive(initial) m_state.obj = reinterpret_cast<QMsm *>(this)->topQMState(); m_temp.fun = initial; override reinterpret_cast<QMsm *>(this)->QMsm::init(e, qsId); override this->init(nullptr, qsId); override reinterpret_cast<QMsm *>(this)->QMsm::dispatch(e, qsId); noexcept override return reinterpret_cast<QMsm *>(this)->QMsm::isIn(state); noexcept override return reinterpret_cast<QMsm *>(this)->QMsm::getStateHandler(); const noexcept return reinterpret_cast<QMsm const *>(this) ->QMsm::childStateObj(parent); noexcept : QEvt(sig), m_next(nullptr), #ifndef Q_UNSAFE m_next_dis(static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(nullptr))), #endif m_act(act), m_ctr(0U), #ifndef Q_UNSAFE m_ctr_dis(static_cast<QTimeEvtCtr>(~static_cast<QTimeEvtCtr>(0U))), #endif m_interval(0U), m_tickRate(0U), m_flags(0U) QF_CRIT_STAT QF_CRIT_ENTRY(); Q_REQUIRE_INCRIT(300, (sig != 0U) && (tickRate < QF_MAX_TICK_RATE)); QF_CRIT_EXIT(); // adjust the settings from the QEvt(sig) ctor evtTag_ = 0x0FU; refCtr_ = 0U; noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); // dynamic range checks #if (QF_TIMEEVT_CTR_SIZE == 1U) Q_REQUIRE_INCRIT(400, (nTicks < 0xFFU) && (interval < 0xFFU)); #elif (QF_TIMEEVT_CTR_SIZE == 2U) Q_REQUIRE_INCRIT(400, (nTicks < 0xFFFFU) && (interval < 0xFFFFU)); #endif Q_REQUIRE_INCRIT(401, verify_() && (nTicks != 0U)); QTimeEvtCtr const ctr = m_ctr; std::uint8_t const tickRate = m_tickRate; #ifdef Q_SPY std::uint_fast8_t const qsId = static_cast<QActive const *>(m_act)->m_prio; #endif // def Q_SPY Q_REQUIRE_INCRIT(410, (ctr == 0U) && (m_act != nullptr) && (tickRate < QF_MAX_TICK_RATE)); #ifndef Q_UNSAFE Q_INVARIANT_INCRIT(411, ctr == static_cast<QTimeEvtCtr>(~m_ctr_dis)); #else Q_UNUSED_PAR(ctr); #endif // ndef Q_UNSAFE m_ctr = static_cast<QTimeEvtCtr>(nTicks); m_interval = static_cast<QTimeEvtCtr>(interval); #ifndef Q_UNSAFE m_ctr_dis = static_cast<QTimeEvtCtr>(~nTicks); #endif // ndef Q_UNSAFE // is the time event unlinked? // NOTE: For the duration of a single clock tick of the specified tick // rate a time event can be disarmed and yet still linked into the list // because un-linking is performed exclusively in the QF_tickX() function. 
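// NOTE: a time event is typically a data member of its target AO, armed as
// a one-shot or periodic timeout. A minimal sketch (TIMEOUT_SIG and
// BSP::TICKS_PER_SEC are assumed application definitions):
//     QP::QTimeEvt m_timeEvt;                  // data member of the AO
//     ...
//     m_timeEvt(this, TIMEOUT_SIG, 0U)         // ctor: target AO, signal, rate
//     ...
//     m_timeEvt.armX(BSP::TICKS_PER_SEC / 2U,  // first timeout in 1/2 s
//                    BSP::TICKS_PER_SEC / 2U); // then periodically every 1/2 s
//     m_timeEvt.disarm();                      // optional explicit disarm
// Link this time event into the list for its tick rate, unless it is still
// linked from a previous arming (see the NOTE above):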
if ((m_flags & QTE_FLAG_IS_LINKED) == 0U) { m_flags |= QTE_FLAG_IS_LINKED; // mark as linked // The time event is initially inserted into the separate // "freshly armed" list based on timeEvtHead_[tickRate].act. // Only later, inside QTimeEvt::tick(), the "freshly armed" // list is appended to the main list of armed time events based on // timeEvtHead_[tickRate].next. Again, this is to keep any // changes to the main list exclusively inside QTimeEvt::tick(). #ifndef Q_UNSAFE Q_INVARIANT_INCRIT(420, Q_PTR2UINT_CAST_(m_next) == static_cast<std::uintptr_t>(~m_next_dis)); Q_INVARIANT_INCRIT(411, Q_PTR2UINT_CAST_(timeEvtHead_[tickRate].m_act) == static_cast<std::uintptr_t>(~timeEvtHead_dis_[tickRate].m_ptr_dis)); #endif m_next = timeEvtHead_[tickRate].toTimeEvt(); timeEvtHead_[tickRate].m_act = this; #ifndef Q_UNSAFE m_next_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(m_next)); timeEvtHead_dis_[tickRate].m_ptr_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(this)); #endif // ndef Q_UNSAFE } QS_BEGIN_PRE(QS_QF_TIMEEVT_ARM, qsId) QS_TIME_PRE(); // timestamp QS_OBJ_PRE(this); // this time event object QS_OBJ_PRE(m_act); // the active object QS_TEC_PRE(nTicks); // the # ticks QS_TEC_PRE(interval); // the interval QS_U8_PRE(tickRate); // tick rate QS_END_PRE() QF_MEM_APP(); QF_CRIT_EXIT(); noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(500, verify_()); QTimeEvtCtr const ctr = m_ctr; #ifndef Q_UNSAFE Q_INVARIANT_INCRIT(501, ctr == static_cast<QTimeEvtCtr>(~m_ctr_dis)); #endif // ndef Q_UNSAFE #ifdef Q_SPY std::uint_fast8_t const qsId = static_cast<QActive *>(m_act)->m_prio; #endif // was the time event actually armed? bool wasArmed; if (ctr != 0U) { wasArmed = true; m_flags |= QTE_FLAG_WAS_DISARMED; m_ctr = 0U; // schedule removal from the list #ifndef Q_UNSAFE m_ctr_dis = static_cast<QTimeEvtCtr>(~static_cast<QTimeEvtCtr>(0U)); #endif // ndef Q_UNSAFE QS_BEGIN_PRE(QS_QF_TIMEEVT_DISARM, qsId) QS_TIME_PRE(); // timestamp QS_OBJ_PRE(this); // this time event object QS_OBJ_PRE(m_act); // the target AO QS_TEC_PRE(ctr); // the # ticks QS_TEC_PRE(m_interval); // the interval QS_U8_PRE(m_tickRate); // tick rate QS_END_PRE() } else { // the time event was already disarmed automatically wasArmed = false; m_flags &= static_cast<std::uint8_t>(~QTE_FLAG_WAS_DISARMED); QS_BEGIN_PRE(QS_QF_TIMEEVT_DISARM_ATTEMPT, qsId) QS_TIME_PRE(); // timestamp QS_OBJ_PRE(this); // this time event object QS_OBJ_PRE(m_act); // the target AO QS_U8_PRE(m_tickRate); // tick rate QS_END_PRE() } QF_MEM_APP(); QF_CRIT_EXIT(); return wasArmed; noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); // dynamic range checks #if (QF_TIMEEVT_CTR_SIZE == 1U) Q_REQUIRE_INCRIT(600, nTicks < 0xFFU); #elif (QF_TIMEEVT_CTR_SIZE == 2U) Q_REQUIRE_INCRIT(600, nTicks < 0xFFFFU); #endif Q_REQUIRE_INCRIT(601, verify_() && (nTicks != 0U)); QTimeEvtCtr const ctr = m_ctr; std::uint8_t const tickRate = m_tickRate; #ifdef Q_SPY std::uint_fast8_t const qsId = static_cast<QActive *>(m_act)->m_prio; #endif Q_REQUIRE_INCRIT(610, (m_act != nullptr) && (tickRate < QF_MAX_TICK_RATE)); #ifndef Q_UNSAFE Q_INVARIANT_INCRIT(602, ctr == static_cast<QTimeEvtCtr>(~m_ctr_dis)); #endif // ndef Q_UNSAFE m_ctr = static_cast<QTimeEvtCtr>(nTicks); #ifndef Q_UNSAFE m_ctr_dis = static_cast<QTimeEvtCtr>(~nTicks); #endif // ndef Q_UNSAFE // is the time evt not running? 
bool wasArmed; if (ctr == 0U) { wasArmed = false; // NOTE: For a duration of a single clock tick of the specified // tick rate a time event can be disarmed and yet still linked into // the list, because unlinking is performed exclusively in the // QTimeEvt::tick() function. // was the time event unlinked? if ((m_flags & QTE_FLAG_IS_LINKED) == 0U) { m_flags |= QTE_FLAG_IS_LINKED; // mark as linked // The time event is initially inserted into the separate // "freshly armed" list based on timeEvtHead_[tickRate].act. // Only later, inside QTimeEvt::tick(), the "freshly armed" // list is appended to the main list of armed time events based on // timeEvtHead_[tickRate].next. Again, this is to keep any // changes to the main list exclusively inside QTimeEvt::tick(). #ifndef Q_UNSAFE Q_INVARIANT_INCRIT(620, Q_PTR2UINT_CAST_(m_next) == static_cast<std::uintptr_t>(~m_next_dis)); Q_INVARIANT_INCRIT(611, Q_PTR2UINT_CAST_(timeEvtHead_[tickRate].m_act) == static_cast<std::uintptr_t>(~timeEvtHead_dis_[tickRate].m_ptr_dis)); #endif m_next = timeEvtHead_[tickRate].toTimeEvt(); timeEvtHead_[tickRate].m_act = this; #ifndef Q_UNSAFE m_next_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(m_next)); timeEvtHead_dis_[tickRate].m_ptr_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(this)); #endif // ndef Q_UNSAFE } } else { // the time event was armed wasArmed = true; } QS_BEGIN_PRE(QS_QF_TIMEEVT_REARM, qsId) QS_TIME_PRE(); // timestamp QS_OBJ_PRE(this); // this time event object QS_OBJ_PRE(m_act); // the target AO QS_TEC_PRE(nTicks); // the # ticks QS_TEC_PRE(m_interval); // the interval QS_2U8_PRE(tickRate, (wasArmed ? 1U : 0U)); QS_END_PRE() QF_MEM_APP(); QF_CRIT_EXIT(); return wasArmed; noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); bool const wasDisarmed = (m_flags & QTE_FLAG_WAS_DISARMED) != 0U; m_flags |= QTE_FLAG_WAS_DISARMED; QF_MEM_APP(); QF_CRIT_EXIT(); return wasDisarmed; const noexcept return m_act; const noexcept return m_ctr; const noexcept return m_interval; const noexcept return m_tickRate; noexcept #ifndef Q_SPY Q_UNUSED_PAR(sender); #endif QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(800, tickRate < Q_DIM(timeEvtHead_)); QTimeEvt *prev = &timeEvtHead_[tickRate]; QS_BEGIN_PRE(QS_QF_TICK, 0U) prev->m_ctr = (prev->m_ctr + 1U); QS_TEC_PRE(prev->m_ctr); // tick ctr QS_U8_PRE(tickRate); // tick rate QS_END_PRE() // scan the linked-list of time events at this rate... std::uint_fast8_t lbound = 2U*QF_MAX_ACTIVE; // fixed upper loop bound for (; lbound > 0U; --lbound) { Q_ASSERT_INCRIT(810, prev != nullptr); // sanity check QTimeEvt *te = prev->m_next; // advance down the time evt. list #ifndef Q_UNSAFE Q_INVARIANT_INCRIT(811, Q_PTR2UINT_CAST_(te) == static_cast<std::uintptr_t>(~prev->m_next_dis)); #endif // ndef Q_UNSAFE if (te == nullptr) { // end of the list? // any new time events armed since the last QTimeEvt_tick_()? 
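// NOTE: QTimeEvt::tick() is expected to be called once per clock tick for
// the given rate, either from the tick ISR or from a QTicker active object.
// A minimal sketch (SysTick_Handler is an assumed BSP interrupt handler;
// real ports typically wrap this call in kernel-specific ISR entry/exit):
//     void SysTick_Handler() {
//         QP::QTimeEvt::tick(0U, nullptr); // process tick rate 0
//     }
// At the end of the main list, splice in any time events freshly armed
// since the previous tick (see armX()/rearm() above):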
if (timeEvtHead_[tickRate].m_act != nullptr) { #ifndef Q_UNSAFE Q_INVARIANT_INCRIT(812, Q_PTR2UINT_CAST_(timeEvtHead_[tickRate].m_act) == static_cast<std::uintptr_t>( ~timeEvtHead_dis_[tickRate].m_ptr_dis)); #endif // ndef Q_UNSAFE prev->m_next = timeEvtHead_[tickRate].toTimeEvt(); timeEvtHead_[tickRate].m_act = nullptr; #ifndef Q_UNSAFE prev->m_next_dis = static_cast<std::uintptr_t>( ~Q_PTR2UINT_CAST_(prev->m_next)); timeEvtHead_dis_[tickRate].m_ptr_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(nullptr)); #endif // ndef Q_UNSAFE te = prev->m_next; // switch to the new list } else { // all currently armed time events are processed break; // terminate the for-loop } } // the time event 'te' must be valid Q_ASSERT_INCRIT(820, te != nullptr); Q_INVARIANT_INCRIT(821, te->verify_()); QTimeEvtCtr ctr = te->m_ctr; #ifndef Q_UNSAFE Q_INVARIANT_INCRIT(822, ctr == static_cast<QTimeEvtCtr>(~te->m_ctr_dis)); #endif // ndef Q_UNSAFE if (ctr == 0U) { // time event scheduled for removal? prev->m_next = te->m_next; #ifndef Q_UNSAFE prev->m_next_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(te->m_next)); #endif // ndef Q_UNSAFE // mark time event 'te' as NOT linked te->m_flags &= static_cast<std::uint8_t>(~QTE_FLAG_IS_LINKED); // do NOT advance the prev pointer QF_MEM_APP(); QF_CRIT_EXIT(); // exit crit. section to reduce latency // NOTE: prevent merging critical sections // In some QF ports the critical section exit takes effect only // on the next machine instruction. If the next instruction is // another entry to a critical section, the critical section // might not be really exited, but rather the two adjacent // critical sections would be MERGED. The QF_CRIT_EXIT_NOP() // macro contains minimal code required to prevent such merging // of critical sections in QF ports, in which it can occur. QF_CRIT_EXIT_NOP(); } else if (ctr == 1U) { // is time evt about to expire? QActive * const act = te->toActive(); if (te->m_interval != 0U) { // periodic time evt? te->m_ctr = te->m_interval; // rearm the time event #ifndef Q_UNSAFE te->m_ctr_dis = static_cast<QTimeEvtCtr>(~te->m_interval); #endif // ndef Q_UNSAFE prev = te; // advance to this time event } else { // one-shot time event: automatically disarm te->m_ctr = 0U; prev->m_next = te->m_next; #ifndef Q_UNSAFE te->m_ctr_dis = static_cast<QTimeEvtCtr>(~static_cast<QTimeEvtCtr>(0U)); prev->m_next_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(te->m_next)); #endif // ndef Q_UNSAFE // mark time event 'te' as NOT linked te->m_flags &= static_cast<std::uint8_t>(~QTE_FLAG_IS_LINKED); // do NOT advance the prev pointer QS_BEGIN_PRE(QS_QF_TIMEEVT_AUTO_DISARM, act->m_prio) QS_OBJ_PRE(te); // this time event object QS_OBJ_PRE(act); // the target AO QS_U8_PRE(tickRate); // tick rate QS_END_PRE() } QS_BEGIN_PRE(QS_QF_TIMEEVT_POST, act->m_prio) QS_TIME_PRE(); // timestamp QS_OBJ_PRE(te); // the time event object QS_SIG_PRE(te->sig); // signal of this time event QS_OBJ_PRE(act); // the target AO QS_U8_PRE(tickRate); // tick rate QS_END_PRE() #ifdef QXK_HPP_ if (te->sig < Q_USER_SIG) { QXThread::timeout_(act); QF_MEM_APP(); QF_CRIT_EXIT(); } else { QF_MEM_APP(); QF_CRIT_EXIT(); // exit crit. section before posting // act->POST() asserts if the queue overflows act->POST(te, sender); } #else QF_MEM_APP(); QF_CRIT_EXIT(); // exit crit. 
section before posting // act->POST() asserts if the queue overflows act->POST(te, sender); #endif } else { // time event keeps timing out --ctr; // decrement the tick counter te->m_ctr = ctr; // update the original #ifndef Q_UNSAFE te->m_ctr_dis = static_cast<QTimeEvtCtr>(~ctr); #endif // ndef Q_UNSAFE prev = te; // advance to this time event QF_MEM_APP(); QF_CRIT_EXIT(); // exit crit. section to reduce latency // prevent merging critical sections, see NOTE above QF_CRIT_EXIT_NOP(); } QF_CRIT_ENTRY(); // re-enter crit. section to continue the loop QF_MEM_SYS(); } Q_ENSURE_INCRIT(890, lbound > 0U); QF_MEM_APP(); QF_CRIT_EXIT(); noexcept noexcept // NOTE: this function must be called *inside* critical section Q_REQUIRE_INCRIT(900, tickRate < QF_MAX_TICK_RATE); bool inactive; QF_MEM_SYS(); if (timeEvtHead_[tickRate].m_next != nullptr) { inactive = false; } else if (timeEvtHead_[tickRate].m_act != nullptr) { inactive = false; } else { inactive = true; } QF_MEM_APP(); return inactive; noexcept return static_cast<QActive *>(m_act); noexcept return static_cast<QTimeEvt *>(m_act); noexcept : QEvt(0U), m_next(nullptr), #ifndef Q_UNSAFE m_next_dis(static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(nullptr))), #endif m_act(nullptr), m_ctr(0U), #ifndef Q_UNSAFE m_ctr_dis(static_cast<QTimeEvtCtr>(~static_cast<QTimeEvtCtr>(0U))), #endif m_interval(0U), m_tickRate(0U), m_flags(0U) = delete = delete noexcept : QActive(nullptr) // reuse m_head for tick-rate m_eQueue.m_head = static_cast<QEQueueCtr>(tickRate); #ifndef Q_UNSAFE m_eQueue.m_head_dis = static_cast<QEQueueCtr>(~tickRate); #endif // ndef Q_UNSAFE override Q_UNUSED_PAR(e); Q_UNUSED_PAR(qsId); QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); m_eQueue.m_tail = 0U; #ifndef Q_UNSAFE m_eQueue.m_tail_dis = static_cast<QEQueueCtr>(~0U); #endif // ndef Q_UNSAFE QF_MEM_APP(); QF_CRIT_EXIT(); override this->init(nullptr, qsId); override Q_UNUSED_PAR(e); Q_UNUSED_PAR(qsId); QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); // get volatile into temporaries QEQueueCtr nTicks = m_eQueue.m_tail; QEQueueCtr const tickRate = m_eQueue.m_head; #ifndef Q_UNSAFE Q_REQUIRE_INCRIT(700, nTicks > 0U); Q_INVARIANT_INCRIT(701, nTicks == static_cast<QEQueueCtr>(~m_eQueue.m_tail_dis)); Q_INVARIANT_INCRIT(702, tickRate == static_cast<QEQueueCtr>(~m_eQueue.m_head_dis)); #endif // ndef Q_UNSAFE m_eQueue.m_tail = 0U; // clear # ticks #ifndef Q_UNSAFE m_eQueue.m_tail_dis = static_cast<QEQueueCtr>(~0U); #endif // ndef Q_UNSAFE QF_MEM_APP(); QF_CRIT_EXIT(); for (; nTicks > 0U; --nTicks) { QTimeEvt::tick(static_cast<std::uint_fast8_t>(tickRate), this); } noexcept #ifndef Q_SPY Q_UNUSED_PAR(sender); #endif static QEvt const tickEvt(0U); // immutable event QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); QEQueueCtr nTicks = m_eQueue.m_tail; // get volatile into temporary if (m_eQueue.m_frontEvt == nullptr) { #ifndef Q_UNSAFE Q_REQUIRE_INCRIT(800, nTicks == 0U); Q_REQUIRE_INCRIT(801, m_eQueue.m_nFree == 1U); Q_INVARIANT_INCRIT(802, m_eQueue.m_frontEvt_dis == static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(nullptr))); Q_INVARIANT_INCRIT(803, 1U == static_cast<QEQueueCtr>(~m_eQueue.m_nFree_dis)); Q_INVARIANT_INCRIT(804, 0U == static_cast<QEQueueCtr>(~m_eQueue.m_tail_dis)); #endif // ndef Q_UNSAFE m_eQueue.m_frontEvt = &tickEvt; // deliver event directly m_eQueue.m_nFree = 0U; #ifndef Q_UNSAFE m_eQueue.m_frontEvt_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(&tickEvt)); m_eQueue.m_nFree_dis = static_cast<QEQueueCtr>(~0U); #endif // ndef Q_UNSAFE QACTIVE_EQUEUE_SIGNAL_(this); // signal the event queue } 
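// [Illustrative example, not part of the generated QP/C++ code]
// The QTicker active object defined above lets an ISR merely "trigger" tick
// processing, while the actual QTimeEvt::tick() scan runs later in the ticker's
// own priority context. A hedged sketch of the intended use (the instance name
// and the ISR placement are assumptions of this example):

static QP::QTicker l_ticker0(0U); // ticker AO servicing tick rate 0

// in the system clock-tick interrupt (or a periodic kernel callback):
//     l_ticker0.trig_(nullptr);  // just count one tick; cheap and ISR-safe
//
// l_ticker0 must also be started like any other active object, at a priority
// chosen to rank tick processing relative to the application AOs.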
else { #ifndef Q_UNSAFE Q_REQUIRE_INCRIT(810, (nTicks > 0U) && (nTicks < 0xFFU)); Q_REQUIRE_INCRIT(811, m_eQueue.m_nFree == 0U); Q_INVARIANT_INCRIT(812, m_eQueue.m_frontEvt_dis == static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(&tickEvt))); Q_INVARIANT_INCRIT(813, 0U == static_cast<QEQueueCtr>(~m_eQueue.m_nFree_dis)); Q_INVARIANT_INCRIT(814, nTicks == static_cast<QEQueueCtr>(~m_eQueue.m_tail_dis)); #endif // ndef Q_UNSAFE } ++nTicks; // account for one more tick event m_eQueue.m_tail = nTicks; // update the original #ifndef Q_UNSAFE m_eQueue.m_tail_dis = static_cast<QEQueueCtr>(~nTicks); #endif // ndef Q_UNSAFE QS_BEGIN_PRE(QS_QF_ACTIVE_POST, m_prio) QS_TIME_PRE(); // timestamp QS_OBJ_PRE(sender); // the sender object QS_SIG_PRE(0U); // the signal of the event QS_OBJ_PRE(this); // this active object QS_2U8_PRE(0U, 0U); // poolNum & refCtr QS_EQC_PRE(0U); // # free entries QS_EQC_PRE(0U); // min # free entries QS_END_PRE() QF_MEM_APP(); QF_CRIT_EXIT(); // friends... noexcept : m_frontEvt(nullptr), m_ring(nullptr), m_end(0U), m_head(0U), m_tail(0U), m_nFree(0U), #ifndef Q_UNSAFE m_frontEvt_dis(static_cast<std::uintptr_t>(~0U)), m_head_dis(static_cast<QEQueueCtr>(~0U)), m_tail_dis(static_cast<QEQueueCtr>(~0U)), m_nFree_dis(static_cast<QEQueueCtr>(~0U)), #endif m_nMin(0U) noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); #if (QF_EQUEUE_CTR_SIZE == 1U) Q_REQUIRE_INCRIT(100, qLen < 0xFFU); #endif m_frontEvt = nullptr; // no events in the queue m_ring = &qSto[0]; m_end = static_cast<QEQueueCtr>(qLen); if (qLen > 0U) { m_head = 0U; m_tail = 0U; } m_nFree = static_cast<QEQueueCtr>(qLen + 1U); //+1 for frontEvt m_nMin = m_nFree; #ifndef Q_UNSAFE m_frontEvt_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(m_frontEvt)); m_head_dis = static_cast<QEQueueCtr>(~m_head); m_tail_dis = static_cast<QEQueueCtr>(~m_tail); m_nFree_dis = static_cast<QEQueueCtr>(~m_nFree); #endif QF_MEM_APP(); QF_CRIT_EXIT(); noexcept #ifndef Q_SPY Q_UNUSED_PAR(qsId); #endif QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(200, e != nullptr); Q_INVARIANT_INCRIT(201, e->verify_()); QEQueueCtr tmp = m_nFree; // get volatile into temporary #ifndef Q_UNSAFE QEQueueCtr dis = static_cast<QEQueueCtr>(~m_nFree_dis); Q_INVARIANT_INCRIT(201, tmp == dis); #endif // ndef Q_UNSAFE // test-probe#1 for faking queue overflow QS_TEST_PROBE_DEF(&QEQueue::post) QS_TEST_PROBE_ID(1, tmp = 0U; // fake no free events ) // required margin available? bool status; if (((margin == QF::NO_MARGIN) && (tmp > 0U)) || (tmp > static_cast<QEQueueCtr>(margin))) { // is it a mutable event? if (e->getPoolNum_() != 0U) { QEvt_refCtr_inc_(e); // increment the reference counter } --tmp; // one free entry just used up m_nFree = tmp; // update the original #ifndef Q_UNSAFE m_nFree_dis = static_cast<QEQueueCtr>(~tmp); #endif // ndef Q_UNSAFE if (m_nMin > tmp) { m_nMin = tmp; // update minimum so far } QS_BEGIN_PRE(QS_QF_EQUEUE_POST, qsId) QS_TIME_PRE(); // timestamp QS_SIG_PRE(e->sig); // the signal of the event QS_OBJ_PRE(this); // this queue object QS_2U8_PRE(e->getPoolNum_(), e->refCtr_); QS_EQC_PRE(tmp); // # free entries QS_EQC_PRE(m_nMin); // min # free entries QS_END_PRE() if (m_frontEvt == nullptr) { // is the queue empty? 
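// [Illustrative example, not part of the generated QP/C++ code]
// QEQueue is also usable directly as a "raw" thread-safe event queue. The
// margin argument of post() selects the delivery guarantee: QF::NO_MARGIN
// demands success (an assertion fires on overflow), while a numeric margin
// lets the event be dropped when too few free entries would remain. A hedged
// sketch (storage size, the events 'e' and 'u', and qsId values are
// assumptions of this example):

static QP::QEvt const *l_rawQSto[8]; // ring-buffer storage provided by the app
static QP::QEQueue l_rawQueue;       // the raw queue instance

// once, during initialization:
//     l_rawQueue.init(l_rawQSto, Q_DIM(l_rawQSto));
//
// posting (the qsId parameter is used only for QS software tracing):
//     bool const ok = l_rawQueue.post(e, 4U, 0U);       // may be dropped
//     (void)l_rawQueue.post(e, QP::QF::NO_MARGIN, 0U);  // must succeed
//     l_rawQueue.postLIFO(u, 0U);  // urgent event goes to the front of the queue
//
// the consumer retrieves events with l_rawQueue.get(0U), which returns
// nullptr when the queue is empty.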
m_frontEvt = e; // deliver event directly #ifndef Q_UNSAFE Q_INVARIANT_INCRIT(211, m_frontEvt_dis == static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(nullptr))); m_frontEvt_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(e)); #endif // ndef Q_UNSAFE } else { // queue was not empty, insert event into the ring-buffer tmp = m_head; // get volatile into temporary #ifndef Q_UNSAFE dis = static_cast<QEQueueCtr>(~m_head_dis); Q_INVARIANT_INCRIT(212, tmp == dis); #endif // ndef Q_UNSAFE m_ring[tmp] = e; // insert e into buffer if (tmp == 0U) { // need to wrap the head? tmp = m_end; } --tmp; // advance head (counter-clockwise) m_head = tmp; // update the original #ifndef Q_UNSAFE m_head_dis = static_cast<QEQueueCtr>(~tmp); #endif // ndef Q_UNSAFE } status = true; // event posted successfully } else { // event cannot be posted // dropping events must be acceptable Q_ASSERT_INCRIT(210, margin != QF::NO_MARGIN); QS_BEGIN_PRE(QS_QF_EQUEUE_POST_ATTEMPT, qsId) QS_TIME_PRE(); // timestamp QS_SIG_PRE(e->sig); // the signal of this event QS_OBJ_PRE(this); // this queue object QS_2U8_PRE(e->getPoolNum_(), e->refCtr_); QS_EQC_PRE(tmp); // # free entries QS_EQC_PRE(margin); // margin requested QS_END_PRE() status = false; // event not posted } QF_MEM_APP(); QF_CRIT_EXIT(); return status; noexcept #ifndef Q_SPY Q_UNUSED_PAR(qsId); #endif QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(300, e != nullptr); Q_INVARIANT_INCRIT(301, e->verify_()); QEQueueCtr tmp = m_nFree; // get volatile into temporary #ifndef Q_UNSAFE QEQueueCtr dis = static_cast<QEQueueCtr>(~m_nFree_dis); Q_INVARIANT_INCRIT(301, tmp == dis); #endif // ndef Q_UNSAFE // test-probe#1 for faking queue overflow QS_TEST_PROBE_DEF(&QEQueue::postLIFO) QS_TEST_PROBE_ID(1, tmp = 0U; // fake no free events ) // must be able to LIFO-post the event Q_REQUIRE_INCRIT(310, tmp != 0U); if (e->getPoolNum_() != 0U) { // is it a mutable event? QEvt_refCtr_inc_(e); // increment the reference counter } --tmp; // one free entry just used up m_nFree = tmp; // update the original #ifndef Q_UNSAFE m_nFree_dis = static_cast<QEQueueCtr>(~tmp); #endif // ndef Q_UNSAFE if (m_nMin > tmp) { m_nMin = tmp; // update minimum so far } QS_BEGIN_PRE(QS_QF_EQUEUE_POST_LIFO, qsId) QS_TIME_PRE(); // timestamp QS_SIG_PRE(e->sig); // the signal of this event QS_OBJ_PRE(this); // this queue object QS_2U8_PRE(e->getPoolNum_(), e->refCtr_); QS_EQC_PRE(tmp); // # free entries QS_EQC_PRE(m_nMin); // min # free entries QS_END_PRE() QEvt const * const frontEvt = m_frontEvt; // read into temporary m_frontEvt = e; // deliver the event directly to the front #ifndef Q_UNSAFE m_frontEvt_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(e)); #endif // ndef Q_UNSAFE if (frontEvt != nullptr) { // was the queue NOT empty? tmp = m_tail; // get volatile into temporary; #ifndef Q_UNSAFE dis = static_cast<QEQueueCtr>(~m_tail_dis); Q_INVARIANT_INCRIT(311, tmp == dis); #endif // ndef Q_UNSAFE ++tmp; if (tmp == m_end) { // need to wrap the tail? tmp = 0U; // wrap around } m_tail = tmp; #ifndef Q_UNSAFE m_tail_dis = static_cast<QEQueueCtr>(~tmp); #endif m_ring[tmp] = frontEvt; } QF_MEM_APP(); QF_CRIT_EXIT(); noexcept #ifndef Q_SPY Q_UNUSED_PAR(qsId); #endif QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); QEvt const * const e = m_frontEvt; // always remove evt from the front #ifndef Q_UNSAFE Q_INVARIANT_INCRIT(411, Q_PTR2UINT_CAST_(e) == static_cast<std::uintptr_t>(~m_frontEvt_dis)); #endif // ndef Q_UNSAFE if (e != nullptr) { // was the queue not empty? 
QEQueueCtr tmp = m_nFree; // get volatile into temporary #ifndef Q_UNSAFE QEQueueCtr const dis = static_cast<QEQueueCtr>(~m_nFree_dis); Q_INVARIANT_INCRIT(412, tmp == dis); #endif // ndef Q_UNSAFE ++tmp; // one more free event in the queue m_nFree = tmp; // update the # free #ifndef Q_UNSAFE m_nFree_dis = static_cast<QEQueueCtr>(~tmp); #endif // ndef Q_UNSAFE // any events in the ring buffer? if (tmp <= m_end) { QS_BEGIN_PRE(QS_QF_EQUEUE_GET, qsId) QS_TIME_PRE(); // timestamp QS_SIG_PRE(e->sig); // the signal of this event QS_OBJ_PRE(this); // this queue object QS_2U8_PRE(e->getPoolNum_(), e->refCtr_); QS_EQC_PRE(tmp); // # free entries QS_END_PRE() tmp = m_tail; // get volatile into temporary QEvt const * const frontEvt = m_ring[tmp]; #ifndef Q_UNSAFE Q_ASSERT_INCRIT(421, frontEvt != nullptr); m_frontEvt_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(frontEvt)); #endif // ndef Q_UNSAFE m_frontEvt = frontEvt; // update the original if (tmp == 0U) { // need to wrap the tail? tmp = m_end; } --tmp; // advance the tail (counter-clockwise) m_tail = tmp; // update the original #ifndef Q_UNSAFE m_tail_dis = static_cast<QEQueueCtr>(~tmp); #endif // ndef Q_UNSAFE } else { m_frontEvt = nullptr; // queue becomes empty #ifndef Q_UNSAFE m_frontEvt_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(nullptr)); #endif // ndef Q_UNSAFE // all entries in the queue must be free (+1 for frontEvt) Q_INVARIANT_INCRIT(440, tmp == (m_end + 1U)); QS_BEGIN_PRE(QS_QF_EQUEUE_GET_LAST, qsId) QS_TIME_PRE(); // timestamp QS_SIG_PRE(e->sig); // the signal of this event QS_OBJ_PRE(this); // this queue object QS_2U8_PRE(e->getPoolNum_(), e->refCtr_); QS_END_PRE() } } QF_MEM_APP(); QF_CRIT_EXIT(); return e; const noexcept return m_nFree; const noexcept #ifndef Q_UNSAFE return m_nMin; #else return 0U; #endif const noexcept return m_frontEvt == nullptr; = delete = delete : m_start(nullptr), m_end(nullptr), m_free_head(nullptr), m_blockSize(0U), m_nTot(0U), m_nFree(0U) #ifndef Q_UNSAFE ,m_nMin(0U), m_free_head_dis(static_cast<std::uintptr_t>(~0U)), m_nFree_dis(static_cast<QEQueueCtr>(~0U)) #endif noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(100, poolSto != nullptr); Q_REQUIRE_INCRIT(101, poolSize >= static_cast<std::uint_fast32_t>(sizeof(QFreeBlock))); Q_REQUIRE_INCRIT(102, static_cast<std::uint_fast16_t>(blockSize + sizeof(QFreeBlock)) > blockSize); m_free_head = static_cast<QFreeBlock *>(poolSto); // find # free blocks in a memory block, NO DIVISION m_blockSize = static_cast<QMPoolSize>(2U * sizeof(void *)); std::uint_fast16_t nblocks = 1U; while (m_blockSize < static_cast<QMPoolSize>(blockSize)) { m_blockSize += static_cast<QMPoolSize>(sizeof(QFreeBlock)); ++nblocks; } // the pool buffer must fit at least one rounded-up block Q_ASSERT_INCRIT(110, poolSize >= m_blockSize); // start at the head of the free list QFreeBlock *fb = m_free_head; std::uint32_t nTot = 1U; // the last block already in the list // chain all blocks together in a free-list... 
for (std::uint_fast32_t size = poolSize - m_blockSize; size >= static_cast<std::uint_fast32_t>(m_blockSize); size -= static_cast<std::uint_fast32_t>(m_blockSize)) { fb->m_next = &fb[nblocks]; // point next link to next block #ifndef Q_UNSAFE fb->m_next_dis = ~Q_PTR2UINT_CAST_(fb->m_next); #endif fb = fb->m_next; // advance to the next block ++nTot; // one more free block in the pool } // dynamic range check #if (QF_MPOOL_CTR_SIZE == 1U) Q_ENSURE_INCRIT(190, nTot < 0xFFU); #elif (QF_MPOOL_CTR_SIZE == 2U) Q_ENSURE_INCRIT(190, nTot < 0xFFFFU); #endif fb->m_next = nullptr; // the last link points to NULL m_nTot = static_cast<QMPoolCtr>(nTot); m_nFree = m_nTot; // all blocks are free m_start = static_cast<QFreeBlock *>(poolSto); // original start m_end = fb; // the last block in this pool #ifndef Q_UNSAFE m_free_head_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(m_free_head)); m_nFree_dis = static_cast<QMPoolCtr>(~m_nFree); m_nMin = m_nTot; // the minimum # free blocks fb->m_next_dis = ~Q_PTR2UINT_CAST_(fb->m_next); #endif QF_MEM_APP(); QF_CRIT_EXIT(); noexcept #ifndef Q_SPY Q_UNUSED_PAR(qsId); #endif QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); // get volatile into temporaries QFreeBlock *fb = m_free_head; QMPoolCtr nFree = m_nFree; #ifndef Q_UNSAFE Q_INVARIANT_INCRIT(301, Q_PTR2UINT_CAST_(fb) == static_cast<std::uintptr_t>(~m_free_head_dis)); Q_INVARIANT_INCRIT(302, nFree == static_cast<QMPoolCtr>(~m_nFree_dis)); #endif // ndef Q_UNSAFE // have more free blocks than the requested margin? if (nFree > static_cast<QMPoolCtr>(margin)) { Q_ASSERT_INCRIT(310, fb != nullptr); QFreeBlock * const fb_next = fb->m_next; #ifndef Q_UNSAFE // the free block must have integrity (duplicate inverse storage) Q_INVARIANT_INCRIT(311, Q_PTR2UINT_CAST_(fb_next) == static_cast<std::uintptr_t>(~fb->m_next_dis)); #endif // ndef Q_UNSAFE --nFree; // one less free block if (nFree == 0U) { // is the pool becoming empty? // pool is becoming empty, so the next free block must be NULL Q_ASSERT_INCRIT(320, fb_next == nullptr); m_nFree = 0U; #ifndef Q_UNSAFE m_nFree_dis = static_cast<QMPoolCtr>(~m_nFree); m_nMin = 0U; // remember that the pool got empty #endif // ndef Q_UNSAFE } else { m_nFree = nFree; // update the original #ifndef Q_UNSAFE m_nFree_dis = static_cast<QMPoolCtr>(~nFree); // The pool is not empty, so the next free-block pointer // must be in range. Q_INVARIANT_INCRIT(330, QF_PTR_RANGE_(fb_next, m_start, m_end)); // is the # free blocks the new minimum so far? 
if (m_nMin > nFree) { m_nMin = nFree; // remember the minimum so far } #endif // ndef Q_UNSAFE } m_free_head = fb_next; // set the head to the next free block #ifndef Q_UNSAFE m_free_head_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(fb_next)); #endif // ndef Q_UNSAFE QS_BEGIN_PRE(QS_QF_MPOOL_GET, qsId) QS_TIME_PRE(); // timestamp QS_OBJ_PRE(this); // this memory pool QS_MPC_PRE(nFree); // # of free blocks in the pool #ifndef Q_UNSAFE QS_MPC_PRE(m_nMin); // min # free blocks ever in the pool #else QS_MPC_PRE(0U); // min # free blocks (not available) #endif // ndef Q_UNSAFE QS_END_PRE() } else { // don't have enough free blocks at this point fb = nullptr; QS_BEGIN_PRE(QS_QF_MPOOL_GET_ATTEMPT, qsId) QS_TIME_PRE(); // timestamp QS_OBJ_PRE(this); // this memory pool QS_MPC_PRE(nFree); // # of free blocks in the pool QS_MPC_PRE(margin); // the requested margin QS_END_PRE() } QF_MEM_APP(); QF_CRIT_EXIT(); return fb; // return the block or nullptr to the caller noexcept #ifndef Q_SPY Q_UNUSED_PAR(qsId); #endif QFreeBlock * const fb = static_cast<QFreeBlock *>(block); QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); // get volatile into temporaries QFreeBlock * const free_head = m_free_head; QMPoolCtr nFree = m_nFree; #ifndef Q_UNSAFE Q_INVARIANT_INCRIT(401, Q_PTR2UINT_CAST_(free_head) == static_cast<std::uintptr_t>(~m_free_head_dis)); Q_INVARIANT_INCRIT(402, nFree == static_cast<QMPoolCtr>(~m_nFree_dis)); Q_REQUIRE_INCRIT(410, nFree < m_nTot); Q_REQUIRE_INCRIT(411, QF_PTR_RANGE_(fb, m_start, m_end)); // the block must not be in the pool already Q_REQUIRE_INCRIT(412, Q_PTR2UINT_CAST_(fb->m_next) != static_cast<std::uintptr_t>(~fb->m_next_dis)); #endif // ndef Q_UNSAFE ++nFree; // one more free block in this pool m_free_head = fb; // set as new head of the free list m_nFree = nFree; fb->m_next = free_head; // link into the list #ifndef Q_UNSAFE m_free_head_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(fb)); m_nFree_dis = static_cast<QMPoolCtr>(~nFree); fb->m_next_dis = static_cast<std::uintptr_t>(~Q_PTR2UINT_CAST_(free_head)); #endif QS_BEGIN_PRE(QS_QF_MPOOL_PUT, qsId) QS_TIME_PRE(); // timestamp QS_OBJ_PRE(this); // this memory pool QS_MPC_PRE(nFree); // the # free blocks in the pool QS_END_PRE() QF_MEM_APP(); QF_CRIT_EXIT(); const noexcept return m_blockSize; const noexcept #ifndef Q_UNSAFE return m_nMin; #else return 0U; #endif const noexcept return m_nFree; = delete = delete noexcept noexcept noexcept std::uint8_t *ptr = static_cast<std::uint8_t *>(start); for (std::uint_fast16_t n = len; n > 0U; --n) { *ptr = 0U; ++ptr; } noexcept //! @deprecated QActive::psInit(subscrSto, maxSignal); noexcept //! @deprecated QActive::publish_(e, sender, qsId); noexcept //! @deprecated QTimeEvt::tick(tickRate, sender); noexcept //! 
@deprecated return QActive::getQueueMin(prio); {0xFFFFU}; noexcept std::uint_fast8_t const poolNum = priv_.maxPool_; // see precondition{qf_dyn,200} and precondition{qf_dyn,201} QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(200, poolNum < QF_MAX_EPOOL); if (poolNum > 0U) { Q_REQUIRE_INCRIT(201, QF_EPOOL_EVENT_SIZE_(priv_.ePool_[poolNum - 1U]) < evtSize); } priv_.maxPool_ = poolNum + 1U; // one more pool QF_MEM_APP(); QF_CRIT_EXIT(); // perform the port-dependent initialization of the event-pool QF_EPOOL_INIT_(priv_.ePool_[poolNum], poolSto, poolSize, evtSize); #ifdef Q_SPY // generate the object-dictionary entry for the initialized pool { std::uint8_t obj_name[9] = "EvtPool?"; obj_name[7] = static_cast<std::uint8_t>( static_cast<std::uint8_t>('0') + static_cast<std::uint8_t>(poolNum + 1U)); QS::obj_dict_pre_(&priv_.ePool_[poolNum], reinterpret_cast<char *>(&obj_name[0])); } #endif // Q_SPY noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); std::uint_fast16_t const max_size = QF_EPOOL_EVENT_SIZE_(priv_.ePool_[priv_.maxPool_ - 1U]); QF_MEM_APP(); QF_CRIT_EXIT(); return max_size; noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(400, (poolNum <= QF_MAX_EPOOL) && (0U < poolNum) && (poolNum <= priv_.maxPool_)); std::uint_fast16_t const min = static_cast<std::uint_fast16_t>( priv_.ePool_[poolNum - 1U].getNMin()); QF_MEM_APP(); QF_CRIT_EXIT(); return min; noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); // find the pool id that fits the requested event size... std::uint_fast8_t poolNum = 0U; // zero-based poolNum initially for (; poolNum < priv_.maxPool_; ++poolNum) { if (evtSize <= QF_EPOOL_EVENT_SIZE_(priv_.ePool_[poolNum])) { break; } } // precondition: // - cannot run out of registered pools Q_REQUIRE_INCRIT(300, poolNum < priv_.maxPool_); ++poolNum; // convert to 1-based poolNum QF_MEM_APP(); QF_CRIT_EXIT(); // get event e (port-dependent)... QEvt *e; #ifdef Q_SPY QF_EPOOL_GET_(priv_.ePool_[poolNum - 1U], e, ((margin != NO_MARGIN) ? margin : 0U), static_cast<std::uint_fast8_t>(QS_EP_ID) + poolNum); #else QF_EPOOL_GET_(priv_.ePool_[poolNum - 1U], e, ((margin != NO_MARGIN) ? margin : 0U), 0U); #endif if (e != nullptr) { // was e allocated correctly? e->sig = static_cast<QSignal>(sig); // set the signal e->evtTag_ = static_cast<std::uint8_t>((poolNum << 4U) | 0x0FU); e->refCtr_ = 0U; // initialize the reference counter to 0 QS_CRIT_ENTRY(); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QF_NEW, static_cast<std::uint_fast8_t>(QS_EP_ID) + poolNum) QS_TIME_PRE(); // timestamp QS_EVS_PRE(evtSize); // the size of the event QS_SIG_PRE(sig); // the signal of the event QS_END_PRE() QS_MEM_APP(); QS_CRIT_EXIT(); } else { // event was not allocated QF_CRIT_ENTRY(); // This assertion means that the event allocation failed, // and this failure cannot be tolerated. The most frequent // reason is an event leak in the application. Q_ASSERT_INCRIT(320, margin != NO_MARGIN); QS_MEM_SYS(); QS_BEGIN_PRE(QS_QF_NEW_ATTEMPT, static_cast<std::uint_fast8_t>(QS_EP_ID) + poolNum) QS_TIME_PRE(); // timestamp QS_EVS_PRE(evtSize); // the size of the event QS_SIG_PRE(sig); // the signal of the event QS_END_PRE() QS_MEM_APP(); QF_CRIT_EXIT(); } // the returned event e is guaranteed to be valid (not NULL) // if we can't tolerate failed allocation return e; noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); Q_REQUIRE_INCRIT(400, e != nullptr); Q_INVARIANT_INCRIT(401, e->verify_()); std::uint_fast8_t const poolNum = e->getPoolNum_(); if (poolNum != 0U) { // is it a pool event (mutable)? 
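// [Illustrative example, not part of the generated QP/C++ code]
// The precondition checked in poolInit() above (ID 201) requires event pools to
// be initialized in ascending order of event size, and newX_() then allocates
// from the smallest pool whose block size fits the requested event. A hedged
// sketch of typical startup code (the BigEvt type and the signal name are
// assumptions of this example):

struct BigEvt : QP::QEvt {          // assumed application event type
    std::uint32_t payload[8];
};

static QF_MPOOL_EL(QP::QEvt) l_smlPoolSto[20]; // pool for small (bare) events
static QF_MPOOL_EL(BigEvt)   l_bigPoolSto[10]; // pool for larger app events

// in main(), after QF::init() and before QF::run():
//     QP::QF::poolInit(l_smlPoolSto, sizeof(l_smlPoolSto), sizeof(l_smlPoolSto[0]));
//     QP::QF::poolInit(l_bigPoolSto, sizeof(l_bigPoolSto), sizeof(l_bigPoolSto[0]));
//
// later, allocation picks the best-fitting pool:
//     BigEvt *be = Q_NEW(BigEvt, BIG_DATA_SIG); // served from the second pool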
QF_MEM_SYS(); if (e->refCtr_ > 1U) { // isn't this the last reference? QS_BEGIN_PRE(QS_QF_GC_ATTEMPT, static_cast<std::uint_fast8_t>(QS_EP_ID) + poolNum) QS_TIME_PRE(); // timestamp QS_SIG_PRE(e->sig); // the signal of the event QS_2U8_PRE(poolNum, e->refCtr_); QS_END_PRE() QEvt_refCtr_dec_(e); // decrement the ref counter QF_MEM_APP(); QF_CRIT_EXIT(); } else { // this is the last reference to this event, recycle it QS_BEGIN_PRE(QS_QF_GC, static_cast<std::uint_fast8_t>(QS_EP_ID) + poolNum) QS_TIME_PRE(); // timestamp QS_SIG_PRE(e->sig); // the signal of the event QS_2U8_PRE(poolNum, e->refCtr_); QS_END_PRE() // pool number must be in range Q_ASSERT_INCRIT(410, (poolNum <= priv_.maxPool_) && (poolNum <= QF_MAX_EPOOL)); QF_MEM_APP(); QF_CRIT_EXIT(); // NOTE: casting 'const' away is legit because it's a pool event #ifdef Q_SPY QF_EPOOL_PUT_(priv_.ePool_[poolNum - 1U], QF_CONST_CAST_(QEvt*, e), static_cast<std::uint_fast8_t>(QS_EP_ID) + poolNum); #else QF_EPOOL_PUT_(priv_.ePool_[poolNum - 1U], QF_CONST_CAST_(QEvt*, e), 0U); #endif } } else { QF_CRIT_EXIT(); } noexcept #ifdef Q_UNSAFE Q_UNUSED_PAR(evtRef); #endif QF_CRIT_STAT QF_CRIT_ENTRY(); Q_REQUIRE_INCRIT(500, e != nullptr); Q_INVARIANT_INCRIT(501, e->verify_()); std::uint_fast8_t const poolNum = e->getPoolNum_(); Q_UNUSED_PAR(poolNum); // might be unused Q_REQUIRE_INCRIT(501, (poolNum != 0U) && (evtRef == nullptr)); QEvt_refCtr_inc_(e); // increments the ref counter QS_MEM_SYS(); QS_BEGIN_PRE(QS_QF_NEW_REF, static_cast<std::uint_fast8_t>(QS_EP_ID) + poolNum) QS_TIME_PRE(); // timestamp QS_SIG_PRE(e->sig); // the signal of the event QS_2U8_PRE(poolNum, e->refCtr_); QS_END_PRE() QS_MEM_APP(); QF_CRIT_EXIT(); return e; noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); QEvt const * const e = evtRef; Q_REQUIRE_INCRIT(600, e != nullptr); Q_INVARIANT_INCRIT(601, e->verify_()); #ifdef Q_SPY std::uint_fast8_t const poolNum = e->getPoolNum_(); #endif QS_MEM_SYS(); QS_BEGIN_PRE(QS_QF_DELETE_REF, static_cast<std::uint_fast8_t>(QS_EP_ID) + poolNum) QS_TIME_PRE(); // timestamp QS_SIG_PRE(e->sig); // the signal of the event QS_2U8_PRE(poolNum, e->refCtr_); QS_END_PRE() QS_MEM_APP(); QF_CRIT_EXIT(); #if (QF_MAX_EPOOL > 0U) gc(e); // recycle the referenced event #endif return static_cast<evtT_*>( QP::QF::newX_(sizeof(evtT_), QP::QF::NO_MARGIN, sig)); evtT_ *e = static_cast<evtT_*>( QP::QF::newX_(sizeof(evtT_), QP::QF::NO_MARGIN, sig)); e->init(args...); // e cannot be nullptr return e; return static_cast<evtT_*>(QP::QF::newX_(sizeof(evtT_), margin, sig)); evtT_ *e = static_cast<evtT_*>(QP::QF::newX_(sizeof(evtT_), margin, sig)); if (e != nullptr) { e->init(args...); } return e; evtRef = static_cast<evtT_ const *>(QP::QF::newRef_(e, evtRef)); QP::QF::deleteRef_(evtRef); evtRef = nullptr; noexcept noexcept \ (static_cast<QP::QPrioSpec>((prio_) | (pthre_) << 8U)) (QP::QF::q_new<evtT_>((sig_))) (QP::QF::q_new<evtT_>((sig_), __VA_ARGS__)) (QP::QF::q_new_x<evtT_>((margin_), (sig_))) (QP::QF::q_new_x<evtT_>((margin_), (sig_), __VA_ARGS__)) (QP::QF::q_new_ref<evtT_>(e, (evtRef_))) do { \ QP::QF::deleteRef_((evtRef_)); \ (evtRef_) = nullptr; \ } while (false) \ publish_((e_), (sender_), (sender_)->getPrio()) publish_((e_), nullptr, 0U) post_((e_), QP::QF::NO_MARGIN, (sender_)) post_((e_), QP::QF::NO_MARGIN, nullptr) \ post_((e_), (margin_), (sender_)) post_((e_), (margin_), nullptr) tick((tickRate_), (sender_)) tick((tickRate_), nullptr) TICK_X(0U, (sender_)) trig_((sender_)) trig_(nullptr) (static_cast<void>(0)) (static_cast<void>(0)) (static_cast<void>(0)) Native QF event 
pool QMPool Native QF event pool initialization \ (p_).init((poolSto_), (poolSize_), (evtSize_)) Native QF event pool event-size getter ((p_).getBlockSize()) Native QF event pool get-event \ ((e_) = static_cast<QEvt *>((p_).get((m_), (qsId_)))) Native QF event pool put-event ((p_).put((e_), (qsId_))) QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_INVARIANT_INCRIT(102, priv_.schedCeil == static_cast<std::uint_fast8_t>(~priv_.schedCeil_dis)); if (ceiling > priv_.schedCeil) { // raising the scheduler ceiling? QS_BEGIN_PRE(QS_SCHED_LOCK, 0U) QS_TIME_PRE(); // timestamp // the previous sched ceiling & new sched ceiling QS_2U8_PRE(static_cast<std::uint8_t>(priv_.schedCeil), static_cast<std::uint8_t>(ceiling)); QS_END_PRE() priv_.schedCeil = ceiling; #ifndef Q_UNSAFE priv_.schedCeil_dis = static_cast<std::uint_fast8_t>(~ceiling); #endif } QF_MEM_APP(); QF_CRIT_EXIT(); QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_INVARIANT_INCRIT(202, priv_.schedCeil == static_cast<std::uint_fast8_t>(~priv_.schedCeil_dis)); if (priv_.schedCeil != 0U) { // actually enabling the scheduler? QS_BEGIN_PRE(QS_SCHED_UNLOCK, 0U) QS_TIME_PRE(); // timestamp // current sched ceiling (old), previous sched ceiling (new) QS_2U8_PRE(static_cast<std::uint8_t>(priv_.schedCeil), 0U); QS_END_PRE() priv_.schedCeil = 0U; #ifndef Q_UNSAFE priv_.schedCeil_dis = ~static_cast<std::uint_fast8_t>(0U); #endif } QF_MEM_APP(); QF_CRIT_EXIT(); bzero_(&QF::priv_, sizeof(QF::priv_)); bzero_(&QV::priv_, sizeof(QV::priv_)); bzero_(&QActive::registry_[0], sizeof(QActive::registry_)); #ifndef Q_UNSAFE QV::priv_.readySet.update_(&QV::priv_.readySet_dis); QV::priv_.schedCeil_dis = ~static_cast<std::uint_fast8_t>(0U); #endif #ifdef QV_INIT QV_INIT(); // port-specific initialization of the QV kernel #endif onCleanup(); // cleanup callback // nothing else to do for the QV kernel #ifdef Q_SPY // produce the QS_QF_RUN trace record QF_INT_DISABLE(); QF_MEM_SYS(); QS::beginRec_(QS_REC_NUM_(QS_QF_RUN)); QS::endRec_(); QF_MEM_APP(); QF_INT_ENABLE(); #endif // Q_SPY onStartup(); // application-specific startup callback QF_INT_DISABLE(); QF_MEM_SYS(); #ifdef QV_START QV_START(); // port-specific startup of the QV kernel #endif #if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY) std::uint_fast8_t pprev = 0U; // previous prio. #ifdef QF_ON_CONTEXT_SW // officially switch to the idle cotext QF_onContextSw(nullptr, nullptr); #endif // def QF_ON_CONTEXT_SW #endif // (defined QF_ON_CONTEXT_SW) || (defined Q_SPY) for (;;) { // QV event loop... // check internal integrity (duplicate inverse storage) Q_INVARIANT_INCRIT(302, QV::priv_.readySet.verify_(&QV::priv_.readySet_dis)); // check internal integrity (duplicate inverse storage) Q_INVARIANT_INCRIT(303, QV::priv_.schedCeil == static_cast<std::uint_fast8_t>(~QV::priv_.schedCeil_dis)); // find the maximum prio. AO ready to run std::uint_fast8_t const p = (QV::priv_.readySet.notEmpty() ? QV::priv_.readySet.findMax() : 0U); if (p > QV::priv_.schedCeil) { // is it above the sched ceiling? QActive * const a = QActive::registry_[p]; #if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY) QS_BEGIN_PRE(QS_SCHED_NEXT, p) QS_TIME_PRE(); // timestamp QS_2U8_PRE(static_cast<std::uint8_t>(p), static_cast<std::uint8_t>(pprev)); QS_END_PRE() #ifdef QF_ON_CONTEXT_SW QF_onContextSw(((pprev != 0U) ? QActive::registry_[pprev] : nullptr), a); #endif // QF_ON_CONTEXT_SW pprev = p; // update previous prio. 
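// [Illustrative example, not part of the generated QP/C++ code]
// When the port defines QF_ON_CONTEXT_SW (as tested above), the application
// provides the QF_onContextSw() callback, which the kernel invokes on every
// context switch; a nullptr argument denotes the idle context. A hedged sketch
// of such a callback (the tracing hook inside is an assumption; add extern "C"
// if your QP port declares the callback with C linkage):

void QF_onContextSw(QP::QActive *prev, QP::QActive *next) {
    // e.g., toggle a GPIO or feed an external trace/profiler here
    Q_UNUSED_PAR(prev);
    Q_UNUSED_PAR(next);
}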
#endif // (defined QF_ON_CONTEXT_SW) || (defined Q_SPY) QF_MEM_APP(); QF_INT_ENABLE(); QEvt const * const e = a->get_(); // NOTE QActive::get_() performs QF_MEM_APP() before return // dispatch event (virtual call) a->dispatch(e, a->getPrio()); #if (QF_MAX_EPOOL > 0U) gc(e); #endif QF_INT_DISABLE(); QF_MEM_SYS(); if (a->getEQueue().isEmpty()) { // empty queue? QV::priv_.readySet.remove(p); #ifndef Q_UNSAFE QV::priv_.readySet.update_(&QV::priv_.readySet_dis); #endif } } else { // no AO ready to run --> idle #if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY) if (pprev != 0U) { QS_BEGIN_PRE(QS_SCHED_IDLE, pprev) QS_TIME_PRE(); // timestamp QS_U8_PRE(static_cast<std::uint8_t>(pprev)); QS_END_PRE() #ifdef QF_ON_CONTEXT_SW QF_onContextSw(QActive::registry_[pprev], nullptr); #endif // QF_ON_CONTEXT_SW pprev = 0U; // update previous prio } #endif // (defined QF_ON_CONTEXT_SW) || (defined Q_SPY) QF_MEM_APP(); // QV::onIdle() must be called with interrupts DISABLED // because the determination of the idle condition (all event // queues empty) can change at any time by an interrupt posting // events to a queue. // // NOTE: QV::onIdle() MUST enable interrupts internally, // ideally at the same time as putting the CPU into a power- // saving mode. QV::onIdle(); QF_INT_DISABLE(); QF_MEM_SYS(); } } #ifdef __GNUC__ // GNU compiler? return 0; #endif Q_UNUSED_PAR(stkSto); // not needed in QV Q_UNUSED_PAR(stkSize); // not needed in QV QF_CRIT_STAT QF_CRIT_ENTRY(); Q_REQUIRE_INCRIT(300, stkSto == nullptr); QF_CRIT_EXIT(); m_prio = static_cast<std::uint8_t>(prioSpec & 0xFFU); // QF-prio. m_pthre = 0U; // not used register_(); // make QF aware of this AO m_eQueue.init(qSto, qLen); // initialize QEQueue of this AO this->init(par, m_prio); // take the top-most initial tran. (virtual) QS_FLUSH(); // flush the trace buffer to the host (static_cast<void>(0)) (static_cast<void>(0)) (static_cast<void>(0)) \ QV::priv_.readySet.insert((me_)->m_prio); \ QV::priv_.readySet.update_(&QV::priv_.readySet_dis) \ (QV::priv_.readySet.insert((me_)->m_prio)) = std::uint_fast8_t; noexcept QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(100, !QK_ISR_CONTEXT_()); Q_INVARIANT_INCRIT(102, QK_priv_.lockCeil == static_cast<std::uint_fast8_t>(~QK_priv_.lockCeil_dis)); // first store the previous lock prio QSchedStatus stat; if (ceiling > QK_priv_.lockCeil) { // raising the lock ceiling? QS_BEGIN_PRE(QS_SCHED_LOCK, QK_priv_.actPrio) QS_TIME_PRE(); // timestamp // the previous lock ceiling & new lock ceiling QS_2U8_PRE(static_cast<std::uint8_t>(QK_priv_.lockCeil), static_cast<std::uint8_t>(ceiling)); QS_END_PRE() // previous status of the lock stat = static_cast<QSchedStatus>(QK_priv_.lockCeil); // new status of the lock QK_priv_.lockCeil = ceiling; #ifndef Q_UNSAFE QK_priv_.lockCeil_dis = static_cast<std::uint_fast8_t>(~ceiling); #endif } else { stat = 0xFFU; // scheduler not locked } QF_MEM_APP(); QF_CRIT_EXIT(); return stat; // return the status to be saved in a stack variable noexcept // has the scheduler been actually locked by the last QK::schedLock()? 
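// [Illustrative example, not part of the generated QP/C++ code]
// QK::schedLock()/schedUnlock() implement a priority-ceiling scheduler lock:
// AOs with priority at or below the ceiling cannot preempt the caller, yet
// interrupts remain enabled throughout. A hedged usage sketch (the ceiling
// value and the shared resource are assumptions of this example):
//
//     auto const lockStat = QP::QK::schedLock(5U);  // ceiling = prio 5
//     // ... access a resource shared with AOs of priority 1..5 ...
//     QP::QK::schedUnlock(lockStat);  // restore the previous lock ceiling
//
// The returned status must be passed back to schedUnlock(); the value 0xFF
// means the scheduler was not actually locked because the ceiling was already
// at or above the requested level.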
if (prevCeil != 0xFFU) { QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_INVARIANT_INCRIT(202, QK_priv_.lockCeil == static_cast<std::uint_fast8_t>(~QK_priv_.lockCeil_dis)); Q_REQUIRE_INCRIT(210, (!QK_ISR_CONTEXT_()) && (QK_priv_.lockCeil > prevCeil)); QS_BEGIN_PRE(QS_SCHED_UNLOCK, QK_priv_.actPrio) QS_TIME_PRE(); // timestamp // current lock ceiling (old), previous lock ceiling (new) QS_2U8_PRE(static_cast<std::uint8_t>(QK_priv_.lockCeil), static_cast<std::uint8_t>(prevCeil)); QS_END_PRE() // restore the previous lock ceiling QK_priv_.lockCeil = prevCeil; #ifndef Q_UNSAFE QK_priv_.lockCeil_dis = static_cast<std::uint_fast8_t>(~prevCeil); #endif // find if any AOs should be run after unlocking the scheduler if (QK_sched_() != 0U) { // preemption needed? QK_activate_(); // activate any unlocked AOs } QF_MEM_APP(); QF_CRIT_EXIT(); } bzero_(&QF::priv_, sizeof(QF::priv_)); bzero_(&QK_priv_, sizeof(QK_priv_)); bzero_(&QActive::registry_[0], sizeof(QActive::registry_)); // setup the QK scheduler as initially locked and not running QK_priv_.lockCeil = (QF_MAX_ACTIVE + 1U); // scheduler locked #ifndef Q_UNSAFE QK_priv_.readySet.update_(&QK_priv_.readySet_dis); QK_priv_.actPrio_dis = static_cast<std::uint_fast8_t>(~QK_priv_.actPrio); QK_priv_.nextPrio_dis = static_cast<std::uint_fast8_t>(~QK_priv_.nextPrio); QK_priv_.actThre_dis = static_cast<std::uint_fast8_t>(~QK_priv_.actThre); QK_priv_.lockCeil_dis = static_cast<std::uint_fast8_t>(~QK_priv_.lockCeil); #endif #ifdef QK_INIT QK_INIT(); // port-specific initialization of the QK kernel #endif onCleanup(); // cleanup callback // nothing else to do for the QK preemptive kernel #ifdef Q_SPY // produce the QS_QF_RUN trace record QF_INT_DISABLE(); QF_MEM_SYS(); QS::beginRec_(QS_REC_NUM_(QS_QF_RUN)); QS::endRec_(); QF_MEM_APP(); QF_INT_ENABLE(); #endif // Q_SPY onStartup(); // application-specific startup callback QF_INT_DISABLE(); QF_MEM_SYS(); #ifdef QK_START QK_START(); // port-specific startup of the QK kernel #endif QK_priv_.lockCeil = 0U; // unlock the QK scheduler #ifndef Q_UNSAFE QK_priv_.lockCeil_dis = static_cast<std::uint_fast8_t>(~QK_priv_.lockCeil); #endif #ifdef QF_ON_CONTEXT_SW // officially switch to the idle context QF_onContextSw(nullptr, QActive::registry_[QK_priv_.nextPrio]); #endif // activate AOs to process events posted so far if (QK_sched_() != 0U) { QK_activate_(); } QF_MEM_APP(); QF_INT_ENABLE(); for (;;) { // QK idle loop... QK::onIdle(); // application-specific QK on-idle callback } #ifdef __GNUC__ // GNU compiler? return 0; #endif Q_UNUSED_PAR(stkSto); // not needed in QK Q_UNUSED_PAR(stkSize); // not needed in QK QF_CRIT_STAT QF_CRIT_ENTRY(); QF_MEM_SYS(); Q_REQUIRE_INCRIT(300, (!QK_ISR_CONTEXT_()) && (stkSto == nullptr)); QF_MEM_APP(); QF_CRIT_EXIT(); m_prio = static_cast<std::uint8_t>(prioSpec & 0xFFU); // QF-prio. m_pthre = static_cast<std::uint8_t>(prioSpec >> 8U); // preemption-thre. register_(); // make QF aware of this AO m_eQueue.init(qSto, qLen); // init the built-in queue // top-most initial tran. (virtual call) this->init(par, m_prio); QS_FLUSH(); // flush the trace buffer to the host // See if this AO needs to be scheduled if QK is already running QF_CRIT_ENTRY(); QF_MEM_SYS(); if (QK_sched_() != 0U) { // activation needed? 
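// [Illustrative example, not part of the generated QP/C++ code]
// The prioSpec decoded above packs the QF priority in the low byte and an
// optional preemption-threshold in the high byte. A hedged sketch of starting
// an AO under QK with an explicit threshold (the AO instance, queue size and
// numeric priorities are assumptions of this example):

static QP::QEvt const *l_blinkyQSto[10]; // event queue storage for the AO

// in main(), before QF::run():
//     l_blinky.start(
//         Q_PRIO(3U, 4U),                  // QF-prio 3, preemption-threshold 4
//         l_blinkyQSto, Q_DIM(l_blinkyQSto),
//         nullptr, 0U);                    // no private stack under QK/QV
//
// With pthre equal to prio, the AO is preempted by any higher-priority AO;
// a higher threshold groups neighboring AOs into a set whose members do not
// preempt one another.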
QK_activate_(); } QF_MEM_APP(); QF_CRIT_EXIT(); noexcept // NOTE: this function is entered with interrupts DISABLED Q_INVARIANT_INCRIT(402, QK_priv_.readySet.verify_(&QK_priv_.readySet_dis)); std::uint_fast8_t p; if (QK_priv_.readySet.isEmpty()) { p = 0U; // no activation needed } else { // find the highest-prio AO with non-empty event queue p = QK_priv_.readySet.findMax(); Q_INVARIANT_INCRIT(412, QK_priv_.actThre == static_cast<std::uint_fast8_t>(~QK_priv_.actThre_dis)); // is the AO's prio. below the active preemption-threshold? if (p <= QK_priv_.actThre) { p = 0U; // no activation needed } else { Q_INVARIANT_INCRIT(422, QK_priv_.lockCeil == static_cast<std::uint_fast8_t>(~QK_priv_.lockCeil_dis)); // is the AO's prio. below the lock-ceiling? if (p <= QK_priv_.lockCeil) { p = 0U; // no activation needed } else { Q_INVARIANT_INCRIT(432, QK_priv_.nextPrio == static_cast<std::uint_fast8_t>(~QK_priv_.nextPrio_dis)); QK_priv_.nextPrio = p; // next AO to run #ifndef Q_UNSAFE QK_priv_.nextPrio_dis = static_cast<std::uint_fast8_t>(~QK_priv_.nextPrio); #endif } } } return p; noexcept // NOTE: this function is entered with interrupts DISABLED std::uint_fast8_t const prio_in = QK_priv_.actPrio; // save initial prio. std::uint_fast8_t p = QK_priv_.nextPrio; // next prio to run Q_INVARIANT_INCRIT(502, (prio_in == static_cast<std::uint_fast8_t>(~QK_priv_.actPrio_dis)) && (p == static_cast<std::uint_fast8_t>(~QK_priv_.nextPrio_dis))); Q_REQUIRE_INCRIT(510, (prio_in <= QF_MAX_ACTIVE) && (0U < p) && (p <= QF_MAX_ACTIVE)); #if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY) std::uint_fast8_t pprev = prio_in; #endif // QF_ON_CONTEXT_SW || Q_SPY QK_priv_.nextPrio = 0U; // clear for the next time #ifndef Q_UNSAFE QK_priv_.nextPrio_dis = static_cast<std::uint_fast8_t>(~QK_priv_.nextPrio); #endif std::uint_fast8_t pthre_in; QP::QActive *a; if (prio_in == 0U) { // preempting the idle thread? pthre_in = 0U; } else { a = QP::QActive::registry_[prio_in]; Q_ASSERT_INCRIT(510, a != nullptr); pthre_in = static_cast<std::uint_fast8_t>(a->getPThre()); Q_INVARIANT_INCRIT(511, pthre_in == static_cast<std::uint_fast8_t>( ~static_cast<std::uint_fast8_t>(a->m_pthre_dis) & 0xFFU)); } // loop until no more ready-to-run AOs of higher pthre than the initial do { a = QP::QActive::registry_[p]; // obtain the pointer to the AO Q_ASSERT_INCRIT(520, a != nullptr); // the AO must be registered std::uint_fast8_t const pthre = static_cast<std::uint_fast8_t>(a->getPThre()); Q_INVARIANT_INCRIT(522, pthre == static_cast<std::uint_fast8_t>( ~static_cast<std::uint_fast8_t>(a->m_pthre_dis) & 0xFFU)); // set new active prio. and preemption-threshold QK_priv_.actPrio = p; QK_priv_.actThre = pthre; #ifndef Q_UNSAFE QK_priv_.actPrio_dis = static_cast<std::uint_fast8_t>(~p); QK_priv_.actThre_dis = static_cast<std::uint_fast8_t>(~pthre); #endif #if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY) if (p != pprev) { // changing threads? QS_BEGIN_PRE(QP::QS_SCHED_NEXT, p) QS_TIME_PRE(); // timestamp QS_2U8_PRE(p, // prio. of the scheduled AO pprev); // previous prio. QS_END_PRE() #ifdef QF_ON_CONTEXT_SW QF_onContextSw(QP::QActive::registry_[pprev], a); #endif // QF_ON_CONTEXT_SW pprev = p; // update previous prio. } #endif // QF_ON_CONTEXT_SW || Q_SPY QF_MEM_APP(); QF_INT_ENABLE(); // unconditionally enable interrupts QP::QEvt const * const e = a->get_(); // NOTE QActive_get_() performs QF_MEM_APP() before return // dispatch event (virtual call) a->dispatch(e, a->getPrio()); #if (QF_MAX_EPOOL > 0U) QP::QF::gc(e); #endif // determine the next highest-prio. 
AO ready to run... QF_INT_DISABLE(); // unconditionally disable interrupts QF_MEM_SYS(); // internal integrity check (duplicate inverse storage) Q_INVARIANT_INCRIT(532, QK_priv_.readySet.verify_(&QK_priv_.readySet_dis)); if (a->getEQueue().isEmpty()) { // empty queue? QK_priv_.readySet.remove(p); #ifndef Q_UNSAFE QK_priv_.readySet.update_(&QK_priv_.readySet_dis); #endif } if (QK_priv_.readySet.isEmpty()) { p = 0U; // no activation needed } else { // find new highest-prio AO ready to run... p = QK_priv_.readySet.findMax(); // is the new prio. below the initial preemption-threshold? if (p <= pthre_in) { p = 0U; // no activation needed } else { Q_INVARIANT_INCRIT(542, QK_priv_.lockCeil == ~QK_priv_.lockCeil_dis); // is the AO's prio. below the lock preemption-threshold? if (p <= QK_priv_.lockCeil) { p = 0U; // no activation needed } else { Q_ASSERT_INCRIT(550, p <= QF_MAX_ACTIVE); } } } } while (p != 0U); // restore the active prio. and preemption-threshold QK_priv_.actPrio = prio_in; QK_priv_.actThre = pthre_in; #ifndef Q_UNSAFE QK_priv_.actPrio_dis = static_cast<std::uint_fast8_t>(~QK_priv_.actPrio); QK_priv_.actThre_dis = static_cast<std::uint_fast8_t>(~QK_priv_.actThre); #endif #if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY) if (prio_in != 0U) { // resuming an active object? a = QP::QActive::registry_[prio_in]; // pointer to preempted AO QS_BEGIN_PRE(QP::QS_SCHED_NEXT, prio_in) QS_TIME_PRE(); // timestamp // prio. of the resumed AO, previous prio. QS_2U8_PRE(prio_in, pprev); QS_END_PRE() } else { // resuming prio.==0 --> idle a = nullptr; // QK idle loop QS_BEGIN_PRE(QP::QS_SCHED_IDLE, pprev) QS_TIME_PRE(); // timestamp QS_U8_PRE(pprev); // previous prio. QS_END_PRE() } #ifdef QF_ON_CONTEXT_SW QF_onContextSw(QP::QActive::registry_[pprev], a); #endif // QF_ON_CONTEXT_SW #endif // QF_ON_CONTEXT_SW || Q_SPY QSchedStatus lockStat_; do { \ if (QK_ISR_CONTEXT_()) { \ lockStat_ = 0xFFU; \ } else { \ lockStat_ = QK::schedLock((ceil_)); \ } \ } while (false) do { \ if (lockStat_ != 0xFFU) { \ QK::schedUnlock(lockStat_); \ } \ } while (false) (static_cast<void>(0)) do { \ QK_priv_.readySet.insert( \ static_cast<std::uint_fast8_t>((me_)->m_prio)); \ QK_priv_.readySet.update_(&QK_priv_.readySet_dis); \ if (!QK_ISR_CONTEXT_()) { \ if (QK_sched_() != 0U) { \ QK_activate_(); \ } \ } \ } while (false) do { \ QK_priv_.readySet.insert( \ static_cast<std::uint_fast8_t>((me_)->m_prio)); \ if (!QK_ISR_CONTEXT_()) { \ if (QK_sched_() != 0U) { \ QK_activate_(); \ } \ } \ } while (false) #ifndef QP_HPP_ #define QP_HPP_ //============================================================================ #define QP_VERSION_STR "8.0.1" #define QP_VERSION 801U #define QP_RELEASE 0x703931CEU //============================================================================ //! 
@cond INTERNAL #ifndef Q_SIGNAL_SIZE #define Q_SIGNAL_SIZE 2U #endif #ifndef QF_MAX_ACTIVE #define QF_MAX_ACTIVE 32U #endif #if (QF_MAX_ACTIVE > 64U) #error QF_MAX_ACTIVE exceeds the maximum of 64U; #endif #ifndef QF_MAX_TICK_RATE #define QF_MAX_TICK_RATE 1U #endif #if (QF_MAX_TICK_RATE > 15U) #error QF_MAX_TICK_RATE exceeds the maximum of 15U; #endif #ifndef QF_MAX_EPOOL #define QF_MAX_EPOOL 3U #endif #if (QF_MAX_EPOOL > 15U) #error QF_MAX_EPOOL exceeds the maximum of 15U; #endif #ifndef QF_TIMEEVT_CTR_SIZE #define QF_TIMEEVT_CTR_SIZE 4U #endif #if (QF_TIMEEVT_CTR_SIZE > 4U) #error QF_TIMEEVT_CTR_SIZE defined incorrectly, expected 1U, 2U, or 4U; #endif #ifndef QF_EVENT_SIZ_SIZE #define QF_EVENT_SIZ_SIZE 2U #endif #if (QF_EVENT_SIZ_SIZE > 4U) #error QF_EVENT_SIZ_SIZE defined incorrectly, expected 1U, 2U, or 4U; #endif //! @endcond //============================================================================ $declare ${glob-types} $declare ${QEP} $declare ${QEP-macros} $declare ${QF::types} $declare ${QF::QActive} $declare ${QF::QMActive} $declare ${QF::QTimeEvt} $declare ${QF::QTicker} $declare ${QF::QF-base} $declare ${QF::QF-dyn} extern "C" { $declare ${QF-extern-C} } // extern "C" $declare ${QF-macros} #endif // QP_HPP_ #ifndef QP_PKG_HPP_ #define QP_PKG_HPP_ $declare ${QF::QF-pkg} #define QF_CONST_CAST_(type_, ptr_) const_cast<type_>(ptr_) #define Q_PTR2UINT_CAST_(ptr_) (reinterpret_cast<std::uintptr_t>(ptr_)) #define QF_PTR_RANGE_(x_, min_, max_) (((min_) <= (x_)) && ((x_) <= (max_))) namespace QP { // Bitmasks are for the QTimeEvt::flags attribute constexpr std::uint8_t QTE_FLAG_IS_LINKED {1U << 7U}; constexpr std::uint8_t QTE_FLAG_WAS_DISARMED {1U << 6U}; inline void QEvt_refCtr_inc_(QEvt const * const e) noexcept { std::uint8_t rc = e->refCtr_ + 1U; (QF_CONST_CAST_(QEvt*, e))->refCtr_ = rc; // cast away 'const' #ifndef Q_UNSAFE (QF_CONST_CAST_(QEvt*, e))->evtTag_ = (e->evtTag_ & 0xF0U) | ((~rc) & 0x0FU); #endif } inline void QEvt_refCtr_dec_(QEvt const * const e) noexcept { std::uint8_t rc = e->refCtr_ - 1U; (QF_CONST_CAST_(QEvt*, e))->refCtr_ = rc; // cast away 'const' #ifndef Q_UNSAFE (QF_CONST_CAST_(QEvt*, e))->evtTag_ = (e->evtTag_ & 0xF0U) | ((~rc) & 0x0FU); #endif } } // namespace QP #endif // QP_PKG_HPP_ #ifndef QEQUEUE_HPP_ #define QEQUEUE_HPP_ #ifndef QF_EQUEUE_CTR_SIZE #define QF_EQUEUE_CTR_SIZE 1U #endif namespace QP { #if (QF_EQUEUE_CTR_SIZE == 1U) using QEQueueCtr = std::uint8_t; #elif (QF_EQUEUE_CTR_SIZE == 2U) using QEQueueCtr = std::uint16_t; #else #error "QF_EQUEUE_CTR_SIZE defined incorrectly, expected 1U or 2U" #endif class QEvt; // forward declaration } // namespace QP $declare ${QF::QEQueue} #endif // QEQUEUE_HPP_ #ifndef QMPOOL_HPP_ #define QMPOOL_HPP_ #ifndef QF_MPOOL_SIZ_SIZE #define QF_MPOOL_SIZ_SIZE 2U #endif #ifndef QF_MPOOL_CTR_SIZE #define QF_MPOOL_CTR_SIZE 2U #endif namespace QP { #if (QF_MPOOL_SIZ_SIZE == 1U) using QMPoolSize = std::uint8_t; #elif (QF_MPOOL_SIZ_SIZE == 2U) using QMPoolSize = std::uint16_t; #elif (QF_MPOOL_SIZ_SIZE == 4U) using QMPoolSize = std::uint32_t; #else #error "QF_MPOOL_SIZ_SIZE defined incorrectly, expected 1U, 2U, or 4U" #endif #if (QF_MPOOL_CTR_SIZE == 1U) using QMPoolCtr = std::uint8_t; #elif (QF_MPOOL_CTR_SIZE == 2U) using QMPoolCtr = std::uint16_t; #elif (QF_MPOOL_CTR_SIZE == 4U) using QMPoolCtr = std::uint32_t; #else #error "QF_MPOOL_CTR_SIZE defined incorrectly, expected 1U, 2U, or 4U" #endif } // namespace QP #define QF_MPOOL_EL(evType_) struct { \ QP::QFreeBlock sto_[((sizeof(evType_) - 1U) / (2U * sizeof(void *))) + 
1U]; \ } $declare ${QF::QFreeBlock} $declare ${QF::QMPool} #endif // QMPOOL_HPP_ #ifndef QV_HPP_ #define QV_HPP_ $declare ${QV::QV-base} //============================================================================ // interface used only for internal implementation, but not in applications #ifdef QP_IMPL $declare ${QV-impl} $declare ${QF_EPOOL-impl} #endif // QP_IMPL #endif // QV_HPP_ #ifndef QK_HPP_ #define QK_HPP_ $declare ${QK::QSchedStatus} $declare ${QK::QK-base} extern "C" { $declare ${QK-extern-C} } // extern "C" //============================================================================ // interface used only for internal implementation, but not in applications #ifdef QP_IMPL $declare ${QK-impl} $declare ${QF_EPOOL-impl} #endif // QP_IMPL #endif // QK_HPP_ #ifndef QSTAMP_HPP_ #define QSTAMP_HPP_ namespace QP { extern char const BUILD_DATE[12]; extern char const BUILD_TIME[9]; } // namespace QP #endif // QSTAMP_HPP_ #ifndef QPCPP_HPP_ #define QPCPP_HPP_ //============================================================================ #include "qp_port.hpp" // QP port from the port directory #include "qsafe.h" // QP Functional Safety (FuSa) Subsystem #ifdef Q_SPY // software tracing enabled? #include "qs_port.hpp" // QS/C++ port from the port directory #else #include "qs_dummy.hpp" // QS/C++ dummy (inactive) interface #endif //============================================================================ #ifndef QP_API_VERSION #define QP_API_VERSION 0 #endif // QP_API_VERSION // QP API compatibility layer... //============================================================================ #if (QP_API_VERSION < 750) #define QM_SM_STATE_DECL(subm_, state_) error "submachines no longer supported" #define qm_super_sub(sm_state_) error "submachines no longer supported" #define qm_tran_ep(tatbl_) error "submachines no longer supported" #define qm_tran_xp(xp_, tatbl_) error "submachines no longer supported" #define qm_sm_exit(sm_state_) error "submachines no longer supported" #ifdef QEVT_DYN_CTOR //! @deprecated #QEVT_DYN_CTOR, please use #QEVT_PAR_INIT #define QEVT_PAR_INIT #endif //! @deprecated plain 'char' is no longer forbidden in MISRA/AUTOSAR-C++ using char_t = char; //! @deprecated assertion failure handler //! Use Q_onError() instead. #define Q_onAssert(module_, id_) Q_onError(module_, id_) //! @deprecated #Q_NASSERT preprocessor switch to disable QP assertions #ifdef Q_NASSERT // #Q_UNSAFE now replaces the functionality of Q_NASSERT #define Q_UNSAFE //! @deprecated general purpose assertion with user-specified ID //! number that **always** evaluates the `expr_` expression. #define Q_ALLEGE_ID(id_, expr_) (static_cast<void>(expr_)) #elif defined Q_UNSAFE //! @deprecated general purpose assertion with user-specified ID //! number that **always** evaluates the `expr_` expression. #define Q_ALLEGE_ID(id_, expr_) (static_cast<void>(expr_)) #else // QP FuSa Subsystem enabled //! @deprecated general purpose assertion with user-specified ID //! number that **always** evaluates the `expr_` expression. //! @note //! The use of this macro is no longer recommended. #define Q_ALLEGE_ID(id_, expr_) if (!(expr_)) { \ QF_CRIT_STAT \ QF_CRIT_ENTRY(); \ Q_onError(&Q_this_module_[0], (id_)); \ QF_CRIT_EXIT(); \ } else ((void)0) #endif //! @deprecated general purpose assertion without ID number //! that **always** evaluates the `expr_` expression. //! Instead of ID number, this macro is based on the standard //! `__LINE__` macro. //! //! @note The use of this macro is no longer recommended. 
#define Q_ALLEGE(expr_) Q_ALLEGE_ID(__LINE__, (expr_)) //! Static (compile-time) assertion. //! //! @deprecated //! Use Q_ASSERT_STATIC() or better yet `static_assert()` instead. //! #define Q_ASSERT_COMPILE(expr_) Q_ASSERT_STATIC(expr_) //! @deprecated use QP::QF::NO_MARGIN instead #define QF_NO_MARGIN QP::QF::NO_MARGIN //============================================================================ #if (QP_API_VERSION < 691) //! @deprecated enable the QS global filter #define QS_FILTER_ON(rec_) QS_GLB_FILTER((rec_)) //! @deprecated disable the QS global filter #define QS_FILTER_OFF(rec_) QS_GLB_FILTER(-(rec_)) //! @deprecated enable the QS local filter for SM (state machine) object #define QS_FILTER_SM_OBJ(obj_) (static_cast<void>(0)) //! @deprecated enable the QS local filter for AO (active objects) #define QS_FILTER_AO_OBJ(obj_) (static_cast<void>(0)) //! @deprecated enable the QS local filter for MP (memory pool) object #define QS_FILTER_MP_OBJ(obj_) (static_cast<void>(0)) //! @deprecated enable the QS local filter for EQ (event queue) object #define QS_FILTER_EQ_OBJ(obj_) (static_cast<void>(0)) //! @deprecated enable the QS local filter for TE (time event) object #define QS_FILTER_TE_OBJ(obj_) (static_cast<void>(0)) #ifdef Q_SPY //! @deprecated local Filter for a generic application object `obj_`. #define QS_FILTER_AP_OBJ(obj_) \ (QP::QS::filt_.loc_AP = (obj_)) //! @deprecated begin of a user QS record, instead use QS_BEGIN_ID() #define QS_BEGIN(rec_, obj_) \ if (QS_GLB_FILTER_(rec_) && \ ((QP::QS::filt_.loc[QP::QS::AP_OBJ] == nullptr) \ || (QP::QS::filt_.loc_AP == (obj_)))) \ { \ QS_CRIT_STAT \ QS_CRIT_ENTRY(); \ QP::QS::beginRec_(static_cast<std::uint_fast8_t>(rec_)); \ QS_TIME_PRE(); //! @deprecated output hex-formatted std::uint32_t to the QS record #define QS_U32_HEX(width_, data_) \ (QP::QS::u32_fmt_(static_cast<std::uint8_t>( \ (static_cast<std::uint8_t>((width_) << 4)) | QS_HEX_FMT), (data_))) #else #define QS_FILTER_AP_OBJ(obj_) (static_cast<void>(0)) #define QS_BEGIN(rec_, obj_) if (false) { #define QS_U32_HEX(width_, data_) (Q_UNUSED_PAR(0)) #endif // def Q_SPY //============================================================================ #if (QP_API_VERSION < 680) //! @deprecated //! Macro to specify a tran. in the "me->" impl-strategy. //! Instead use the new impl-strategy without the "me->" pointer, where //! you call tran(Q_STATE_CAST(target_)). #define Q_TRAN(target_) (me->tran(Q_STATE_CAST(target_))) //! @deprecated //! Macro to specify a tran-to-history in the "me->" impl-strategy. //! Instead use the new impl-strategy without the "me->" pointer, where //! you call tran_hist(Q_STATE_CAST(hist_)). #define Q_TRAN_HIST(hist_) (me->tran_hist((hist_))) //! @deprecated //! Macro to specify the superstate in the "me->" impl-strategy. //! Instead use the new impl-strategy without the "me->" pointer, where //! you call super(state_)). #define Q_SUPER(state_) (me->super(Q_STATE_CAST(state_))) //! @deprecated //! Macro to call in a QM state entry-handler. Applicable only to QMSMs. //! Instead use the new impl-strategy without the "me->" pointer, where //! the QM-generated code calls qm_entry(Q_STATE_CAST(state_)). #define QM_ENTRY(state_) (me->qm_entry((state_))) //! @deprecated //! Macro to call in a QM state exit-handler. Applicable only to QMSMs. //! Instead use the new impl-strategy without the "me->" pointer, where //! the QM-generated code calls qm_exit(Q_STATE_CAST(state_)). #define QM_EXIT(state_) (me->qm_exit((state_))) //! @deprecated //! 
Macro to call in a QM state-handler when it executes a tran. //! Instead use the new impl-strategy without the "me->" pointer, where //! the QM-generated code calls qm_tran((tatbl_)). #define QM_TRAN(tatbl_) (me->qm_tran((tatbl_))) //! @deprecated //! Macro to call in a QM state-handler when it executes an initial tran. //! Instead use the new impl-strategy without the "me->" pointer, where //! the QM-generated code calls qm_tran_init((tatbl_)). #define QM_TRAN_INIT(tatbl_) (me->qm_tran_init((tatbl_))) //! @deprecated //! Macro to call in a QM state-handler when it executes a tran-to-history. //! Instead use the new impl-strategy without the "me->" pointer, where //! the QM-generated code calls qm_tran_hist((history_), (tatbl_)). #define QM_TRAN_HIST(history_, tatbl_) \ (me->qm_tran_hist((history_), (tatbl_))) #endif // QP_API_VERSION < 680 #endif // QP_API_VERSION < 691 #endif // QP_API_VERSION < 750 #endif // QPCPP_HPP_ #define QP_IMPL // this is QP implementation #include "qp_port.hpp" // QP port #include "qsafe.h" // QP Functional Safety (FuSa) Subsystem #ifdef Q_SPY // QS software tracing enabled? #include "qs_port.hpp" // QS port #include "qs_pkg.hpp" // QS facilities for pre-defined trace records #else #include "qs_dummy.hpp" // disable the QS software tracing #endif // Q_SPY //============================================================================ //! @cond INTERNAL // unnamed namespace for local definitions with internal linkage namespace { Q_DEFINE_THIS_MODULE("qep_hsm") // immutable events corresponding to the reserved signals. static QP::QEvt const l_reservedEvt_[4] { QP::QEvt(static_cast<QP::QSignal>(QP::QHsm::Q_EMPTY_SIG)), QP::QEvt(static_cast<QP::QSignal>(QP::QHsm::Q_ENTRY_SIG)), QP::QEvt(static_cast<QP::QSignal>(QP::QHsm::Q_EXIT_SIG)), QP::QEvt(static_cast<QP::QSignal>(QP::QHsm::Q_INIT_SIG)) }; // maximum depth of state nesting in a QHsm (including the top level) // must be >= 3 static constexpr std::int_fast8_t QHSM_MAX_NEST_DEPTH_ {6}; } // unnamed namespace // helper macro to handle reserved event in an QHsm #define QHSM_RESERVED_EVT_(state_, sig_) \ ((*(state_))(this, &l_reservedEvt_[(sig_)])) // helper macro to trace state entry #define QS_STATE_ENTRY_(state_, qsId_) \ QS_CRIT_ENTRY(); \ QS_MEM_SYS(); \ QS_BEGIN_PRE(QS_QEP_STATE_ENTRY, (qsId_)) \ QS_OBJ_PRE(this); \ QS_FUN_PRE(state_); \ QS_END_PRE() \ QS_MEM_APP(); \ QS_CRIT_EXIT() // helper macro to trace state exit #define QS_STATE_EXIT_(state_, qsId_) \ QS_CRIT_ENTRY(); \ QS_MEM_SYS(); \ QS_BEGIN_PRE(QS_QEP_STATE_EXIT, (qsId_)) \ QS_OBJ_PRE(this); \ QS_FUN_PRE(state_); \ QS_END_PRE() \ QS_MEM_APP(); \ QS_CRIT_EXIT() //! @endcond //============================================================================ $define ${QEP::versionStr[]} $define ${QEP::QHsm} #define QP_IMPL // this is QP implementation #include "qp_port.hpp" // QP port #include "qsafe.h" // QP Functional Safety (FuSa) Subsystem #ifdef Q_SPY // QS software tracing enabled? #include "qs_port.hpp" // QS port #include "qs_pkg.hpp" // QS facilities for pre-defined trace records #else #include "qs_dummy.hpp" // disable the QS software tracing #endif // Q_SPY //============================================================================ //! 
#define QP_IMPL             // this is QP implementation
#include "qp_port.hpp"      // QP port
#include "qsafe.h"          // QP Functional Safety (FuSa) Subsystem
#ifdef Q_SPY                // QS software tracing enabled?
    #include "qs_port.hpp"  // QS port
    #include "qs_pkg.hpp"   // QS facilities for pre-defined trace records
#else
    #include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY

//============================================================================
//! @cond INTERNAL

// unnamed namespace for local definitions with internal linkage
namespace {

Q_DEFINE_THIS_MODULE("qep_msm")

// top-state object for QMsm-style state machines
QP::QMState const l_msm_top_s = {
    nullptr, nullptr, nullptr, nullptr, nullptr
};

// maximum depth of state nesting in a QMsm (including the top level)
static constexpr std::int_fast8_t QMSM_MAX_NEST_DEPTH_ {8};

// maximum length of transition-action array
static constexpr std::int_fast8_t QMSM_MAX_TRAN_LENGTH_ {2*QMSM_MAX_NEST_DEPTH_};

// maximum depth of entry levels in an MSM for tran. to history.
static constexpr std::int_fast8_t QMSM_MAX_ENTRY_DEPTH_ {4};

} // unnamed namespace

//! @endcond
//============================================================================

$define ${QEP::QMsm}

#define QP_IMPL             // this is QP implementation
#include "qp_port.hpp"      // QP port
#include "qp_pkg.hpp"       // QP package-scope interface
#include "qsafe.h"          // QP Functional Safety (FuSa) Subsystem
#ifdef Q_SPY                // QS software tracing enabled?
    #include "qs_port.hpp"  // QS port
    #include "qs_pkg.hpp"   // QS facilities for pre-defined trace records
#else
    #include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY

// unnamed namespace for local definitions with internal linkage
namespace {
//Q_DEFINE_THIS_MODULE("qf_act")
} // unnamed namespace

$define ${QF::QActive::registry_[QF_MAX_ACTIVE + 1U]}
$define ${QF::QF-pkg}
$define ${QF::types::QF_LOG2}

#ifndef Q_UNSAFE
$define ${QF::types::QPtrDis}
#endif

#define QP_IMPL             // this is QP implementation
#include "qp_port.hpp"      // QP port
#include "qp_pkg.hpp"       // QP package-scope interface
#include "qsafe.h"          // QP Functional Safety (FuSa) Subsystem
#ifdef Q_SPY                // QS software tracing enabled?
    #include "qs_port.hpp"  // QS port
    #include "qs_pkg.hpp"   // QS facilities for pre-defined trace records
#else
    #include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY

//============================================================================
// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qf_actq")
} // unnamed namespace

$define ${QF::QActive::post_}
$define ${QF::QActive::postLIFO}
$define ${QF::QActive::get_}
$define ${QF::QTicker}

#define QP_IMPL             // this is QP implementation
#include "qp_port.hpp"      // QP port
#include "qp_pkg.hpp"       // QP package-scope interface
#include "qsafe.h"          // QP Functional Safety (FuSa) Subsystem
#ifdef Q_SPY                // QS software tracing enabled?
    #include "qs_port.hpp"  // QS port
    #include "qs_pkg.hpp"   // QS facilities for pre-defined trace records
#else
    #include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY

// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qf_defer")
} // unnamed namespace

$define ${QF::QActive::defer}
$define ${QF::QActive::recall}
$define ${QF::QActive::flushDeferred}

#define QP_IMPL             // this is QP implementation
#include "qp_port.hpp"      // QP port
#include "qp_pkg.hpp"       // QP package-scope interface
#include "qsafe.h"          // QP Functional Safety (FuSa) Subsystem
#ifdef Q_SPY                // QS software tracing enabled?
    #include "qs_port.hpp"  // QS port
    #include "qs_pkg.hpp"   // QS facilities for pre-defined trace records
#else
    #include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY

#if (QF_MAX_EPOOL > 0U) // mutable events configured?
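//----------------------------------------------------------------------------
// [Illustrative sketch -- NOT part of the QP/C++ framework source]
// Mutable (dynamically allocated) events are served from event pools that
// the application initializes at startup. The fragment below is a minimal
// sketch of that usage; the event type `TempEvt`, the signal TEMP_SIG and
// the pool sizing are hypothetical and chosen only for illustration.
#if 0 // example only, excluded from compilation
class TempEvt : public QP::QEvt {
public:
    std::int16_t temperature; // event parameter
};

static QF_MPOOL_EL(TempEvt) smlPoolSto[10]; // storage for the event pool

int main() {
    QP::QF::init();
    // initialize one event pool (when multiple pools are used, they must
    // be initialized in increasing block-size order)
    QP::QF::poolInit(smlPoolSto, sizeof(smlPoolSto), sizeof(smlPoolSto[0]));
    // ... start active objects, run the framework
}

// later, inside an active object or an ISR:
//     TempEvt *te = Q_NEW(TempEvt, TEMP_SIG); // allocate a mutable event
//     te->temperature = 42;
//     // post or publish `te`; the framework recycles it automatically
//     // (garbage collection) once all recipients have processed it
#endif // illustrative example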
// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qf_dyn")
} // unnamed namespace

$define ${QF::QF-dyn}

#endif // (QF_MAX_EPOOL > 0U) mutable events configured

#define QP_IMPL             // this is QP implementation
#include "qp_port.hpp"      // QP port
#include "qp_pkg.hpp"       // QP package-scope interface
#include "qsafe.h"          // QP Functional Safety (FuSa) Subsystem
#ifdef Q_SPY                // QS software tracing enabled?
    #include "qs_port.hpp"  // QS port
    #include "qs_pkg.hpp"   // QS facilities for pre-defined trace records
#else
    #include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY

// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qf_mem")
} // unnamed namespace

$define ${QF::QMPool}

#define QP_IMPL             // this is QP implementation
#include "qp_port.hpp"      // QP port
#include "qp_pkg.hpp"       // QP package-scope interface
#include "qsafe.h"          // QP Functional Safety (FuSa) Subsystem
#ifdef Q_SPY                // QS software tracing enabled?
    #include "qs_port.hpp"  // QS port
    #include "qs_pkg.hpp"   // QS facilities for pre-defined trace records
#else
    #include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY

// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qf_qact")
} // unnamed namespace

$define ${QF::QActive::QActive}
$define ${QF::QActive::register_}
$define ${QF::QActive::unregister_}

#define QP_IMPL             // this is QP implementation
#include "qp_port.hpp"      // QP port
#include "qp_pkg.hpp"       // QP package-scope interface
#include "qsafe.h"          // QP Functional Safety (FuSa) Subsystem
#ifdef Q_SPY                // QS software tracing enabled?
    #include "qs_port.hpp"  // QS port
    #include "qs_pkg.hpp"   // QS facilities for pre-defined trace records
#else
    #include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY

// unnamed namespace for local definitions with internal linkage
namespace {
//Q_DEFINE_THIS_MODULE("qf_qmact")
} // unnamed namespace

$define ${QF::QMActive}

#define QP_IMPL             // this is QP implementation
#include "qp_port.hpp"      // QP port
#include "qp_pkg.hpp"       // QP package-scope interface
#include "qsafe.h"          // QP Functional Safety (FuSa) Subsystem
#ifdef Q_SPY                // QS software tracing enabled?
    #include "qs_port.hpp"  // QS port
    #include "qs_pkg.hpp"   // QS facilities for pre-defined trace records
#else
    #include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY

// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qf_qeq")
} // unnamed namespace

$define ${QF::QEQueue}
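//----------------------------------------------------------------------------
// [Illustrative sketch -- NOT part of the QP/C++ framework source]
// A raw QP::QEQueue is often used by an active object as a buffer for
// deferred events, together with QActive::defer()/recall() defined earlier.
// The fragment below is a minimal sketch of that pattern; the class name,
// the queue length, and the state names mentioned in the comments are
// hypothetical and used only for illustration.
#if 0 // example only, excluded from compilation
class Server : public QP::QActive {
private:
    QP::QEQueue m_deferredQueue;      // native QP queue for deferred events
    QP::QEvt const *m_deferredSto[5]; // storage for the deferral queue
public:
    Server()
      : QActive(Q_STATE_CAST(&Server::initial)) // initial pseudostate
    {
        m_deferredQueue.init(m_deferredSto, Q_DIM(m_deferredSto));
    }
protected:
    static QP::QState initial(void * const me, QP::QEvt const * const e);
    // inside a "busy" state handler:
    //     static_cast<void>(defer(&m_deferredQueue, e)); // defer the event
    // on entry to the "idle" state:
    //     recall(&m_deferredQueue); // recall one deferred event, if any
};
#endif // illustrative example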
#include "qs_port.hpp" // QS port #include "qs_pkg.hpp" // QS facilities for pre-defined trace records #else #include "qs_dummy.hpp" // disable the QS software tracing #endif // Q_SPY // unnamed namespace for local definitions with internal linkage namespace { Q_DEFINE_THIS_MODULE("qf_ps") } // unnamed namespace $define ${QF::QActive::subscrList_} $define ${QF::QActive::maxPubSignal_} $define ${QF::QActive::psInit} $define ${QF::QActive::publish_} $define ${QF::QActive::subscribe} $define ${QF::QActive::unsubscribe} $define ${QF::QActive::unsubscribeAll} #define QP_IMPL // this is QP implementation #include "qp_port.hpp" // QP port #include "qp_pkg.hpp" // QP package-scope interface #include "qsafe.h" // QP Functional Safety (FuSa) Subsystem #ifdef Q_SPY // QS software tracing enabled? #include "qs_port.hpp" // QS port #include "qs_pkg.hpp" // QS facilities for pre-defined trace records #else #include "qs_dummy.hpp" // disable the QS software tracing #endif // Q_SPY // unnamed namespace for local definitions with internal linkage namespace { Q_DEFINE_THIS_MODULE("qf_time") } // unnamed namespace $define ${QF::QTimeEvt} #define QP_IMPL // this is QP implementation #include "qp_port.hpp" // QP port #include "qp_pkg.hpp" // QP package-scope interface #include "qsafe.h" // QP Functional Safety (FuSa) Subsystem #ifdef Q_SPY // QS software tracing enabled? #include "qs_port.hpp" // QS port #include "qs_pkg.hpp" // QS facilities for pre-defined trace records #else #include "qs_dummy.hpp" // disable the QS software tracing #endif // Q_SPY // protection against including this source file in a wrong project #ifndef QV_HPP_ #error "Source file included in a project NOT based on the QV kernel" #endif // QV_HPP_ // unnamed namespace for local definitions with internal linkage namespace { Q_DEFINE_THIS_MODULE("qv") } // unnamed namespace $define ${QV::QV-base} $define ${QV::QF-cust} $define ${QV::QActive} #define QP_IMPL // this is QP implementation #include "qp_port.hpp" // QP port #include "qp_pkg.hpp" // QP package-scope interface #include "qsafe.h" // QP Functional Safety (FuSa) Subsystem #ifdef Q_SPY // QS software tracing enabled? #include "qs_port.hpp" // QS port #include "qs_pkg.hpp" // QS facilities for pre-defined trace records #else #include "qs_dummy.hpp" // disable the QS software tracing #endif // Q_SPY // protection against including this source file in a wrong project #ifndef QK_HPP_ #error "Source file included in a project NOT based on the QK kernel" #endif // QK_HPP_ // unnamed namespace for local definitions with internal linkage namespace { Q_DEFINE_THIS_MODULE("qk") } // unnamed namespace $define ${QK::QK-base} extern "C" { $define ${QK-extern-C} } // extern "C" $define ${QK::QF-cust} $define ${QK::QActive} #include "qstamp.hpp" namespace QP { char const BUILD_DATE[12] = __DATE__; char const BUILD_TIME[9] = __TIME__; } // namespace QP