QP/C++ Real-Time Embedded Framework (RTEF)
The model is used to generate the whole QP/C++ source code.
Copyright (C) 2005 Quantum Leaps, LLC <state-machine.com>.
SPDX-License-Identifier: GPL-3.0-or-later OR LicenseRef-QL-commercial
This software is dual-licensed under the terms of the open source GNU
General Public License version 3 (or any later version), or alternatively,
under the terms of one of the closed source Quantum Leaps commercial
licenses.
The terms of the open source GNU General Public License version 3
can be found at: <www.gnu.org/licenses/gpl-3.0>
The terms of the closed source Quantum Leaps commercial licenses
can be found at: <www.state-machine.com/licensing>
Redistributions in source code must retain this copyright notice.
Plagiarizing this software to sidestep the license obligations is illegal.
Contact information:
<www.state-machine.com/licensing>
<info@state-machine.com>
//! alias for line numbers in assertions and return from QF::run()
= int;
//! alias for enumerations used for event signals
= int;
//! alias for 32-bit IEEE 754 floating point numbers
//!
//! @note
//! QP does not use floating-point types anywhere in the internal
//! implementation, except in QS software tracing, where utilities for
//! output of floating-point numbers are provided for application-specific
//! trace records.
= float;
//! alias for 64-bit IEEE 754 floating point numbers
//!
//! @note
//! QP does not use floating-point types anywhere in the internal
//! implementation, except in QS software tracing, where utilities for
//! output of floating-point numbers are provided for application-specific
//! trace records.
= double;
//! The size (in bytes) of the signal of an event. Valid values:
//! 1U, 2U, or 4U; default 2U
//!
//! @details
//! This macro can be defined in the QEP port file (qep_port.hpp) to
//! configure the QP::QSignal type. When the macro is not defined, the
//! default of 2 bytes is applied.
2U
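// Illustrative sketch (not part of the generated code): overriding the
// signal-size configuration in the QEP port file. The macro name
// Q_SIGNAL_SIZE follows the QP convention; verify the exact name against
// the qep_port.hpp of the QP version in use.
//
// qep_port.hpp
#define Q_SIGNAL_SIZE 4U  // configure QP::QSignal as a 32-bit type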
//! Macro to generate a declaration of a state-handler, state-caller and
//! a state-object for a given state in a subclass of QP::QHsm.
\
QP::QState state_ ## _h(QP::QEvt const * const e); \
static QP::QState state_(void * const me, QP::QEvt const * const e)
//! Macro to generate a declaration of a state-handler, state-caller and
//! a state-object for a given state in a subclass of QP::QHsm.
\
QP::QState subclass_::state_(void * const me, QP::QEvt const * const e) { \
return static_cast<subclass_ *>(me)->state_ ## _h(e); } \
QP::QState subclass_::state_ ## _h(QP::QEvt const * const e)
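// Illustrative sketch of what the two macros above generate for a
// hypothetical QP::QHsm subclass `Blinky` with a state `off` (the names
// are examples only, not part of QP):
//
// inside the class declaration (generated by the declaration macro):
//     QP::QState off_h(QP::QEvt const * const e);
//     static QP::QState off(void * const me, QP::QEvt const * const e);
//
// in the implementation file (generated by the definition macro):
//     QP::QState Blinky::off(void * const me, QP::QEvt const * const e) {
//         return static_cast<Blinky *>(me)->off_h(e);
//     }
//     QP::QState Blinky::off_h(QP::QEvt const * const e) {
//         // ... state-handler body written by the application
//     }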
//! Macro to specify that the event was handled
(Q_RET_HANDLED)
//! Macro to specify that the event was NOT handled
//! due to a guard condition evaluating to 'false'
(Q_RET_UNHANDLED)
//! Perform downcast of an event onto a subclass of QEvt `subclass_`
//!
//! @details
//! This macro encapsulates the downcast of QEvt pointers, which violates
//! MISRA-C 2004 rule 11.4(advisory). This macro helps to localize this
//! deviation.
(static_cast<subclass_ const *>(e))
//! Macro to perform casting to QStateHandler.
//!
//! @details
//! This macro encapsulates the cast of a specific state handler function
//! pointer to QStateHandler, which violates MISRA-C 2004 rule 11.4(advisory).
//! This macro helps to localize this deviation.
\
(reinterpret_cast<QP::QStateHandler>(handler_))
//! Macro to perform casting to QActionHandler.
//!
//! @details
//! This macro encapsulates the cast of a specific action handler function
//! pointer to QActionHandler, which violates MISRA-C 2004 rule 11.4 (advisory).
//! This macro helps to localize this deviation.
\
(reinterpret_cast<QP::QActionHandler>(act_))
//! Macro to generate a declaration of a state-handler, state-caller and
//! a state-object for a given state in a subclass of QP::QMsm.
\
QP::QState state_ ## _h(QP::QEvt const * const e); \
static QP::QState state_(void * const me, QP::QEvt const * const e); \
static QP::QMState const state_ ## _s
//! Macro to generate a declaration of a state-handler, state-caller and
//! a state-object for a given *submachine* state in a subclass of QP::QMsm.
\
QP::QState state_ ## _h(QP::QEvt const * const e);\
static QP::QState state_(void * const me, QP::QEvt const * const e); \
static SM_ ## subm_ const state_ ## _s
//! Macro to generate a declaration of an action-handler and action-caller
//! in a subclass of QP::QMsm.
\
QP::QState action_ ## _h(); \
static QP::QState action_(void * const me)
//! Macro to generate a definition of a state-caller and state-handler
//! for a given state in a subclass of QP::QMsm.
\
QP::QState subclass_::state_(void * const me, QP::QEvt const * const e) { \
return static_cast<subclass_ *>(me)->state_ ## _h(e); } \
QP::QState subclass_::state_ ## _h(QP::QEvt const * const e)
//! Macro to generate a definition of an action-caller and action-handler
//! in a subclass of QP::QMsm.
\
QP::QState subclass_::action_(void * const me) { \
return static_cast<subclass_ *>(me)->action_ ## _h(); } \
QP::QState subclass_::action_ ## _h()
//! Macro for a QM action-handler when it handles the event.
(Q_RET_HANDLED)
//! Macro for a QM action-handler when it does not handle the event
//! due to a guard condition evaluating to false.
(Q_RET_UNHANDLED)
//! Macro for a QM action-handler when it passes the event to the superstate
(Q_RET_SUPER)
//! Macro to provide strictly-typed zero-state to use for submachines.
//! Applicable to subclasses of QP::QMsm.
(nullptr)
//! Macro to provide strictly-typed zero-action to terminate action lists
//! in the transition-action-tables in QP::QMsm.
(nullptr)
//! Helper macro to clearly mark unused parameters of functions.
(static_cast<void>(par_))
//! Helper macro to calculate static dimension of a 1-dim `array_`
//!
//! @param[in] array_ 1-dimensional array
//!
//! @returns
//! the length of the array (number of elements it can hold)
(sizeof(array_) / sizeof((array_)[0U]))
//! Perform cast from unsigned integer `uint_` to pointer of type `type_`
//!
//! @details
//! This macro encapsulates the cast to (type_ *), which QP ports or
//! application might use to access embedded hardware registers.
//! Such uses can trigger PC-Lint "Note 923: cast from int to pointer"
//! and this macro helps to encapsulate this deviation.
(reinterpret_cast<type_ *>(uint_))
//! Initializer of static constant QEvt instances
//!
//! @details
//! This macro encapsulates the ugly casting of enumerated signals
//! to QSignal and constants for QEvt.poolId_ and QEvt.refCtr_.
{ static_cast<QP::QSignal>(sig_), 0U, 0U }
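// Illustrative sketch (hypothetical signal name TIMEOUT_SIG): using the
// initializer shown above to define a static, immutable event. With
// poolId_ == 0 the framework treats the event as immutable and never
// attempts to recycle it.
enum : QP::QSignal { TIMEOUT_SIG = 4U }; // example user signal
static QP::QEvt const timeoutEvt = { static_cast<QP::QSignal>(TIMEOUT_SIG), 0U, 0U };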
//! the current QP version number string based on QP_VERSION_STR
{QP_VERSION_STR};
//! QSignal represents the signal of an event
//!
//! @details
//! The relationship between an event and a signal is as follows. A signal
//! in UML is the specification of an asynchronous stimulus that triggers
//! reactions, and as such is an essential part of an event. (The signal
//! conveys the type of the occurrence--what happened?) However, an event
//! can also contain additional quantitative information about the
//! occurrence in form of event parameters.
= std::uint16_t;
= std::uint8_t;
= std::uint32_t;
//! Event class
//!
//! @details
//! QP::QEvt represents events without parameters and serves as the
//! base class for derivation of events with parameters.
//!
//! @note
//! When #Q_EVT_CTOR and #Q_EVT_VIRTUAL are NOT defined, the QP::QEvt is
//! a POD (Plain Old Data). Otherwise, it is a class with constructors
//! and virtual destructor.
//!
//! @usage
//! The following example illustrates how to add an event parameter by
//! inheriting from the QP::QEvt class.
//! @include qep_qevt.cpp
//! signal of the event instance
//! @tr{RQP002}
//! pool ID (0 for static, immutable event)
//! @tr{RQP003}
//! reference counter (only used for dynamic, mutable events)
//! @tr{RQP003}
noexcept
//! QP::QEvt constructor when the macro #Q_EVT_CTOR is defined
: sig(s)
// poolId_/refCtr_ intentionally uninitialized
noexcept
//! QP::QEvt constructor (overload for static, immutable events)
: sig(s),
poolId_(0U),
refCtr_(0U)
noexcept
//! QP::QEvt virtual destructor when the macro #Q_EVT_VIRTUAL is defined
// empty
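// Illustrative sketch (hypothetical names): adding an event parameter by
// inheriting from QP::QEvt, as described in the class notes above. When
// #Q_EVT_CTOR is not defined, the subclass remains a POD and can be
// brace-initialized or allocated with Q_NEW().
struct CalibEvt : public QP::QEvt {
    float gain; // event parameter carried with the event
};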
//! Type returned from state-handler functions
= std::uint_fast8_t;
//! Pointer to state-handler function
= QState (*)(void * const me, QEvt const * const e);
//! Pointer to an action-handler function
= QState (*)(void * const me);
//! forward declaration
//! Pointer to an extended thread-handler function
= void (*)(QXThread * const me);
//! State object for the QP::QMsm class (QM State Machine).
//!
//! @details
//! This class groups together the attributes of a QP::QMsm state, such as
//! the parent state (state nesting), the associated state handler function
//! and the exit action handler function. These attributes are used inside
//! the QP::QMsm::dispatch() and QP::QMsm::init() functions.
//!
//! @attention
//! The QP::QMState class is only intended for the QM code generator and
//! should not be used in hand-crafted code.
{
QMState const * superstate; //!< superstate of this state
QStateHandler const stateHandler; //!< state handler function
QActionHandler const entryAction; //!< entry action handler function
QActionHandler const exitAction; //!< exit action handler function
QActionHandler const initAction; //!< init action handler function
};
//! Transition-Action Table for the QP::QMsm State Machine.
{
QMState const * target; //!< target of the transition
QActionHandler const act[1]; //!< array of actions
};
//! Attribute type for the QP::QHsm class (Hierarchical State Machine)
//!
//! @details
//! This union represents possible values stored in the 'state' and 'temp'
//! attributes of the QP::QHsm class.
{
QStateHandler fun; //!< pointer to a state handler function
QActionHandler act; //!< pointer to an action-handler function
QXThreadHandler thr; //!< pointer to a thread-handler function
QMState const *obj; //!< pointer to QMState object
QMTranActTable const *tatbl; //!< transition-action table
};
//! Offset for the user signals (the 4 lowest signal values are reserved by QP)
{4};
//! Hierarchical State Machine abstract base class (ABC)
//!
//! @details
//! QP::QHsm represents a Hierarchical State Machine (HSM) with full support
//! for hierarchical nesting of states, entry/exit actions, and initial
//! transitions in any composite state.<br>
//!
//! QP::QHsm is also the base class for the QP::QMsm state machine, which
//! provides better efficiency, but requires the use of the QM modeling tool
//! to generate code. QP::QMsm inherits QP::QHsm without adding new
//! attributes, so both classes take the same amount of RAM.
//!
//! @note
//! QP::QHsm is not intended to be instantiated directly, but rather serves as
//! the base class for derivation of state machines in the application code.
//!
//! @usage
//! The following example illustrates how to derive a state machine class
//! from QP::QHsm.
//! @include qep_qhsm.cpp
//! current active state (the state-variable)
//! temporary: transition chain, target state, etc.
//! Maximum nesting depth of states in HSM
{6};
// friends...
//! Reserved signals by the HSM-style state machine
//! implementation strategy.
: QSignal {
Q_EMPTY_SIG, //!< signal to execute the default case
Q_ENTRY_SIG, //!< signal for entry actions
Q_EXIT_SIG, //!< signal for exit actions
Q_INIT_SIG //!< signal for nested initial transitions
};
//! All possible return values from state-handlers
: std::uint_fast8_t {
// unhandled and need to "bubble up"
Q_RET_SUPER, //!< event passed to superstate to handle
Q_RET_SUPER_SUB, //!< event passed to submachine superstate
Q_RET_UNHANDLED, //!< event unhandled due to a guard
// handled and do not need to "bubble up"
Q_RET_HANDLED, //!< event handled (internal transition)
Q_RET_IGNORED, //!< event silently ignored (bubbled up to top)
// entry/exit
Q_RET_ENTRY, //!< state entry action executed
Q_RET_EXIT, //!< state exit action executed
// no side effects
Q_RET_NULL, //!< return value without any effect
// transitions need to execute transition-action table in QP::QMsm
Q_RET_TRAN, //!< regular transition
Q_RET_TRAN_INIT, //!< initial transition in a state or submachine
Q_RET_TRAN_EP, //!< entry-point transition into a submachine
// transitions that additionally clobber QHsm.m_state
Q_RET_TRAN_HIST, //!< transition to history of a given state
Q_RET_TRAN_XP //!< exit-point transition out of a submachine
};
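// Illustrative sketch (hypothetical state machine, states and signals): a
// hand-written QHsm state-handler body showing how the return values above
// are produced. Q_HANDLED(), Q_TRAN() and Q_SUPER() are the conventional QP
// shorthands for these helpers; verify the exact macro names against the
// QP version in use.
QP::QState Blinky::off_h(QP::QEvt const * const e) {
    QP::QState status_;
    switch (e->sig) {
        case Q_ENTRY_SIG: {                     // entry action
            status_ = Q_HANDLED();              // -> Q_RET_HANDLED
            break;
        }
        case TIMEOUT_SIG: {                     // hypothetical user signal
            status_ = Q_TRAN(&Blinky::on);      // -> Q_RET_TRAN ("on" is another state)
            break;
        }
        default: {
            status_ = Q_SUPER(&QP::QHsm::top);  // -> Q_RET_SUPER
            break;
        }
    }
    return status_;
}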
noexcept
//! protected constructor of QHsm
m_state.fun = Q_STATE_CAST(&top);
m_temp.fun = initial;
noexcept
//! virtual destructor
// empty
//! Executes the top-most initial transition in QP::QHsm
//!
//! @details
//! Executes the top-most initial transition in a HSM.
//!
//! @param[in] e pointer to an extra parameter (might be NULL)
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @note
//! Must be called exactly __once__ before the QP::QHsm::dispatch().
//!
//! @tr{RQP103} @tr{RQP120I} @tr{RQP120D}
#ifdef Q_SPY
if ((QS::priv_.flags & 0x01U) == 0U) {
QS::priv_.flags |= 0x01U;
QS_FUN_DICTIONARY(&QP::QHsm::top);
}
#else
Q_UNUSED_PAR(qs_id);
#endif
QStateHandler t = m_state.fun;
//! @pre ctor must have been executed and initial tran NOT taken
Q_REQUIRE_ID(200, (m_temp.fun != nullptr)
&& (t == Q_STATE_CAST(&top)));
// execute the top-most initial transition
QState r = (*m_temp.fun)(this, Q_EVT_CAST(QEvt));
// the top-most initial transition must be taken
Q_ASSERT_ID(210, r == Q_RET_TRAN);
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_STATE_INIT, qs_id)
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(t); // the source state
QS_FUN_PRE_(m_temp.fun); // the target of the initial transition
QS_END_PRE_()
// drill down into the state hierarchy with initial transitions...
do {
QStateHandler path[MAX_NEST_DEPTH_]; // tran entry path array
std::int_fast8_t ip = 0; // entry path index
path[0] = m_temp.fun;
static_cast<void>(hsm_reservedEvt_(this, m_temp.fun, Q_EMPTY_SIG));
while (m_temp.fun != t) {
++ip;
Q_ASSERT_ID(220, ip < MAX_NEST_DEPTH_);
path[ip] = m_temp.fun;
static_cast<void>(hsm_reservedEvt_(this, m_temp.fun, Q_EMPTY_SIG));
}
m_temp.fun = path[0];
// retrace the entry path in reverse (desired) order...
do {
hsm_state_entry_(this, path[ip], qs_id); // enter path[ip]
--ip;
} while (ip >= 0);
t = path[0]; // current state becomes the new source
r = hsm_reservedEvt_(this, t, Q_INIT_SIG); // execute initial tran.
#ifdef Q_SPY
if (r == Q_RET_TRAN) {
QS_BEGIN_PRE_(QS_QEP_STATE_INIT, qs_id)
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(t); // the source state
QS_FUN_PRE_(m_temp.fun); // the target of the initial tran.
QS_END_PRE_()
}
#endif // Q_SPY
} while (r == Q_RET_TRAN);
QS_BEGIN_PRE_(QS_QEP_INIT_TRAN, qs_id)
QS_TIME_PRE_(); // time stamp
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(t); // the new active state
QS_END_PRE_()
m_state.fun = t; // change the current active state
m_temp.fun = t; // mark the configuration as stable
//! overloaded init(qs_id)
//!
//! @details
//! Executes the top-most initial transition in a HSM (overloaded).
//!
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @attention
//! QHsm::init() must be called exactly **once** before
//! QHsm::dispatch()
init(nullptr, qs_id);
//! Dispatches an event to QP::QHsm
//!
//! @details
//! Dispatches an event for processing to a hierarchical state machine.
//! The event dispatching represents one run-to-completion (RTC) step.
//!
//! @param[in] e pointer to the event to be dispatched to the HSM
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @attention
//! This state machine must be initialized by calling QP::QHsm::init()
//! exactly **once** before calling QP::QHsm::dispatch().
//!
//! @tr{RQP103}
//! @tr{RQP120A} @tr{RQP120B} @tr{RQP120C} @tr{RQP120D} @tr{RQP120E}
#ifndef Q_SPY
Q_UNUSED_PAR(qs_id);
#endif
QStateHandler t = m_state.fun;
QS_CRIT_STAT_
//! @pre the current state must be initialized and
//! the state configuration must be stable
Q_REQUIRE_ID(400, (t != nullptr)
&& (t == m_temp.fun));
QS_BEGIN_PRE_(QS_QEP_DISPATCH, qs_id)
QS_TIME_PRE_(); // time stamp
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(t); // the current state
QS_END_PRE_()
QStateHandler s;
QState r;
// process the event hierarchically...
//! @tr{RQP120A}
do {
s = m_temp.fun;
r = (*s)(this, e); // invoke state handler s
if (r == Q_RET_UNHANDLED) { // unhandled due to a guard?
QS_BEGIN_PRE_(QS_QEP_UNHANDLED, qs_id)
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(s); // the current state
QS_END_PRE_()
r = hsm_reservedEvt_(this, s, Q_EMPTY_SIG); // find superstate of s
}
} while (r == Q_RET_SUPER);
// regular transition taken?
//! @tr{RQP120E}
if (r >= Q_RET_TRAN) {
QStateHandler path[MAX_NEST_DEPTH_];
path[0] = m_temp.fun; // save the target of the transition
path[1] = t;
path[2] = s;
// exit current state to transition source s...
//! @tr{RQP120C}
for (; t != s; t = m_temp.fun) {
// exit handled?
if (hsm_state_exit_(this, t, qs_id)) {
// find superstate of t
static_cast<void>(hsm_reservedEvt_(this, t, Q_EMPTY_SIG));
}
}
std::int_fast8_t ip = hsm_tran(path, qs_id); // the HSM transition
#ifdef Q_SPY
if (r == Q_RET_TRAN_HIST) {
QS_BEGIN_PRE_(QS_QEP_TRAN_HIST, qs_id)
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(t); // the source of the transition
QS_FUN_PRE_(path[0]); // the target of the tran. to history
QS_END_PRE_()
}
#endif // Q_SPY
// execute state entry actions in the desired order...
//! @tr{RQP120B}
for (; ip >= 0; --ip) {
hsm_state_entry_(this, path[ip], qs_id); // enter path[ip]
}
t = path[0]; // stick the target into register
m_temp.fun = t; // update the next state
// drill into the target hierarchy...
//! @tr{RQP120I}
while (hsm_reservedEvt_(this, t, Q_INIT_SIG) == Q_RET_TRAN) {
QS_BEGIN_PRE_(QS_QEP_STATE_INIT, qs_id)
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(t); // the source (pseudo)state
QS_FUN_PRE_(m_temp.fun); // the target of the transition
QS_END_PRE_()
ip = 0;
path[0] = m_temp.fun;
// find superstate
static_cast<void>(hsm_reservedEvt_(this, m_temp.fun, Q_EMPTY_SIG));
while (m_temp.fun != t) {
++ip;
path[ip] = m_temp.fun;
// find superstate
static_cast<void>(
hsm_reservedEvt_(this, m_temp.fun, Q_EMPTY_SIG));
}
m_temp.fun = path[0];
// entry path must not overflow
Q_ASSERT_ID(410, ip < MAX_NEST_DEPTH_);
// retrace the entry path in reverse (correct) order...
do {
hsm_state_entry_(this, path[ip], qs_id); // enter path[ip]
--ip;
} while (ip >= 0);
t = path[0];
}
QS_BEGIN_PRE_(QS_QEP_TRAN, qs_id)
QS_TIME_PRE_(); // time stamp
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(s); // the source of the transition
QS_FUN_PRE_(t); // the new active state
QS_END_PRE_()
}
#ifdef Q_SPY
else if (r == Q_RET_HANDLED) {
QS_BEGIN_PRE_(QS_QEP_INTERN_TRAN, qs_id)
QS_TIME_PRE_(); // time stamp
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(s); // the source state
QS_END_PRE_()
}
else {
QS_BEGIN_PRE_(QS_QEP_IGNORED, qs_id)
QS_TIME_PRE_(); // time stamp
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(m_state.fun);// the current state
QS_END_PRE_()
}
#else
Q_UNUSED_PAR(qs_id); // when Q_SPY not defined
#endif // Q_SPY
m_state.fun = t; // change the current active state
m_temp.fun = t; // mark the configuration as stable
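// Illustrative sketch (hypothetical names): the typical call sequence for a
// hand-driven QHsm -- construct the state machine, take the top-most initial
// transition exactly once with init(), then dispatch events in
// run-to-completion steps. In a QP application the event loop is normally
// driven by QF/QActive rather than by hand. Assumes QEvt is configured as
// a POD (no #Q_EVT_CTOR).
Blinky blinky;                                 // hypothetical QHsm subclass
blinky.init(0U);                               // top-most initial transition (once)
static QP::QEvt const te = { static_cast<QP::QSignal>(TIMEOUT_SIG), 0U, 0U };
blinky.dispatch(&te, 0U);                      // one RTC step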
noexcept
//! The top-state handler
//!
//! @details
//! The QHsm::top() state handler is the ultimate root of state
//! hierarchy in all HSMs derived from QP::QHsm.
//!
//! @param[in] me pointer to the HSM instance
//! @param[in] e pointer to the event to be dispatched to the HSM
//!
//! @returns
//! Always returns #Q_RET_IGNORED, which means that the top state ignores
//! all events.
//!
//! @note
//! The parameters to this state handler are not used. They are provided
//! for conformance with the state-handler function signature
//! QP::QStateHandler.
//!
//! @tr{RQP103} @tr{RQP120T}
Q_UNUSED_PAR(me);
Q_UNUSED_PAR(e);
return Q_RET_IGNORED; // the top state ignores all events
const noexcept
//! Obtain the current state (state handler function)
//!
//! @note used in the QM code generation
return m_state.fun;
noexcept
//! Get the current state handler of the QP::QHsm
return m_state.fun;
noexcept
//! Tests if a given state is part of the current active state
//! configuration
//!
//! @details
//! Tests if a state machine derived from QHsm is-in a given state.
//!
//! @note
//! For a HSM, to "be in a state" means also to be in a superstate of
//! of the state.
//!
//! @param[in] s pointer to the state-handler function to be tested
//!
//! @returns
//! 'true' if the HSM is in the state `s` and 'false' otherwise
//!
//! @tr{RQP103}
//! @tr{RQP120S}
//! @pre state configuration must be stable
Q_REQUIRE_ID(600, m_temp.fun == m_state.fun);
bool inState = false; // assume that this HSM is not in 'state'
// scan the state hierarchy bottom-up
QState r;
do {
// do the states match?
if (m_temp.fun == s) {
inState = true; // 'true' means that match found
r = Q_RET_IGNORED; // cause breaking out of the loop
}
else {
r = hsm_reservedEvt_(this, m_temp.fun, Q_EMPTY_SIG);
}
} while (r != Q_RET_IGNORED); // QHsm::top() state not reached
m_temp.fun = m_state.fun; // restore the stable state configuration
return inState; // return the status
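// Illustrative sketch (hypothetical Blinky HSM as in the earlier sketches):
// querying the active state configuration. The query method is assumed to
// be named isIn(), as in QP/C++; for an HSM it also returns true for
// superstates of the current leaf state, as noted above.
if (blinky.isIn(Q_STATE_CAST(&Blinky::off))) {
    // Blinky is in "off" or in any of its substates
}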
noexcept
//! Obtain the current active child state of a given parent
//!
//! @note used in the QM code generation
QStateHandler child = m_state.fun; // start with the current state
bool isFound = false; // start with the child not found
// establish stable state configuration
m_temp.fun = m_state.fun;
QState r;
do {
// is this the parent of the current child?
if (m_temp.fun == parent) {
isFound = true; // child is found
r = Q_RET_IGNORED; // cause breaking out of the loop
}
else {
child = m_temp.fun;
r = hsm_reservedEvt_(this, m_temp.fun, Q_EMPTY_SIG);
}
} while (r != Q_RET_IGNORED); // QHsm::top() state not reached
m_temp.fun = m_state.fun; // establish stable state configuration
//! @post the child must be confirmed
Q_ENSURE_ID(810, isFound);
#ifdef Q_NASSERT
Q_UNUSED_PAR(isFound);
#endif
return child; // return the child
noexcept
//! Helper function to specify a state transition
m_temp.fun = target;
return Q_RET_TRAN;
noexcept
//! Helper function to specify a transition to history
m_temp.fun = hist;
return Q_RET_TRAN_HIST;
noexcept
//! Helper function to specify the superstate of a given state
m_temp.fun = superstate;
return Q_RET_SUPER;
noexcept
//! Helper function to specify a regular state transition
//! in a QM state-handler
m_temp.tatbl = static_cast<QP::QMTranActTable const *>(tatbl);
return Q_RET_TRAN;
noexcept
//! Helper function to specify an initial state transition
//! in a QM state-handler
m_temp.tatbl = static_cast<QP::QMTranActTable const *>(tatbl);
return Q_RET_TRAN_INIT;
noexcept
//! Helper function to specify a transition to history
//! in a QM state-handler
m_state.obj = hist;
m_temp.tatbl = static_cast<QP::QMTranActTable const *>(tatbl);
return Q_RET_TRAN_HIST;
noexcept
//! Helper function to specify a transition to an entry point
//! to a submachine state in a QM state-handler
m_temp.tatbl = static_cast<QP::QMTranActTable const *>(tatbl);
return Q_RET_TRAN_EP;
noexcept
//! Helper function to specify a transition to an exit point
//! from a submachine state in a QM state-handler
m_state.act = xp;
m_temp.tatbl = static_cast<QP::QMTranActTable const *>(tatbl);
return Q_RET_TRAN_XP;
noexcept
//! Helper function to specify a state entry in a QM state-handler
m_temp.obj = s;
return Q_RET_ENTRY;
noexcept
//! Helper function to specify a state entry in a QM state-handler
static_cast<void>(s); // unused parameter
return Q_RET_ENTRY;
noexcept
//! Helper function to specify a state exit in a QM state-handler
m_temp.obj = s;
return Q_RET_EXIT;
noexcept
//! Helper function to specify a state exit in a QM state-handler
static_cast<void>(s); // unused parameter
return Q_RET_EXIT;
noexcept
//! Helper function to specify a submachine exit in a QM state-handler
m_temp.obj = s;
return Q_RET_EXIT;
noexcept
//! Helper function to call in a QM state-handler when it passes
//! the event to the host submachine state for processing.
m_temp.obj = s;
return Q_RET_SUPER_SUB;
//! @details
//! helper function to execute transition sequence in a hierarchical
//! state machine (HSM).
//!
//! @param[in,out] path array of pointers to state-handler functions
//! to execute the entry actions
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @returns
//! the depth of the entry path stored in the `path` parameter.
//!
//! @tr{RQP103}
//! @tr{RQP120E} @tr{RQP120F}
#ifndef Q_SPY
Q_UNUSED_PAR(qs_id);
#endif
std::int_fast8_t ip = -1; // transition entry path index
QStateHandler t = path[0];
QStateHandler const s = path[2];
// (a) check source==target (transition to self)
if (s == t) {
// exit the source
static_cast<void>(hsm_state_exit_(this, s, qs_id));
ip = 0; // enter the target
}
else {
// superstate of target
static_cast<void>(hsm_reservedEvt_(this, t, Q_EMPTY_SIG));
t = m_temp.fun;
// (b) check source==target->super
if (s == t) {
ip = 0; // enter the target
}
else {
// superstate of src
static_cast<void>(hsm_reservedEvt_(this, s, Q_EMPTY_SIG));
// (c) check source->super==target->super
if (m_temp.fun == t) {
// exit the source
static_cast<void>(hsm_state_exit_(this, s, qs_id));
ip = 0; // enter the target
}
else {
// (d) check source->super==target
if (m_temp.fun == path[0]) {
// exit the source
static_cast<void>(hsm_state_exit_(this, s, qs_id));
}
else {
// (e) check rest of source==target->super->super..
// and store the entry path along the way
std::int_fast8_t iq = 0; // indicate that LCA was found
ip = 1; // enter target and its superstate
path[1] = t; // save the superstate of target
t = m_temp.fun; // save source->super
// find target->super->super
QState r = hsm_reservedEvt_(this, path[1], Q_EMPTY_SIG);
while (r == Q_RET_SUPER) {
++ip;
path[ip] = m_temp.fun; // store the entry path
if (m_temp.fun == s) { // is it the source?
// indicate that the LCA was found
iq = 1;
// entry path must not overflow
Q_ASSERT_ID(510, ip < MAX_NEST_DEPTH_);
--ip; // do not enter the source
r = Q_RET_HANDLED; // terminate the loop
}
// it is not the source, keep going up
else {
r = hsm_reservedEvt_(this, m_temp.fun, Q_EMPTY_SIG);
}
}
// the LCA not found yet?
if (iq == 0) {
// entry path must not overflow
Q_ASSERT_ID(520, ip < MAX_NEST_DEPTH_);
// exit the source
static_cast<void>(hsm_state_exit_(this, s, qs_id));
// (f) check the rest of source->super
// == target->super->super...
//
iq = ip;
r = Q_RET_IGNORED; // indicate LCA NOT found
do {
// is this the LCA?
if (t == path[iq]) {
r = Q_RET_HANDLED; // indicate LCA found
ip = iq - 1; // do not enter LCA
iq = -1; // cause termination of the loop
}
else {
--iq; // try lower superstate of target
}
} while (iq >= 0);
// LCA not found yet?
if (r != Q_RET_HANDLED) {
// (g) check each source->super->...
// for each target->super...
//
r = Q_RET_IGNORED; // keep looping
do {
// exit from t handled?
if (hsm_state_exit_(this, t, qs_id)) {
// find superstate of t
static_cast<void>(
hsm_reservedEvt_(this, t, Q_EMPTY_SIG));
}
t = m_temp.fun; // set to super of t
iq = ip;
do {
// is this LCA?
if (t == path[iq]) {
ip = iq - 1; // do not enter LCA
iq = -1; // break out of inner loop
r = Q_RET_HANDLED; // break outer loop
}
else {
--iq;
}
} while (iq >= 0);
} while (r != Q_RET_HANDLED);
}
}
}
}
}
}
return ip;
//! QM State Machine implementation strategy
//!
//! @details
//! QP::QMsm (QM State Machine) provides a more efficient state machine
//! implementation strategy than QP::QHsm. It requires the use of the QM
//! modeling tool to generate code, but it is the fastest strategy and needs
//! the least run-time support (the smallest event processor taking up the
//! least code space).
//!
//! @note
//! QP::QMsm is not intended to be instantiated directly, but rather serves as
//! the base class for derivation of state machines in the application code.
//!
//! @usage
//! The following example illustrates how to derive a state machine class
//! from QP::QMsm:
//! @include qep_qmsm.cpp
//! the top state object for the QP::QMsm
= {
nullptr,
nullptr,
nullptr,
nullptr,
nullptr
};
// friends...
//! maximum depth of implemented entry levels for transitions to history
{4};
override
//! Performs the second step of SM initialization by triggering
//! the top-most initial transition.
//!
//! @details
//! Executes the top-most initial transition in a MSM.
//!
//! @param[in] e pointer to an extra parameter (might be nullptr)
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @attention
//! QMsm::init() must be called exactly **once** before QMsm::dispatch()
//! @pre the top-most initial transition must be initialized, and the
//! initial transition must not be taken yet.
Q_REQUIRE_ID(200, (m_temp.fun != nullptr)
&& (m_state.obj == &msm_top_s));
// execute the top-most initial tran.
QState r = (*m_temp.fun)(this, Q_EVT_CAST(QEvt));
// initial tran. must be taken
Q_ASSERT_ID(210, r == Q_RET_TRAN_INIT);
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_STATE_INIT, qs_id)
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(m_state.obj->stateHandler); // source handler
QS_FUN_PRE_(m_temp.tatbl->target->stateHandler); // target handler
QS_END_PRE_()
// set state to the last tran. target
m_state.obj = m_temp.tatbl->target;
// drill down into the state hierarchy with initial transitions...
do {
r = execTatbl_(m_temp.tatbl, qs_id); // execute the tran-action table
} while (r >= Q_RET_TRAN_INIT);
QS_BEGIN_PRE_(QS_QEP_INIT_TRAN, qs_id)
QS_TIME_PRE_(); // time stamp
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(m_state.obj->stateHandler); // the new current state
QS_END_PRE_()
Q_UNUSED_PAR(qs_id); // when Q_SPY not defined
noexcept
//! Protected constructor
//! @details
//! Performs the first step of initialization by assigning the initial
//! pseudostate to the currently active state of the state machine.
//!
//! @param[in] initial the top-most initial transition for the MSM.
//!
//! @note
//! The constructor is protected to prevent direct instantiating of the
//! QP::QMsm objects. This class is intended for subclassing only.
//!
//! @sa
//! The QP::QMsm example illustrates how to use the QMsm constructor
//! in the constructor initializer list of the derived state machines.
: QHsm(initial)
m_state.obj = &msm_top_s;
m_temp.fun = initial;
override
//! overloaded init(qs_id)
//!
//! @details
//! Executes the top-most initial transition in a MSM (overloaded).
//!
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @attention
//! QMsm::init() must be called exactly **once** before QMsm::dispatch()
QMsm::init(nullptr, qs_id);
override
//! Dispatches an event to a MSM
//!
//! @details
//! Dispatches an event for processing to a meta state machine (MSM).
//! The processing of an event represents one run-to-completion (RTC) step.
//!
//! @param[in] e pointer to the event to be dispatched to the MSM
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @note
//! Must be called after QMsm::init().
QMState const *s = m_state.obj; // store the current state
//! @pre current state must be initialized
Q_REQUIRE_ID(300, s != nullptr);
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_DISPATCH, qs_id)
QS_TIME_PRE_(); // time stamp
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(s->stateHandler); // the current state handler
QS_END_PRE_()
// scan the state hierarchy up to the top state...
QMState const *t = s;
QState r;
do {
r = (*t->stateHandler)(this, e); // call state handler function
// event handled? (the most frequent case)
if (r >= Q_RET_HANDLED) {
break; // done scanning the state hierarchy
}
// event unhandled and passed to the superstate?
else if (r == Q_RET_SUPER) {
t = t->superstate; // advance to the superstate
}
// event unhandled and passed to a submachine superstate?
else if (r == Q_RET_SUPER_SUB) {
t = m_temp.obj; // current host state of the submachine
}
// event unhandled due to a guard?
else if (r == Q_RET_UNHANDLED) {
QS_BEGIN_PRE_(QS_QEP_UNHANDLED, qs_id)
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(t->stateHandler); // the current state
QS_END_PRE_()
t = t->superstate; // advance to the superstate
}
else {
// no other return value should be produced
Q_ERROR_ID(310);
}
} while (t != nullptr);
// any kind of transition taken?
if (r >= Q_RET_TRAN) {
#ifdef Q_SPY
QMState const * const ts = t; // transition source for QS tracing
// the transition source state must not be nullptr
Q_ASSERT_ID(320, ts != nullptr);
#endif // Q_SPY
do {
// save the transition-action table before it gets clobbered
QMTranActTable const * const tatbl = m_temp.tatbl;
QHsmAttr tmp; // temporary to save intermediate values
// was TRAN, TRAN_INIT, or TRAN_EP taken?
if (r <= Q_RET_TRAN_EP) {
m_temp.obj = nullptr; // clear
exitToTranSource_(s, t, qs_id);
r = execTatbl_(tatbl, qs_id);
s = m_state.obj;
}
// was a transition segment to history taken?
else if (r == Q_RET_TRAN_HIST) {
tmp.obj = m_state.obj; // save history
m_state.obj = s; // restore the original state
exitToTranSource_(s, t, qs_id);
static_cast<void>(execTatbl_(tatbl, qs_id));
r = enterHistory_(tmp.obj, qs_id);
s = m_state.obj;
}
// was a transition segment to an exit point taken?
else if (r == Q_RET_TRAN_XP) {
tmp.act = m_state.act; // save XP action
m_state.obj = s; // restore the original state
r = (*tmp.act)(this); // execute the XP action
if (r == Q_RET_TRAN) { // XP -> TRAN ?
#ifdef Q_SPY
tmp.tatbl = m_temp.tatbl; // save m_temp
#endif // Q_SPY
exitToTranSource_(s, t, qs_id);
// take the tran-to-XP segment inside submachine
static_cast<void>(execTatbl_(tatbl, qs_id));
s = m_state.obj;
#ifdef Q_SPY
m_temp.tatbl = tmp.tatbl; // restore m_temp
#endif // Q_SPY
}
else if (r == Q_RET_TRAN_HIST) { // XP -> HIST ?
tmp.obj = m_state.obj; // save the history
m_state.obj = s; // restore the original state
#ifdef Q_SPY
s = m_temp.obj; // save m_temp
#endif // Q_SPY
exitToTranSource_(m_state.obj, t, qs_id);
// take the tran-to-XP segment inside submachine
static_cast<void>(execTatbl_(tatbl, qs_id));
#ifdef Q_SPY
m_temp.obj = s; // restore me->temp
#endif // Q_SPY
s = m_state.obj;
m_state.obj = tmp.obj; // restore the history
}
else {
// TRAN_XP must NOT be followed by any other tran type
Q_ASSERT_ID(330, r < Q_RET_TRAN);
}
}
else {
// no other return value should be produced
Q_ERROR_ID(340);
}
t = s; // set target to the current state
} while (r >= Q_RET_TRAN);
QS_BEGIN_PRE_(QS_QEP_TRAN, qs_id)
QS_TIME_PRE_(); // time stamp
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(ts->stateHandler); // the transition source
QS_FUN_PRE_(s->stateHandler); // the new active state
QS_END_PRE_()
}
#ifdef Q_SPY
// was the event handled?
else if (r == Q_RET_HANDLED) {
// internal tran. source can't be nullptr
Q_ASSERT_ID(340, t != nullptr);
QS_BEGIN_PRE_(QS_QEP_INTERN_TRAN, qs_id)
QS_TIME_PRE_(); // time stamp
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(t->stateHandler); // the source state
QS_END_PRE_()
}
// event bubbled to the 'top' state?
else if (t == nullptr) {
QS_BEGIN_PRE_(QS_QEP_IGNORED, qs_id)
QS_TIME_PRE_(); // time stamp
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(s->stateHandler); // the current state
QS_END_PRE_()
}
#endif // Q_SPY
else {
// empty
}
Q_UNUSED_PAR(qs_id); // when Q_SPY not defined
const noexcept
//! Tests if a given state is part of the active state configuration
//!
//! @details
//! Tests if a state machine derived from QMsm is-in a given state.
//!
//! @note
//! For a MSM, to "be-in" a state means also to "be-in" a superstate of
//! of the state.
//!
//! @param[in] st pointer to the QMState object that corresponds to the
//! tested state.
//! @returns
//! 'true' if the MSM is in the \c st and 'false' otherwise
bool inState = false; // assume that this MSM is not in 'state'
for (QMState const *s = m_state.obj;
s != nullptr;
s = s->superstate)
{
if (s == st) {
inState = true; // match found, return 'true'
break;
}
}
return inState;
noexcept = delete
//! disallow inherited top() function in QP::QMsm and subclasses
//! @sa QMsm::msm_top_s
const noexcept
//! Return the current active state object (read only)
return m_state.obj;
const noexcept
//! Obtain the current active child state of a given parent (read only)
//!
//! @details
//! Finds the child state of the given `parent`, such that this child
//! state is an ancestor of the currently active state. The main purpose
//! of this function is to support **shallow history** transitions in
//! state machines derived from QHsm.
//!
//! @param[in] parent pointer to the state-handler function
//!
//! @returns
//! the child of a given `parent` state, which is an ancestor of the
//! currently active state
//!
//! @note
//! this function is designed to be called during state transitions, so it
//! does not necessarily start in a stable state configuration.
//! However, the function establishes stable state configuration upon exit.
//!
//! @tr{RQP103}
//! @tr{RQP120H}
QMState const *child = m_state.obj;
bool isFound = false; // start with the child not found
QMState const *s;
for (s = m_state.obj; s != nullptr; s = s->superstate) {
if (s == parent) {
isFound = true; // child is found
break;
}
else {
child = s;
}
}
if (!isFound) { // still not found?
for (s = m_temp.obj; s != nullptr; s = s->superstate) {
if (s == parent) {
isFound = true; // child is found
break;
}
else {
child = s;
}
}
}
//! @post the child must be found
Q_ENSURE_ID(810, isFound);
#ifdef Q_NASSERT
Q_UNUSED_PAR(isFound);
#endif
return child; // return the child
noexcept override
//! Get the current state handler of the QMsm
return m_state.obj->stateHandler;
//! Internal helper function to execute a transition-action table
//!
//! @details
//! Helper function to execute transition sequence in a tran-action table.
//!
//! @param[in] tatbl pointer to the transition-action table
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @returns
//! the status of the last action from the transition-action table.
//!
//! @note
//! This function is for internal use inside the QEP event processor and
//! should **not** be called directly from the applications.
//! @pre the transition-action table pointer must not be nullptr
Q_REQUIRE_ID(400, tatbl != nullptr);
QState r = Q_RET_NULL;
QS_CRIT_STAT_
for (QActionHandler const *a = &tatbl->act[0]; *a != nullptr; ++a) {
r = (*(*a))(this); // call the action through the 'a' pointer
#ifdef Q_SPY
if (r == Q_RET_ENTRY) {
QS_BEGIN_PRE_(QS_QEP_STATE_ENTRY, qs_id)
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(m_temp.obj->stateHandler); // entered state handler
QS_END_PRE_()
}
else if (r == Q_RET_EXIT) {
QS_BEGIN_PRE_(QS_QEP_STATE_EXIT, qs_id)
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(m_temp.obj->stateHandler); // exited state handler
QS_END_PRE_()
}
else if (r == Q_RET_TRAN_INIT) {
QS_BEGIN_PRE_(QS_QEP_STATE_INIT, qs_id)
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(tatbl->target->stateHandler); // source
QS_FUN_PRE_(m_temp.tatbl->target->stateHandler); // target
QS_END_PRE_()
}
else if (r == Q_RET_TRAN_EP) {
QS_BEGIN_PRE_(QS_QEP_TRAN_EP, qs_id)
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(tatbl->target->stateHandler); // source
QS_FUN_PRE_(m_temp.tatbl->target->stateHandler); // target
QS_END_PRE_()
}
else if (r == Q_RET_TRAN_XP) {
QS_BEGIN_PRE_(QS_QEP_TRAN_XP, qs_id)
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(tatbl->target->stateHandler); // source
QS_FUN_PRE_(m_temp.tatbl->target->stateHandler); // target
QS_END_PRE_()
}
else {
// empty
}
#endif // Q_SPY
}
Q_UNUSED_PAR(qs_id); // when Q_SPY not defined
m_state.obj = (r >= Q_RET_TRAN)
? m_temp.tatbl->target
: tatbl->target;
return r;
//! Internal helper function to exit current state to transition source
//!
//! @details
//! Helper function to exit the current state configuration to the
//! transition source, which in a hierarchical state machine might be a
//! superstate of the current state.
//!
//! @param[in] s pointer to the current state
//! @param[in] ts pointer to the transition source state
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
// exit states from the current state to the tran. source state
while (s != ts) {
// exit action provided in state 's'?
if (s->exitAction != nullptr) {
// execute the exit action
(*s->exitAction)(this);
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_STATE_EXIT, qs_id)
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(s->stateHandler); // the exited state handler
QS_END_PRE_()
}
s = s->superstate; // advance to the superstate
// reached the top of a submachine?
if (s == nullptr) {
s = m_temp.obj; // the superstate from QM_SM_EXIT()
Q_ASSERT_ID(510, s != nullptr);
}
}
Q_UNUSED_PAR(qs_id); // when Q_SPY not defined
//! Internal helper function to enter state history
//!
//! @details
//! Static helper function to execute the segment of a transition to
//! history after entering the composite state.
//!
//! @param[in] hist pointer to the history substate
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @returns
//! ::Q_RET_TRAN_INIT, if an initial transition has been executed in the
//! last entered state, or ::Q_RET_NULL if no such transition was taken.
QMState const *s = hist;
QMState const *ts = m_state.obj; // transition source
QMState const *epath[MAX_ENTRY_DEPTH_];
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_TRAN_HIST, qs_id)
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(ts->stateHandler); // source state handler
QS_FUN_PRE_(hist->stateHandler); // target state handler
QS_END_PRE_()
std::int_fast8_t i = 0; // entry path index
while (s != ts) {
if (s->entryAction != nullptr) {
Q_ASSERT_ID(620, i < MAX_ENTRY_DEPTH_);
epath[i] = s;
++i;
}
s = s->superstate;
if (s == nullptr) {
ts = s; // force exit from the loop
}
}
// retrace the entry path in reverse (desired) order...
while (i > 0) {
--i;
// run entry action in epath[i]
(*epath[i]->entryAction)(this);
QS_BEGIN_PRE_(QS_QEP_STATE_ENTRY, qs_id)
QS_OBJ_PRE_(this);
QS_FUN_PRE_(epath[i]->stateHandler); // entered state handler
QS_END_PRE_()
}
m_state.obj = hist; // set current state to the transition target
// initial tran. present?
QState r;
if (hist->initAction != nullptr) {
r = (*hist->initAction)(this); // execute the transition action
}
else {
r = Q_RET_NULL;
}
Q_UNUSED_PAR(qs_id); // when Q_SPY not defined
return r;
//! Maximum number of active objects (configurable value in qf_port.hpp)
//! Valid values: [1U..64U]; default 32U
32U
//! Maximum number of clock rates (configurable value in qf_port.hpp)
//! Valid values: [0U..15U]; default 1U
1U
//! Maximum number of event pools (configurable value in qf_port.hpp)
//! Valid values: [0U..15U]; default 3U
//!
//! @note
//! #QF_MAX_EPOOL set to zero means that dynamic events are NOT configured
//! and should not be used in the application.
3U
//! Size of the QTimeEvt counter (configurable value in qf_port.hpp)
//! Valid values: 1U, 2U, or 4U; default 4U
4U
//! Size of the event-size (configurable value in qf_port.hpp)
//! Valid values: 1U, 2U, or 4U; default 2U
2U
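// Illustrative sketch of a qf_port.hpp fragment overriding the configurable
// values listed above. QF_MAX_ACTIVE, QF_MAX_EPOOL and QF_TIMEEVT_CTR_SIZE
// are referenced elsewhere in this documentation; the remaining macro names
// follow the QP convention and should be checked against the actual port.
#define QF_MAX_ACTIVE        16U  // up to 16 active objects
#define QF_MAX_EPOOL          2U  // two event pools
#define QF_MAX_TICK_RATE      1U  // a single clock-tick rate
#define QF_TIMEEVT_CTR_SIZE   2U  // 16-bit time-event counters
#define QF_EVENT_SIZ_SIZE     2U  // 16-bit event-size type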
//! Create a QP::QPrioSpec object to specify priority of an AO or a thread
(static_cast<QP::QPrioSpec>((prio_) | (pthre_) << 8U))
//! Allocate a dynamic event (case when QP::QEvt is a POD)
//!
//! @details
//! The macro calls the internal QF function QF::newX_() with
//! margin == QF::NO_MARGIN, which causes an assertion when the event
//! cannot be successfully allocated.
//!
//! @param[in] evtT_ event type (class name) of the event to allocate
//! @param[in] sig_ signal to assign to the newly allocated event
//!
//! @returns a valid event pointer cast to the type `evtT_`.
//!
//! @note
//! If #Q_EVT_CTOR is defined, the Q_NEW() macro becomes variadic and
//! takes all the arguments needed by the constructor of the event
//! class being allocated. The constructor is then called by means
//! of the placement-new operator.
//!
//! @usage
//! The following example illustrates dynamic allocation of an event:
//! @include qf_post.cpp
(static_cast<evtT_ *>( \
QP::QF::newX_(sizeof(evtT_), QP::QF::NO_MARGIN, (sig_))))
//! Allocate a dynamic event (case when QP::QEvt is not a POD)
\
(new(QP::QF::newX_(sizeof(evtT_), QP::QF::NO_MARGIN, (sig_))) \
evtT_((sig_), ##__VA_ARGS__))
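// Illustrative sketch (hypothetical CalibEvt event and CALIB_SIG signal from
// the earlier QEvt sketch): allocating a dynamic event with Q_NEW(). Without
// #Q_EVT_CTOR the parameters are assigned after allocation; with #Q_EVT_CTOR
// the extra arguments would be passed to the event constructor instead.
CalibEvt *ce = Q_NEW(CalibEvt, CALIB_SIG); // asserts on allocation failure
ce->gain = 1.25F;                          // fill in the event parameter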
//! Allocate a dynamic event without asserting (case when QP::QEvt is a POD).
//!
//! @details
//! This macro allocates a new event and sets the pointer `e_`, while
//! leaving at least `margin_` of events still available in the pool
//!
//! @param[out] e_ pointer to the newly allocated event
//! @param[in] evtT_ event type (class name) of the event to allocate
//! @param[in] margin_ number of events that must remain available
//! in the given pool after this allocation. The
//! special value QF::NO_MARGIN causes asserting
//! failure in case event allocation fails.
//! @param[in] sig_ signal to assign to the newly allocated event
//!
//! @returns an event pointer cast to the type `evtT_` or NULL if the
//! event cannot be allocated with the specified `margin`.
//!
//! @note
//! If #Q_EVT_CTOR is defined, the Q_NEW_X() macro becomes variadic and
//! takes all the arguments needed by the constructor of the event
//! class being allocated. The constructor is then called by means
//! of the placement-new operator.
//!
//! @usage
//! The following example illustrates dynamic allocation of an event:
//! @include qf_postx.cpp
\
((e_) = static_cast<evtT_ *>(QP::QF::newX_( \
sizeof(evtT_), (margin_), (sig_))))
//! Allocate a dynamic event without asserting
//! (case when QP::QEvt is not a POD)
do { \
(e_) = static_cast<evtT_ *>( \
QP::QF::newX_(sizeof(evtT_), (margin_), (sig_))); \
if ((e_) != nullptr) { \
new((e_)) evtT_((sig_), ##__VA_ARGS__); \
} \
} while (false)
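// Illustrative sketch (hypothetical names): non-asserting allocation with
// Q_NEW_X(). The call leaves at least 2 free events in the pool and yields
// nullptr when that margin cannot be honored.
CalibEvt *ce;
Q_NEW_X(ce, CalibEvt, 2U, CALIB_SIG);
if (ce != nullptr) {       // allocation succeeded within the margin?
    ce->gain = 1.25F;
    // ... post or publish the event
}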
//! Create a new reference of the current event `e`
//!
//! @details
//! The current event processed by an active object is available only for
//! the duration of the run-to-completion (RTC) step. After that step, the
//! current event is no longer available and the framework might recycle
//! (garbage-collect) the event. The macro Q_NEW_REF() explicitly creates
//! a new reference to the current event that can be stored and used beyond
//! the current RTC step, until the reference is explicitly recycled by
//! means of the macro Q_DELETE_REF().
//!
//! @param[in,out] evtRef_ event reference to create
//! @param[in] evtT_ event type (class name) of the event reference
//!
//! @usage
//! The example **defer** in the directory `examples/win32/defer` illustrates
//! the use of Q_NEW_REF()
//!
//! @sa Q_DELETE_REF()
\
((evtRef_) = static_cast<evtT_ const *>(QP::QF::newRef_(e, (evtRef_))))
//! Delete the event reference
//!
//! @details
//! Every event reference created with the macro Q_NEW_REF() needs to be
//! eventually deleted by means of the macro Q_DELETE_REF() to avoid leaking
//! the event.
//!
//! @param[in,out] evtRef_ event reference to delete
//!
//! @usage
//! The example **defer** in the directory `examples/win32/defer` illustrates
//! the use of Q_DELETE_REF()
//!
//! @sa Q_NEW_REF()
do { \
QP::QF::deleteRef_((evtRef_)); \
(evtRef_) = 0U; \
} while (false)
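// Illustrative sketch (hypothetical event type and member variable): keeping
// the current event beyond the RTC step. Q_NEW_REF() must be used inside a
// state handler, because its expansion refers to the current event `e`; the
// reference is recycled later with Q_DELETE_REF().
Q_NEW_REF(m_deferredRef, CalibEvt);  // m_deferredRef is a CalibEvt const * member
// ... in a later RTC step, when the deferred event is no longer needed:
Q_DELETE_REF(m_deferredRef);         // recycle the event reference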
//! Invoke the event publishing facility QActive::publish_().
//!
//! @details
//! This macro is the recommended way of publishing events, because it
//! provides the vital information for software tracing and avoids any
//! overhead when the tracing is disabled.
//!
//! @param[in] e_ pointer to the posted event
//! @param[in] sender_ pointer to the sender object (actually used
//! only when #Q_SPY is defined)
//!
//! @note
//! The pointer to the `sender_` object is not necessarily a pointer
//! to an active object. In fact, if QActive::PUBLISH() is called from an
//! interrupt or other context, you can create a unique object just to
//! unambiguously identify the sender of the event.
//!
//! @sa QActive::publish_()
\
publish_((e_), (sender_), (sender_)->getPrio())
publish_((e_), nullptr, 0U)
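// Illustrative sketch (hypothetical event and signal): publishing an event
// to all subscribers from inside a member function of a QP::QActive
// subclass. The second argument identifies the sender for QS tracing only;
// `this` is typical inside an active object.
PUBLISH(Q_NEW(CalibEvt, CALIB_SIG), this);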
//! Invoke the direct event posting facility QActive::post_()
//!
//! @details
//! This macro asserts if the queue overflows and cannot accept the event.
//!
//! @param[in] e_ pointer to the event to post
//! @param[in] sender_ pointer to the sender object.
//!
//! @note
//! The `sender_` parameter is actually only used when QS tracing
//! is enabled (macro #Q_SPY is defined). When QS software tracing is
//! disabled, the POST() macro does not pass the `sender_` parameter,
//! so the overhead of passing this extra parameter is entirely avoided.
//!
//! @note
//! the pointer to the sender object is not necessarily a pointer to an
//! active object. In fact, if POST() is called from an interrupt or
//! other context, you can create a unique object just to unambiguously
//! identify the sender of the event.
//!
//! @sa QActive::post_()
post_((e_), QP::QF::NO_MARGIN, (sender_))
post_((e_), QP::QF::NO_MARGIN, nullptr)
//! Invoke the direct event posting facility QActive::post_()
//! without delivery guarantee
//!
//! @details
//! This macro does not assert if the queue overflows and cannot accept
//! the event with the specified margin of free slots remaining.
//!
//! @param[in] e_ pointer to the event to post
//! @param[in] margin_ the minimum free slots in the queue, which
//! must still be available after posting the event.
//! The special value QF::NO_MARGIN causes
//! asserting failure in case event posting fails.
//! @param[in] sender_ pointer to the sender object.
//!
//! @returns
//! 'true' if the posting succeeded, and 'false' if the posting
//! failed due to insufficient margin of free entries available in
//! the queue.
//!
//! @note
//! The `sender_` parameter is actually only used when QS tracing
//! is enabled (macro #Q_SPY is defined). When QS software tracing is
//! disabled, the POST_X() macro does not pass the `sender_` parameter,
//! so the overhead of passing this extra parameter is entirely avoided.
//!
//! @note
//! The pointer to the sender object is not necessarily a pointer
//! to an active object. In fact, if POST_X() is called from an
//! interrupt or other context, you can create a unique object just to
//! unambiguously identify the sender of the event.
//!
//! @usage
//! @include qf_postx.cpp
\
post_((e_), (margin_), (sender_))
post_((e_), (margin_), nullptr)
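// Illustrative sketch (hypothetical names): posting directly to an active
// object with a margin. AO_Blinky is assumed to be a QP::QActive pointer to
// the recipient. POST_X() returns false instead of asserting when fewer
// than 3 queue slots would remain free after the post.
if (!AO_Blinky->POST_X(Q_NEW(CalibEvt, CALIB_SIG), 3U, this)) {
    // posting failed due to insufficient margin; handle the failure
}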
//! Invoke the system clock tick processing QTimeEvt::tick_()
//!
//! @details
//! This macro is the recommended way of invoking clock tick processing,
//! because it provides the vital information for software tracing and
//! avoids any overhead when the tracing is disabled.
//!
//! @param[in] tickRate_ clock tick rate to be serviced through this call
//! @param[in] sender_ pointer to the sender object. This parameter
//! is actually only used when QS software tracing is enabled
//! (macro #Q_SPY is defined)
//! @note
//! When QS software tracing is disabled, the macro calls
//! QTimeEvt::tick_() without the `sender` parameter, so the overhead
//! of passing this extra parameter is entirely avoided.
//!
//! @note
//! The pointer to the sender object is not necessarily a pointer
//! to an active object. In fact, when TICK_X() is called from
//! an interrupt, you would create a unique object just to unambiguously
//! identify the ISR as the sender of the time events.
//!
//! @sa QTimeEvt::tick_()
tick_((tickRate_), (sender_))
tick_((tickRate_), nullptr)
//! Invoke the system clock tick processing for rate 0
//! @sa TICK_X()
TICK_X(0U, (sender_))
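// Illustrative sketch: servicing clock-tick rate 0 from a periodic
// interrupt. The sender argument only identifies the ISR in QS traces; the
// type of the tag object (QP::QSpyId here) is an assumption and should be
// checked against the QP port in use.
static QP::QSpyId const l_SysTick_Handler = { 0U };

extern "C" void SysTick_Handler(void) {
    QP::QTimeEvt::TICK_X(0U, &l_SysTick_Handler); // process time events for rate 0
}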
//! No-operation for exiting a critical section
//!
//! @details
//! In some QF ports the critical section exit takes effect only on the
//! next machine instruction. If this next instruction is another entry
//! to a critical section, the critical section won't really be exited,
//! but rather the two adjacent critical sections would be merged.
//! The QF_CRIT_EXIT_NOP() macro contains the minimal code required to
//! prevent such merging of critical sections in QF ports where it can
//! occur.
(static_cast<void>(0))
//! QF context switch callback used in built-in kernels
//!
//! @details
//! This callback function provides a mechanism to perform additional
//! custom operations when one of the built-in kernels switches context
//! from one thread to another.
//!
//! @param[in] prev pointer to the previous thread (active object)
//! (prev==0 means that `prev` was the QK idle loop)
//! @param[in] next pointer to the next thread (active object)
//! (next==0 means that `next` is the QK idle loop)
//! @attention
//! QF_onContextSw() is invoked with interrupts **disabled** and must also
//! return with interrupts **disabled**.
//!
//! @note
//! This callback is enabled by defining the macro #QF_ON_CONTEXT_SW.
//!
//! @include qf_oncontextsw.cpp
//! bitmask for the internal representation of QPSet elements
= std::uint16_t;
= std::uint32_t;
= std::uint8_t;
//! Data type to store the block-size defined based on the macro
//! #QF_TIMEEVT_CTR_SIZE.
//!
//! @details
//! The dynamic range of this data type determines the maximum block
//! size that can be managed by the pool.
= std::uint16_t;
= std::uint8_t;
= std::uint32_t;
//! Priority specification for Active Objects in QP
//!
//! @details
//! Active Object priorities in QP are integer numbers in the range
//! [1..#QF_MAX_ACTIVE], whereas the special priority number 0 is reserved
//! for the lowest-priority idle thread. The QP framework uses the *direct*
//! priority numbering, in which higher numerical values denote higher
//! urgency. For example, an AO with priority 32 has higher urgency than
//! an AO with priority 23.
//!
//! QP::QPrioSpec allows an application developer to assign **two**
//! priorities to a given AO (see also Q_PRIO()):
//!
//! 1. The "QF-priority", which resides in the least-significant byte
//! of the QP::QPrioSpec data type. The "QF-priority" must be **unique**
//! for each thread in the system and higher numerical values represent
//! higher urgency (direct priority numbering).
//!
//! 2. The "preemption-threshold" priority, which resides in the most-
//! significant byte of the ::QPrioSpec data type. The second priority
//! cannot be lower than the "QF-priority", but does NOT need to be
//! unique.
//!
//! In the QP native preemptive kernels, like QK and QXK, the "preemption-
//! threshold" priority is used as to implement the "preemption-threshold
//! scheduling" (PTS). It determines the conditions under which a given
//! thread can be *preempted* by other threads. Specifically, a given
//! thread can be preempted only by another thread with a *higher*
//! priority than the "preemption-threshold" of the original thread.
//!
//! ![QF-priority and preemption-threshold relations](qp-prio.png)
//!
//! @note
//! For backwards-compatibility, QP::QPrioSpec data type might contain only
//! the "QF-priority" component (and the "preemption-threshold" component
//! left at zero). In that case, the "preemption-threshold" will be assumed
//! to be the same as the "QF-priority". This corresponds exactly to the
//! previous semantics of AO priority.
//!
//! @remark
//! When QP runs on top of 3rd-party kernels/RTOSes or general-purpose
//! operating systems, the second priority can have a different meaning,
//! depending on the specific RTOS/GPOS used.
= std::uint16_t;
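// Illustrative sketch: composing a QPrioSpec with the Q_PRIO() macro
// documented earlier. The values are examples only.
QP::QPrioSpec const spec = Q_PRIO(3U, 10U);
// low byte  (3)  : unique QF-priority of the AO
// high byte (10) : preemption-threshold (not lower than the QF-priority)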
//! The scheduler lock status used in some real-time kernels
= std::uint_fast16_t;
//! Priority Set of up to #QF_MAX_ACTIVE elements
//!
//! @details
//! The priority set represents the set of active objects that are ready to
//! run and need to be considered by the scheduling algorithm. The set is
//! capable of storing up to #QF_MAX_ACTIVE priority levels, which can be
//! configured in the range 1..64, inclusive.
//! bitmask with a bit for each element
//! bitmasks with a bit for each element
noexcept
//! Make the priority set empty
#if (QF_MAX_ACTIVE <= 32U)
m_bits = 0U;
#else
m_bits[0] = 0U;
m_bits[1] = 0U;
#endif
const noexcept
//! Return 'true' if the priority set is empty
#if (QF_MAX_ACTIVE <= 32U)
return (m_bits == 0U);
#else
return (m_bits[0] == 0U) ? (m_bits[1] == 0U) : false;
#endif
const noexcept
//! Return 'true' if the priority set is NOT empty
#if (QF_MAX_ACTIVE <= 32U)
return (m_bits != 0U);
#else
return (m_bits[0] != 0U) ? true : (m_bits[1] != 0U);
#endif
const noexcept
//! Return 'true' if the priority set has the element n.
#if (QF_MAX_ACTIVE <= 32U)
return (m_bits & (1U << (n - 1U))) != 0U;
#else
return (n <= 32U)
? ((m_bits[0] & (static_cast<std::uint32_t>(1) << (n - 1U))) != 0U)
: ((m_bits[1] & (static_cast<std::uint32_t>(1) << (n - 33U))) != 0U);
#endif
noexcept
//! insert element `n` into the set (n = 1..QF_MAX_ACTIVE)
#if (QF_MAX_ACTIVE <= 32U)
m_bits = (m_bits | (1U << (n - 1U)));
#else
if (n <= 32U) {
m_bits[0] = (m_bits[0]
| (static_cast<std::uint32_t>(1) << (n - 1U)));
}
else {
m_bits[1] = (m_bits[1]
| (static_cast<std::uint32_t>(1) << (n - 33U)));
}
#endif
noexcept
//! Remove element `n` from the set (n = 1U..64U)
#if (QF_MAX_ACTIVE <= 32U)
m_bits = (m_bits & static_cast<QPSetBits>(
~(static_cast<QPSetBits>(1) << (n - 1U))));
#else
if (n <= 32U) {
(m_bits[0] = (m_bits[0]
& ~(static_cast<std::uint32_t>(1) << (n - 1U))));
}
else {
(m_bits[1] = (m_bits[1]
& ~(static_cast<std::uint32_t>(1) << (n - 33U))));
}
#endif
const noexcept
//! Find the maximum element in the set, returns zero if the set is empty
#if (QF_MAX_ACTIVE <= 32U)
return QF_LOG2(m_bits);
#else
return (m_bits[1] != 0U)
? (QF_LOG2(m_bits[1]) + 32U)
: (QF_LOG2(m_bits[0]));
#endif
const noexcept
//! Log-base-2 calculation when hardware acceleration
//! is NOT provided (#QF_LOG2 not defined).
static std::uint8_t const log2LUT[16] = {
0U, 1U, 2U, 2U, 3U, 3U, 3U, 3U,
4U, 4U, 4U, 4U, 4U, 4U, 4U, 4U
};
std::uint_fast8_t n = 0U;
QP::QPSetBits t;
#if (QF_MAX_ACTIVE > 16U)
t = static_cast<QP::QPSetBits>(x >> 16U);
if (t != 0U) {
n += 16U;
x = t;
}
#endif
#if (QF_MAX_ACTIVE > 8U)
t = (x >> 8U);
if (t != 0U) {
n += 8U;
x = t;
}
#endif
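// at this point x fits in 8 bits; narrow to 4 bits and finish with the LUT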
t = (x >> 4U);
if (t != 0U) {
n += 4U;
x = t;
}
return n + log2LUT[x];
//! Subscriber List (for publish-subscribe)
//!
//! @details
//! This data type represents a set of Active Objects that subscribe to
//! a given signal. The set is represented as priority-set, where each bit
//! corresponds to the unique QF-priority of an AO (see QP::QPrioSpec).
= QPSet;
//! QP::QActive active object class (based on the QP::QHsm-style
//! implementation strategy)
//!
//! @details
//! Active objects are encapsulated tasks (each containing an event queue and
//! a state machine) that communicate with one another asynchronously by
//! sending and receiving events. Within an active object, events are
//! processed in a run-to-completion (RTC) fashion, while QF encapsulates
//! all the details of thread-safe event exchange and queuing.<br>
//!
//! QP::QActive represents an active object that uses the QP::QHsm-style
//! implementation strategy for state machines. This strategy is tailored
//! to manual coding, but it is also supported by the QM modeling tool.
//! The resulting code is slower than in the QP::QMsm-style implementation
//! strategy.
//!
//! @note
//! QP::QActive is not intended to be instantiated directly, but rather serves
//! as the abstract base class for derivation of active objects in the
//! applications.
//!
//! @sa QP::QMActive
//!
//! @usage
//! The following example illustrates how to derive an active object from
//! QP::QActive.
//! @include qf_qactive.cpp
//! OS-dependent event-queue type
//!
//! @details
//! The type of the queue depends on the underlying operating system or
//! a kernel. Many kernels support "message queues" that can be adapted
//! to deliver QF events to the active object. Alternatively, QF provides
//! a native event queue implementation that can be used as well.
//!
//! @note
//! The native QF event queue is configured by defining the macro
//! #QF_EQUEUE_TYPE as QP::QEQueue.
//! OS-dependent per-thread object
//!
//! @details
//! This data might be used in various ways, depending on the QF port.
//! In some ports m_osObject is used to block the calling thread when
//! the native QF queue is empty. In other QF ports the OS-dependent
//! object might be used differently.
//! OS-dependent representation of the thread of the active object
//!
//! @details
//! This data might be used in various ways, depending on the QF port.
//! In some ports m_thread is used to store the thread handle. In other ports
//! m_thread can be a pointer to the Thread-Local-Storage (TLS).
//! QF-priority [1..#QF_MAX_ACTIVE] of this AO.
//! @sa QP::QPrioSpec
//! preemption-threshold [1..#QF_MAX_ACTIVE] of this AO.
//! @sa QP::QPrioSpec
//! Internal array of registered active objects
//! pointer to the array of all subscriber AOs for a given event signal
//! The maximum published signal (the size of the subscrList_ array)
noexcept
//! protected constructor (abstract class)
: QHsm(initial),
m_prio(0U),
m_pthre(0U)
#ifdef QF_EQUEUE_TYPE
QF::bzero(&m_eQueue, sizeof(m_eQueue));
#endif
#ifdef QF_OS_OBJECT_TYPE
QF::bzero(&m_osObject, sizeof(m_osObject));
#endif
#ifdef QF_THREAD_TYPE
QF::bzero(&m_thread, sizeof(m_thread));
#endif
//! Starts execution of an active object and registers the object
//! with the framework
//!
//! @details
//! Starts execution of the AO and registers the AO with the framework.
//!
//! @param[in] prioSpec priority specification of the AO containing the
//! QF-priority and (optionally) preemption-threshold of this AO
//! (for preemptive kernels that support it). See also QP::QPrioSpec.
//! @param[in] qSto pointer to the storage for the ring buffer of the
//! event queue
//! @param[in] qLen length of the event queue [# QP::QEvt* pointers]
//! @param[in] stkSto pointer to the stack storage (might be nullptr)
//! @param[in] stkSize stack size [bytes]
//! @param[in] par pointer to an extra parameter (might be nullptr)
//!
//! @usage
//! The following example shows starting an AO when a per-task stack
//! is needed:
//! @include qf_start.cpp
//! Overloaded start function (no initialization parameter)
this->start(prioSpec, qSto, qLen, stkSto, stkSize, nullptr);
//! Stops execution of an active object and removes it from the
//! framework's supervision
//!
//! @attention
//! QActive::stop() must be called only from the AO that is about
//! to stop its execution. By that time, any pointers or references
//! to the AO are considered invalid (dangling) and it becomes
//! illegal for the rest of the application to post events to the AO.
noexcept
//! Posts an event `e` directly to the event queue of the active object
//! using the First-In-First-Out (FIFO) policy.
//!
//! @details
//! Direct event posting is the simplest asynchronous communication
//! method available in QF.
//!
//! @param[in] e pointer to the event to be posted
//! @param[in] margin number of required free slots in the queue
//! after posting the event or QF::NO_MARGIN.
//! @param[in] sender pointer to a sender object (used in QS only)
//!
//! @returns
//! 'true' (success) if the posting succeeded (with the provided margin)
//! and 'false' (failure) when the posting fails.
//!
//! @attention
//! For `margin` == QF::NO_MARGIN, this function will assert internally
//! if the event posting fails. In that case, it is unnecessary to check
//! the return value from this function.
//!
//! @note
//! This function might be implemented differently in various QP/C++
//! ports. The provided implementation assumes that the QP::QEQueue
//! class is used for the QP::QActive event queue.
//!
//! @usage
//! @include qf_post.cpp
//!
//! @sa
//! QActive::postLIFO()
Q_UNUSED_PAR(sender); // when Q_SPY not defined
//! @pre event pointer must be valid
Q_REQUIRE_ID(100, e != nullptr);
QF_CRIT_STAT_
QF_CRIT_E_();
QEQueueCtr nFree = m_eQueue.m_nFree; // get volatile into the temporary
// test-probe#1 for faking queue overflow
QS_TEST_PROBE_DEF(&QActive::post_)
QS_TEST_PROBE_ID(1,
nFree = 0U;
)
bool status;
if (margin == QF::NO_MARGIN) {
if (nFree > 0U) {
status = true; // can post
}
else {
status = false; // cannot post
Q_ERROR_CRIT_(110); // must be able to post the event
}
}
else if (nFree > static_cast<QEQueueCtr>(margin)) {
status = true; // can post
}
else {
status = false; // cannot post, but don't assert
}
// is it a dynamic event?
if (e->poolId_ != 0U) {
QEvt_refCtr_inc_(e); // increment the reference counter
}
if (status) { // can post the event?
--nFree; // one free entry just used up
m_eQueue.m_nFree = nFree; // update the volatile
if (m_eQueue.m_nMin > nFree) {
m_eQueue.m_nMin = nFree; // update minimum so far
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_POST, m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(sender); // the sender object
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this active object
QS_2U8_PRE_(e->poolId_, e->refCtr_); // pool-Id & ref-ctr
QS_EQC_PRE_(nFree); // number of free entries
QS_EQC_PRE_(m_eQueue.m_nMin); // min number of free entries
QS_END_NOCRIT_PRE_()
#ifdef Q_UTEST
// callback to examine the posted event under the same conditions
// as producing the #QS_QF_ACTIVE_POST trace record, which are:
// the local filter for this AO ('m_prio') is set
//
if (QS_LOC_CHECK_(m_prio)) {
QS::onTestPost(sender, this, e, status);
}
#endif
// empty queue?
if (m_eQueue.m_frontEvt == nullptr) {
m_eQueue.m_frontEvt = e; // deliver event directly
QACTIVE_EQUEUE_SIGNAL_(this); // signal the event queue
}
// queue is not empty, insert event into the ring-buffer
else {
// insert event pointer e into the buffer (FIFO)
m_eQueue.m_ring[m_eQueue.m_head] = e;
// need to wrap head?
if (m_eQueue.m_head == 0U) {
m_eQueue.m_head = m_eQueue.m_end; // wrap around
}
// advance the head (counter clockwise)
m_eQueue.m_head = (m_eQueue.m_head - 1U);
}
QF_CRIT_X_();
}
else { // cannot post the event
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_POST_ATTEMPT, m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(sender); // the sender object
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this active object
QS_2U8_PRE_(e->poolId_, e->refCtr_); // pool-Id & ref-ctr
QS_EQC_PRE_(nFree); // number of free entries
QS_EQC_PRE_(margin); // margin requested
QS_END_NOCRIT_PRE_()
#ifdef Q_UTEST
// callback to examine the posted event under the same conditions
// as producing the #QS_QF_ACTIVE_POST trace record, which are:
// the local filter for this AO ('m_prio') is set
//
if (QS_LOC_CHECK_(m_prio)) {
QS::onTestPost(sender, this, e, status);
}
#endif
QF_CRIT_X_();
#if (QF_MAX_EPOOL > 0U)
QF::gc(e); // recycle the event to avoid a leak
#endif
}
return status;
noexcept
//! Posts an event `e` directly to the event queue of the active object
//! using the Last-In-First-Out (LIFO) policy.
//!
//! @details
//! The LIFO policy should be used only for self-posting and with caution,
//! because it alters order of events in the queue.
//!
//! @param[in] e pointer to the event to be posted
//!
//! @attention
//! This function asserts internally if the posting fails.
//!
//! @note
//! This function might be implemented differently in various QP/C++
//! ports. The provided implementation assumes that the QP::QEQueue
//! class is used for the QActive event queue.
//!
//! @sa
//! QActive::post()
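//!
//! @usage
//! A minimal sketch of self-posting a "reminder" event (the REMIND_SIG
//! signal and the use of a dynamic event are illustrative):
//! @code
//! QP::QEvt *re = Q_NEW(QP::QEvt, REMIND_SIG); // allocate a dynamic event
//! me->postLIFO(re); // deliver to the front of this AO's event queue
//! @endcode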
QF_CRIT_STAT_
QF_CRIT_E_();
QEQueueCtr nFree = m_eQueue.m_nFree;// tmp to avoid UB for volatile access
QS_TEST_PROBE_DEF(&QActive::postLIFO)
QS_TEST_PROBE_ID(1,
nFree = 0U;
)
// the queue must be able to accept the event (cannot overflow)
Q_ASSERT_CRIT_(210, nFree != 0U);
// is it a dynamic event?
if (e->poolId_ != 0U) {
QEvt_refCtr_inc_(e); // increment the reference counter
}
--nFree; // one free entry just used up
m_eQueue.m_nFree = nFree; // update the volatile
if (m_eQueue.m_nMin > nFree) {
m_eQueue.m_nMin = nFree; // update minimum so far
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_POST_LIFO, m_prio)
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(e->sig); // the signal of this event
QS_OBJ_PRE_(this); // this active object
QS_2U8_PRE_(e->poolId_, e->refCtr_); // pool-Id & ref-ctr
QS_EQC_PRE_(nFree); // number of free entries
QS_EQC_PRE_(m_eQueue.m_nMin); // min number of free entries
QS_END_NOCRIT_PRE_()
#ifdef Q_UTEST
// callback to examine the posted event under the same conditions
// as producing the #QS_QF_ACTIVE_POST trace record, which are:
// the local filter for this AO ('m_prio') is set
//
if (QS_LOC_CHECK_(m_prio)) {
QS::onTestPost(nullptr, this, e, true);
}
#endif
// read volatile into temporary
QEvt const * const frontEvt = m_eQueue.m_frontEvt;
m_eQueue.m_frontEvt = e; // deliver the event directly to the front
// was the queue empty?
if (frontEvt == nullptr) {
QACTIVE_EQUEUE_SIGNAL_(this); // signal the event queue
}
// queue was not empty, leave the event in the ring-buffer
else {
m_eQueue.m_tail = (m_eQueue.m_tail + 1U);
if (m_eQueue.m_tail == m_eQueue.m_end) { // need to wrap the tail?
m_eQueue.m_tail = 0U; // wrap around
}
m_eQueue.m_ring[m_eQueue.m_tail] = frontEvt;
}
QF_CRIT_X_();
noexcept
//! Get an event from the event queue of an active object
//!
//! @details
//! The behavior of this function depends on the kernel used in the
//! QF port. For built-in kernels (Vanilla or QK) the function can be
//! called only when the queue is not empty, so it doesn't block. For
//! a blocking kernel/OS the function can block and wait for delivery
//! of an event.
//!
//! @returns
//! A pointer to the received event. The returned pointer is guaranteed
//! to be valid (can't be nullptr).
//!
//! @note
//! This function might be implemented differently in various QP/C++
//! ports. The provided implementation assumes that the QP::QEQueue
//! class is used for the QActive event queue.
QF_CRIT_STAT_
QF_CRIT_E_();
QACTIVE_EQUEUE_WAIT_(this); // wait for event to arrive directly
// always remove evt from the front
QEvt const * const e = m_eQueue.m_frontEvt;
QEQueueCtr const nFree = m_eQueue.m_nFree + 1U;
m_eQueue.m_nFree = nFree; // update the number of free entries
// any events in the ring buffer?
if (nFree <= m_eQueue.m_end) {
// remove event from the tail
m_eQueue.m_frontEvt = m_eQueue.m_ring[m_eQueue.m_tail];
if (m_eQueue.m_tail == 0U) { // need to wrap?
m_eQueue.m_tail = m_eQueue.m_end; // wrap around
}
m_eQueue.m_tail = (m_eQueue.m_tail - 1U);
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_GET, m_prio)
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(e->sig); // the signal of this event
QS_OBJ_PRE_(this); // this active object
QS_2U8_PRE_(e->poolId_, e->refCtr_); // pool-Id & ref-ctr
QS_EQC_PRE_(nFree); // number of free entries
QS_END_NOCRIT_PRE_()
}
else {
// the queue becomes empty
m_eQueue.m_frontEvt = nullptr;
// all entries in the queue must be free (+1 for frontEvt)
Q_ASSERT_CRIT_(310, nFree == (m_eQueue.m_end + 1U));
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_GET_LAST, m_prio)
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(e->sig); // the signal of this event
QS_OBJ_PRE_(this); // this active object
QS_2U8_PRE_(e->poolId_, e->refCtr_); // pool-Id & ref-ctr
QS_END_NOCRIT_PRE_()
}
QF_CRIT_X_();
return e;
const noexcept
//! Subscribes for delivery of signal `sig` to the active object
//!
//! @details
//! This function is part of the Publish-Subscribe event delivery
//! mechanism available in QF. Subscribing to an event means that the
//! framework will start posting all published events with a given signal
//! `sig` to the event queue of the active object.
//!
//! @param[in] sig event signal to subscribe
//!
//! The following example shows how the Table active object subscribes
//! to three signals in the initial transition:
//! @include qf_subscribe.cpp
//!
//! @sa
//! QActive::publish_(), QActive::unsubscribe(), and
//! QActive::unsubscribeAll()
std::uint_fast8_t const p = static_cast<std::uint_fast8_t>(m_prio);
Q_REQUIRE_ID(300, (Q_USER_SIG <= sig)
&& (sig < maxPubSignal_)
&& (0U < p) && (p <= QF_MAX_ACTIVE)
&& (registry_[p] == this));
QF_CRIT_STAT_
QF_CRIT_E_();
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_SUBSCRIBE, m_prio)
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(sig); // the signal of this event
QS_OBJ_PRE_(this); // this active object
QS_END_NOCRIT_PRE_()
subscrList_[sig].insert(p); // insert into subscriber-list
QF_CRIT_X_();
const noexcept
//! Unsubscribes from the delivery of signal `sig` to the active object
//!
//! @details
//! This function is part of the Publish-Subscribe event delivery
//! mechanism available in QF. Un-subscribing from an event means that
//! the framework will stop posting published events with a given signal
//! `sig` to the event queue of the active object.
//!
//! @param[in] sig event signal to unsubscribe
//!
//! @note
//! Due to the latency of event queues, an active object should NOT
//! assume that a given signal `sig` will never be dispatched to the
//! state machine of the active object after un-subscribing from that
//! signal. The event might be already in the queue, or just about to
//! be posted and the un-subscribe operation will not flush such events.
//!
//! @note
//! Un-subscribing from a signal that has never been subscribed to in the
//! first place is considered an error and QF will raise an assertion.
//!
//! @sa
//! QActive::publish_(), QActive::subscribe(), and
//! QActive::unsubscribeAll()
std::uint_fast8_t const p = static_cast<std::uint_fast8_t>(m_prio);
//! @pre the signal and the priority must be in range, the AO must also
//! be registered with the framework
Q_REQUIRE_ID(400, (Q_USER_SIG <= sig)
&& (sig < maxPubSignal_)
&& (0U < p) && (p <= QF_MAX_ACTIVE)
&& (registry_[p] == this));
QF_CRIT_STAT_
QF_CRIT_E_();
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_UNSUBSCRIBE, m_prio)
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(sig); // the signal of this event
QS_OBJ_PRE_(this); // this active object
QS_END_NOCRIT_PRE_()
subscrList_[sig].remove(p); // remove from subscriber-list
QF_CRIT_X_();
const noexcept
//! Unsubscribes from the delivery of all signals to the active object
//!
//! @details
//! This function is part of the Publish-Subscribe event delivery
//! mechanism available in QF. Un-subscribing from all events means that
//! the framework will stop posting any published events to the event
//! queue of the active object.
//!
//! @note
//! Due to the latency of event queues, an active object should NOT
//! assume that no events will ever be dispatched to the state machine of
//! the active object after un-subscribing from all events.
//! The events might be already in the queue, or just about to be posted
//! and the un-subscribe operation will not flush such events. Also, the
//! alternative event-delivery mechanisms, such as direct event posting or
//! time events, can be still delivered to the event queue of the active
//! object.
//!
//! @sa
//! QActive::publish_(), QActive::subscribe(), and QActive::unsubscribe()
std::uint_fast8_t const p = static_cast<std::uint_fast8_t>(m_prio);
Q_REQUIRE_ID(500, (0U < p) && (p <= QF_MAX_ACTIVE)
&& (registry_[p] == this));
for (enum_t sig = Q_USER_SIG; sig < maxPubSignal_; ++sig) {
QF_CRIT_STAT_
QF_CRIT_E_();
if (subscrList_[sig].hasElement(p)) {
subscrList_[sig].remove(p);
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_UNSUBSCRIBE, m_prio)
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(sig); // the signal of this event
QS_OBJ_PRE_(this); // this active object
QS_END_NOCRIT_PRE_()
}
QF_CRIT_X_();
// prevent merging critical sections
QF_CRIT_EXIT_NOP();
}
const noexcept
//! Defer an event to a given separate event queue
//!
//! @details
//! This function is part of the event deferral support. An active object
//! uses this function to defer an event `e` to the QF-supported native
//! event queue `eq`. QF correctly accounts for another outstanding
//! reference to the event and will not recycle the event at the end of
//! the RTC step. Later, the active object might recall one event at a
//! time from the event queue.
//!
//! @param[in] eq pointer to a "raw" thread-safe queue to defer
//! the event to.
//! @param[in] e pointer to the event to be deferred
//!
//! @returns
//! 'true' (success) when the event could be deferred and 'false'
//! (failure) if event deferral failed due to overflowing the queue.
//!
//! An active object can use multiple event queues to defer events of
//! different kinds.
//!
//! @sa
//! QActive::recall(), QP::QEQueue, QActive::flushDeferred()
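//!
//! @usage
//! A minimal sketch of deferring an event inside a state handler while the
//! AO is "busy" (the m_requestQueue member, the NEW_REQUEST_SIG signal and
//! the status_ variable are illustrative):
//! @code
//! case NEW_REQUEST_SIG: {
//!     if (!me->defer(&me->m_requestQueue, e)) {
//!         // the deferred queue overflowed; handle the failed deferral
//!     }
//!     status_ = Q_HANDLED();
//!     break;
//! }
//! @endcode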
bool const status = eq->post(e, 0U, m_prio);
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QF_ACTIVE_DEFER, m_prio)
QS_TIME_PRE_(); // time stamp
QS_OBJ_PRE_(this); // this active object
QS_OBJ_PRE_(eq); // the deferred queue
QS_SIG_PRE_(e->sig); // the signal of the event
QS_2U8_PRE_(e->poolId_, e->refCtr_); // pool Id & ref Count
QS_END_PRE_()
return status;
noexcept
//! Recall a deferred event from a given event queue
//!
//! @details
//! This function is part of the event deferral support. An active object
//! uses this function to recall a deferred event from a given QF
//! event queue. Recalling an event means that it is removed from the
//! deferred event queue `eq` and posted (LIFO) to the event queue of
//! the active object.
//!
//! @param[in] eq pointer to a "raw" thread-safe queue to recall
//! an event from.
//!
//! @returns
//! 'true' if an event has been recalled and 'false' if not.
//!
//! @note
//! An active object can use multiple event queues to defer events of
//! different kinds.
//!
//! @sa
//! QActive::defer(), QActive::postLIFO(), QP::QEQueue
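//!
//! @usage
//! A minimal sketch of recalling one deferred event upon entry to the
//! "idle" state (m_requestQueue is the same illustrative queue as in the
//! defer() example):
//! @code
//! case Q_ENTRY_SIG: {
//!     // move at most one deferred request to the front of the AO's queue
//!     static_cast<void>(me->recall(&me->m_requestQueue));
//!     status_ = Q_HANDLED();
//!     break;
//! }
//! @endcode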
QEvt const * const e = eq->get(m_prio); // get evt from deferred queue
bool recalled;
// event available?
if (e != nullptr) {
QActive::postLIFO(e); // post it to the _front_ of the AO's queue
QF_CRIT_STAT_
QF_CRIT_E_();
// is it a dynamic event?
if (e->poolId_ != 0U) {
// after posting to the AO's queue the event must be referenced
// at least twice: once in the deferred event queue (eq->get()
// did NOT decrement the reference counter) and once in the
// AO's event queue.
Q_ASSERT_CRIT_(210, e->refCtr_ >= 2U);
// we need to decrement the reference counter once, to account
// for removing the event from the deferred event queue.
QEvt_refCtr_dec_(e); // decrement the reference counter
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_RECALL, m_prio)
QS_TIME_PRE_(); // time stamp
QS_OBJ_PRE_(this); // this active object
QS_OBJ_PRE_(eq); // the deferred queue
QS_SIG_PRE_(e->sig); // the signal of the event
QS_2U8_PRE_(e->poolId_, e->refCtr_); // pool Id & ref Count
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
recalled = true;
}
else {
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QF_ACTIVE_RECALL_ATTEMPT, m_prio)
QS_TIME_PRE_(); // time stamp
QS_OBJ_PRE_(this); // this active object
QS_OBJ_PRE_(eq); // the deferred queue
QS_END_PRE_()
recalled = false;
}
return recalled;
const noexcept
//! Flush the specified deferred queue 'eq'
//!
//! @details
//! This function is part of the event deferral support. An active object
//! can use this function to flush a given QF event queue. The function
//! makes sure that the events are not leaked.
//!
//! @param[in] eq pointer to a "raw" thread-safe queue to flush.
//!
//! @returns
//! the number of events actually flushed from the queue.
//!
//! @sa
//! QActive::defer(), QActive::recall(), QP::QEQueue
std::uint_fast16_t n = 0U;
for (QEvt const *e = eq->get(m_prio);
e != nullptr;
e = eq->get(m_prio))
{
++n; // count the flushed event
#if (QF_MAX_EPOOL > 0U)
QF::gc(e); // garbage collect
#endif
}
return n;
const noexcept
//! Get the priority of the active object.
return static_cast<std::uint_fast8_t>(m_prio);
noexcept
//! Set the priority of the active object.
m_prio = static_cast<std::uint8_t>(prio & 0xFFU);
m_pthre = static_cast<std::uint8_t>(prio >> 8U);
//! Generic setting of additional attributes (useful in QP ports)
noexcept
//! accessor to the OS-object for extern "C" functions, such as
//! the QK or QXK schedulers
return m_osObject;
noexcept
//! accessor to the Thread for extern "C" functions, such as
//! the QK or QXK schedulers
return m_thread;
noexcept
//! Publish-subscribe initialization
//!
//! @details
//! This function initializes the publish-subscribe facilities of QF and must
//! be called exactly once before any subscriptions/publications occur in
//! the application.
//!
//! @param[in] subscrSto pointer to the array of subscriber lists
//! @param[in] maxSignal the dimension of the subscriber array and at
//! the same time the maximum signal that can be
//! published or subscribed.
//!
//! The array of subscriber-lists is indexed by signals and provides a mapping
//! between the signals and subscriber-lists. The subscriber-lists are
//! bitmasks of type QP::QSubscrList, each bit in the bit mask corresponding
//! to the unique priority of an active object. The size of the
//! QP::QSubscrList bitmask depends on the value of the #QF_MAX_ACTIVE macro.
//!
//! @note
//! The publish-subscribe facilities are optional, meaning that you might
//! choose not to use publish-subscribe. In that case calling QF::psInit()
//! and using up memory for the subscriber-lists is unnecessary.
//!
//! @sa
//! QP::QSubscrList
//!
//! @usage
//! The following example shows the typical initialization sequence of QF:
//! @include qf_main.cpp
subscrList_ = subscrSto;
maxPubSignal_ = maxSignal;
QF::bzero(subscrSto, static_cast<unsigned>(maxSignal)
* sizeof(QSubscrList));
noexcept
//! Publish event to all subscribers of a given signal `e->sig`
//!
//! @details
//! This function posts (using the FIFO policy) the event @a e to **all**
//! active objects that have subscribed to the signal @a e->sig, which is
//! called _multicasting_. The multicasting performed in this function is
//! very efficient based on reference-counting inside the published event
//! ("zero-copy" event multicasting). This function is designed to be
//! callable from any part of the system, including ISRs, device drivers,
//! and active objects.
//!
//! @note
//! To avoid any unexpected re-ordering of events posted into AO queues,
//! the event multicasting is performed with scheduler **locked**.
//! However, the scheduler is locked only up to the priority level of
//! the highest-priority subscriber, so any AOs of even higher priority,
//! which did not subscribe to this event are *not* affected.
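//!
//! @usage
//! A minimal sketch of publishing a dynamic event from an active object
//! (TableEvt, EAT_SIG, and the philoNum member are illustrative; the
//! accessor for this AO's QF-priority is assumed to be named getPrio()):
//! @code
//! TableEvt *te = Q_NEW(TableEvt, EAT_SIG); // allocate a dynamic event
//! te->philoNum = n;                        // fill the event parameter
//! QP::QActive::publish_(te, this, this->getPrio()); // multicast it
//! @endcode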
Q_UNUSED_PAR(sender); // when Q_SPY not defined
Q_UNUSED_PAR(qs_id); // when Q_SPY not defined
//! @pre the published signal must be within the configured range
Q_REQUIRE_ID(100, static_cast<enum_t>(e->sig) < maxPubSignal_);
QF_CRIT_STAT_
QF_CRIT_E_();
QS_BEGIN_NOCRIT_PRE_(QS_QF_PUBLISH, qs_id)
QS_TIME_PRE_(); // the timestamp
QS_OBJ_PRE_(sender); // the sender object
QS_SIG_PRE_(e->sig); // the signal of the event
QS_2U8_PRE_(e->poolId_, e->refCtr_); // pool Id & refCtr of the evt
QS_END_NOCRIT_PRE_()
// is it a dynamic event?
if (e->poolId_ != 0U) {
// NOTE: The reference counter of a dynamic event is incremented to
// prevent premature recycling of the event while the multicasting
// is still in progress. At the end of the function, the garbage
// collector step (QF::gc()) decrements the reference counter and
// recycles the event if the counter drops to zero. This covers the
// case when the event was published without any subscribers.
//
QEvt_refCtr_inc_(e);
}
// make a local, modifiable copy of the subscriber list
QPSet subscrList = subscrList_[e->sig];
QF_CRIT_X_();
if (subscrList.notEmpty()) { // any subscribers?
// the highest-prio subscriber
std::uint_fast8_t p = subscrList.findMax();
QActive *a = registry_[p];
QF_SCHED_STAT_
QF_SCHED_LOCK_(a->m_prio); // lock the scheduler up to AO's prio
do { // loop over all subscribers
// the prio of the AO must be registered with the framework
Q_ASSERT_ID(210, a != nullptr);
// POST() asserts internally if the queue overflows
a->POST(e, sender);
subscrList.remove(p); // remove the handled subscriber
if (subscrList.notEmpty()) { // still more subscribers?
p = subscrList.findMax(); // the highest-prio subscriber
a = registry_[p];
}
else {
p = 0U; // no more subscribers
}
} while (p != 0U);
QF_SCHED_UNLOCK_(); // unlock the scheduler
}
// The following garbage collection step decrements the reference counter
// and recycles the event if the counter drops to zero. This covers both
// cases when the event was published with or without any subscribers.
//
#if (QF_MAX_EPOOL > 0U)
QF::gc(e);
#endif
//! Thread routine for executing an active object `act`
noexcept
//! Register this active object to be managed by the framework
//!
//! @details
//! This function adds a given active object to the active objects
//! managed by the QF framework. It should not be called by the
//! application directly, only through the function QActive::start().
//!
//! @note
//! The priority of the active object should be set before calling
//! this function.
//!
//! @sa QActive::unregister_()
if (m_pthre == 0U) { // preemption-threshold not defined?
m_pthre = m_prio; // apply the default
}
#ifndef Q_NASSERT
//! @pre
//! 1. the "QF-priority" of the AO must be in range
//! 2. the "QF-priority" must not be already in use (unique priority)
//! 3. the "QF-priority" must not exceed the "preemption-threshold"
Q_REQUIRE_ID(100, (0U < m_prio) && (m_prio <= QF_MAX_ACTIVE)
&& (registry_[m_prio] == nullptr)
&& (m_prio <= m_pthre));
std::uint8_t prev_thre = m_pthre;
std::uint8_t next_thre = m_pthre;
std::uint_fast8_t p;
for (p = static_cast<std::uint_fast8_t>(m_prio) - 1U; p > 0U; --p) {
if (registry_[p] != nullptr) {
prev_thre = registry_[p]->m_pthre;
break;
}
}
for (p = static_cast<std::uint_fast8_t>(m_prio) + 1U;
p <= QF_MAX_ACTIVE; ++p)
{
if (registry_[p] != nullptr) {
next_thre = registry_[p]->m_pthre;
break;
}
}
//! @post
//! 1. the preceding preemption-threshold must not exceed this AO's
//!    preemption-threshold
//! 2. this AO's preemption-threshold must not exceed the next
//!    preemption-threshold
Q_ENSURE_ID(110, (prev_thre <= m_pthre) && (m_pthre <= next_thre));
#endif // Q_NASSERT
QF_CRIT_STAT_
QF_CRIT_E_();
// register the AO at the "QF-priority"
registry_[m_prio] = this;
QF_CRIT_X_();
noexcept
//! Un-register the active object from the framework.
//!
//! @details
//! This function un-registers a given active object from the active objects
//! managed by the QF framework. It should not be called by the QP ports.
//!
//! @note
//! The active object that is removed from the framework can no longer
//! participate in any event exchange.
//!
//! @sa QActive::register_()
std::uint_fast8_t const p = static_cast<std::uint_fast8_t>(m_prio);
Q_REQUIRE_ID(200, (0U < p) && (p <= QF_MAX_ACTIVE)
&& (registry_[p] == this));
QF_CRIT_STAT_
QF_CRIT_E_();
registry_[p] = nullptr; // free-up the priority level
m_state.fun = nullptr; // invalidate the state
QF_CRIT_X_();
noexcept
//! the "FromISR" variant used in the QP port to "FreeRTOS"
noexcept
//! the "FromISR" variant used in the QP port to "FreeRTOS"
//! QMActive active object (based on QP::QMsm implementation)
//!
//! @details
//! QP::QMActive represents an active object that uses the QP::QMsm-style
//! state machine implementation strategy. This strategy requires the use of
//! the QM modeling tool to generate state machine code automatically, but
//! the code is faster than in the QP::QHsm-style implementation strategy
//! and needs less run-time support (smaller event-processor).
//!
//! @note
//! QP::QMActive is not intended to be instantiated directly, but rather
//! serves as the base class for derivation of active objects in the
//! applications.
//!
//! @sa QP::QActive
//!
//! @usage
//! The following example illustrates how to derive an active object from
//! QP::QMActive.
//! @include qf_qmactive.cpp
//! inherited from QP::QHsm, but disallowed in QP::QMActive
;
//! inherited from QP::QHsm, but disallowed in QP::QMActive
;
//! inherited from QP::QHsm, but disallowed in QP::QMActive
;
noexcept
//! protected constructor (abstract class)
: QActive(initial)
m_temp.fun = initial;
override
//! delegate to QP::QMsm::init()
m_state.obj = &QMsm::msm_top_s;
QF_QMACTIVE_TO_QMSM_CAST_(this)->QMsm::init(e, qs_id);
override
//! delegate to QP::QMsm::init()
m_state.obj = &QMsm::msm_top_s;
QF_QMACTIVE_TO_QMSM_CAST_(this)->QMsm::init(qs_id);
override
//! delegate to QMsm::dispatch()
QF_QMACTIVE_TO_QMSM_CAST_(this)->QMsm::dispatch(e, qs_id);
const noexcept
//! Tests if a given state is part of the active state configuration
return QF_QMACTIVE_TO_QMSM_CONST_CAST_(this)->QMsm::isInState(st);
const noexcept
//! Return the current active state object (read only)
return m_state.obj;
const noexcept
//! Return the current active state object (read only)
return QF_QMACTIVE_TO_QMSM_CONST_CAST_(this)
->QMsm::childStateObj(parent);
noexcept override
//! Get the current state handler of the QP::QMsm
return QF_QMACTIVE_TO_QMSM_CAST_(this)->QMsm::getStateHandler();
//! Time Event class (inherits QP:QEvt)
//!
//! @details
//! Time events are special QF events equipped with the notion of time
//! passage. The basic usage model of the time events is as follows. An
//! active object allocates one or more QTimeEvt objects (provides the
//! storage for them). When the active object needs to arrange for a timeout,
//! it arms one of its time events to fire either just once (one-shot) or
//! periodically. Each time event times out independently from the others,
//! so a QF application can make multiple parallel timeout requests (from the
//! same or different active objects). When QF detects that the appropriate
//! moment has arrived, it inserts the time event directly into the
//! recipient's event queue. The recipient then processes the time event just
//! like any other event.
//! <br>
//! Time events, like any other QF events, derive from the QP::QEvt base
//! class. Typically, you will use a time event as-is, but you can also
//! further derive more specialized time events from it by adding some more
//! data members and/or specialized functions that operate on the specialized
//! time events.
//! <br>
//! Internally, the armed time events are organized into a bi-directional
//! linked list. This linked list is scanned in every invocation of the
//! QTimeEvt::tick_() function. Only armed (timing out) time events are in the
//! list, so only armed time events consume CPU cycles.
//!
//! @note
//! QF manages the time events in the macro TICK_X(), which must be called
//! periodically, from the clock tick ISR or from the special QP::QTicker
//! active object.
//!
//! @note
//! Even though QP::QTimeEvt is a subclass of QP::QEvt, QP::QTimeEvt instances
//! can NOT be allocated dynamically from event pools. In other words, it is
//! illegal to allocate QP::QTimeEvt instances with the Q_NEW() or Q_NEW_X()
//! macros.
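//!
//! @usage
//! A minimal sketch of a periodic time event owned by an active object
//! (the TIMEOUT_SIG signal, BSP::TICKS_PER_SEC constant and the armX()
//! name of the arming operation described later are illustrative/assumed):
//! @code
//! // member of the AO class: QP::QTimeEvt m_timeEvt;
//! // constructed in the AO constructor as: m_timeEvt(this, TIMEOUT_SIG, 0U)
//!
//! // arm it: first timeout in 2 seconds, then every second thereafter
//! me->m_timeEvt.armX(2U * BSP::TICKS_PER_SEC, BSP::TICKS_PER_SEC);
//! @endcode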
//! link to the next time event in the list
//! the active object that receives the time events
//!
//! @details
//! The m_act pointer is reused inside the QP implementation to hold
//! the head of the list of newly armed time events.
//! the internal down-counter of the time event
//!
//! @details
//! The down-counter is decremented by 1 in every TICK_X()
//! invocation. The time event fires (gets posted or published) when
//! the down-counter reaches zero.
//! the interval for the periodic time event (zero for the one-shot
//! time event)
//!
//! @details
//! The value of the interval is re-loaded to the internal
//! down-counter when the time event expires, so that the time event
//! keeps timing out periodically.
//! heads of linked lists of time events, one for every clock tick rate
//! The Time Event constructor
:
#ifndef Q_EVT_CTOR
QEvt(),
#else
QEvt(static_cast<QSignal>(sgnl), 0U),
#endif
m_next(nullptr),
m_act(act),
m_ctr(0U),
m_interval(0U)
//! @pre The signal must be valid and the tick rate in range
Q_REQUIRE_ID(300, (sgnl != 0)
&& (tickRate < QF_MAX_TICK_RATE));
#ifndef Q_EVT_CTOR
sig = static_cast<QSignal>(sgnl); // set QEvt::sig of this time event
#endif
// Setting the POOL_ID event attribute to zero is correct only for
// events not allocated from event pools, which must be the case
// for Time Events.
//
poolId_ = 0U;
// The refCtr_ attribute is not used in time events, so it is
// reused to hold the tickRate as well as other information
//
refCtr_ = static_cast<std::uint8_t>(tickRate);
noexcept
//! Arm a time event (one shot or periodic) for event posting
//!
//! @details
//! Arms a time event to fire in a specified number of clock ticks and
//! with a specified interval. If the interval is zero, the time event
//! is armed for one shot ('one-shot' time event). The time event gets
//! directly posted (using the FIFO policy) into the event queue of the
//! host active object. After posting, a one-shot time event gets
//! automatically disarmed while a periodic time event (interval != 0)
//! is automatically re-armed.
//!
//! A time event can be disarmed at any time by calling
//! QP::QTimeEvt::disarm(). Also, a time event can be re-armed to fire
//! in a different number of clock ticks by calling QP::QTimeEvt::rearm().
//!
//! @param[in] nTicks number of clock ticks (at the associated rate)
//! to rearm the time event with.
//! @param[in] interval interval (in clock ticks) for periodic time event.
//!
//! @attention
//! Arming an already armed time event is __not__ allowed and is
//! considered a programming error. The QP/C++ framework will assert
//! if it detects an attempt to arm an already armed time event.
//!
//! @usage
//! The following example shows how to arm a one-shot time event from a
//! state machine of an active object:
//! @include qf_state.cpp
std::uint8_t const tickRate = refCtr_ & TE_TICK_RATE;
QTimeEvtCtr const ctr = m_ctr; // temporary to hold volatile
//! @pre the host AO must be valid, the time event must be disarmed,
//! number of clock ticks cannot be zero, and the signal must be valid.
Q_REQUIRE_ID(400, (m_act != nullptr)
&& (ctr == 0U)
&& (nTicks != 0U)
&& (tickRate < static_cast<std::uint8_t>(QF_MAX_TICK_RATE))
&& (static_cast<enum_t>(sig) >= Q_USER_SIG));
#ifdef Q_NASSERT
(void)ctr; // avoid compiler warning about unused variable
#endif
QF_CRIT_STAT_
QF_CRIT_E_();
m_ctr = nTicks;
m_interval = interval;
// is the time event unlinked?
// NOTE: For the duration of a single clock tick of the specified tick
// rate a time event can be disarmed and yet still linked into the list,
// because un-linking is performed exclusively in QTimeEvt::tick_().
//
if (static_cast<std::uint_fast8_t>(
static_cast<std::uint_fast8_t>(refCtr_) & TE_IS_LINKED) == 0U)
{
// mark as linked
refCtr_ = static_cast<std::uint8_t>(refCtr_ | TE_IS_LINKED);
// The time event is initially inserted into the separate
// "freshly armed" list based on timeEvtHead_[tickRate].act.
// Only later, inside QTimeEvt::tick_(), the "freshly armed"
// list is appended to the main list of armed time events based on
// timeEvtHead_[tickRate].next. Again, this is to keep any
// changes to the main list exclusively inside QTimeEvt::tick_().
m_next = timeEvtHead_[tickRate].toTimeEvt();
timeEvtHead_[tickRate].m_act = this;
}
#ifdef Q_SPY
std::uint_fast8_t const qs_id = static_cast<QActive *>(m_act)->m_prio;
#endif
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_ARM, qs_id)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this time event object
QS_OBJ_PRE_(m_act); // the active object
QS_TEC_PRE_(nTicks); // the number of ticks
QS_TEC_PRE_(interval); // the interval
QS_U8_PRE_(tickRate); // tick rate
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
noexcept
//! Disarm a time event
//!
//! @details
//! Disarm the time event so it can be safely reused.
//!
//! @returns
//! 'true' if the time event was truly disarmed, that is, it was running.
//! The return of 'false' means that the time event was not truly
//! disarmed because it was not running. The 'false' return is only
//! possible for one-shot time events that have been automatically
//! disarmed upon expiration. In that case the 'false' return means that
//! the time event has already been posted or published and should be
//! expected in the active object's state machine.
//!
//! @note
//! there is no harm in disarming an already disarmed time event
QF_CRIT_STAT_
QF_CRIT_E_();
#ifdef Q_SPY
std::uint_fast8_t const qs_id = static_cast<QActive *>(m_act)->m_prio;
#endif
// is the time event actually armed?
bool wasArmed;
if (m_ctr != 0U) {
wasArmed = true;
refCtr_ = static_cast<std::uint8_t>(refCtr_ | TE_WAS_DISARMED);
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_DISARM, qs_id)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this time event object
QS_OBJ_PRE_(m_act); // the target AO
QS_TEC_PRE_(m_ctr); // the number of ticks
QS_TEC_PRE_(m_interval); // the interval
QS_U8_PRE_(refCtr_& TE_TICK_RATE);
QS_END_NOCRIT_PRE_()
m_ctr = 0U; // schedule removal from the list
}
else { // the time event was already disarmed automatically
wasArmed = false;
refCtr_ = static_cast<std::uint8_t>(refCtr_
& static_cast<std::uint8_t>(~TE_WAS_DISARMED));
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_DISARM_ATTEMPT, qs_id)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this time event object
QS_OBJ_PRE_(m_act); // the target AO
QS_U8_PRE_(refCtr_& TE_TICK_RATE); // tick rate
QS_END_NOCRIT_PRE_()
}
QF_CRIT_X_();
return wasArmed;
noexcept
//! Rearm a time event
//!
//! @details
//! Rearms a time event with a new number of clock ticks. This function
//! can be used to adjust the current period of a periodic time event
//! or to prevent a one-shot time event from expiring (e.g., a watchdog
//! time event). Rearming a periodic timer leaves the interval unchanged
//! and is a convenient method to adjust the phasing of a periodic
//! time event.
//!
//! @param[in] nTicks number of clock ticks (at the associated rate)
//! to rearm the time event with.
//!
//! @returns
//! 'true' if the time event was running as it was re-armed. The 'false'
//! return means that the time event was not truly rearmed because it was
//! not running. The 'false' return is only possible for one-shot time
//! events that have been automatically disarmed upon expiration. In that
//! case the 'false' return means that the time event has already been
//! posted and should be expected in the active object's state machine.
std::uint8_t const tickRate = refCtr_ & TE_TICK_RATE;
//! @pre AO must be valid, tick rate must be in range, nTicks must not
//! be zero, and the signal of this time event must be valid
Q_REQUIRE_ID(600, (m_act != nullptr)
&& (tickRate < static_cast<std::uint8_t>(QF_MAX_TICK_RATE))
&& (nTicks != 0U)
&& (static_cast<enum_t>(sig) >= Q_USER_SIG));
QF_CRIT_STAT_
QF_CRIT_E_();
// is the time evt not running?
bool wasArmed;
if (m_ctr == 0U) {
wasArmed = false;
// is the time event unlinked?
// NOTE: For a duration of a single clock tick of the specified
// tick rate a time event can be disarmed and yet still linked into
// the list, because unlinking is performed exclusively in the
// QTimeEvt::tick_() function.
if (static_cast<std::uint8_t>(refCtr_ & TE_IS_LINKED) == 0U) {
// mark as linked
refCtr_ = static_cast<std::uint8_t>(refCtr_ | TE_IS_LINKED);
// The time event is initially inserted into the separate
// "freshly armed" list based on timeEvtHead_[tickRate].act.
// Only later, inside QTimeEvt::tick_(), the "freshly armed"
// list is appended to the main list of armed time events based on
// timeEvtHead_[tickRate].next. Again, this is to keep any
// changes to the main list exclusively inside QTimeEvt::tick_().
m_next = timeEvtHead_[tickRate].toTimeEvt();
timeEvtHead_[tickRate].m_act = this;
}
}
else { // the time event is being disarmed
wasArmed = true;
}
m_ctr = nTicks; // re-load the tick counter (shift the phasing)
#ifdef Q_SPY
std::uint_fast8_t const qs_id = static_cast<QActive *>(m_act)->m_prio;
#endif
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_REARM, qs_id)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this time event object
QS_OBJ_PRE_(m_act); // the target AO
QS_TEC_PRE_(m_ctr); // the number of ticks
QS_TEC_PRE_(m_interval); // the interval
QS_2U8_PRE_(tickRate, (wasArmed ? 1U : 0U));
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
return wasArmed;
noexcept
//! Check the "was disarmed" status of a time event
//!
//! @details
//! Useful for checking whether a one-shot time event was disarmed in the
//! QTimeEvt::disarm() operation.
//!
//! @returns
//! 'true' if the time event was truly disarmed in the last
//! QTimeEvt::disarm() operation. The 'false' return means that the time
//! event was not truly disarmed, because it was not running at that time.
//! The 'false' return is only possible for one-shot time events that
//! have been automatically disarmed upon expiration. In this case the
//! 'false' return means that the time event has already been posted or
//! published and should be expected in the active object's event queue.
//!
//! @note
//! This function has a **side effect** of setting the "was disarmed"
//! status, which means that the second and subsequent times this
//! function is called the function will return 'true'.
std::uint8_t const isDisarmed = refCtr_ & TE_WAS_DISARMED;
// mark as disarmed
refCtr_ = static_cast<std::uint8_t>(refCtr_ | TE_WAS_DISARMED);
return isDisarmed != 0U;
const noexcept
//! Gets the active object associated with the time event
return m_act;
const noexcept
//! Gets the current count of the time event
return m_ctr;
const noexcept
//! Gets the interval of the time event
return m_interval;
noexcept
//! Processes all armed time events at every clock tick
//!
//! @details
//! This function must be called periodically from a time-tick ISR or from
//! a task so that QF can manage the timeout events assigned to the given
//! system clock tick rate.
//!
//! @param[in] tickRate system clock tick rate serviced [1..15].
//! @param[in] sender pointer to a sender object (used in QS only).
//!
//! @attention
//! this function should be called only via the macros TICK_X() or TICK()
//!
//! @note
//! The calls to QTimeEvt::tick_() with different `tickRate` parameters can
//! preempt each other. For example, higher clock tick rates might be
//! serviced from interrupts while others from tasks (active objects).
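//!
//! @usage
//! A minimal sketch of servicing tick rate 0 from the system clock tick
//! ISR under the QK kernel (the ISR name and the QK_ISR_ENTRY()/
//! QK_ISR_EXIT() notifications are port/kernel specific; TICK_X() is
//! assumed to take the tick rate and a sender pointer used only in QS):
//! @code
//! void SysTick_Handler(void) {   // system clock tick ISR
//!     QK_ISR_ENTRY();            // inform QK about entering the ISR
//!     TICK_X(0U, nullptr);       // process time events at tick rate 0
//!     QK_ISR_EXIT();             // inform QK about exiting the ISR
//! }
//! @endcode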
Q_UNUSED_PAR(sender); // when Q_SPY not defined
QTimeEvt *prev = &timeEvtHead_[tickRate];
QF_CRIT_STAT_
QF_CRIT_E_();
QS_BEGIN_NOCRIT_PRE_(QS_QF_TICK, 0U)
prev->m_ctr = (prev->m_ctr + 1U);
QS_TEC_PRE_(prev->m_ctr); // tick ctr
QS_U8_PRE_(tickRate); // tick rate
QS_END_NOCRIT_PRE_()
// scan the linked-list of time events at this rate...
for (;;) {
QTimeEvt *t = prev->m_next; // advance down the time evt. list
// end of the list?
if (t == nullptr) {
// any new time events armed since the last run of tick_()?
if (timeEvtHead_[tickRate].m_act != nullptr) {
// sanity check
Q_ASSERT_CRIT_(110, prev != nullptr);
prev->m_next = timeEvtHead_[tickRate].toTimeEvt();
timeEvtHead_[tickRate].m_act = nullptr;
t = prev->m_next; // switch to the new list
}
else {
break; // all currently armed time evts. processed
}
}
// time event scheduled for removal?
if (t->m_ctr == 0U) {
prev->m_next = t->m_next;
// mark time event 't' as NOT linked
t->refCtr_ = static_cast<std::uint8_t>(t->refCtr_
& static_cast<std::uint8_t>(~TE_IS_LINKED));
// do NOT advance the prev pointer
QF_CRIT_X_(); // exit crit. section to reduce latency
// prevent merging critical sections, see NOTE1 below
QF_CRIT_EXIT_NOP();
}
else {
t->m_ctr = (t->m_ctr - 1U);
// is time evt about to expire?
if (t->m_ctr == 0U) {
QActive * const act = t->toActive(); // temp for volatile
// periodic time evt?
if (t->m_interval != 0U) {
t->m_ctr = t->m_interval; // rearm the time event
prev = t; // advance to this time event
}
// one-shot time event: automatically disarm
else {
prev->m_next = t->m_next;
// mark time event 't' as NOT linked
t->refCtr_ = static_cast<std::uint8_t>(t->refCtr_
& static_cast<std::uint8_t>(~TE_IS_LINKED));
// do NOT advance the prev pointer
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_AUTO_DISARM,
act->m_prio)
QS_OBJ_PRE_(t); // this time event object
QS_OBJ_PRE_(act); // the target AO
QS_U8_PRE_(tickRate); // tick rate
QS_END_NOCRIT_PRE_()
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_POST, act->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(t); // the time event object
QS_SIG_PRE_(t->sig); // signal of this time event
QS_OBJ_PRE_(act); // the target AO
QS_U8_PRE_(tickRate); // tick rate
QS_END_NOCRIT_PRE_()
QF_CRIT_X_(); // exit crit. section before posting
// asserts if queue overflows
act->POST(t, sender);
}
else {
prev = t; // advance to this time event
QF_CRIT_X_(); // exit crit. section to reduce latency
// prevent merging critical sections
// In some QF ports the critical section exit takes effect only
// on the next machine instruction. If, in this case, the next
// instruction is another entry to a critical section, the
// critical section won't be really exited, but rather the
// two adjacent critical sections would be merged. The
// QF_CRIT_EXIT_NOP() macro contains minimal code required
// to prevent such merging of critical sections in QF ports,
// in which it can occur.
QF_CRIT_EXIT_NOP();
}
}
QF_CRIT_E_(); // re-enter crit. section to continue
}
QF_CRIT_X_();
//! Processes one clock tick for QUTest
noexcept
//! the "FromISR" variant used in the QP port to "FreeRTOS"
noexcept
//! Returns 'true' if all time events are inactive and 'false' if
//! any time event is active
//!
//! @details
//! Find out if any time events are armed at the given clock tick rate.
//!
//! @param[in] tickRate system clock tick rate to find out about.
//!
//! @returns
//! 'true' if no time events are armed at the given tick rate and
//! 'false' otherwise.
//!
//! @note
//! This function should be called inside a critical section.
//! @pre the tick rate must be in range
Q_REQUIRE_ID(200, tickRate < QF_MAX_TICK_RATE);
bool inactive;
if (timeEvtHead_[tickRate].m_next != nullptr) {
inactive = false;
}
else if (timeEvtHead_[tickRate].m_act != nullptr) {
inactive = false;
}
else {
inactive = true;
}
return inactive;
noexcept
//! encapsulate the cast of the m_act attribute to QActive*
return static_cast<QActive *>(m_act);
noexcept
//! encapsulate the cast of the `m_act` attribute to QTimeEvt*
return static_cast<QTimeEvt *>(m_act);
//! private default constructor only for friends
//!
//! @note
//! private default ctor for internal use only
:
#ifdef Q_EVT_CTOR
QEvt(0U, 0U),
#else
QEvt(),
#endif // Q_EVT_CTOR
m_next(nullptr),
m_act(nullptr),
m_ctr(0U),
m_interval(0U)
#ifndef Q_EVT_CTOR
sig = 0U;
// Setting the POOL_ID event attribute to zero is correct only for
// events not allocated from event pools, which must be the case
// for Time Events.
//
poolId_ = 0U; // not from any event pool
// The refCtr_ attribute is not used in time events, so it is
// reused to hold the tickRate as well as other information
//
refCtr_ = 0U; // default rate 0
#endif // Q_EVT_CTOR
= delete
//! private copy constructor to disallow copying of QTimeEvts
= delete
//! disallow copying of QP::QTimeEvt
//! Native QF Event Queue class
//!
//! @details
//! This structure describes the native QF event queue, which can be used as
//! the event queue for active objects, or as a simple "raw" event queue for
//! thread-safe event passing among non-framework entities, such as ISRs,
//! device drivers, or other third-party components.<br>
//!
//! The native QF event queue is configured by defining the macro
//! #QF_EQUEUE_TYPE as QP::QEQueue in the specific QF port header file.<br>
//!
//! The QP::QEQueue class contains only data members for managing an event
//! queue, but does not contain the storage for the queue buffer, which must
//! be provided externally during the queue initialization.<br>
//!
//! The event queue can store only event pointers, not the whole events. The
//! internal implementation uses the standard ring-buffer plus one external
//! location that optimizes the queue operation for the most frequent case
//! of empty queue.<br>
//!
//! The QP::QEQueue class is used with two sets of functions. One set is for
//! the active object event queue, which needs to block the active object
//! task when the event queue is empty and unblock it when events are posted
//! to the queue. The interface for the native active object event queue
//! consists of the following functions: QActive::post(), QActive::postLIFO(),
//! and QActive::get_(). Additionally the function QEQueue::init() is used
//! to initialize the queue.<br>
//!
//! The other set of functions, uses this class as a simple "raw" event
//! queue to pass events between entities other than active objects, such as
//! ISRs. The "raw" event queue is not capable of blocking on the get()
//! operation, but is still thread-safe because it uses QF critical section
//! to protect its integrity. The interface for the "raw" thread-safe queue
//! consists of the following functions: QP::QEQueue::post(),
//! QP::QEQueue::postLIFO(), and QP::QEQueue::get(). Additionally the
//! function QP::QEQueue::init() is used to initialize the queue.
//!
//! @note
//! Most event queue operations (both the active object queues and the "raw"
//! queues) internally use the QF critical section. You should be careful
//! not to invoke those operations from other critical sections when nesting
//! of critical sections is not supported.
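//!
//! @usage
//! A minimal sketch of a "raw" thread-safe queue shared between an ISR
//! (producer) and an active object (consumer); the names and the queue
//! depth are illustrative:
//! @code
//! static QP::QEQueue rawQueue;        // the "raw" queue instance
//! static QP::QEvt const *rawQSto[8];  // storage for the ring buffer
//!
//! // during initialization:
//! rawQueue.init(rawQSto, Q_DIM(rawQSto));
//!
//! // in the producer (e.g., an ISR), e points to an event to pass on:
//! static_cast<void>(rawQueue.post(e, QP::QF::NO_MARGIN, 0U));
//!
//! // in the consumer (e.g., an AO thread):
//! QP::QEvt const *evt = rawQueue.get(0U);
//! if (evt != nullptr) {
//!     // ... process evt, then QP::QF::gc(evt) if no longer needed
//! }
//! @endcode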
//! pointer to event at the front of the queue
//!
//! @details
//! All incoming and outgoing events pass through the m_frontEvt location.
//! When the queue is empty (which is most of the time), the extra
//! m_frontEvt location allows bypassing the ring buffer altogether,
//! greatly optimizing the performance of the queue. Only bursts of events
//! engage the ring buffer.<br>
//!
//! The additional role of this attribute is to indicate the empty status
//! of the queue. The queue is empty if the m_frontEvt location is nullptr.
//! pointer to the start of the ring buffer
//! offset of the end of the ring buffer from the start of the buffer
//! offset to where next event will be inserted into the buffer
//! offset of where next event will be extracted from the buffer
//! number of free events in the ring buffer
//! minimum number of free events ever in the ring buffer.
//! @note this attribute remembers the low-watermark of the ring buffer,
//! which provides a valuable information for sizing event queues.
//! @sa QP::QF::getQueueMin().
noexcept
//! public default constructor
: m_frontEvt(nullptr),
m_ring(nullptr),
m_end(0U),
m_head(0U),
m_tail(0U),
m_nFree(0U),
m_nMin(0U)
noexcept
//! Initializes the native QF event queue
//!
//! @details
//! Initialize the event queue by giving it the storage for the
//! ring buffer.
//!
//! @param[in] qSto an array of pointers to QP::QEvt to serve as the
//! ring buffer for the event queue
//! @param[in] qLen the length of the qSto[] buffer (in QP::QEvt pointers)
//!
//! @note
//! The actual capacity of the queue is qLen + 1, because of the extra
//! location frontEvt.
//!
//! @note
//! This function is also used to initialize the event queues of active
//! objects in the built-in QV, QK and QXK kernels, as well as other
//! QP ports to OSes/RTOSes that do provide a suitable message queue.
m_frontEvt = nullptr; // no events in the queue
m_ring = &qSto[0];
m_end = static_cast<QEQueueCtr>(qLen);
if (qLen > 0U) {
m_head = 0U;
m_tail = 0U;
}
m_nFree = static_cast<QEQueueCtr>(qLen + 1U); //+1 for frontEvt
m_nMin = m_nFree;
noexcept
//! Posts (FIFO) an event to the "raw" thread-safe QF event queue
//!
//! @details
//! Post an event to the "raw" thread-safe event queue using the
//! First-In-First-Out (FIFO) order.
//!
//! @param[in] e pointer to the event to be posted to the queue
//! @param[in] margin number of required free slots in the queue after
//! posting the event. The special value
//! QF::NO_MARGIN means that this function will
//! assert if posting fails.
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @note
//! The QF::NO_MARGIN value of the `margin` argument is special and
//! denotes situation when the post() operation is assumed to succeed
//! (event delivery guarantee). An assertion fires, when the event cannot
//! be delivered in this case.
//!
//! @returns 'true' (success) when the posting succeeded with the provided
//! margin and 'false' (failure) when the posting fails.
//!
//! @note
//! This function can be called from any task context or ISR context.
//!
//! @sa QP::QEQueue::postLIFO(), QP::QEQueue::get()
//! @pre event must be valid
Q_REQUIRE_ID(200, e != nullptr);
QF_CRIT_STAT_
QF_CRIT_E_();
QEQueueCtr nFree = m_nFree; // temporary to avoid UB for volatile access
// margin available?
bool status;
if (((margin == QF::NO_MARGIN) && (nFree > 0U))
|| (nFree > static_cast<QEQueueCtr>(margin)))
{
// is it a dynamic event?
if (e->poolId_ != 0U) {
QEvt_refCtr_inc_(e); // increment the reference counter
}
--nFree; // one free entry just used up
m_nFree = nFree; // update the volatile
if (m_nMin > nFree) {
m_nMin = nFree; // update minimum so far
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_EQUEUE_POST, qs_id)
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(e->sig); // the signal of this event
QS_OBJ_PRE_(this); // this queue object
QS_2U8_PRE_(e->poolId_, e->refCtr_);// pool Id & refCtr of the evt
QS_EQC_PRE_(nFree); // number of free entries
QS_EQC_PRE_(m_nMin); // min number of free entries
QS_END_NOCRIT_PRE_()
// is the queue empty?
if (m_frontEvt == nullptr) {
m_frontEvt = e; // deliver event directly
}
// queue is not empty, leave event in the ring-buffer
else {
// insert event into the ring buffer (FIFO)
m_ring[m_head] = e; // insert e into buffer
// need to wrap?
if (m_head == 0U) {
m_head = m_end; // wrap around
}
m_head = (m_head - 1U);
}
status = true; // event posted successfully
}
else {
//! @note assert if event cannot be posted and dropping events is
//! not acceptable
Q_ASSERT_CRIT_(210, margin != QF::NO_MARGIN);
QS_BEGIN_NOCRIT_PRE_(QS_QF_EQUEUE_POST_ATTEMPT, qs_id)
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(e->sig); // the signal of this event
QS_OBJ_PRE_(this); // this queue object
QS_2U8_PRE_(e->poolId_, e->refCtr_);// pool Id & refCtr of the evt
QS_EQC_PRE_(nFree); // number of free entries
QS_EQC_PRE_(margin); // margin requested
QS_END_NOCRIT_PRE_()
status = false; // event not posted
}
QF_CRIT_X_();
Q_UNUSED_PAR(qs_id); // when Q_SPY not defined
return status;
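//! @usage
//! A minimal posting sketch (the queue object, its storage, and the
//! margin value below are hypothetical and shown only for illustration):
//! @code{.cpp}
//! static QP::QEvt const *rawQSto[8]; // storage for the raw queue
//! static QP::QEQueue rawQueue;       // "raw" thread-safe event queue
//!
//! // during system initialization:
//! rawQueue.init(rawQSto, Q_DIM(rawQSto));
//!
//! // posting with the delivery guarantee (asserts on queue overflow):
//! rawQueue.post(e, QP::QF::NO_MARGIN, 0U);
//!
//! // posting with a margin of 2 free slots (overflow is tolerated):
//! if (!rawQueue.post(e, 2U, 0U)) {
//!     // event was NOT posted; it remains owned by this code
//! }
//! @endcode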
noexcept
//! Posts (LIFO) an event to the "raw" thread-safe QF event queue
//!
//! @details
//! Post an event to the "raw" thread-safe event queue using the
//! Last-In-First-Out (LIFO) order.
//!
//! @param[in] e pointer to the event to be posted to the queue
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @attention
//! The LIFO policy should be used only with great __caution__,
//! because it alters the order of events in the queue.
//!
//! @note
//! This function can be called from any task context or ISR context.
//!
//! @note
//! This function is used for the "raw" thread-safe queues and __not__
//! for the queues of active objects.
//!
//! @sa
//! QEQueue::post(), QEQueue::get(), QActive::defer()
Q_UNUSED_PAR(qs_id); // when Q_SPY not defined
QF_CRIT_STAT_
QF_CRIT_E_();
QEQueueCtr nFree = m_nFree; // temporary to avoid UB for volatile access
//! @pre the queue must be able to accept the event (cannot overflow)
Q_REQUIRE_CRIT_(300, nFree != 0U);
// is it a dynamic event?
if (e->poolId_ != 0U) {
QEvt_refCtr_inc_(e); // increment the reference counter
}
--nFree; // one free entry just used up
m_nFree = nFree; // update the volatile
if (m_nMin > nFree) {
m_nMin = nFree; // update minimum so far
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_EQUEUE_POST_LIFO, qs_id)
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(e->sig); // the signal of this event
QS_OBJ_PRE_(this); // this queue object
QS_2U8_PRE_(e->poolId_, e->refCtr_); // pool Id & refCtr of the evt
QS_EQC_PRE_(nFree); // number of free entries
QS_EQC_PRE_(m_nMin); // min number of free entries
QS_END_NOCRIT_PRE_()
QEvt const * const frontEvt = m_frontEvt; // read volatile into temporary
m_frontEvt = e; // deliver event directly to the front of the queue
// was the queue not empty?
if (frontEvt != nullptr) {
m_tail = (m_tail + 1U);
if (m_tail == m_end) { // need to wrap the tail?
m_tail = 0U; // wrap around
}
m_ring[m_tail] = frontEvt; // buffer the old front evt
}
QF_CRIT_X_();
noexcept
//! Gets an event from the "raw" thread-safe QF event queue
//!
//! @details
//! Retrieves an event from the front of the "raw" thread-safe queue and
//! returns a pointer to this event to the caller.
//!
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @returns
//! pointer to event at the front of the queue, if the queue is
//! not empty and NULL if the queue is empty.
//!
//! @note
//! this function is used for the "raw" thread-safe queues and **not**
//! for the queues of active objects.
//!
//! @sa
//! QEQueue::post(), QEQueue::postLIFO(), QActive::recall()
Q_UNUSED_PAR(qs_id); // when Q_SPY not defined
QF_CRIT_STAT_
QF_CRIT_E_();
QEvt const * const e = m_frontEvt; // always remove evt from the front
// is the queue not empty?
if (e != nullptr) {
QEQueueCtr const nFree = m_nFree + 1U;
m_nFree = nFree; // update the number of free entries
// any events in the ring buffer?
if (nFree <= m_end) {
m_frontEvt = m_ring[m_tail]; // remove from the tail
if (m_tail == 0U) { // need to wrap?
m_tail = m_end; // wrap around
}
m_tail = (m_tail - 1U);
QS_BEGIN_NOCRIT_PRE_(QS_QF_EQUEUE_GET, qs_id)
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(e->sig); // the signal of this event
QS_OBJ_PRE_(this); // this queue object
QS_2U8_PRE_(e->poolId_, e->refCtr_);
QS_EQC_PRE_(nFree); // # free entries
QS_END_NOCRIT_PRE_()
}
else {
m_frontEvt = nullptr; // queue becomes empty
// all entries in the queue must be free (+1 for frontEvt)
Q_ASSERT_CRIT_(410, nFree == (m_end + 1U));
QS_BEGIN_NOCRIT_PRE_(QS_QF_EQUEUE_GET_LAST, qs_id)
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(e->sig); // the signal of this event
QS_OBJ_PRE_(this); // this queue object
QS_2U8_PRE_(e->poolId_, e->refCtr_);
QS_END_NOCRIT_PRE_()
}
}
QF_CRIT_X_();
return e;
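//! @usage
//! A minimal sketch of draining a "raw" queue from a consumer thread
//! (names are hypothetical). Because raw queues are processed outside
//! of QF, dynamic events obtained from them must be recycled explicitly:
//! @code{.cpp}
//! QP::QEvt const *e = rawQueue.get(0U); // non-blocking get
//! if (e != nullptr) {
//!     // ... process the event ...
//!     QP::QF::gc(e); // explicitly recycle the dynamic event
//! }
//! @endcode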
const noexcept
//! Gets the number of free slots currently in "raw" thread-safe
//! QF event queue
//!
//! @note
//! This operation needs to be used with caution because the
//! number of free entries can change unexpectedly. The main intent for
//! using this operation is in conjunction with event deferral. In this
//! case the queue is accessed only from a single thread (by a single AO),
//! so the number of free entries cannot change unexpectedly.
//!
//! @sa QP::QMActive::defer(), QP::QMActive::recall()
return m_nFree;
const noexcept
//! "raw" thread-safe QF event queue operation for obtaining the minimum
//! number of free entries ever in the queue (a.k.a. "low-watermark").
//!
//! @details
//! This operation needs to be used with caution because the
//! "low-watermark" can change unexpectedly. The main intent for using
//! this operation is to get an idea of queue usage to size the queue
//! adequately.
//!
//! @returns the minimum number of free entries ever in the queue
//! since init.
return m_nMin;
const noexcept
//! "raw" thread-safe QF event queue operation to find out if the queue
//! is empty
//! @note
//! This operation needs to be used with caution because the
//! queue status can change unexpectedly. The main intent for using
//! this operation is in conjunction with event deferral. In this case
//! the queue is accessed only from a single thread (by a single AO),
//! so no other entity can post events to the queue.
//!
//! @sa QP::QMActive::defer(), QP::QMActive::recall()
return m_frontEvt == nullptr;
= delete
//! disallow copying of QP::QEQueue
= delete
//! disallow copying of QP::QEQueue
//! Native QF memory pool class
//!
//! @details
//! A fixed block-size memory pool is a very fast and efficient data
//! structure for dynamic allocation of fixed block-size chunks of memory.
//! A memory pool offers fast and deterministic allocation and recycling of
//! memory blocks and is not subject to fragmentation.@n
//! @n
//! The QP::QMPool class describes the native QF memory pool, which can be
//! used as the event pool for dynamic event allocation, or as a fast,
//! deterministic fixed block-size heap for any other objects in your
//! application.
//!
//! @note
//! The QP::QMPool class contains only data members for managing a memory
//! pool, but does not contain the pool storage, which must be provided
//! externally during the pool initialization.
//!
//! @note
//! The native QF event pool is configured by defining the macro
//! #QF_EPOOL_TYPE_ as QP::QMPool in the specific QF port header file.
//! start of the memory managed by this memory pool
//! end of the memory managed by this memory pool
//! head of linked list of free blocks
//! maximum block size (in bytes)
//! total number of blocks
//! number of free blocks remaining
//! minimum number of free blocks ever present in this pool
//!
//! @note
//! This attribute remembers the low watermark of the pool,
//! which provides a valuable information for sizing event pools.
//!
//! @sa QF::getPoolMin().
//! public default constructor
: m_start(nullptr),
m_end(nullptr),
m_free_head(nullptr),
m_blockSize(0U),
m_nTot(0U),
m_nFree(0U),
m_nMin(0U)
noexcept
//! Initializes the native QF event pool
//!
//! @details
//! Initialize a fixed block-size memory pool by providing it with the
//! pool memory to manage, size of this memory, and the block size.
//!
//! @param[in] poolSto pointer to the memory buffer for pool storage
//! @param[in] poolSize size of the storage buffer in bytes
//! @param[in] blockSize fixed-size of the memory blocks in bytes
//!
//! @attention
//! The caller of QMPool::init() must make sure that the `poolSto`
//! pointer is properly **aligned**. In particular, it must be possible to
//! efficiently store a pointer at the location pointed to by `poolSto`.
//! Internally, the QMPool::init() function rounds up the block size
//! `blockSize` so that it can fit an integer number of pointers. This
//! is done to achieve proper alignment of the blocks within the pool.
//!
//! @note
//! Due to the rounding of block size the actual capacity of the pool
//! might be less than (`poolSize` / `blockSize`). You can check the
//! capacity of the pool by calling the QF::getPoolMin() function.
//!
//! @note
//! This function is **not** protected by a critical section, because
//! it is intended to be called only during the initialization of the
//! system, when interrupts are not allowed yet.
//!
//! @note
//! Many QF ports use memory pools to implement the event pools.
//! @pre The memory block must be valid and
//! the poolSize must fit at least one free block and
//! the blockSize must not be too close to the top of the dynamic range
Q_REQUIRE_ID(100, (poolSto != nullptr)
&& (poolSize >= static_cast<std::uint_fast32_t>(sizeof(QFreeBlock)))
&& (static_cast<std::uint_fast16_t>(blockSize + sizeof(QFreeBlock))
> blockSize));
m_free_head = poolSto;
// round up the blockSize to fit an integer number of pointers...
//start with one
m_blockSize = static_cast<QMPoolSize>(sizeof(QFreeBlock));
//# free blocks in a memory block
std::uint_fast16_t nblocks = 1U;
while (m_blockSize < static_cast<QMPoolSize>(blockSize)) {
m_blockSize += static_cast<QMPoolSize>(sizeof(QFreeBlock));
++nblocks;
}
// use rounded-up value
blockSize = static_cast<std::uint_fast16_t>(m_blockSize);
// the whole pool buffer must fit at least one rounded-up block
Q_ASSERT_ID(110, poolSize >= blockSize);
// chain all blocks together in a free-list...
// don't count the last block
poolSize -= static_cast<std::uint_fast32_t>(blockSize);
m_nTot = 1U; // one (the last) block in the pool
// start at the head of the free list
QFreeBlock *fb = static_cast<QFreeBlock *>(m_free_head);
// chain all blocks together in a free-list...
while (poolSize >= blockSize) {
fb->m_next = &fb[nblocks]; // setup the next link
fb = fb->m_next; // advance to next block
// reduce the available pool size
poolSize -= static_cast<std::uint_fast32_t>(blockSize);
++m_nTot; // increment the number of blocks so far
}
fb->m_next = nullptr; // the last link points to NULL
m_nFree = m_nTot; // all blocks are free
m_nMin = m_nTot; // the minimum number of free blocks
m_start = poolSto; // the original start of this pool buffer
m_end = fb; // the last block in this pool
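//! @usage
//! A minimal initialization sketch (storage size and block size are
//! hypothetical; the storage must be suitably aligned to hold a pointer):
//! @code{.cpp}
//! alignas(void *) static std::uint8_t poolSto[10U * 16U];
//! static QP::QMPool myPool;
//!
//! // during system initialization:
//! myPool.init(poolSto, sizeof(poolSto), 16U); // 16-byte blocks
//! @endcode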
noexcept
//! Obtains a memory block from a memory pool
//!
//! @details
//! The function allocates a memory block from the pool and returns a
//! pointer to the block back to the caller.
//!
//! @param[in] margin the minimum number of unused blocks still
//! available in the pool after the allocation.
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @returns
//! A pointer to a memory block or NULL if no more blocks are available
//! in the memory pool.
//!
//! @note
//! This function can be called from any task level or ISR level.
//!
//! @note
//! The memory pool must be initialized before any events can
//! be requested from it. Also, the QP::QMPool::get() function uses
//! internally a QF critical section, so you should be careful not to
//! call it from within a critical section when nesting of critical
//! section is not supported.
//!
//! @attention
//! An allocated block must be later returned back to the **same** pool
//! from which it has been allocated.
//!
//! @sa
//! QP::QMPool::put()
Q_UNUSED_PAR(qs_id); // when Q_SPY not defined
QF_CRIT_STAT_
QF_CRIT_E_();
// have more free blocks than the requested margin?
QFreeBlock *fb;
if (m_nFree > static_cast<QMPoolCtr>(margin)) {
fb = static_cast<QFreeBlock *>(m_free_head); // get a free block
// the pool has some free blocks, so a free block must be available
Q_ASSERT_CRIT_(310, fb != nullptr);
// put volatile to a temporary to avoid UB
void * const fb_next = fb->m_next;
// is the pool becoming empty?
m_nFree = (m_nFree - 1U); // one free block less
if (m_nFree == 0U) {
// pool is becoming empty, so the next free block must be NULL
Q_ASSERT_CRIT_(320, fb_next == nullptr);
m_nMin = 0U;// remember that pool got empty
}
else {
//! @invariant
//! The pool is not empty, so the next free-block pointer
//! must be in range.
//!
//! @tr{PQP18_3}
// NOTE: The next free block pointer can fall out of range
// when the client code writes past the memory block, thus
// corrupting the next block.
Q_ASSERT_CRIT_(330, QF_PTR_RANGE_(fb_next, m_start, m_end));
// is the number of free blocks the new minimum so far?
if (m_nMin > m_nFree) {
m_nMin = m_nFree; // remember the minimum so far
}
}
m_free_head = fb_next; // set the head to the next free block
QS_BEGIN_NOCRIT_PRE_(QS_QF_MPOOL_GET, qs_id)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this memory pool
QS_MPC_PRE_(m_nFree); // # of free blocks in the pool
QS_MPC_PRE_(m_nMin); // min # free blocks ever in the pool
QS_END_NOCRIT_PRE_()
}
// don't have enough free blocks at this point
else {
fb = nullptr;
QS_BEGIN_NOCRIT_PRE_(QS_QF_MPOOL_GET_ATTEMPT, qs_id)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(m_start); // the memory managed by this pool
QS_MPC_PRE_(m_nFree); // the # free blocks in the pool
QS_MPC_PRE_(margin); // the requested margin
QS_END_NOCRIT_PRE_()
}
QF_CRIT_X_();
return fb; // return the block or NULL pointer to the caller
noexcept
//! Returns a memory block back to a memory pool
//!
//! @details
//! Recycle a memory block to the fixed block-size memory pool.
//!
//! @param[in] b pointer to the memory block that is being recycled
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @attention
//! The recycled block must be allocated from the **same** memory pool
//! to which it is returned.
//!
//! @note
//! This function can be called from any task level or ISR level.
//!
//! @sa
//! QP::QMPool::get()
Q_UNUSED_PAR(qs_id); // when Q_SPY not defined
//! @pre # free blocks cannot exceed the total # blocks and
//! the block pointer must be in range to come from this pool.
//!
Q_REQUIRE_ID(200, (m_nFree < m_nTot)
&& QF_PTR_RANGE_(b, m_start, m_end));
QF_CRIT_STAT_
QF_CRIT_E_();
static_cast<QFreeBlock*>(b)->m_next =
static_cast<QFreeBlock *>(m_free_head); // link into the free list
m_free_head = b; // set as new head of the free list
m_nFree = (m_nFree + 1U); // one more free block in this pool
QS_BEGIN_NOCRIT_PRE_(QS_QF_MPOOL_PUT, qs_id)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this memory pool
QS_MPC_PRE_(m_nFree); // the number of free blocks in the pool
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
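//! @usage
//! A minimal allocate/recycle sketch (using the hypothetical pool from
//! the QMPool::init() example above):
//! @code{.cpp}
//! void *blk = myPool.get(0U, 0U); // margin of 0: no margin required
//! if (blk != nullptr) {
//!     // ... use the block ...
//!     myPool.put(blk, 0U); // return the block to the SAME pool
//! }
//! @endcode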
const noexcept
//! return the fixed block-size of the blocks managed by this pool
return m_blockSize;
const noexcept
//! Memory pool operation for obtaining the minimum number of free
//! blocks ever in the pool (a.k.a. "low-watermark").
//!
//! @details
//! This operation needs to be used with caution because the
//! "low-watermark" can change unexpectedly. The main intent for using
//! this operation is to get an idea of pool usage to size the pool
//! adequately.
//!
//! @returns the minimum number of free entries ever in the memory pool
//! since init.
return m_nMin;
const noexcept
//! Memory pool operation for obtaining the current number of free
//! blocks in the pool.
//!
//! @details
//! This operation needs to be used with caution because the number
//! of free blocks can change unexpectedly.
//!
//! @returns the current number of free blocks in the memory pool.
return m_nFree;
= delete
//! disallow copying of QP::QMPool
= delete
//! disallow copying of QP::QMPool
noexcept
//! the "FromISR" variant used in the QP port to "FreeRTOS"
noexcept
//! the "FromISR" variant used in the QP port to "FreeRTOS"
//! "Ticker" Active Object class (inherits QP::QActive)
//!
//! @details
//! QP::QTicker is an efficient active object specialized to process
//! the QF system clock tick at a specified tick rate [0..#QF_MAX_TICK_RATE].
//! Placing system clock tick processing in an active object allows you
//! to remove the non-deterministic TICK_X() processing from the interrupt
//! level and move it into the thread-level, where you can prioritize it
//! as low as you wish.
//!
//! @usage
//! The following example illustrates use of QP::QTicker active objects:
//! @include qf_ticker.cpp
noexcept
//! constructor
: QActive(nullptr)
// reuse m_head for tick-rate
m_eQueue.m_head = static_cast<QEQueueCtr>(tickRate);
override
Q_UNUSED_PAR(e);
Q_UNUSED_PAR(qs_id);
m_eQueue.m_tail = 0U;
override
QTicker::init(nullptr, qs_id);
override
Q_UNUSED_PAR(e);
Q_UNUSED_PAR(qs_id);
QF_CRIT_STAT_
QF_CRIT_E_();
QEQueueCtr nTicks = m_eQueue.m_tail; // # ticks since the last call
m_eQueue.m_tail = 0U; // clear the # ticks
QF_CRIT_X_();
for (; nTicks > 0U; --nTicks) {
QTimeEvt::TICK_X(static_cast<std::uint_fast8_t>(m_eQueue.m_head),
this);
}
noexcept override
Q_UNUSED_PAR(e);
Q_UNUSED_PAR(margin);
Q_UNUSED_PAR(sender); // when Q_SPY not defined
QF_CRIT_STAT_
QF_CRIT_E_();
if (m_eQueue.m_frontEvt == nullptr) {
#ifdef Q_EVT_CTOR
static QEvt const tickEvt(0U, 0U);
#else
static QEvt const tickEvt = { 0U, 0U, 0U };
#endif // Q_EVT_CTOR
m_eQueue.m_frontEvt = &tickEvt; // deliver event directly
m_eQueue.m_nFree = (m_eQueue.m_nFree - 1U); // one less free event
QACTIVE_EQUEUE_SIGNAL_(this); // signal the event queue
}
// account for one more tick event
m_eQueue.m_tail = (m_eQueue.m_tail + 1U);
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_POST, m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(sender); // the sender object
QS_SIG_PRE_(0U); // the signal of the event
QS_OBJ_PRE_(this); // this active object
QS_2U8_PRE_(0U, 0U); // pool-Id & ref-ctr
QS_EQC_PRE_(0U); // number of free entries
QS_EQC_PRE_(0U); // min number of free entries
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
return true; // the event is always posted correctly
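//! @usage
//! A minimal QTicker sketch (the priority value is hypothetical and the
//! exact QActive::start() signature may differ between QP versions/ports):
//! @code{.cpp}
//! static QP::QTicker ticker0(0U); // ticker AO for tick rate 0
//!
//! // during startup -- QTicker needs no queue storage and no stack:
//! ticker0.start(1U, nullptr, 0U, nullptr, 0U);
//!
//! // from the system clock-tick ISR:
//! ticker0.POST(nullptr, nullptr); // trigger tick processing at thread level
//! @endcode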
//! Interrupt lock up-down counter (used in some QF ports)
//! Interrupt nesting up-down counter (used in some QF ports)
//! QF initialization
//!
//! @details
//! Initializes QF and must be called exactly once before any other QF
//! function. Typically, QP::QF::init() is called from main() even before
//! initializing the Board Support Package (BSP).
//!
//! @note
//! QP::QF::init() clears the internal QF variables, so that the framework
//! can start correctly even if the startup code fails to clear the
//! uninitialized data (as is required by the C Standard).
//! Function invoked by the application layer to stop the QF
//! application and return control to the OS/Kernel
//!
//! @details
//! This function stops the QF application. After calling this function,
//! QF attempts to gracefully stop the application. This graceful shutdown
//! might take some time to complete. The typical use of this function is
//! for terminating the QF application to return back to the operating
//! system or for handling fatal errors that require shutting down
//! (and possibly re-setting) the system.
//!
//! @attention
//! After calling QF::stop() the application must terminate and cannot
//! continue. In particular, QF::stop() is **not** intended to be followed
//! by a call to QF::init() to "resurrect" the application.
//!
//! @sa QP::QF::onCleanup()
//! Transfers control to QF to run the application
//!
//! @details
//! QF::run() is typically called from your startup code after you
//! initialize the QF and start at least one active object with
//! QActive::start().
//!
//! @returns
//! In QK, the QP::QF::run() function does not return.
//! Startup QF callback (defined in applications/ports)
//!
//! @details
//! The purpose of the QF::onStartup() callback is to configure and enable
//! hardware interrupts. The callback is invoked from QF::run(), right before
//! starting the underlying real-time kernel. By that time, the application
//! is considered ready to receive and service interrupts.
//!
//! This function is application-specific and is not implemented in QF, but
//! rather in the Board Support Package (BSP) for the given application.
//! Cleanup QF callback (defined in applications/ports)
noexcept
//! This function returns the minimum number of free entries of the given
//! event queue of an active object (indicated by priority `prio`)
//!
//! @details
//! Queries the minimum number of free entries ever present in the given
//! event queue of an active object with priority `prio`, since the active
//! object was started.
//!
//! @note
//! QF::getQueueMin() is available only when the native QF event queue
//! implementation is used. Requesting the queue minimum of an unused
//! priority level raises an assertion in the QF. (A priority level
//! becomes used in QF after the call to QActive::register_().)
//!
//! @param[in] prio Priority of the active object, whose queue is queried
//!
//! @returns
//! the minimum number of free entries ever present in the given event queue
//! of an active object with priority `prio`, since the active object was
//! started.
Q_REQUIRE_ID(400, (prio <= QF_MAX_ACTIVE)
&& (QActive::registry_[prio] != nullptr));
QF_CRIT_STAT_
QF_CRIT_E_();
std::uint_fast16_t const min = static_cast<std::uint_fast16_t>(
QActive::registry_[prio]->m_eQueue.getNMin());
QF_CRIT_X_();
return min;
noexcept
//! Publish-subscribe initialization
//!
//! @deprecated
//! superseded by QActive::psInit()
QActive::psInit(subscrSto, maxSignal);
noexcept
//! Publish event to all subscribers of a given signal `e->sig`
//!
//! @deprecated
//! superseded by QActive::publish_()
QActive::publish_(e, sender, qs_id);
noexcept
//! Processes all armed time events at every clock tick
//!
//! @deprecated
//! superseded by QTimeEvt::tick_()
QTimeEvt::tick_(tickRate, sender);
//! Special value of margin that causes asserting failure in case
//! event allocation or event posting fails
{0xFFFFU};
noexcept
//! Event pool initialization for dynamic allocation of events.
//!
//! @details
//! This function initializes one event pool at a time and must be called
//! exactly once for each event pool before the pool can be used.
//!
//! @param[in] poolSto pointer to the storage for the event pool
//! @param[in] poolSize size of the storage for the pool in bytes
//! @param[in] evtSize the block-size of the pool in bytes, which
//! determines the maximum size of events that
//! can be allocated from the pool
//! @note
//! You can initialize multiple event pools by making consecutive calls
//! to the QF::poolInit() function. However, for the simplicity of the
//! internal implementation, you must initialize event pools in the
//! ascending order of the event size.
//!
//! @note
//! The actual number of events available in the pool might be
//! less than (`poolSize / evtSize`) due to the internal alignment of
//! the blocks that the pool might perform. You can always check the
//! capacity of the pool by calling QF::getPoolMin().
//!
//! @note
//! The dynamic allocation of events is optional, meaning that you might
//! choose not to use dynamic events. In that case calling
//! QF::poolInit() and using up memory for the memory blocks is
//! unnecessary.
//!
//! @sa QF initialization example for QF::init()
//! @pre cannot exceed the number of available memory pools
Q_REQUIRE_ID(200, QF::maxPool_ < QF_MAX_EPOOL);
//! @pre QF event pools must be initialized in ascending order of evtSize
if (QF::maxPool_ > 0U) {
Q_REQUIRE_ID(201,
QF_EPOOL_EVENT_SIZE_(QF::ePool_[QF::maxPool_ - 1U]) < evtSize);
}
QF_EPOOL_INIT_(QF::ePool_[QF::maxPool_], poolSto, poolSize, evtSize);
++QF::maxPool_; // one more pool
#ifdef Q_SPY
// generate the object-dictionary entry for the initialized pool
char obj_name[9] = "EvtPool?";
obj_name[7] = static_cast<char>(
static_cast<std::int8_t>('0')
+ static_cast<std::int8_t>(QF::maxPool_));
QS::obj_dict_pre_(&QF::ePool_[QF::maxPool_ - 1U], &obj_name[0]);
#endif // Q_SPY
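//! @usage
//! A minimal sketch of initializing two event pools in ascending order of
//! event size (the event types SmallEvt/LargeEvt are hypothetical):
//! @code{.cpp}
//! static QF_MPOOL_EL(SmallEvt) smlPoolSto[20];
//! static QF_MPOOL_EL(LargeEvt) lrgPoolSto[10];
//!
//! QP::QF::poolInit(smlPoolSto, sizeof(smlPoolSto), sizeof(smlPoolSto[0]));
//! QP::QF::poolInit(lrgPoolSto, sizeof(lrgPoolSto), sizeof(lrgPoolSto[0]));
//! @endcode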
noexcept
//! Internal QF implementation of creating new dynamic mutable event
//!
//! @details
//! Allocates an event dynamically from one of the QF event pools.
//!
//! @param[in] evtSize the size (in bytes) of the event to allocate
//! @param[in] margin the number of un-allocated events still available
//! in a given event pool after the allocation
//! completes. The special value QF::NO_MARGIN
//! means that this function will assert if allocation
//! fails.
//! @param[in] sig the signal to be assigned to the allocated event
//!
//! @returns
//! pointer to the newly allocated event. This pointer can be nullptr
//! only if margin != QF::NO_MARGIN and the event cannot be allocated with
//! the specified margin still available in the given pool.
//!
//! @note
//! The internal QF function QF::newX_() raises an assertion when
//! the margin argument is QF::NO_MARGIN and allocation of the event
//! turns out to be impossible due to event pool depletion, or incorrect
//! (too big) size of the requested event.
//!
//! @note
//! The application code should not call this function directly.
//! The only allowed use is through the macros Q_NEW() or Q_NEW_X().
std::uint_fast8_t idx;
// find the pool id that fits the requested event size ...
for (idx = 0U; idx < QF::maxPool_; ++idx) {
if (evtSize <= QF_EPOOL_EVENT_SIZE_(QF::ePool_[idx])) {
break;
}
}
// cannot run out of registered pools
Q_ASSERT_ID(310, idx < QF::maxPool_);
// get e -- platform-dependent
QEvt *e;
#ifdef Q_SPY
QF_EPOOL_GET_(QF::ePool_[idx], e, ((margin != QF::NO_MARGIN) ? margin : 0U),
static_cast<std::uint_fast8_t>(QS_EP_ID) + idx + 1U);
#else
QF_EPOOL_GET_(QF::ePool_[idx], e, ((margin != QF::NO_MARGIN) ? margin : 0U),
0U);
#endif
// was e allocated correctly?
QS_CRIT_STAT_
if (e != nullptr) {
e->sig = static_cast<QSignal>(sig); // set the signal
e->poolId_ = static_cast<std::uint8_t>(idx + 1U); // store pool ID
e->refCtr_ = 0U; // initialize the reference counter to 0
QS_BEGIN_PRE_(QS_QF_NEW,
static_cast<std::uint_fast8_t>(QS_EP_ID)
+ static_cast<std::uint_fast8_t>(e->poolId_))
QS_TIME_PRE_(); // timestamp
QS_EVS_PRE_(evtSize); // the size of the evt
QS_SIG_PRE_(sig); // the signal of the evt
QS_END_PRE_()
}
else {
// This assertion means that the event allocation failed,
// and this failure cannot be tolerated. The most frequent
// reason is an event leak in the application.
Q_ASSERT_ID(320, margin != QF::NO_MARGIN);
QS_BEGIN_PRE_(QS_QF_NEW_ATTEMPT,
static_cast<std::uint_fast8_t>(QS_EP_ID) + idx + 1U)
QS_TIME_PRE_(); // timestamp
QS_EVS_PRE_(evtSize); // the size of the evt
QS_SIG_PRE_(sig); // the signal of the evt
QS_END_PRE_()
}
return e; // can't be NULL if we can't tolerate bad allocation
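//! @usage
//! Application code allocates dynamic events only through the Q_NEW() or
//! Q_NEW_X() macros (the event type MyEvt and signal MY_SIG are hypothetical;
//! with the Q_EVT_CTOR option the macros take constructor arguments instead):
//! @code{.cpp}
//! // allocation with the delivery guarantee (asserts if the pool is empty):
//! MyEvt *e = Q_NEW(MyEvt, MY_SIG);
//!
//! // allocation that can fail, leaving a margin of 3 events in the pool:
//! MyEvt *ex = Q_NEW_X(MyEvt, 3U, MY_SIG);
//! if (ex != nullptr) {
//!     // ... allocation succeeded with the requested margin ...
//! }
//! @endcode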
noexcept
//! Recycle a dynamic event
//!
//! @details
//! This function implements a garbage collector for dynamic events.
//! Only dynamic events are candidates for recycling. (A dynamic event
//! is one that is allocated from an event pool, which is determined as
//! non-zero `e->poolId_` attribute.) Next, the function decrements the
//! reference counter of the event (`e->refCtr_`), and recycles the event
//! only if the counter drops to zero (meaning that no more references
//! are outstanding for this event). The dynamic event is recycled by
//! returning it to the pool from which it was originally allocated.
//!
//! @param[in] e pointer to the event to recycle
//!
//! @note
//! QF invokes the garbage collector at all appropriate contexts, when
//! an event can become garbage (automatic garbage collection), so the
//! application code should have no need to call QF::gc() directly.
//! The QF::gc() function is exposed only for special cases when your
//! application sends dynamic events to the "raw" thread-safe queues
//! (see QP::QEQueue). Such queues are processed outside of QF and the
//! automatic garbage collection is **NOT** performed for these events.
//! In this case you need to call QF::gc() explicitly.
// is it a dynamic event?
if (e->poolId_ != 0U) {
QF_CRIT_STAT_
QF_CRIT_E_();
// isn't this the last reference?
if (e->refCtr_ > 1U) {
QS_BEGIN_NOCRIT_PRE_(QS_QF_GC_ATTEMPT,
static_cast<std::uint_fast8_t>(QS_EP_ID)
+ static_cast<std::uint_fast8_t>(e->poolId_))
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(e->sig); // the signal of the event
QS_2U8_PRE_(e->poolId_, e->refCtr_); // pool Id & refCtr
QS_END_NOCRIT_PRE_()
QEvt_refCtr_dec_(e); // decrement the ref counter
QF_CRIT_X_();
}
// this is the last reference to this event, recycle it
else {
std::uint_fast8_t const idx =
static_cast<std::uint_fast8_t>(e->poolId_) - 1U;
QS_BEGIN_NOCRIT_PRE_(QS_QF_GC,
static_cast<std::uint_fast8_t>(QS_EP_ID)
+ static_cast<std::uint_fast8_t>(e->poolId_))
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(e->sig); // the signal of the event
QS_2U8_PRE_(e->poolId_, e->refCtr_);
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
// pool ID must be in range
Q_ASSERT_ID(410, idx < QF::maxPool_);
#ifdef Q_EVT_XTOR
// explicitly execute the destructor
// NOTE: casting 'const' away is legitimate,
// because it's a pool event
QF_CONST_CAST_(QEvt*, e)->~QEvt(); // xtor,
#endif
#ifdef Q_SPY
// cast 'const' away, which is OK, because it's a pool event
QF_EPOOL_PUT_(QF::ePool_[idx], QF_CONST_CAST_(QEvt*, e),
static_cast<std::uint_fast8_t>(QS_EP_ID)
+ static_cast<std::uint_fast8_t>(e->poolId_));
#else
QF_EPOOL_PUT_(QF::ePool_[idx], QF_CONST_CAST_(QEvt*, e), 0U);
#endif
}
}
noexcept
//! Obtain the block size of any registered event pools
return QF_EPOOL_EVENT_SIZE_(QF::ePool_[QF::maxPool_ - 1U]);
noexcept
//! Internal QF implementation of creating new event reference
//!
//! @details
//! Creates and returns a new reference to the current event e
//!
//! @param[in] e pointer to the current event
//! @param[in] evtRef the event reference
//!
//! @returns
//! the newly created reference to the event `e`
//!
//! @note
//! The application code should not call this function directly.
//! The only allowed use is through the macro Q_NEW_REF().
//! @pre the event must be dynamic and the provided event reference
//! must not be already in use
Q_REQUIRE_ID(500, (e->poolId_ != 0U)
&& (evtRef == nullptr));
QF_CRIT_STAT_
QF_CRIT_E_();
QEvt_refCtr_inc_(e); // increments the ref counter
QS_BEGIN_NOCRIT_PRE_(QS_QF_NEW_REF,
static_cast<std::uint_fast8_t>(QS_EP_ID)
+ static_cast<std::uint_fast8_t>(e->poolId_))
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(e->sig); // the signal of the event
QS_2U8_PRE_(e->poolId_, e->refCtr_); // pool Id & ref Count
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
return e;
noexcept
//! Internal QF implementation of deleting event reference
//!
//! @details
//! Deletes an existing reference to the event e
//!
//! @param[in] evtRef the event reference
//!
//! @note
//! The application code should not call this function directly.
//! The only allowed use is through the macro Q_DELETE_REF().
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QF_DELETE_REF,
static_cast<std::uint_fast8_t>(QS_EP_ID)
+ static_cast<std::uint_fast8_t>(evtRef->poolId_))
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(evtRef->sig); // the signal of the event
QS_2U8_PRE_(evtRef->poolId_, evtRef->refCtr_); // pool Id & ref Count
QS_END_PRE_()
#if (QF_MAX_EPOOL > 0U)
gc(evtRef); // recycle the referenced event
#endif
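//! @usage
//! Application code manages event references only through the Q_NEW_REF()
//! and Q_DELETE_REF() macros, typically inside a state handler where the
//! current event `e` is in scope (the member m_deferredRef and the event
//! type TableEvt are hypothetical):
//! @code{.cpp}
//! Q_NEW_REF(m_deferredRef, TableEvt); // store a reference to the current event
//! // ... later, when the reference is no longer needed:
//! Q_DELETE_REF(m_deferredRef);        // release the reference
//! @endcode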
noexcept
//! This function returns the minimum of free entries of the given
//! event pool
//!
//! @details
//! This function obtains the minimum number of free blocks in the given
//! event pool since this pool has been initialized by a call to
//! QP::QF::poolInit().
//!
//! @param[in] poolId event pool ID in the range 1..QF::maxPool_, where
//! QF::maxPool_ is the number of event pools
//! initialized with the function QF::poolInit().
//! @returns
//! the minimum number of unused blocks in the given event pool.
//! @pre the poolId must be in range
Q_REQUIRE_ID(400, (QF::maxPool_ <= QF_MAX_EPOOL)
&& (0U < poolId) && (poolId <= QF::maxPool_));
QF_CRIT_STAT_
QF_CRIT_E_();
std::uint_fast16_t const min = static_cast<std::uint_fast16_t>(
QF::ePool_[poolId - 1U].getNMin());
QF_CRIT_X_();
return min;
noexcept
//! the "FromISR" variant used in the QP port to "FreeRTOS"
noexcept
//! the "FromISR" variant used in the QP port to "FreeRTOS"
//! "Ready-set" of all threads used in the built-in kernels
//! event pools managed by QF
//! number of initialized event pools
noexcept
//! Clear a specified region of memory to zero
//!
//! @details
//! Clears a memory buffer by writing zeros byte-by-byte.
//!
//! @param[in] start pointer to the beginning of a memory buffer.
//! @param[in] len length of the memory buffer to clear (in bytes)
//!
//! @note The main application of this function is clearing the internal
//! QF variables upon startup. This is done to avoid problems with
//! non-standard startup code provided with some compilers and toolchains
//! (e.g., TI DSPs or Microchip MPLAB), which does not zero the
//! uninitialized variables, as required by the C++ standard.
std::uint8_t *ptr = static_cast<std::uint8_t *>(start);
for (std::uint_fast16_t n = len; n > 0U; --n) {
*ptr = 0U;
++ptr;
}
Native QF event pool
QMPool
Native QF event pool initialization
\
(p_).init((poolSto_), (poolSize_), (evtSize_))
Native QF event pool event-size getter
((p_).getBlockSize())
Native QF event pool get-event
\
((e_) = static_cast<QEvt *>((p_).get((m_), (qs_id_))))
Native QF event pool put-event
((p_).put((e_), (qs_id_)))
//! The size [bytes] of the internal QS buffer-counters. Valid values: 2U or 4U;
//! default 2U.
//!
//! @details
//! This macro can be defined in the QS port file (qs_port.hpp) to
//! configure the QS::QSCtr type. Here the macro is not defined so the
//! default of 2 bytes is chosen.
2U
//! Size (in bytes) of the QS time stamp
//!
//! @details
//! This macro can be defined in the QS port file (qs_port.hpp) to configure
//! the QP::QSTimeCtr type. Valid values 1U, 2U, 4U. Default 4U.
4U
//! Initialize the QS facility
//!
//! @details
//! This macro provides an indirection layer to invoke the QS initialization
//! routine if #Q_SPY is defined, or do nothing if #Q_SPY is not defined.
//! @sa QP::QS::onStartup(), example of setting up a QS filter in
//! QS_GLB_FILTER()
(QP::QS::onStartup(arg_))
//! Cleanup the QS facility
//!
//! @details
//! This macro provides an indirection layer to invoke the QS cleanup
//! routine if #Q_SPY is defined, or do nothing if #Q_SPY is not defined.
//! @sa QP::QS::onCleanup()
(QP::QS::onCleanup())
//! macro to handle the QS output from the application
//!
//! @note
//! If this macro is used, the application must define QS::doOutput().
(QP::QS::doOutput())
//! macro to handle the QS-RX input to the application
//!
//! @note
//! If this macro is used, the application must define QS::doInput().
(QP::QS::doInput())
//! Global Filter ON for a given record type `rec_`
//!
//! @details
//! This macro provides an indirection layer to call QP::QS::filterOn()
//! if #Q_SPY is defined, or do nothing if #Q_SPY is not defined.
//!
//! @sa
//! - QP::QSpyRecordGroups - QS record groups that can be used as `rec_`
//! - QP::QSpyRecords - individual QS records that can be used as `rec_`
//!
//! @usage
//! The following example shows how to use QS filters:
//! @include qs_filter.cpp
\
(QP::QS::glbFilter_(static_cast<std::int_fast16_t>(rec_)))
//! Local Filter for a given state machine object `qs_id`
//! @details
//! This macro provides an indirection layer to call QS_locFilter_()
//! if #Q_SPY is defined, or do nothing if #Q_SPY is not defined.
//!
//! @sa
//! - QP::QSpyIdGroups - QS ID groups that can be used as `qs_id_`
//! - QP::QSpyIdOffsets - QS ID offsets for `qs_id_` (e.g., QS_AP_IDS + 5)
//!
//! The following example shows how to use QS filters:
//! @include qs_filter.cpp
\
(QP::QS::locFilter_(static_cast<std::int_fast16_t>(qs_id_)))
//! Begin an application-specific QS record with entering critical section
//!
//! @details
//! The following example shows how to build a user QS record using the
//! macros QS_BEGIN_ID(), QS_END(), and the formatted output macros:
//! QS_U8(), QS_STR(), etc.
//!
//! @note
//! Must always be used in pair with QS_END()
//!
//! @include qs_user.cpp
\
if (QS_GLB_CHECK_(rec_) && QS_LOC_CHECK_(qs_id_)) { \
QS_CRIT_STAT_ \
QS_CRIT_E_(); \
QP::QS::beginRec_(static_cast<std::uint_fast8_t>(rec_)); \
QS_TIME_PRE_();
//! End an application-specific QS record with exiting critical section.
//! @sa example for QS_BEGIN_ID()
//! @note Must always be used in pair with QS_BEGIN_ID()
\
QP::QS::endRec_(); \
QS_CRIT_X_(); \
}
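//! @usage
//! A minimal application-specific record sketch (the record id PHILO_STAT,
//! the index n and the status string are hypothetical):
//! @code{.cpp}
//! QS_BEGIN_ID(PHILO_STAT, AO_Philo[n]->getPrio())
//!     QS_U8(1U, n);   // application byte: Philosopher number
//!     QS_STR(stat);   // application string: Philosopher status
//! QS_END()
//! @endcode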
//! Flush the QS trace data to the host
//!
//! @details
//! This macro invokes the QP::QS::flush() platform-dependent callback
//! function to flush the QS trace buffer to the host. The function
//! typically busy-waits until all the data in the buffer is sent to
//! the host. This is acceptable only in the initial transient.
(QP::QS::onFlush())
//! Begin an application-specific QS record WITHOUT entering critical section
\
if (QS_GLB_CHECK_(rec_) && QS_LOC_CHECK_(qs_id_)) { \
QP::QS::beginRec_(rec_); \
QS_TIME_PRE_();
//! End an application-specific QS record WITHOUT exiting critical section.
\
QP::QS::endRec_(); \
}
//! Helper macro for checking the global QS filter
\
((static_cast<std::uint_fast8_t>(QP::QS::priv_.glbFilter[ \
static_cast<std::uint_fast8_t>(rec_) >> 3U]) \
& (static_cast<std::uint_fast8_t>(1U) \
<< (static_cast<std::uint_fast8_t>(rec_) & 7U))) != 0U)
//! Helper macro for checking the local QS filter
\
((static_cast<std::uint_fast8_t>(QP::QS::priv_.locFilter \
[static_cast<std::uint_fast8_t>(qs_id_) >> 3U]) \
& (static_cast<std::uint_fast8_t>(1U) \
<< (static_cast<std::uint_fast8_t>(qs_id_) & 7U))) != 0U)
//! Macro to execute user code when a QS record is produced
//!
//! @note
//! This is a dummy definition in case this macro is undefined.
(static_cast<void>(0))
//! Output formatted std::int8_t to the QS record
\
(QP::QS::u8_fmt_(static_cast<std::uint8_t>( \
(static_cast<std::uint8_t>(((width_) << 4U) & 0x7U)) \
| static_cast<std::uint8_t>(QP::QS::I8_ENUM_T)), (data_)))
//! Output formatted std::uint8_t to the QS record
\
(QP::QS::u8_fmt_(static_cast<std::uint8_t>( \
(static_cast<std::uint8_t>((width_) << 4U)) \
| static_cast<std::uint8_t>(QP::QS::U8_T)), (data_)))
//! Output formatted std::int16_t to the QS record
\
(QP::QS::u16_fmt_(static_cast<std::uint8_t>( \
(static_cast<std::uint8_t>((width_) << 4U)) \
| static_cast<std::uint8_t>(QP::QS::I16_T)), (data_)))
//! Output formatted std::uint16_t to the QS record
\
(QP::QS::u16_fmt_(static_cast<std::uint8_t>((((width_) << 4U)) \
| static_cast<std::uint8_t>(QP::QS::U16_T)), (data_)))
//! Output formatted std::int32_t to the QS record
\
(QP::QS::u32_fmt_( \
static_cast<std::uint8_t>((static_cast<std::uint8_t>((width_) << 4U)) \
| static_cast<std::uint8_t>(QP::QS::I32_T)), (data_)))
//! Output formatted std::uint32_t to the QS record
\
(QP::QS::u32_fmt_(static_cast<std::uint8_t>( \
(static_cast<std::uint8_t>((width_) << 4U)) \
| static_cast<std::uint8_t>(QP::QS::U32_T)), (data_)))
//! Output formatted std::int64_t to the QS record
\
(QP::QS::u64_fmt_(static_cast<std::uint8_t>( \
(static_cast<std::uint8_t>((width_) << 4U)) \
| static_cast<std::uint8_t>(QP::QS::I64_T)), (data_)))
//! Output formatted std::uint64_t to the QS record
\
(QP::QS::u64_fmt_(static_cast<std::uint8_t>( \
(static_cast<std::uint8_t>((width_) << 4U)) \
| static_cast<std::uint8_t>(QP::QS::U64_T)), (data_)))
//! Output formatted 32-bit floating point number to the QS record
\
(QP::QS::f32_fmt_(static_cast<std::uint8_t>( \
(static_cast<std::uint8_t>((width_) << 4U)) \
| static_cast<std::uint8_t>(QP::QS::F32_T)), (data_)))
//! Output formatted 64-bit floating point number to the QS record
\
(QP::QS::f64_fmt_(static_cast<std::uint8_t>( \
(static_cast<std::uint8_t>((width_) << 4U)) \
| static_cast<std::uint8_t>(QP::QS::F64_T)), (data_)))
//! Output formatted zero-terminated ASCII string to the QS record
(QP::QS::str_fmt_(str_))
//! Output formatted memory block of up to 255 bytes to the QS record
(QP::QS::mem_fmt_((mem_), (size_)))
//! Output formatted enumeration to the QS record
\
(QP::QS::u8_fmt_(static_cast<std::uint8_t>(0x80U | ((group_) << 4U)) \
| static_cast<std::uint8_t>(QP::QS::I8_ENUM_T),\
static_cast<std::uint8_t>(value_)))
//! Output time stamp to a QS record (used in predefined
//! and application-specific trace records)
(QP::QS::u32_raw_(QP::QS::onGetTime()))
(QP::QS::u16_raw_(QP::QS::onGetTime()))
(QP::QS::u8_raw_(QP::QS::onGetTime()))
//! Output formatted object pointer to the QS record
(QP::QS::u32_fmt_(QP::QS::OBJ_T, \
reinterpret_cast<std::uint32_t>(obj_)))
(QP::QS::u16_fmt_(QP::QS::OBJ_T, \
reinterpret_cast<std::uint16_t>(obj_)))
(QP::QS::u8_fmt_(QP::QS::OBJ_T, \
reinterpret_cast<std::uint8_t>(obj_)))
(QP::QS::u64_fmt_(QP::QS::OBJ_T, \
reinterpret_cast<std::uint64_t>(obj_)))
//! Output formatted function pointer to the QS record
(QP::QS::u32_fmt_(QP::QS::FUN_T, \
reinterpret_cast<std::uint32_t>(fun_)))
(QP::QS::u16_fmt_(QP::QS::FUN_T, \
reinterpret_cast<std::uint16_t>(fun_)))
(QP::QS::u8_fmt_(QP::QS::FUN_T, \
reinterpret_cast<std::uint8_t>(fun_)))
(QP::QS::u64_fmt_(QP::QS::FUN_T, \
reinterpret_cast<std::uint64_t>(fun_)))
//! Output formatted event signal (of type QP::QSignal) and
//! the state machine object to the user QS record
\
QP::QS::u32_fmt_(QP::QS::SIG_T, static_cast<std::uint32_t>(sig_)); \
QP::QS::obj_raw_(obj_)
\
QP::QS::u16_fmt_(QP::QS::SIG_T, static_cast<std::uint16_t>(sig_)); \
QP::QS::obj_raw_(obj_)
\
QP::QS::u8_fmt_(QP::QS::SIG_T, static_cast<std::uint8_t>(sig_)); \
QP::QS::obj_raw_(obj_)
//! Output signal dictionary record
//!
//! @details
//! A signal dictionary record associates the numerical value of the signal
//! and the binary address of the state machine that consumes that signal
//! with the human-readable name of the signal.
//!
//! Providing a signal dictionary QS record can vastly improve readability of
//! the QS log, because instead of dealing with cryptic machine addresses the
//! QSpy host utility can display human-readable names.
//!
//! A signal dictionary entry is associated with both the signal value `sig_`
//! and the state machine `obj_`, because signals are required to be unique
//! only within a given state machine and therefore the same numerical values
//! can represent different signals in different state machines.
//!
//! For the "global" signals that have the same meaning in all state machines
//! (such as globally published signals), you can specify a signal dictionary
//! entry with the `obj_` parameter set to NULL.
//!
//! The following example shows the definition of signal dictionary entries
//! in the initial transition of the Table active object. Please note that
//! signals HUNGRY_SIG and DONE_SIG are associated with the Table state
//! machine only ("me" `obj_` pointer). The EAT_SIG signal, on the other
//! hand, is global (0 `obj_` pointer):
//! @include qs_sigDic.cpp
//!
//! @note The QSpy log utility must capture the signal dictionary record
//! in order to use the human-readable information. You need to connect to
//! the target before the dictionary entries have been transmitted.
//!
//! The following QSpy log example shows the signal dictionary records
//! generated from the Table initial transition and subsequent records that
//! show human-readable names of the signals:
//! @include qs_sigLog.txt
//!
//! The following QSpy log example shows the same sequence of records, but
//! with dictionary records removed. The human-readable signal names are not
//! available.
\
(QP::QS::sig_dict_pre_((sig_), (obj_), #sig_))
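//! @usage
//! A minimal sketch of providing signal dictionaries from the top-most
//! initial transition (the signal names follow the Table example above):
//! @code{.cpp}
//! QS_SIG_DICTIONARY(HUNGRY_SIG, this);    // signal specific to Table
//! QS_SIG_DICTIONARY(DONE_SIG,   this);    // signal specific to Table
//! QS_SIG_DICTIONARY(EAT_SIG,    nullptr); // globally published signal
//! @endcode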
//! Output object dictionary record
//!
//! @details
//! An object dictionary record associates the binary address of an object
//! in the target's memory with the human-readable name of the object.
//!
//! Providing an object dictionary QS record can vastly improve readability of
//! the QS log, because instead of dealing with cryptic machine addresses the
//! QSpy host utility can display human-readable object names.
//!
//! The following example shows the definition of object dictionary entry
//! for the Table active object:
//! @include qs_objDic.cpp
\
(QP::QS::obj_dict_pre_((obj_), #obj_))
//! Output object-array dictionary record
//!
//! @details
//! An object array dictionary record associates the binary address of the
//! object element in the target's memory with the human-readable name
//! of the object.
//!
//! Providing a dictionary QS record can vastly improve readability of
//! the QS log, because instead of dealing with cryptic machine addresses the
//! QSpy host utility can display human-readable object names.
//!
//! The following example shows the definition of object array dictionary
//! for `Philo::inst[n]` and `Philo::inst[n].m_timeEvt`:
//! @include qs_objDic.cpp
\
(QP::QS::obj_arr_dict_pre_((obj_), (idx_), #obj_))
//! Output function dictionary record
//!
//! @details
//! A function dictionary record associates the binary address of a function
//! in the target's memory with the human-readable name of the function.
//!
//! Providing a function dictionary QS record can vastly improve readability
//! of the QS log, because instead of dealing with cryptic machine addresses
//! the QSpy host utility can display human-readable function names.
//!
//! The example from #QS_SIG_DICTIONARY shows the definition of a function
//! dictionary.
\
(QP::QS::fun_dict_pre_( \
QP::QS::force_cast<void (*)(void)>(fun_), #fun_))
//! Output user QS record dictionary record
//!
//! @details
//! A user QS record dictionary record associates the numerical value of a
//! user record with the human-readable identifier.
\
(QP::QS::usr_dict_pre_((rec_), #rec_))
//! Output enumeration dictionary record
//!
//! @details
//! An enum QS record dictionary record associates the numerical value of
//! an enumeration with the human-readable identifier.
\
(QP::QS::enum_dict_pre_((value_), (group_), #value_))
//! Output the critical section entry record
(QP::QS::crit_entry_pre_())
//! Output the critical section exit record
(QP::QS::crit_exit_pre_())
//! Output the interrupt entry record
\
(QP::QS::isr_entry_pre_((isrnest_), (prio_)))
//! Output the interrupt exit record
\
(QP::QS::isr_exit_pre_((isrnest_), (prio_)))
//! Execute an action that is only necessary for QS output
(act_)
//! Produce the assertion failure trace record
\
(QP::QS::assertion_pre_((module_), (loc_), (delay_)))
//! Constant representing End-Of-Data condition returned from the
//! QS::getByte() function.
(static_cast<std::uint16_t>(0xFFFFU))
//! Constant representing command enumeration group
//! in QS_ENUM_DICTIONARY() and QS_ENUM()
//! @sa QS::onCommand()
(static_cast<std::uint8_t>(7U))
//! Constant representing HEX format for the "width" field
//! in QS_U8(), QS_U16(), QS_U32(), and QS_U64().
(static_cast<std::uint8_t>(0x0FU))
//! QS ring buffer counter and offset type
= std::uint16_t;
= std::uint32_t;
//! QS time stamp type, which determines the dynamic range of QS time stamps
= std::uint32_t;
= std::uint16_t;
= std::uint8_t;
//! QS function pointer type (for serializing function pointers)
= std::uint32_t;
= std::uint64_t;
= std::uint16_t;
= std::uint8_t;
//! QS pre-defined record types (TX channel)
//!
//! @details
//! This enumeration specifies the record types used in the QP components.
//! You can specify your own record types starting from QP::QS_USER offset.
//! Currently, the maximum of all records cannot exceed 125.
//!
//! @note
//! The QS records labeled as "not maskable" are always enabled and cannot
//! be turned off with the QS_GLB_FILTER() macro. Other QS trace records
//! can be disabled by means of the "global filters".
//!
//! @sa QS_GLB_FILTER() macro
: std::int8_t {
// [0] QS session (not maskable)
QS_EMPTY, //!< QS record for cleanly starting a session
// [1] SM records
QS_QEP_STATE_ENTRY, //!< a state was entered
QS_QEP_STATE_EXIT, //!< a state was exited
QS_QEP_STATE_INIT, //!< an initial transition was taken in a state
QS_QEP_INIT_TRAN, //!< the top-most initial transition was taken
QS_QEP_INTERN_TRAN, //!< an internal transition was taken
QS_QEP_TRAN, //!< a regular transition was taken
QS_QEP_IGNORED, //!< an event was ignored (silently discarded)
QS_QEP_DISPATCH, //!< an event was dispatched (begin of RTC step)
QS_QEP_UNHANDLED, //!< an event was unhandled due to a guard
// [10] Active Object (AO) records
QS_QF_ACTIVE_DEFER, //!< AO deferred an event
QS_QF_ACTIVE_RECALL, //!< AO recalled an event
QS_QF_ACTIVE_SUBSCRIBE, //!< an AO subscribed to an event
QS_QF_ACTIVE_UNSUBSCRIBE,//!< an AO unsubscribed to an event
QS_QF_ACTIVE_POST, //!< an event was posted (FIFO) directly to AO
QS_QF_ACTIVE_POST_LIFO, //!< an event was posted (LIFO) directly to AO
QS_QF_ACTIVE_GET, //!< AO got an event and its queue is not empty
QS_QF_ACTIVE_GET_LAST,//!< AO got an event and its queue is empty
QS_QF_ACTIVE_RECALL_ATTEMPT, //!< AO attempted to recall an event
// [19] Event Queue (EQ) records
QS_QF_EQUEUE_POST, //!< an event was posted (FIFO) to a raw queue
QS_QF_EQUEUE_POST_LIFO, //!< an event was posted (LIFO) to a raw queue
QS_QF_EQUEUE_GET, //!< get an event and queue still not empty
QS_QF_EQUEUE_GET_LAST,//!< get the last event from the queue
// [23] Framework (QF) records
QS_QF_NEW_ATTEMPT, //!< an attempt to allocate an event failed
// [24] Memory Pool (MP) records
QS_QF_MPOOL_GET, //!< a memory block was removed from memory pool
QS_QF_MPOOL_PUT, //!< a memory block was returned to memory pool
// [26] Additional Framework (QF) records
QS_QF_PUBLISH, //!< an event was published to active objects
QS_QF_NEW_REF, //!< new event reference was created
QS_QF_NEW, //!< new event was created
QS_QF_GC_ATTEMPT, //!< garbage collection attempt
QS_QF_GC, //!< garbage collection
QS_QF_TICK, //!< QTimeEvt::tick_() was called
// [32] Time Event (TE) records
QS_QF_TIMEEVT_ARM, //!< a time event was armed
QS_QF_TIMEEVT_AUTO_DISARM, //!< a time event expired and was disarmed
QS_QF_TIMEEVT_DISARM_ATTEMPT,//!< attempt to disarm a disarmed QTimeEvt
QS_QF_TIMEEVT_DISARM, //!< true disarming of an armed time event
QS_QF_TIMEEVT_REARM, //!< rearming of a time event
QS_QF_TIMEEVT_POST, //!< a time event posted itself directly to an AO
// [38] Additional Framework (QF) records
QS_QF_DELETE_REF, //!< an event reference is about to be deleted
QS_QF_CRIT_ENTRY, //!< critical section was entered
QS_QF_CRIT_EXIT, //!< critical section was exited
QS_QF_ISR_ENTRY, //!< an ISR was entered
QS_QF_ISR_EXIT, //!< an ISR was exited
QS_QF_INT_DISABLE, //!< interrupts were disabled
QS_QF_INT_ENABLE, //!< interrupts were enabled
// [45] Additional Active Object (AO) records
QS_QF_ACTIVE_POST_ATTEMPT, //!< attempt to post an evt to AO failed
// [46] Additional Event Queue (EQ) records
QS_QF_EQUEUE_POST_ATTEMPT, //!< attempt to post evt to QEQueue failed
// [47] Additional Memory Pool (MP) records
QS_QF_MPOOL_GET_ATTEMPT, //!< attempt to get a memory block failed
// [48] Scheduler (SC) records
QS_SCHED_PREEMPT, //!< scheduler asynchronously preempted a task
QS_SCHED_RESTORE, //!< scheduler restored preempted task
QS_SCHED_LOCK, //!< scheduler was locked
QS_SCHED_UNLOCK, //!< scheduler was unlocked
QS_SCHED_NEXT, //!< scheduler found next task to execute
QS_SCHED_IDLE, //!< scheduler became idle
// [54] Miscellaneous QS records (not maskable)
QS_ENUM_DICT, //!< enumeration dictionary entry
// [55] Additional QEP records
QS_QEP_TRAN_HIST, //!< a tran to history was taken
QS_QEP_TRAN_EP, //!< a tran to entry point into a submachine
QS_QEP_TRAN_XP, //!< a tran to exit point out of a submachine
// [58] Miscellaneous QS records (not maskable)
QS_TEST_PAUSED, //!< test has been paused
QS_TEST_PROBE_GET, //!< reports that Test-Probe has been used
QS_SIG_DICT, //!< signal dictionary entry
QS_OBJ_DICT, //!< object dictionary entry
QS_FUN_DICT, //!< function dictionary entry
QS_USR_DICT, //!< user QS record dictionary entry
QS_TARGET_INFO, //!< reports the Target information
QS_TARGET_DONE, //!< reports completion of a user callback
QS_RX_STATUS, //!< reports QS data receive status
QS_QUERY_DATA, //!< reports the data from "current object" query
QS_PEEK_DATA, //!< reports the data from the PEEK query
QS_ASSERT_FAIL, //!< assertion failed in the code
QS_QF_RUN, //!< QF_run() was entered
// [71] Semaphore (SEM) records
QS_SEM_TAKE, //!< a semaphore was taken by a thread
QS_SEM_BLOCK, //!< a semaphore blocked a thread
QS_SEM_SIGNAL, //!< a semaphore was signaled
QS_SEM_BLOCK_ATTEMPT, //!< a semaphore blocked was attempted
// [75] Mutex (MTX) records
QS_MTX_LOCK, //!< a mutex was locked
QS_MTX_BLOCK, //!< a mutex blocked a thread
QS_MTX_UNLOCK, //!< a mutex was unlocked
QS_MTX_LOCK_ATTEMPT, //!< a mutex lock was attempted
QS_MTX_BLOCK_ATTEMPT, //!< a mutex blocking was attempted
QS_MTX_UNLOCK_ATTEMPT,//!< a mutex unlock was attempted
// [81]
QS_PRE_MAX, //!< the number of predefined QS records
};
//! QS record groups for QS_GLB_FILTER()
: std::int16_t {
QS_ALL_RECORDS = static_cast<std::uint8_t>(0xF0U), //!< all QS records
QS_SM_RECORDS, //!< State Machine QS records
QS_AO_RECORDS, //!< Active Object QS records
QS_EQ_RECORDS, //!< Event Queues QS records
QS_MP_RECORDS, //!< Memory Pools QS records
QS_TE_RECORDS, //!< Time Events QS records
QS_QF_RECORDS, //!< QF QS records
QS_SC_RECORDS, //!< Scheduler QS records
QS_SEM_RECORDS, //!< Semaphore QS records
QS_MTX_RECORDS, //!< Mutex QS records
QS_U0_RECORDS, //!< User Group 100-104 records
QS_U1_RECORDS, //!< User Group 105-109 records
QS_U2_RECORDS, //!< User Group 110-114 records
QS_U3_RECORDS, //!< User Group 115-119 records
QS_U4_RECORDS, //!< User Group 120-124 records
QS_UA_RECORDS, //!< All User records
};
//! QS user record group offsets for QS_GLB_FILTER()
: std::int16_t {
QS_USER = 100, //!< the first record available to QS users
QS_USER0 = QS_USER, //!< offset for User Group 0
QS_USER1 = QS_USER0 + 5, //!< offset of Group 1
QS_USER2 = QS_USER1 + 5, //!< offset of Group 2
QS_USER3 = QS_USER2 + 5, //!< offset of Group 3
QS_USER4 = QS_USER3 + 5, //!< offset of Group 4
};
//! QS ID offsets for QS_LOC_FILTER()
: std::int16_t {
QS_AO_ID = 0, //!< offset for AO priorities
QS_EP_ID = 64, //!< offset for event-pool IDs
QS_EQ_ID = 80, //!< offset for event-queue IDs
QS_AP_ID = 96, //!< offset for Appl-spec IDs
};
//! QS ID groups for QS_LOC_FILTER()
: std::int16_t {
QS_ALL_IDS = 0xF0, //!< all QS IDs
QS_AO_IDS = 0x80 + QS_AO_ID, //!< AO IDs (priorities)
QS_EP_IDS = 0x80 + QS_EP_ID, //!< event-pool IDs
QS_EQ_IDS = 0x80 + QS_EQ_ID, //!< event-queue IDs
QS_AP_IDS = 0x80 + QS_AP_ID, //!< Application-specific IDs
};
//! function pointer type for fun_dict_pre_()
= void (*)();
//! QS ID type for applying local filtering
{
std::uint8_t m_prio; //!< "priority" (qs_id) for the QS "local filter"
//! get the "priority" (qs_id) from the QSpyId opbject
std::uint_fast8_t getPrio() const noexcept {
return static_cast<std::uint_fast8_t>(m_prio);
}
};
//! QS software tracing, output QS-TX
//! global on/off QS filter
//! local on/off QS filter
//! old local QS filter
// @deprecated
//! pointer to the start of the QS-TX ring buffer
//! offset of the end of the ring buffer
//! offset to where next byte will be inserted
//! offset of where next record will be extracted
//! number of bytes currently in the ring buffer
//! sequence number of the last inserted QS record
//! checksum of the currently inserted record
//! critical section nesting level
//! flags for internal use
//! Enumerates data elements for app-specific trace records
: std::uint8_t {
I8_ENUM_T, //!< signed 8-bit integer or enum format
U8_T, //!< unsigned 8-bit integer format
I16_T, //!< signed 16-bit integer format
U16_T, //!< unsigned 16-bit integer format
I32_T, //!< signed 32-bit integer format
U32_T, //!< unsigned 32-bit integer format
F32_T, //!< 32-bit floating point format
F64_T, //!< 64-bit floating point format
STR_T, //!< zero-terminated ASCII string format
MEM_T, //!< up to 255-bytes memory block format
SIG_T, //!< event signal format
OBJ_T, //!< object pointer format
FUN_T, //!< function pointer format
I64_T, //!< signed 64-bit integer format
U64_T, //!< unsigned 64-bit integer format
};
//! the only instance of the QS-TX object (Singleton)
//! template for forcing cast of member functions for function
//! dictionaries and test probes.
//!
//! @tparam T_OUT type of the returned representation
//! @tparam T_IN type of the provided representation
//!
//! @returns the binary representation of `T_IN` as `T_OUT`
union TCast {
T_IN in;
T_OUT out;
} u = { in };
return u.out;
noexcept
//! Initialize the QS data buffer
//!
//! @details
//! This function should be called from QP::QS::onStartup() to provide
//! QS with the data buffer. The first argument `sto[]` is the address
//! of the memory block, and the second argument `stoSize` is the size
//! of this block [in bytes]. Currently the size of the QS buffer cannot
//! exceed 64KB.
//!
//! @note
//! QS can work with quite small data buffers, but you will start losing
//! data if the buffer is too small for the bursts of tracing activity.
//! The right size of the buffer depends on the data production rate and
//! the data output rate. QS offers flexible filtering to reduce the data
//! production rate.
//!
//! @note
//! If the data output rate cannot keep up with the production rate,
//! QS will start overwriting the older data with newer data. This is
//! consistent with the "last-is-best" QS policy. The record sequence
//! counters and check sums on each record allow the QSPY host utility
//! to easily detect any data loss.
// the provided buffer must be longer than 8 bytes
Q_REQUIRE_ID(100, stoSize > 8U);
// This function initializes all the internal QS variables, so that the
// tracing can start correctly even if the startup code fails to clear
// any uninitialized data (as is required by the C++ Standard).
//
glbFilter_(-static_cast<enum_t>(QS_ALL_RECORDS));// all global filters OFF
locFilter_(static_cast<enum_t>(QS_ALL_IDS)); // all local filters ON
priv_.locFilter_AP = nullptr; // deprecated "AP-filter"
priv_.buf = sto;
priv_.end = static_cast<QSCtr>(stoSize);
priv_.head = 0U;
priv_.tail = 0U;
priv_.used = 0U;
priv_.seq = 0U;
priv_.chksum = 0U;
priv_.critNest = 0U;
// produce an empty record to "flush" the QS trace buffer
beginRec_(QS_REC_NUM_(QS_EMPTY));
endRec_();
// produce the Target info QS record
target_info_pre_(0xFFU);
// wait with flushing after successful initialization (see QS_INIT())
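// [Illustrative example -- not part of the generated QP/C++ source]
// A BSP-level QS::onStartup() typically provides the QS-TX buffer to
// initBuf(); the 2KB buffer size and the output-link setup are assumptions
// of this sketch:
bool QP::QS::onStartup(void const *arg) {
    Q_UNUSED_PAR(arg);
    static std::uint8_t qsTxBuf[2U * 1024U]; // buffer for the QS-TX channel
    initBuf(qsTxBuf, sizeof(qsTxBuf));
    // ... configure the output link to QSPY (e.g., a UART) here ...
    return true; // QS initialized successfully
}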
noexcept
//! Byte-oriented interface to the QS data buffer
//!
//! @details
//! This function delivers one byte at a time from the QS data buffer.
//!
//! @returns
//! the byte in the least-significant 8-bits of the 16-bit return
//! value if the byte is available. If no more data is available at the
//! time, the function returns ::QS_EOD (End-Of-Data).
//!
//! @note
//! QS::getByte() is NOT protected with a critical section.
std::uint16_t ret;
if (priv_.used == 0U) {
ret = QS_EOD; // set End-Of-Data
}
else {
std::uint8_t const * const buf_ = priv_.buf; // put in a temporary
QSCtr tail_ = priv_.tail; // put in a temporary (register)
// the byte to return
ret = static_cast<std::uint16_t>(buf_[tail_]);
++tail_; // advance the tail
if (tail_ == priv_.end) { // tail wrap around?
tail_ = 0U;
}
priv_.tail = tail_; // update the tail
priv_.used = (priv_.used - 1U); // one less byte used
}
return ret; // return the byte or EOD
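// [Illustrative example -- not part of the generated QP/C++ source]
// Because getByte() is not protected by a critical section, a BSP typically
// brackets the call with interrupt disabling; uart_put_char() is a
// hypothetical BSP output function assumed by this sketch:
extern void uart_put_char(std::uint8_t b);

static void drain_one_byte(void) {
    QF_INT_DISABLE();
    std::uint16_t const b = QP::QS::getByte();
    QF_INT_ENABLE();
    if (b != QP::QS_EOD) { // byte available?
        uart_put_char(static_cast<std::uint8_t>(b));
    }
}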
noexcept
//! Block-oriented interface to the QS data buffer
//!
//! @details
//! This function delivers a contiguous block of data from the QS data
//! buffer. The function returns the pointer to the beginning of the
//! block, and writes the number of bytes in the block to the location
//! pointed to by `pNbytes`. The argument `pNbytes` is also used as
//! input to provide the maximum size of the data block that the caller
//! can accept.
//!
//! @returns
//! if data is available, the function returns pointer to the
//! contiguous block of data and sets the value pointed to by `pNbytes`
//! to the # available bytes. If no data is available at the time the
//! function is called, the function returns a NULL pointer and sets the
//! value pointed to by `pNbytes` to zero.
//!
//! @note
//! Only the NULL return from QP::QS::getBlock() indicates that the QS
//! buffer is empty at the time of the call. The non-NULL return often
//! means that the block is at the end of the buffer and you need to call
//! QP::QS::getBlock() again to obtain the rest of the data that
//! "wrapped around" to the beginning of the QS data buffer.
//!
//! @note QP::QS::getBlock() is __not__ protected with a critical section.
QSCtr const used_ = priv_.used; // put in a temporary (register)
std::uint8_t *buf_;
// any bytes used in the ring buffer?
if (used_ == 0U) {
*pNbytes = 0U; // no bytes available right now
buf_ = nullptr; // no bytes available right now
}
else {
QSCtr tail_ = priv_.tail; // put in a temporary (register)
QSCtr const end_ = priv_.end; // put in a temporary (register)
QSCtr n = static_cast<QSCtr>(end_ - tail_);
if (n > used_) {
n = used_;
}
if (n > static_cast<QSCtr>(*pNbytes)) {
n = static_cast<QSCtr>(*pNbytes);
}
*pNbytes = static_cast<std::uint16_t>(n); // n-bytes available
buf_ = priv_.buf;
buf_ = &buf_[tail_]; // the bytes are at the tail
priv_.used = (priv_.used - n);
tail_ += n;
if (tail_ == end_) {
tail_ = 0U;
}
priv_.tail = tail_;
}
return buf_;
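// [Illustrative example -- not part of the generated QP/C++ source]
// Draining the trace data in blocks; a non-NULL return may cover only the
// data up to the buffer wrap-around, so the call is repeated until NULL.
// uart_send() is a hypothetical blocking output function assumed here:
extern void uart_send(std::uint8_t const *data, std::uint16_t len);

static void drain_blocks(void) {
    std::uint16_t len = 64U; // max block size the output channel accepts
    QF_INT_DISABLE();
    std::uint8_t const *blk = QP::QS::getBlock(&len);
    QF_INT_ENABLE();
    while (blk != nullptr) {
        uart_send(blk, len);
        len = 64U; // restore the maximum block size for the next call
        QF_INT_DISABLE();
        blk = QP::QS::getBlock(&len);
        QF_INT_ENABLE();
    }
}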
noexcept
//! Set/clear the global Filter for a given QS record
//! or a group of records
//!
//! @details
//! This function sets up the QS filter to enable record types specified
//! in the `filter` parameter. The value #QS_ALL_RECORDS specifies to
//! filter-in all records. This function should be called indirectly
//! through the macro QS_GLB_FILTER()
//!
//! @param[in] filter the QS record-id or group to enable in the filter
//! (if positive) or disable (if negative). The record-id
//! numbers must be in the range -127..127.
//! @note
//! Filtering based on the record-type is only the first layer of
//! filtering. The second layer is based on the object-type. Both filter
//! layers must be enabled for the QS record to be inserted in the
//! QS buffer.
//!
//! @sa QP::QS::locFilter_()
bool const isRemove = (filter < 0);
std::uint16_t const rec = isRemove
? static_cast<std::uint16_t>(-filter)
: static_cast<std::uint16_t>(filter);
switch (rec) {
case QS_ALL_RECORDS: {
std::uint8_t const tmp = (isRemove ? 0x00U : 0xFFU);
std::uint_fast8_t i;
// set all global filters (partially unrolled loop)
for (i = 0U; i < Q_DIM(priv_.glbFilter); i += 4U) {
priv_.glbFilter[i ] = tmp;
priv_.glbFilter[i + 1U] = tmp;
priv_.glbFilter[i + 2U] = tmp;
priv_.glbFilter[i + 3U] = tmp;
}
if (isRemove) {
// leave the "not maskable" filters enabled,
// see qs.h, Miscellaneous QS records (not maskable)
//
priv_.glbFilter[0] = 0x01U;
priv_.glbFilter[6] = 0x40U;
priv_.glbFilter[7] = 0xFCU;
priv_.glbFilter[8] = 0x7FU;
}
else {
// never turn the last 3 records on (0x7D, 0x7E, 0x7F)
priv_.glbFilter[15] = 0x1FU;
}
break;
}
case QS_SM_RECORDS:
if (isRemove) {
priv_.glbFilter[0] &=
static_cast<std::uint8_t>(~0xFEU & 0xFFU);
priv_.glbFilter[1] &=
static_cast<std::uint8_t>(~0x03U & 0xFFU);
priv_.glbFilter[6] &=
static_cast<std::uint8_t>(~0x80U & 0xFFU);
priv_.glbFilter[7] &=
static_cast<std::uint8_t>(~0x03U & 0xFFU);
}
else {
priv_.glbFilter[0] |= 0xFEU;
priv_.glbFilter[1] |= 0x03U;
priv_.glbFilter[6] |= 0x80U;
priv_.glbFilter[7] |= 0x03U;
}
break;
case QS_AO_RECORDS:
if (isRemove) {
priv_.glbFilter[1] &=
static_cast<std::uint8_t>(~0xFCU & 0xFFU);
priv_.glbFilter[2] &=
static_cast<std::uint8_t>(~0x07U & 0xFFU);
priv_.glbFilter[5] &=
static_cast<std::uint8_t>(~0x20U & 0xFFU);
}
else {
priv_.glbFilter[1] |= 0xFCU;
priv_.glbFilter[2] |= 0x07U;
priv_.glbFilter[5] |= 0x20U;
}
break;
case QS_EQ_RECORDS:
if (isRemove) {
priv_.glbFilter[2] &=
static_cast<std::uint8_t>(~0x78U & 0xFFU);
priv_.glbFilter[5] &=
static_cast<std::uint8_t>(~0x40U & 0xFFU);
}
else {
priv_.glbFilter[2] |= 0x78U;
priv_.glbFilter[5] |= 0x40U;
}
break;
case QS_MP_RECORDS:
if (isRemove) {
priv_.glbFilter[3] &=
static_cast<std::uint8_t>(~0x03U & 0xFFU);
priv_.glbFilter[5] &=
static_cast<std::uint8_t>(~0x80U & 0xFFU);
}
else {
priv_.glbFilter[3] |= 0x03U;
priv_.glbFilter[5] |= 0x80U;
}
break;
case QS_QF_RECORDS:
if (isRemove) {
priv_.glbFilter[2] &=
static_cast<std::uint8_t>(~0x80U & 0xFFU);
priv_.glbFilter[3] &=
static_cast<std::uint8_t>(~0xFCU & 0xFFU);
priv_.glbFilter[4] &=
static_cast<std::uint8_t>(~0xC0U & 0xFFU);
priv_.glbFilter[5] &=
static_cast<std::uint8_t>(~0x1FU & 0xFFU);
}
else {
priv_.glbFilter[2] |= 0x80U;
priv_.glbFilter[3] |= 0xFCU;
priv_.glbFilter[4] |= 0xC0U;
priv_.glbFilter[5] |= 0x1FU;
}
break;
case QS_TE_RECORDS:
if (isRemove) {
priv_.glbFilter[4] &=
static_cast<std::uint8_t>(~0x3FU & 0xFFU);
}
else {
priv_.glbFilter[4] |= 0x3FU;
}
break;
case QS_SC_RECORDS:
if (isRemove) {
priv_.glbFilter[6] &=
static_cast<std::uint8_t>(~0x3FU & 0xFFU);
}
else {
priv_.glbFilter[6] |= 0x3FU;
}
break;
case QS_SEM_RECORDS:
if (isRemove) {
priv_.glbFilter[8] &=
static_cast<std::uint8_t>(~0x80U & 0xFFU);
priv_.glbFilter[9] &=
static_cast<std::uint8_t>(~0x07U & 0xFFU);
}
else {
priv_.glbFilter[8] |= 0x80U;
priv_.glbFilter[9] |= 0x07U;
}
break;
case QS_MTX_RECORDS:
if (isRemove) {
priv_.glbFilter[9] &=
static_cast<std::uint8_t>(~0xF8U & 0xFFU);
priv_.glbFilter[10] &=
static_cast<std::uint8_t>(~0x01U & 0xFFU);
}
else {
priv_.glbFilter[9] |= 0xF8U;
priv_.glbFilter[10] |= 0x01U;
}
break;
case QS_U0_RECORDS:
if (isRemove) {
priv_.glbFilter[12] &=
static_cast<std::uint8_t>(~0xF0U & 0xFFU);
priv_.glbFilter[13] &=
static_cast<std::uint8_t>(~0x01U & 0xFFU);
}
else {
priv_.glbFilter[12] |= 0xF0U;
priv_.glbFilter[13] |= 0x01U;
}
break;
case QS_U1_RECORDS:
if (isRemove) {
priv_.glbFilter[13] &=
static_cast<std::uint8_t>(~0x3EU & 0xFFU);
}
else {
priv_.glbFilter[13] |= 0x3EU;
}
break;
case QS_U2_RECORDS:
if (isRemove) {
priv_.glbFilter[13] &=
static_cast<std::uint8_t>(~0xC0U & 0xFFU);
priv_.glbFilter[14] &=
static_cast<std::uint8_t>(~0x07U & 0xFFU);
}
else {
priv_.glbFilter[13] |= 0xC0U;
priv_.glbFilter[14] |= 0x07U;
}
break;
case QS_U3_RECORDS:
if (isRemove) {
priv_.glbFilter[14] &=
static_cast<std::uint8_t>(~0xF8U & 0xFFU);
}
else {
priv_.glbFilter[14] |= 0xF8U;
}
break;
case QS_U4_RECORDS:
if (isRemove) {
priv_.glbFilter[15] &=
static_cast<std::uint8_t>(~0x1FU & 0xFFU);
}
else {
priv_.glbFilter[15] |= 0x1FU;
}
break;
case QS_UA_RECORDS:
if (isRemove) {
priv_.glbFilter[12] &=
static_cast<std::uint8_t>(~0xF0U & 0xFFU);
priv_.glbFilter[13] = 0U;
priv_.glbFilter[14] = 0U;
priv_.glbFilter[15] &=
static_cast<std::uint8_t>(~0x1FU & 0xFFU);
}
else {
priv_.glbFilter[12] |= 0xF0U;
priv_.glbFilter[13] |= 0xFFU;
priv_.glbFilter[14] |= 0xFFU;
priv_.glbFilter[15] |= 0x1FU;
}
break;
default:
// QS rec number can't exceed 0x7D, so no need for escaping
Q_ASSERT_ID(210, rec < 0x7DU);
if (isRemove) {
priv_.glbFilter[rec >> 3U]
&= static_cast<std::uint8_t>(~(1U << (rec & 7U)) & 0xFFU);
}
else {
priv_.glbFilter[rec >> 3U]
|= static_cast<std::uint8_t>(1U << (rec & 7U));
// never turn the last 3 records on (0x7D, 0x7E, 0x7F)
priv_.glbFilter[15] &= 0x1FU;
}
break;
}
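// [Illustrative example -- not part of the generated QP/C++ source]
// The global filter is normally configured through the QS_GLB_FILTER()
// macro, for instance during application startup:
static void setup_glb_filters(void) {
    QS_GLB_FILTER(QP::QS_ALL_RECORDS); // enable all record types...
    QS_GLB_FILTER(-QP::QS_QF_TICK);    // ...except the clock-tick record
    QS_GLB_FILTER(QP::QS_UA_RECORDS);  // enable all User records
}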
noexcept
//! Set/clear the local Filter for a given object-id
//! or a group of object-ids
//!
//! @details
//! This function sets up the local QS filter to enable or disable the
//! given QS object-id or a group of object-ids @a filter.
//! This function should be called indirectly through the macro
//! QS_LOC_FILTER()
//!
//! @param[in] filter the QS object-id or group to enable in the filter
//! (if positive) or disable (if negative). The qs_id numbers
//! must be in the range 1..127.
//! @note
//! Filtering based on the object-id (local filter) is the second layer
//! of filtering. The first layer is based on the QS record-type (global
//! filter). Both filter layers must be enabled for the QS record to be
//! inserted into the QS buffer.
//!
//! @sa QP::QS::glbFilter_()
bool const isRemove = (filter < 0);
std::uint16_t const qs_id = isRemove
? static_cast<std::uint16_t>(-filter)
: static_cast<std::uint16_t>(filter);
std::uint8_t const tmp = (isRemove ? 0x00U : 0xFFU);
std::uint_fast8_t i;
switch (qs_id) {
case QS_ALL_IDS:
// set all local filters (partially unrolled loop)
for (i = 0U; i < Q_DIM(priv_.locFilter); i += 4U) {
priv_.locFilter[i ] = tmp;
priv_.locFilter[i + 1U] = tmp;
priv_.locFilter[i + 2U] = tmp;
priv_.locFilter[i + 3U] = tmp;
}
break;
case QS_AO_IDS:
for (i = 0U; i < 8U; i += 4U) {
priv_.locFilter[i ] = tmp;
priv_.locFilter[i + 1U] = tmp;
priv_.locFilter[i + 2U] = tmp;
priv_.locFilter[i + 3U] = tmp;
}
break;
case QS_EP_IDS:
i = 8U;
priv_.locFilter[i ] = tmp;
priv_.locFilter[i + 1U] = tmp;
break;
case QS_AP_IDS:
i = 12U;
priv_.locFilter[i ] = tmp;
priv_.locFilter[i + 1U] = tmp;
priv_.locFilter[i + 2U] = tmp;
priv_.locFilter[i + 3U] = tmp;
break;
default:
if (qs_id < 0x7FU) {
if (isRemove) {
priv_.locFilter[qs_id >> 3U] &=
static_cast<std::uint8_t>(
~(1U << (qs_id & 7U)) & 0xFFU);
}
else {
priv_.locFilter[qs_id >> 3U]
|= (1U << (qs_id & 7U));
}
}
else {
Q_ERROR_ID(310); // incorrect qs_id
}
break;
}
priv_.locFilter[0] |= 0x01U; // leave QS_ID == 0 always on
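// [Illustrative example -- not part of the generated QP/C++ source]
// The local filter is normally configured through the QS_LOC_FILTER()
// macro; the specific AO priority (3) used here is an assumption:
static void setup_loc_filters(void) {
    QS_LOC_FILTER(QP::QS_ALL_IDS); // enable all QS-IDs...
    QS_LOC_FILTER(-QP::QS_EP_IDS); // ...except the event-pool IDs
    QS_LOC_FILTER(3U);             // (re)enable the AO with priority 3
}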
//! Perform the QS-TX output (implemented in some QS ports)
noexcept
//! Mark the begin of a QS record `rec`
//!
//! @details
//! This function must be called at the beginning of each QS record.
//! This function should be called indirectly through the macro
//! QS_BEGIN_ID() or QS_BEGIN_NOCRIT(), depending on whether it is called
//! from normal code or from within a critical section.
std::uint8_t const b = priv_.seq + 1U;
std::uint8_t chksum_ = 0U; // reset the checksum
std::uint8_t * const buf_ = priv_.buf; // put in a temporary (register)
QSCtr head_ = priv_.head; // put in a temporary (register)
QSCtr const end_ = priv_.end; // put in a temporary (register)
priv_.seq = b; // store the incremented sequence num
priv_.used = (priv_.used + 2U); // 2 bytes about to be added
QS_INSERT_ESC_BYTE_(b)
chksum_ += static_cast<std::uint8_t>(rec);
QS_INSERT_BYTE_(static_cast<std::uint8_t>(rec)) // no need for escaping
priv_.head = head_; // save the head
priv_.chksum = chksum_; // save the checksum
noexcept
//! Mark the end of a QS record `rec`
//!
//! @details
//! This function must be called at the end of each QS record.
//! This function should be called indirectly through the macro QS_END()
//! or QS_END_NOCRIT(), depending on whether it is called from normal code
//! or from within a critical section.
std::uint8_t * const buf_ = priv_.buf; // put in a temporary (register)
QSCtr head_ = priv_.head;
QSCtr const end_ = priv_.end;
std::uint8_t b = priv_.chksum;
b ^= 0xFFU; // invert the bits in the checksum
priv_.used = (priv_.used + 2U); // 2 bytes about to be added
if ((b != QS_FRAME) && (b != QS_ESC)) {
QS_INSERT_BYTE_(b)
}
else {
QS_INSERT_BYTE_(QS_ESC)
QS_INSERT_BYTE_(b ^ QS_ESC_XOR)
priv_.used = (priv_.used + 1U); // account for the ESC byte
}
QS_INSERT_BYTE_(QS_FRAME) // do not escape this QS_FRAME
priv_.head = head_; // save the head
if (priv_.used > end_) { // overrun over the old data?
priv_.used = end_; // the whole buffer is used
priv_.tail = head_; // shift the tail to the old data
}
noexcept
//! output std::uint8_t data element without format information
std::uint8_t chksum_ = priv_.chksum; // put in a temporary (register)
std::uint8_t * const buf_ = priv_.buf; // put in a temporary (register)
QSCtr head_ = priv_.head; // put in a temporary (register)
QSCtr const end_= priv_.end; // put in a temporary (register)
priv_.used = (priv_.used + 1U); // 1 byte about to be added
QS_INSERT_ESC_BYTE_(d)
priv_.head = head_; // save the head
priv_.chksum = chksum_; // save the checksum
noexcept
//! output two std::uint8_t data elements without format information
std::uint8_t chksum_ = priv_.chksum; // put in a temporary (register)
std::uint8_t * const buf_ = priv_.buf; // put in a temporary (register)
QSCtr head_ = priv_.head; // put in a temporary (register)
QSCtr const end_= priv_.end; // put in a temporary (register)
priv_.used = (priv_.used + 2U); // 2 bytes about to be added
QS_INSERT_ESC_BYTE_(d1)
QS_INSERT_ESC_BYTE_(d2)
priv_.head = head_; // save the head
priv_.chksum = chksum_; // save the checksum
noexcept
//! Output std::uint16_t data element without format information
std::uint8_t b = static_cast<std::uint8_t>(d);
std::uint8_t chksum_ = priv_.chksum; // put in a temporary (register)
std::uint8_t * const buf_ = priv_.buf; // put in a temporary (register)
QSCtr head_ = priv_.head; // put in a temporary (register)
QSCtr const end_= priv_.end; // put in a temporary (register)
priv_.used = (priv_.used + 2U); // 2 bytes about to be added
QS_INSERT_ESC_BYTE_(b)
d >>= 8U;
b = static_cast<std::uint8_t>(d);
QS_INSERT_ESC_BYTE_(b)
priv_.head = head_; // save the head
priv_.chksum = chksum_; // save the checksum
noexcept
//! Output std::uint32_t data element without format information
std::uint8_t chksum_ = priv_.chksum; // put in a temporary (register)
std::uint8_t * const buf_ = priv_.buf; // put in a temporary (register)
QSCtr head_ = priv_.head; // put in a temporary (register)
QSCtr const end_= priv_.end; // put in a temporary (register)
priv_.used = (priv_.used + 4U); // 4 bytes about to be added
for (std::uint_fast8_t i = 4U; i != 0U; --i) {
std::uint8_t const b = static_cast<std::uint8_t>(d);
QS_INSERT_ESC_BYTE_(b)
d >>= 8U;
}
priv_.head = head_; // save the head
priv_.chksum = chksum_; // save the checksum
noexcept
//! Output object pointer data element without format information
#if (QS_OBJ_PTR_SIZE == 1U)
u8_raw_(reinterpret_cast<std::uint8_t>(obj));
#elif (QS_OBJ_PTR_SIZE == 2U)
u16_raw_(reinterpret_cast<std::uint16_t>(obj));
#elif (QS_OBJ_PTR_SIZE == 4U)
u32_raw_(reinterpret_cast<std::uint32_t>(obj));
#elif (QS_OBJ_PTR_SIZE == 8U)
u64_raw_(reinterpret_cast<std::uint64_t>(obj));
#else
u32_raw_(reinterpret_cast<std::uint32_t>(obj));
#endif
noexcept
//! Output zero-terminated ASCII string element without format information
std::uint8_t b = static_cast<std::uint8_t>(*s);
std::uint8_t chksum_ = priv_.chksum; // put in a temporary (register)
std::uint8_t * const buf_ = priv_.buf; // put in a temporary (register)
QSCtr head_ = priv_.head; // put in a temporary (register)
QSCtr const end_= priv_.end; // put in a temporary (register)
QSCtr used_ = priv_.used; // put in a temporary (register)
while (b != 0U) {
chksum_ += b; // update checksum
QS_INSERT_BYTE_(b) // ASCII characters don't need escaping
++s;
b = static_cast<std::uint8_t>(*s);
++used_;
}
QS_INSERT_BYTE_(0U) // zero-terminate the string
++used_;
priv_.head = head_; // save the head
priv_.chksum = chksum_; // save the checksum
priv_.used = used_; // save # of used buffer space
noexcept
//! Output std::uint8_t data element with format information
//! @sa QS_U8(), QS_I8()
std::uint8_t chksum_ = priv_.chksum; // put in a temporary (register)
std::uint8_t *const buf_ = priv_.buf; // put in a temporary (register)
QSCtr head_ = priv_.head; // put in a temporary (register)
QSCtr const end_= priv_.end; // put in a temporary (register)
priv_.used = (priv_.used + 2U); // 2 bytes about to be added
QS_INSERT_ESC_BYTE_(format)
QS_INSERT_ESC_BYTE_(d)
priv_.head = head_; // save the head
priv_.chksum = chksum_; // save the checksum
noexcept
//! Output std::uint16_t data element with format information
//! @sa QS_U16(), QS_I16()
std::uint8_t chksum_ = priv_.chksum; // put in a temporary (register)
std::uint8_t * const buf_ = priv_.buf; // put in a temporary (register)
QSCtr head_ = priv_.head; // put in a temporary (register)
QSCtr const end_= priv_.end; // put in a temporary (register)
priv_.used = (priv_.used + 3U); // 3 bytes about to be added
QS_INSERT_ESC_BYTE_(format)
format = static_cast<std::uint8_t>(d);
QS_INSERT_ESC_BYTE_(format)
d >>= 8U;
format = static_cast<std::uint8_t>(d);
QS_INSERT_ESC_BYTE_(format)
priv_.head = head_; // save the head
priv_.chksum = chksum_; // save the checksum
noexcept
//! Output std::uint32_t data element with format information
//! @sa QS_U32(), QS_I32()
std::uint8_t chksum_ = priv_.chksum; // put in a temporary (register)
std::uint8_t * const buf_= priv_.buf; // put in a temporary (register)
QSCtr head_ = priv_.head; // put in a temporary (register)
QSCtr const end_= priv_.end; // put in a temporary (register)
priv_.used = (priv_.used + 5U); // 5 bytes about to be added
QS_INSERT_ESC_BYTE_(format) // insert the format byte
for (std::uint_fast8_t i = 4U; i != 0U; --i) {
format = static_cast<std::uint8_t>(d);
QS_INSERT_ESC_BYTE_(format)
d >>= 8U;
}
priv_.head = head_; // save the head
priv_.chksum = chksum_; // save the checksum
noexcept
//! Output zero-terminated ASCII string element with format information
//! @sa QS_STR()
std::uint8_t b = static_cast<std::uint8_t>(*s);
std::uint8_t chksum_ = static_cast<std::uint8_t>(
priv_.chksum + static_cast<std::uint8_t>(STR_T));
std::uint8_t * const buf_= priv_.buf; // put in a temporary (register)
QSCtr head_ = priv_.head; // put in a temporary (register)
QSCtr const end_= priv_.end; // put in a temporary (register)
QSCtr used_ = priv_.used; // put in a temporary (register)
used_ += 2U; // the format byte and the terminating-0
QS_INSERT_BYTE_(static_cast<std::uint8_t>(STR_T))
while (b != 0U) {
// ASCII characters don't need escaping
chksum_ += b; // update checksum
QS_INSERT_BYTE_(b)
++s;
b = static_cast<std::uint8_t>(*s);
++used_;
}
QS_INSERT_BYTE_(0U) // zero-terminate the string
priv_.head = head_; // save the head
priv_.chksum = chksum_; // save the checksum
priv_.used = used_; // save # of used buffer space
noexcept
//! Output memory block of up to 255-bytes with format information
//! @sa QS_MEM()
std::uint8_t b = static_cast<std::uint8_t>(MEM_T);
std::uint8_t chksum_ = priv_.chksum + b;
std::uint8_t * const buf_= priv_.buf; // put in a temporary (register)
QSCtr head_ = priv_.head; // put in a temporary (register)
QSCtr const end_= priv_.end; // put in a temporary (register)
priv_.used = (priv_.used + size + 2U); // size+2 bytes to be added
QS_INSERT_BYTE_(b)
QS_INSERT_ESC_BYTE_(size)
// output the 'size' number of bytes
for (; size != 0U; --size) {
b = *blk;
QS_INSERT_ESC_BYTE_(b)
++blk;
}
priv_.head = head_; // save the head
priv_.chksum = chksum_; // save the checksum
noexcept
//! Output signal dictionary record
//! @sa QS_SIG_DICTIONARY()
QS_CRIT_STAT_
QS_CRIT_E_();
beginRec_(static_cast<std::uint_fast8_t>(QS_SIG_DICT));
QS_SIG_PRE_(sig);
QS_OBJ_PRE_(obj);
QS_STR_PRE_((*name == '&') ? &name[1] : name);
endRec_();
QS_CRIT_X_();
onFlush();
noexcept
//! Output object dictionary record
//! @sa QS_OBJ_DICTIONARY()
QS_CRIT_STAT_
QS_CRIT_E_();
beginRec_(static_cast<std::uint_fast8_t>(QS_OBJ_DICT));
QS_OBJ_PRE_(obj);
QS_STR_PRE_((*name == '&') ? &name[1] : name);
endRec_();
QS_CRIT_X_();
onFlush();
noexcept
//! Output predefined object-array dictionary record
//! @sa QS_OBJ_ARR_DICTIONARY()
Q_REQUIRE_ID(400, idx < 1000U);
// format idx into a char buffer as "xxx\0"
std::uint8_t idx_str[4];
std::uint_fast16_t tmp = idx;
std::uint8_t i;
idx_str[3] = 0U; // zero-terminate
idx_str[2] = static_cast<std::uint8_t>(
static_cast<std::uint8_t>('0') + (tmp % 10U));
tmp /= 10U;
idx_str[1] = static_cast<std::uint8_t>(
static_cast<std::uint8_t>('0') + (tmp % 10U));
if (idx_str[1] == static_cast<std::uint8_t>('0')) {
i = 2U;
}
else {
tmp /= 10U;
idx_str[0] = static_cast<std::uint8_t>(
static_cast<std::uint8_t>('0') + (tmp % 10U));
if (idx_str[0] == static_cast<std::uint8_t>('0')) {
i = 1U;
}
else {
i = 0U;
}
}
QS_CRIT_STAT_
std::uint8_t j = ((*name == '&') ? 1U : 0U);
QS_CRIT_E_();
beginRec_(static_cast<std::uint_fast8_t>(QS_OBJ_DICT));
QS_OBJ_PRE_(obj);
for (; name[j] != '\0'; ++j) {
QS_U8_PRE_(name[j]);
if (name[j] == '[') {
++j;
break;
}
}
for (; idx_str[i] != 0U; ++i) {
QS_U8_PRE_(idx_str[i]);
}
// skip chars until ']'
for (; name[j] != '\0'; ++j) {
if (name[j] == ']') {
break;
}
}
for (; name[j] != '\0'; ++j) {
QS_U8_PRE_(name[j]);
}
QS_U8_PRE_(0U); // zero-terminate
endRec_();
QS_CRIT_X_();
onFlush();
noexcept
//! Output function dictionary record
//! @sa QS_FUN_DICTIONARY()
QS_CRIT_STAT_
QS_CRIT_E_();
beginRec_(static_cast<std::uint_fast8_t>(QS_FUN_DICT));
QS_FUN_PRE_(fun);
QS_STR_PRE_((*name == '&') ? &name[1] : name);
endRec_();
QS_CRIT_X_();
onFlush();
noexcept
//! Output user dictionary record
//! @sa QS_USR_DICTIONARY()
QS_CRIT_STAT_
QS_CRIT_E_();
beginRec_(static_cast<std::uint_fast8_t>(QS_USR_DICT));
QS_U8_PRE_(rec);
QS_STR_PRE_(name);
endRec_();
QS_CRIT_X_();
onFlush();
noexcept
//! Output enumeration dictionary record
//! @sa QS_ENUM_DICTIONARY()
QS_CRIT_STAT_
QS_CRIT_E_();
beginRec_(static_cast<std::uint_fast8_t>(QS_ENUM_DICT));
QS_2U8_PRE_(static_cast<std::uint8_t>(value), group);
QS_STR_PRE_(name);
endRec_();
QS_CRIT_X_();
onFlush();
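// [Illustrative example -- not part of the generated QP/C++ source]
// Dictionary records are typically produced right after the corresponding
// objects are initialized, so that QSPY can display symbolic names instead
// of raw addresses; AO_Table and TIMEOUT_SIG are assumptions of this sketch
// (MY_STAT refers to the user record from the earlier sketch):
extern QP::QActive *AO_Table;          // hypothetical active object
enum { TIMEOUT_SIG = QP::Q_USER_SIG }; // hypothetical signal

static void produce_dictionaries(void) {
    QS_OBJ_DICTIONARY(AO_Table);              // name of an active object
    QS_SIG_DICTIONARY(TIMEOUT_SIG, AO_Table); // name of a signal
    QS_FUN_DICTIONARY(&QP::QHsm::top);        // name of a state handler
    QS_USR_DICTIONARY(MY_STAT);               // name of a user record
}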
noexcept
//! internal function to produce the assertion failure trace record
//! @sa QS_ASSERTION()
QS_BEGIN_NOCRIT_PRE_(QP::QS_ASSERT_FAIL, 0U)
QS_TIME_PRE_();
QS_U16_PRE_(loc);
QS_STR_PRE_((module != nullptr) ? module : "?");
QS_END_NOCRIT_PRE_()
QP::QS::onFlush();
for (std::uint32_t volatile ctr = delay; ctr > 0U; ) {
ctr = (ctr - 1U);
}
QP::QS::onCleanup();
noexcept
//! internal function to produce the critical section entry record
//! @sa QF_QS_CRIT_ENTRY()
QS_BEGIN_NOCRIT_PRE_(QP::QS_QF_CRIT_ENTRY, 0U)
QS_TIME_PRE_();
QS::priv_.critNest = (QS::priv_.critNest + 1U);
QS_U8_PRE_(QS::priv_.critNest);
QS_END_NOCRIT_PRE_()
noexcept
//! internal function to produce the critical section exit record
//! @sa QF_QS_CRIT_EXIT()
QS_BEGIN_NOCRIT_PRE_(QP::QS_QF_CRIT_EXIT, 0U)
QS_TIME_PRE_();
QS_U8_PRE_(QS::priv_.critNest);
QS::priv_.critNest = (QS::priv_.critNest - 1U);
QS_END_NOCRIT_PRE_()
noexcept
//! internal function to produce the ISR entry record
//! @sa QF_QS_ISR_ENTRY()
QS_BEGIN_NOCRIT_PRE_(QP::QS_QF_ISR_ENTRY, 0U)
QS_TIME_PRE_();
QS_U8_PRE_(isrnest);
QS_U8_PRE_(prio);
QS_END_NOCRIT_PRE_()
noexcept
//! internal function to produce the ISR exit record
//! @sa QF_QS_ISR_EXIT()
QS_BEGIN_NOCRIT_PRE_(QP::QS_QF_ISR_EXIT, 0U)
QS_TIME_PRE_();
QS_U8_PRE_(isrnest);
QS_U8_PRE_(prio);
QS_END_NOCRIT_PRE_()
//! Helper function to output the predefined Target-info trace record.
static constexpr std::uint8_t ZERO = static_cast<std::uint8_t>('0');
static std::uint8_t const * const TIME =
reinterpret_cast<std::uint8_t const *>(&BUILD_TIME[0]);
static std::uint8_t const * const DATE =
reinterpret_cast<std::uint8_t const *>(&BUILD_DATE[0]);
QS::beginRec_(static_cast<std::uint_fast8_t>(QS_TARGET_INFO));
QS::u8_raw_(isReset);
static union {
std::uint16_t u16;
std::uint8_t u8[2];
} endian_test;
endian_test.u16 = 0x0102U;
// big-endian? add the 0x8000U flag
QS_U16_PRE_(((endian_test.u8[0] == 0x01U)
? (0x8000U | QP_VERSION)
: QP_VERSION)); // target endianness + version number
// send the object sizes...
QS::u8_raw_(Q_SIGNAL_SIZE
| static_cast<std::uint8_t>(QF_EVENT_SIZ_SIZE << 4U));
#ifdef QF_EQUEUE_CTR_SIZE
QS::u8_raw_(QF_EQUEUE_CTR_SIZE
| static_cast<std::uint8_t>(QF_TIMEEVT_CTR_SIZE << 4U));
#else
QS::u8_raw_(static_cast<std::uint8_t>(QF_TIMEEVT_CTR_SIZE << 4U));
#endif // ifdef QF_EQUEUE_CTR_SIZE
#ifdef QF_MPOOL_CTR_SIZE
QS::u8_raw_(QF_MPOOL_SIZ_SIZE
| static_cast<std::uint8_t>(QF_MPOOL_CTR_SIZE << 4U));
#else
QS::u8_raw_(0U);
#endif // ifdef QF_MPOOL_CTR_SIZE
QS::u8_raw_(QS_OBJ_PTR_SIZE | (QS_FUN_PTR_SIZE << 4U));
QS::u8_raw_(QS_TIME_SIZE);
// send the limits...
QS::u8_raw_(QF_MAX_ACTIVE);
QS::u8_raw_(QF_MAX_EPOOL | (QF_MAX_TICK_RATE << 4U));
// send the build time in three bytes (sec, min, hour)...
QS::u8_raw_((10U * (TIME[6] - ZERO)) + (TIME[7] - ZERO));
QS::u8_raw_((10U * (TIME[3] - ZERO)) + (TIME[4] - ZERO));
if (BUILD_TIME[0] == static_cast<std::uint8_t>(' ')) {
QS::u8_raw_(TIME[1] - ZERO);
}
else {
QS::u8_raw_((10U * (TIME[0] - ZERO)) + (TIME[1] - ZERO));
}
// send the build date in three bytes (day, month, year) ...
if (BUILD_DATE[4] == static_cast<std::uint8_t>(' ')) {
QS::u8_raw_(DATE[5] - ZERO);
}
else {
QS::u8_raw_((10U * (DATE[4] - ZERO)) + (DATE[5] - ZERO));
}
// convert the 3-letter month to a number 1-12 ...
std::uint8_t b;
switch (DATE[0] + DATE[1] + DATE[2]) {
case 'J' + 'a' +'n':
b = 1U;
break;
case 'F' + 'e' + 'b':
b = 2U;
break;
case 'M' + 'a' +'r':
b = 3U;
break;
case 'A' + 'p' + 'r':
b = 4U;
break;
case 'M' + 'a' + 'y':
b = 5U;
break;
case 'J' + 'u' + 'n':
b = 6U;
break;
case 'J' + 'u' + 'l':
b = 7U;
break;
case 'A' + 'u' + 'g':
b = 8U;
break;
case 'S' + 'e' + 'p':
b = 9U;
break;
case 'O' + 'c' + 't':
b = 10U;
break;
case 'N' + 'o' + 'v':
b = 11U;
break;
case 'D' + 'e' + 'c':
b = 12U;
break;
default:
b = 0U;
break;
}
QS::u8_raw_(b); // store the month
QS::u8_raw_((10U * (DATE[9] - ZERO)) + (DATE[10] - ZERO));
QS::endRec_();
//! Callback to startup the QS facility
//! Callback to cleanup the QS facility
//! Callback to flush the QS trace data to the host
//! Callback to obtain a timestamp for a QS record
noexcept
//! Output std::uint64_t data element without format information
std::uint8_t chksum_ = priv_.chksum;
std::uint8_t * const buf_ = priv_.buf;
QSCtr head_ = priv_.head;
QSCtr const end_ = priv_.end;
priv_.used = (priv_.used + 8U); // 8 bytes are about to be added
for (std::int_fast8_t i = 8U; i != 0U; --i) {
std::uint8_t const b = static_cast<std::uint8_t>(d);
QS_INSERT_ESC_BYTE_(b)
d >>= 8U;
}
priv_.head = head_; // save the head
priv_.chksum = chksum_; // save the checksum
noexcept
//! Output std::uint64_t data element with format information
//! @sa QS_U64(), QS_I64()
std::uint8_t chksum_ = priv_.chksum;
std::uint8_t * const buf_ = priv_.buf;
QSCtr head_ = priv_.head;
QSCtr const end_ = priv_.end;
priv_.used = (priv_.used + 9U); // 9 bytes are about to be added
QS_INSERT_ESC_BYTE_(format) // insert the format byte
for (std::int_fast8_t i = 8U; i != 0U; --i) {
format = static_cast<std::uint8_t>(d);
QS_INSERT_ESC_BYTE_(format)
d >>= 8U;
}
priv_.head = head_; // save the head
priv_.chksum = chksum_; // save the checksum
noexcept
//! Output 32-bit floating point data element with format information
//! @sa QS_F32()
union F32Rep {
float32_t f;
std::uint32_t u;
} fu32; // the internal binary representation
std::uint8_t chksum_ = priv_.chksum; // put in a temporary (register)
std::uint8_t * const buf_ = priv_.buf; // put in a temporary (register)
QSCtr head_ = priv_.head; // put in a temporary (register)
QSCtr const end_ = priv_.end; // put in a temporary (register)
fu32.f = d; // assign the binary representation
priv_.used = (priv_.used + 5U); // 5 bytes about to be added
QS_INSERT_ESC_BYTE_(format) // insert the format byte
for (std::uint_fast8_t i = 4U; i != 0U; --i) {
format = static_cast<std::uint8_t>(fu32.u);
QS_INSERT_ESC_BYTE_(format)
fu32.u >>= 8U;
}
priv_.head = head_; // save the head
priv_.chksum = chksum_; // save the checksum
noexcept
//! Output 64-bit floating point data element with format information
//! @sa QS_F64()
union F64Rep {
float64_t d;
std::uint32_t u[2];
} fu64; // the internal binary representation
std::uint8_t chksum_ = priv_.chksum;
std::uint8_t * const buf_ = priv_.buf;
QSCtr head_ = priv_.head;
QSCtr const end_ = priv_.end;
std::uint32_t i;
// static constant union to detect the endianness of the machine
static union U32Rep {
std::uint32_t u32;
std::uint8_t u8;
} const endian = { 1U };
fu64.d = d; // assign the binary representation
// is this a big-endian machine?
if (endian.u8 == 0U) {
// swap fu64.u[0] <-> fu64.u[1]...
i = fu64.u[0];
fu64.u[0] = fu64.u[1];
fu64.u[1] = i;
}
priv_.used = (priv_.used + 9U); // 9 bytes about to be added
QS_INSERT_ESC_BYTE_(format) // insert the format byte
// output 4 bytes from fu64.u[0]...
for (i = 4U; i != 0U; --i) {
QS_INSERT_ESC_BYTE_(static_cast<std::uint8_t>(fu64.u[0]))
fu64.u[0] >>= 8U;
}
// output 4 bytes from fu64.u[1]...
for (i = 4U; i != 0U; --i) {
QS_INSERT_ESC_BYTE_(static_cast<std::uint8_t>(fu64.u[1]))
fu64.u[1] >>= 8U;
}
priv_.head = head_; // update the head
priv_.chksum = chksum_; // update the checksum
//! QS software tracing parameters for QS input (QS-RX)
//! current objects
//! pointer to the start of the ring buffer
//! offset of the end of the ring buffer
//! offset to where next byte will be inserted
//! offset of where next byte will be extracted
//! QUTest event loop is running
//! the only instance of the QS-RX object (Singleton)
//! Kinds of objects used in QS::setCurrObj() and QS::queryCurrObj()
: std::uint8_t {
SM_OBJ, //!< state machine object for QEP
AO_OBJ, //!< active object
MP_OBJ, //!< event pool object
EQ_OBJ, //!< raw queue object
TE_OBJ, //!< time event object
AP_OBJ, //!< generic Application-specific object
MAX_OBJ
};
//! Object combinations for QS::setCurrObj() and QS::queryCurrObj()
: std::uint8_t {
SM_AO_OBJ = MAX_OBJ //!< combination of SM and AO
};
noexcept
//! Initialize the QS RX data buffer
//!
//! @details
//! This function should be called from QS::onStartup() to provide QS-RX
//! with the receive data buffer.
//!
//! @param[in] sto[] the address of the memory block
//! @param[in] stoSize the size of this block [bytes]. The size of the
//! QS RX buffer cannot exceed 64KB.
//!
//! @note
//! QS-RX can work with quite small data buffers, but you will start
//! losing data if the buffer is not drained fast enough (e.g., in the
//! idle task).
//!
//! @note
//! If the data input rate exceeds the QS-RX processing rate, the data
//! will be lost, but the QS protocol will detect this because:
//! (1) the checksum of the incomplete QS records will fail; and
//! (2) the sequence counters in QS records will show discontinuities.
//!
//! The QS-RX channel will report any data errors by sending the
//! QS_RX_DATA_ERROR trace record.
rxPriv_.buf = &sto[0];
rxPriv_.end = static_cast<QSCtr>(stoSize);
rxPriv_.head = 0U;
rxPriv_.tail = 0U;
rxPriv_.currObj[QS::SM_OBJ] = nullptr;
rxPriv_.currObj[QS::AO_OBJ] = nullptr;
rxPriv_.currObj[QS::MP_OBJ] = nullptr;
rxPriv_.currObj[QS::EQ_OBJ] = nullptr;
rxPriv_.currObj[QS::TE_OBJ] = nullptr;
rxPriv_.currObj[QS::AP_OBJ] = nullptr;
tran_(WAIT4_SEQ);
l_rx.esc = 0U;
l_rx.seq = 0U;
l_rx.chksum = 0U;
beginRec_(static_cast<std::uint_fast8_t>(QS_OBJ_DICT));
QS_OBJ_PRE_(&rxPriv_);
QS_STR_PRE_("QS_RX");
endRec_();
// no QS_REC_DONE(), because QS is not running yet
#ifdef Q_UTEST
QP::QS::testData.tpNum = 0U;
QP::QS::testData.testTime = 0U;
#endif // Q_UTEST
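// [Illustrative example -- not part of the generated QP/C++ source]
// The QS-RX buffer is typically provided right next to the QS-TX buffer in
// the BSP-level QS::onStartup(); the 128-byte size is an assumption:
static void setup_qs_rx(void) {
    static std::uint8_t qsRxBuf[128U]; // buffer for the QS-RX channel
    QP::QS::rxInitBuf(qsRxBuf, static_cast<std::uint16_t>(sizeof(qsRxBuf)));
}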
noexcept
//! Put one byte into the QS RX lock-free buffer
QSCtr head = rxPriv_.head + 1U;
if (head == rxPriv_.end) {
head = 0U;
}
if (head != rxPriv_.tail) { // buffer NOT full?
rxPriv_.buf[rxPriv_.head] = b;
rxPriv_.head = head;
return true; // byte placed in the buffer
}
else {
return false; // byte NOT placed in the buffer
}
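// [Illustrative example -- not part of the generated QP/C++ source]
// An ISR receiving bytes from the QSPY host feeds them into the QS-RX buffer
// with rxPut(); the bytes are parsed later, outside the ISR, by calling
// QS::rxParse() (e.g., from the idle loop). uart_rx_ready(), uart_get_char()
// and the ISR name are hypothetical assumptions of this sketch:
extern bool uart_rx_ready(void);
extern std::uint8_t uart_get_char(void);

void UART_RX_ISR(void) {
    while (uart_rx_ready()) {
        // a 'false' return means the QS-RX buffer is full and the byte is
        // dropped; the QS-RX protocol detects this via checksum/sequence
        static_cast<void>(QP::QS::rxPut(uart_get_char()));
    }
}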
noexcept
//! Obtain the number of free bytes in the QS RX data buffer
//!
//! @details
//! This function is intended to be called from the ISR that reads the
//! QS-RX bytes from the QSPY application. The function returns the
//! conservative number of free bytes currently available in the buffer,
//! assuming that the head pointer is not being moved concurrently.
//! The tail pointer might be moving, meaning that bytes can be
//! concurrently removed from the buffer.
QSCtr const head = rxPriv_.head;
if (head == rxPriv_.tail) { // buffer empty?
return static_cast<std::uint16_t>(rxPriv_.end - 1U);
}
else if (head < rxPriv_.tail) {
return static_cast<std::uint16_t>(rxPriv_.tail - head - 1U);
}
else {
return static_cast<std::uint16_t>(rxPriv_.end + rxPriv_.tail
- head - 1U);
}
//! Perform the QS-RX input (implemented in some QS ports)
noexcept
//! Set the "current object" in the Target
//!
//! @details
//! This function sets the "current object" in the Target.
Q_REQUIRE_ID(100, obj_kind < Q_DIM(rxPriv_.currObj));
rxPriv_.currObj[obj_kind] = obj_ptr;
noexcept
//! Query the "current object" in the Target
//!
//! @details
//! This function programmatically generates the response to the query for
//! a "current object".
Q_REQUIRE_ID(200, obj_kind < Q_DIM(rxPriv_.currObj));
if (QS::rxPriv_.currObj[obj_kind] != nullptr) {
QS_CRIT_STAT_
QS_CRIT_E_();
QS::beginRec_(static_cast<std::uint_fast8_t>(QS_QUERY_DATA));
QS_TIME_PRE_(); // timestamp
QS_U8_PRE_(obj_kind); // object kind
QS_OBJ_PRE_(QS::rxPriv_.currObj[obj_kind]); // object pointer
switch (obj_kind) {
case SM_OBJ: // intentionally fall through
case AO_OBJ:
QS_FUN_PRE_(
reinterpret_cast<QHsm *>(
QS::rxPriv_.currObj[obj_kind])->getStateHandler());
break;
case QS::MP_OBJ:
QS_MPC_PRE_(reinterpret_cast<QMPool *>(
QS::rxPriv_.currObj[obj_kind])->getNFree());
QS_MPC_PRE_(reinterpret_cast<QMPool *>(
QS::rxPriv_.currObj[obj_kind])->getNMin());
break;
case QS::EQ_OBJ:
QS_EQC_PRE_(reinterpret_cast<QEQueue *>(
QS::rxPriv_.currObj[obj_kind])->getNFree());
QS_EQC_PRE_(reinterpret_cast<QEQueue *>(
QS::rxPriv_.currObj[obj_kind])->getNMin());
break;
case QS::TE_OBJ:
QS_OBJ_PRE_(reinterpret_cast<QTimeEvt *>(
QS::rxPriv_.currObj[obj_kind])->getAct());
QS_TEC_PRE_(reinterpret_cast<QTimeEvt *>(
QS::rxPriv_.currObj[obj_kind])->getCtr());
QS_TEC_PRE_(reinterpret_cast<QTimeEvt *>(
QS::rxPriv_.currObj[obj_kind])->getInterval());
QS_SIG_PRE_(reinterpret_cast<QTimeEvt *>(
QS::rxPriv_.currObj[obj_kind])->sig);
QS_U8_PRE_ (reinterpret_cast<QTimeEvt *>(
QS::rxPriv_.currObj[obj_kind])->refCtr_);
break;
default:
break;
}
QS::endRec_();
QS_CRIT_X_();
QS_REC_DONE(); // user callback (if defined)
}
else {
rxReportError_(static_cast<std::uint8_t>(QS_RX_AO_FILTER));
}
//! Parse all bytes present in the QS RX data buffer
QSCtr tail = rxPriv_.tail;
while (rxPriv_.head != tail) { // QS-RX buffer NOT empty?
std::uint8_t b = rxPriv_.buf[tail];
++tail;
if (tail == rxPriv_.end) {
tail = 0U;
}
rxPriv_.tail = tail; // update the tail to a *valid* index
if (l_rx.esc != 0U) { // escaped byte arrived?
l_rx.esc = 0U;
b ^= QS_ESC_XOR;
l_rx.chksum += b;
rxParseData_(b);
}
else if (b == QS_ESC) {
l_rx.esc = 1U;
}
else if (b == QS_FRAME) {
// get ready for the next frame
b = l_rx.state; // save the current state in b
l_rx.esc = 0U;
tran_(WAIT4_SEQ);
if (l_rx.chksum == QS_GOOD_CHKSUM) {
l_rx.chksum = 0U;
rxHandleGoodFrame_(b);
}
else { // bad checksum
l_rx.chksum = 0U;
rxReportError_(0x41U);
rxHandleBadFrame_(b);
}
}
else {
l_rx.chksum += b;
rxParseData_(b);
}
}
//! internal function to handle incoming (QS-RX) packet
std::uint8_t i;
std::uint8_t *ptr;
QS_CRIT_STAT_
switch (state) {
case WAIT4_INFO_FRAME: {
// no need to report Ack or Done
QS_CRIT_E_();
target_info_pre_(0U); // send only Target info
QS_CRIT_X_();
break;
}
case WAIT4_RESET_FRAME: {
// no need to report Ack or Done, because Target resets
QS::onReset(); // reset the Target
break;
}
case WAIT4_CMD_PARAM1: // intentionally fall-through
case WAIT4_CMD_PARAM2: // intentionally fall-through
case WAIT4_CMD_PARAM3: // intentionally fall-through
case WAIT4_CMD_FRAME: {
rxReportAck_(QS_RX_COMMAND);
QS::onCommand(l_rx.var.cmd.cmdId, l_rx.var.cmd.param1,
l_rx.var.cmd.param2, l_rx.var.cmd.param3);
#ifdef Q_UTEST
#if Q_UTEST != 0
QS::processTestEvts_(); // process all events produced
#endif // Q_UTEST != 0
#endif // Q_UTEST
rxReportDone_(QS_RX_COMMAND);
break;
}
case WAIT4_TICK_FRAME: {
rxReportAck_(QS_RX_TICK);
#ifdef Q_UTEST
QTimeEvt::tick1_(
static_cast<std::uint_fast8_t>(l_rx.var.tick.rate),
&QS::rxPriv_);
#if Q_UTEST != 0
QS::processTestEvts_(); // process all events produced
#endif // Q_UTEST != 0
#else
QTimeEvt::tick_(
static_cast<std::uint_fast8_t>(l_rx.var.tick.rate),
&QS::rxPriv_);
#endif // Q_UTEST
rxReportDone_(QS_RX_TICK);
break;
}
case WAIT4_PEEK_FRAME: {
// no need to report Ack or Done
QS_CRIT_E_();
QS::beginRec_(static_cast<std::uint_fast8_t>(QS_PEEK_DATA));
ptr = static_cast<std::uint8_t*>(
QS::rxPriv_.currObj[QS::AP_OBJ]);
ptr = &ptr[l_rx.var.peek.offs];
QS_TIME_PRE_(); // timestamp
QS_U16_PRE_(l_rx.var.peek.offs); // data offset
QS_U8_PRE_(l_rx.var.peek.size); // data size
QS_U8_PRE_(l_rx.var.peek.num); // number of data items
for (i = 0U; i < l_rx.var.peek.num; ++i) {
switch (l_rx.var.peek.size) {
case 1:
QS_U8_PRE_(ptr[i]);
break;
case 2:
QS_U16_PRE_(
reinterpret_cast<std::uint16_t*>(ptr)[i]);
break;
case 4:
QS_U32_PRE_(
reinterpret_cast<std::uint32_t*>(ptr)[i]);
break;
default:
break;
}
}
QS::endRec_();
QS_CRIT_X_();
QS_REC_DONE(); // user callback (if defined)
break;
}
case WAIT4_POKE_DATA: {
// received less than expected poke data items
rxReportError_(static_cast<std::uint8_t>(QS_RX_POKE));
break;
}
case WAIT4_POKE_FRAME: {
rxReportAck_(QS_RX_POKE);
// no need to report done
break;
}
case WAIT4_FILL_FRAME: {
rxReportAck_(QS_RX_FILL);
ptr = static_cast<std::uint8_t *>(
QS::rxPriv_.currObj[QS::AP_OBJ]);
ptr = &ptr[l_rx.var.poke.offs];
for (i = 0U; i < l_rx.var.poke.num; ++i) {
switch (l_rx.var.poke.size) {
case 1:
ptr[i] =
static_cast<std::uint8_t>(l_rx.var.poke.data);
break;
case 2:
reinterpret_cast<std::uint16_t *>(ptr)[i] =
static_cast<std::uint16_t>(l_rx.var.poke.data);
break;
case 4:
reinterpret_cast<std::uint32_t *>(ptr)[i] =
l_rx.var.poke.data;
break;
default:
break;
}
}
break;
}
case WAIT4_FILTER_FRAME: {
rxReportAck_(static_cast<enum QSpyRxRecords>(l_rx.var.flt.recId));
// apply the received filters
if (l_rx.var.flt.recId
== static_cast<std::uint8_t>(QS_RX_GLB_FILTER))
{
for (i = 0U;
i < static_cast<std::uint8_t>(sizeof(priv_.glbFilter));
++i)
{
priv_.glbFilter[i] = l_rx.var.flt.data[i];
}
// leave the "not maskable" filters enabled,
// see qs.h, Miscellaneous QS records (not maskable)
//
priv_.glbFilter[0] |= 0x01U;
priv_.glbFilter[7] |= 0xFCU;
priv_.glbFilter[8] |= 0x7FU;
// never enable the last 3 records (0x7D, 0x7E, 0x7F)
priv_.glbFilter[15] &= 0x1FU;
}
else if (l_rx.var.flt.recId
== static_cast<std::uint8_t>(QS_RX_LOC_FILTER))
{
for (i = 0U; i < Q_DIM(priv_.locFilter); ++i) {
priv_.locFilter[i] = l_rx.var.flt.data[i];
}
// leave QS_ID == 0 always on
priv_.locFilter[0] |= 0x01U;
}
else {
rxReportError_(l_rx.var.flt.recId);
}
// no need to report Done
break;
}
case WAIT4_OBJ_FRAME: {
i = l_rx.var.obj.kind;
if (i < static_cast<std::uint8_t>(QS::MAX_OBJ)) {
if (l_rx.var.obj.recId
== static_cast<std::uint8_t>(QS_RX_CURR_OBJ))
{
rxPriv_.currObj[i] =
reinterpret_cast<void *>(l_rx.var.obj.addr);
rxReportAck_(QS_RX_CURR_OBJ);
}
else if (l_rx.var.obj.recId
== static_cast<std::uint8_t>(QS_RX_AO_FILTER))
{
if (l_rx.var.obj.addr != 0U) {
std::int_fast16_t const filter =
static_cast<std::int_fast16_t>(
reinterpret_cast<QActive *>(
l_rx.var.obj.addr)->m_prio);
locFilter_((i == 0)
? filter
:-filter);
rxReportAck_(QS_RX_AO_FILTER);
}
else {
rxReportError_(static_cast<enum_t>(QS_RX_AO_FILTER));
}
}
else {
rxReportError_(l_rx.var.obj.recId);
}
}
// both SM and AO
else if (i == static_cast<std::uint8_t>(QS::SM_AO_OBJ)) {
if (l_rx.var.obj.recId
== static_cast<std::uint8_t>(QS_RX_CURR_OBJ))
{
rxPriv_.currObj[SM_OBJ]
= reinterpret_cast<void *>(l_rx.var.obj.addr);
rxPriv_.currObj[AO_OBJ]
= reinterpret_cast<void *>(l_rx.var.obj.addr);
}
rxReportAck_(
static_cast<enum QSpyRxRecords>(l_rx.var.obj.recId));
}
else {
rxReportError_(l_rx.var.obj.recId);
}
break;
}
case WAIT4_QUERY_FRAME: {
queryCurrObj(l_rx.var.obj.kind);
break;
}
case WAIT4_EVT_FRAME: {
// NOTE: Ack was already reported in the WAIT4_EVT_LEN state
#ifdef Q_UTEST
QS::onTestEvt(l_rx.var.evt.e); // "massage" the event, if needed
#endif // Q_UTEST
// use 'i' as status: 0 == success, no-recycle
i = 0U;
if (l_rx.var.evt.prio == 0U) { // publish
QActive::publish_(l_rx.var.evt.e, &QS::rxPriv_, 0U);
}
else if (l_rx.var.evt.prio < QF_MAX_ACTIVE) {
if (!QActive::registry_[l_rx.var.evt.prio]->POST_X(
l_rx.var.evt.e,
0U, // margin
&QS::rxPriv_))
{
// failed QACTIVE_POST() recycles the event
i = 0x80U; // failure, no recycle
}
}
else if (l_rx.var.evt.prio == 255U) {
// dispatch to the current SM object
if (QS::rxPriv_.currObj[QS::SM_OBJ] != nullptr) {
// increment the ref-ctr to simulate the situation
// when the event is just retrieved from a queue.
// This is expected for the following QF::gc() call.
//
QEvt_refCtr_inc_(l_rx.var.evt.e);
static_cast<QHsm *>(QS::rxPriv_.currObj[QS::SM_OBJ])
->dispatch(l_rx.var.evt.e, 0U);
i = 0x01U; // success, recycle
}
else {
i = 0x81U; // failure, recycle
}
}
else if (l_rx.var.evt.prio == 254U) {
// init the current SM object"
if (QS::rxPriv_.currObj[QS::SM_OBJ] != nullptr) {
// increment the ref-ctr to simulate the situation
// when the event is just retrieved from a queue.
// This is expected for the following QF::gc() call.
//
QEvt_refCtr_inc_(l_rx.var.evt.e);
static_cast<QHsm *>(QS::rxPriv_.currObj[QS::SM_OBJ])
->init(l_rx.var.evt.e, 0U);
i = 0x01U; // success, recycle
}
else {
i = 0x81U; // failure, recycle
}
}
else if (l_rx.var.evt.prio == 253U) {
// post to the current AO
if (QS::rxPriv_.currObj[QS::AO_OBJ] != nullptr) {
if (!static_cast<QActive *>(
QS::rxPriv_.currObj[QS::AO_OBJ])->POST_X(
l_rx.var.evt.e,
0U, // margin
&QS::rxPriv_))
{
// failed QACTIVE_POST() recycles the event
i = 0x80U; // failure, no recycle
}
}
else {
i = 0x81U; // failure, recycle
}
}
else {
i = 0x81U; // failure, recycle
}
#if (QF_MAX_EPOOL > 0U)
// recycle needed?
if ((i & 1U) != 0U) {
QF::gc(l_rx.var.evt.e);
}
#endif
// failure?
if ((i & 0x80U) != 0U) {
rxReportError_(static_cast<std::uint8_t>(QS_RX_EVENT));
}
else {
#ifdef Q_UTEST
#if Q_UTEST != 0
QS::processTestEvts_(); // process all events produced
#endif // Q_UTEST != 0
#endif // Q_UTEST
rxReportDone_(QS_RX_EVENT);
}
break;
}
#ifdef Q_UTEST
case WAIT4_TEST_SETUP_FRAME: {
rxReportAck_(QS_RX_TEST_SETUP);
QP::QS::testData.tpNum = 0U; // clear Test-Probes
QP::QS::testData.testTime = 0U; // clear time tick
// don't clear current objects
QS::onTestSetup(); // application-specific test setup
// no need to report Done
break;
}
case WAIT4_TEST_TEARDOWN_FRAME: {
rxReportAck_(QS_RX_TEST_TEARDOWN);
QS::onTestTeardown(); // application-specific test teardown
// no need to report Done
break;
}
case WAIT4_TEST_CONTINUE_FRAME: {
rxReportAck_(QS_RX_TEST_CONTINUE);
QS::rxPriv_.inTestLoop = false; // exit the QUTest loop
// no need to report Done
break;
}
case WAIT4_TEST_PROBE_FRAME: {
rxReportAck_(QS_RX_TEST_PROBE);
Q_ASSERT_ID(815,
QP::QS::testData.tpNum
< (sizeof(QP::QS::testData.tpBuf)
/ sizeof(QP::QS::testData.tpBuf[0])));
QP::QS::testData.tpBuf[QP::QS::testData.tpNum] = l_rx.var.tp;
++QP::QS::testData.tpNum;
// no need to report Done
break;
}
#endif // Q_UTEST
case ERROR_STATE: {
// keep ignoring all bytes until new frame
break;
}
default: {
rxReportError_(0x47U);
break;
}
}
//! callback function to reset the Target (to be implemented in the BSP)
//! Callback function to execute user commands (to be implemented in BSP)
//! record ID for posting events
{124U};
//! Test Probe attributes
{
QSFun addr; //!< pointer to function hosting the Test Probe
std::uint32_t data; //!< data associated with the Test Probe
std::uint8_t idx; //!< index of the Test Probe
};
//! QUTest data
{
TProbe tpBuf[16]; //!< up to 16 Test Probes
std::uint8_t tpNum; //!< # of registered Test Probes
QSTimeCtr testTime; //!< test time stamp
};
//! QUTest data
//! internal function to pause test and enter the test event loop
beginRec_(static_cast<std::uint_fast8_t>(QP::QS_TEST_PAUSED));
endRec_();
onTestLoop();
noexcept
//! get the test probe data for the given API
std::uint32_t data = 0U;
for (std::uint8_t i = 0U; i < testData.tpNum; ++i) {
if (testData.tpBuf[i].addr == reinterpret_cast<QSFun>(api)) {
data = testData.tpBuf[i].data;
QS_CRIT_STAT_
QS_CRIT_E_();
QS::beginRec_(static_cast<std::uint_fast8_t>(QS_TEST_PROBE_GET));
QS_TIME_PRE_(); // timestamp
QS_FUN_PRE_(api); // the calling API
QS_U32_PRE_(data); // the Test-Probe data
QS::endRec_();
QS_CRIT_X_();
QS_REC_DONE(); // user callback (if defined)
--testData.tpNum; // one less Test-Probe
// move all remaining entries in the buffer up by one
for (std::uint8_t j = i; j < testData.tpNum; ++j) {
testData.tpBuf[j] = testData.tpBuf[j + 1U];
}
break; // we are done (Test-Probe retrieved)
}
}
return data;
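// [Illustrative example -- not part of the generated QP/C++ source]
// Test Probes are planted inside the code under test to alter its behavior
// from a QUTest script; myFunction() is a hypothetical API under test:
void myFunction(void) {
    QS_TEST_PROBE_DEF(&myFunction) // bind the Test-Probe to this API
    QS_TEST_PROBE_ID(1,            // runs only when the probe data == 1
        // inject a fault or an alternative path here...
    )
    // ... normal code of the function ...
}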
//! callback to setup a unit test inside the Target
//! callback to teardown after a unit test inside the Target
//! callback to "massage" the test event before dispatching/posting it
//! callback to examine an event that is about to be posted
//! callback to run the test loop
//! internal function to process posted events during test
QS_TEST_PROBE_DEF(&QS::processTestEvts_)
// return immediately (do nothing) for Test Probe != 0
QS_TEST_PROBE(return;)
while (QF::readySet_.notEmpty()) {
std::uint_fast8_t const p = QF::readySet_.findMax();
QActive * const a = QActive::registry_[p];
// perform the run-to-completion (RTC) step...
// 1. retrieve the event from the AO's event queue, which by this
// time must be non-empty, and the "Vanilla" kernel asserts this.
// 2. dispatch the event to the AO's state machine.
// 3. determine if event is garbage and collect it if so
//
QEvt const * const e = a->get_();
a->dispatch(e, a->m_prio);
#if (QF_MAX_EPOOL > 0U)
QF::gc(e);
#endif
if (a->m_eQueue.isEmpty()) { // empty queue?
QF::readySet_.remove(p);
}
}
//! QF initialization for QUTest
//! Clear the internal QF variables, so that the framework can start
//! correctly even if the startup code fails to clear the uninitialized
//! data (as is required by the C++ Standard).
QActive::subscrList_ = nullptr;
QActive::maxPubSignal_ = 0;
QF::intLock_ = 0U;
QF::intNest_ = 0U;
QF::maxPool_ = 0U;
bzero(&QActive::registry_[0], sizeof(QActive::registry_));
bzero(&QF::readySet_, sizeof(QF::readySet_));
//! stop the QF customization for QUTest
//!
//! @sa QF::onCleanup()
QS::onReset();
//! QF::run() customization for QUTest
// function dictionaries for the standard API
QS_FUN_DICTIONARY(&QActive::post_);
QS_FUN_DICTIONARY(&QActive::postLIFO);
QS_FUN_DICTIONARY(&QS::processTestEvts_);
// produce the QS_QF_RUN trace record
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QF_RUN, 0U)
QS_END_PRE_()
QS::processTestEvts_(); // process all events posted so far
QS::onTestLoop(); // run the unit test
QS::onCleanup(); // application cleanup
return 0; // return no error
//! QActive active object class customization for QUTest
//! Starts execution of an active object and registers the object
//! with the framework customized for QUTest
Q_UNUSED_PAR(stkSto);
Q_UNUSED_PAR(stkSize);
m_prio = static_cast<std::uint8_t>(prioSpec & 0xFFU); // QF-prio.
m_pthre = static_cast<std::uint8_t>(prioSpec >> 8U); // preemption-thre.
register_(); // make QF aware of this AO
m_eQueue.init(qSto, qLen); // initialize QEQueue of this AO
this->init(par, m_prio); // take the top-most initial tran. (virtual)
//! Stops execution of an active object and unregisters the object
//! with the framework customized for QUTest
unsubscribeAll(); // unsubscribe from all events
unregister_(); // remove this object from QF
//! QTimeEvt class customization for QUTest
//! Processes one clock tick for QUTest
// The testing version of system tick processing performs as follows:
// 1. If the Current Time Event (TE) Object is defined and the TE is armed,
// the TE is disarmed (if one-shot) and then posted to the recipient AO.
// 2. The linked-list of all armed Time Events is updated.
//
QF_CRIT_STAT_
QF_CRIT_E_();
QTimeEvt *prev = &QTimeEvt::timeEvtHead_[tickRate];
QS_BEGIN_NOCRIT_PRE_(QS_QF_TICK, 0U)
prev->m_ctr = (prev->m_ctr + 1U);
QS_TEC_PRE_(prev->m_ctr); // tick ctr
QS_U8_PRE_(tickRate); // tick rate
QS_END_NOCRIT_PRE_()
// is current Time Event object provided?
QTimeEvt *t = static_cast<QTimeEvt *>(QS::rxPriv_.currObj[QS::TE_OBJ]);
if (t != nullptr) {
// the time event must be armed
Q_ASSERT_ID(810, t->m_ctr != 0U);
// temp. for volatile
QActive * const act = static_cast<QActive *>(t->m_act);
// the recipient AO must be provided
Q_ASSERT_ID(820, act != nullptr);
// periodic time evt?
if (t->m_interval != 0U) {
t->m_ctr = t->m_interval; // rearm the time event
}
else { // one-shot time event: automatically disarm
t->m_ctr = 0U; // auto-disarm
// mark time event 't' as NOT linked
t->refCtr_ = static_cast<std::uint8_t>(t->refCtr_
& static_cast<std::uint8_t>(~TE_IS_LINKED));
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_AUTO_DISARM, act->m_prio)
QS_OBJ_PRE_(t); // this time event object
QS_OBJ_PRE_(act); // the target AO
QS_U8_PRE_(tickRate); // tick rate
QS_END_NOCRIT_PRE_()
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_POST, act->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(t); // the time event object
QS_SIG_PRE_(t->sig); // signal of this time event
QS_OBJ_PRE_(act); // the target AO
QS_U8_PRE_(tickRate); // tick rate
QS_END_NOCRIT_PRE_()
QF_CRIT_X_(); // exit crit. section before posting
// asserts if queue overflows
static_cast<void>(act->POST(t, sender));
QF_CRIT_E_();
}
// update the linked list of time events
for (;;) {
t = prev->m_next; // advance down the time evt. list
// end of the list?
if (t == nullptr) {
// any new time events armed since the last run of tick_()?
if (QTimeEvt::timeEvtHead_[tickRate].m_act != nullptr) {
// sanity check
Q_ASSERT_CRIT_(830, prev != nullptr);
prev->m_next = QTimeEvt::timeEvtHead_[tickRate].toTimeEvt();
QTimeEvt::timeEvtHead_[tickRate].m_act = nullptr;
t = prev->m_next; // switch to the new list
}
else {
break; // all currently armed time evts. processed
}
}
// time event scheduled for removal?
if (t->m_ctr == 0U) {
prev->m_next = t->m_next;
// mark time event 't' as NOT linked
t->refCtr_ = static_cast<std::uint8_t>(t->refCtr_
& static_cast<std::uint8_t>(~TE_IS_LINKED));
// do NOT advance the prev pointer
QF_CRIT_X_(); // exit crit. section to reduce latency
// prevent merging critical sections, see NOTE1 below
QF_CRIT_EXIT_NOP();
}
else {
prev = t; // advance to this time event
QF_CRIT_X_(); // exit crit. section to reduce latency
// prevent merging critical sections, see NOTE1 below
QF_CRIT_EXIT_NOP();
}
QF_CRIT_E_(); // re-enter crit. section to continue
}
QF_CRIT_X_();
//! Dummy HSM class for testing (inherits QP::QHsm)
//!
//! @details
//! QHsmDummy is a test double for the role of "Orthogonal Components"
//! HSM objects in QUTest unit testing.
//! ctor
: QHsm(nullptr)
override
Q_UNUSED_PAR(e);
#ifdef Q_SPY
if ((QS::priv_.flags & 0x01U) == 0U) {
QS::priv_.flags |= 0x01U;
QS_FUN_DICTIONARY(&QP::QHsm::top);
}
#else
Q_UNUSED_PAR(qs_id);
#endif
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_STATE_INIT, qs_id)
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(m_state.fun); // the source state
QS_FUN_PRE_(m_temp.fun); // the target of the initial transition
QS_END_PRE_()
override
QHsmDummy::init(nullptr, qs_id);
override
#ifndef Q_SPY
Q_UNUSED_PAR(e);
Q_UNUSED_PAR(qs_id);
#endif
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_DISPATCH, qs_id)
QS_TIME_PRE_(); // time stamp
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(m_state.fun); // the current state
QS_END_PRE_()
//! Dummy Active Object class for testing (inherits QP::QActive)
//!
//! @details
//! QActiveDummy is a test double for the role of collaborating active
//! objects in QUTest unit testing.
//! ctor
: QActive(nullptr)
override
// No special preconditions for checking parameters to allow starting
// dummy AOs the exact same way as the real counterparts.
Q_UNUSED_PAR(qSto);
Q_UNUSED_PAR(qLen);
Q_UNUSED_PAR(stkSto);
Q_UNUSED_PAR(stkSize);
m_prio = static_cast<std::uint8_t>(prioSpec & 0xFFU); // QF-prio.
m_pthre = static_cast<std::uint8_t>(prioSpec >> 8U); // preemption-thre.
register_(); // make QF aware of this AO
QActiveDummy::init(par, m_prio); // take the top-most initial tran.
override
this->start(prioSpec, qSto, qLen, stkSto, stkSize, nullptr);
override
Q_UNUSED_PAR(e);
Q_UNUSED_PAR(qs_id);
#ifdef Q_SPY
if ((QS::priv_.flags & 0x01U) == 0U) {
QS::priv_.flags |= 0x01U;
QS_FUN_DICTIONARY(&QP::QHsm::top);
}
#endif
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_STATE_INIT, m_prio)
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(m_state.fun); // the source state
QS_FUN_PRE_(m_temp.fun); // the target of the initial transition
QS_END_PRE_()
override
QActiveDummy::init(nullptr, qs_id);
override
Q_UNUSED_PAR(qs_id);
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_DISPATCH, m_prio)
QS_TIME_PRE_(); // time stamp
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this state machine object
QS_FUN_PRE_(m_state.fun); // the current state
QS_END_PRE_()
noexcept override
QS_TEST_PROBE_DEF(&QActive::post_)
// test-probe#1 for faking queue overflow
bool status = true;
QS_TEST_PROBE_ID(1,
status = false;
if (margin == QF::NO_MARGIN) {
// fake assertion Mod=qf_actq,Loc=110
Q_onAssert("qf_actq", 110);
}
)
QF_CRIT_STAT_
QF_CRIT_E_();
// is it a dynamic event?
if (e->poolId_ != 0U) {
QEvt_refCtr_inc_(e); // increment the reference counter
}
std::uint_fast8_t const rec =
(status ? static_cast<std::uint8_t>(QS_QF_ACTIVE_POST)
: static_cast<std::uint8_t>(QS_QF_ACTIVE_POST_ATTEMPT));
QS_BEGIN_NOCRIT_PRE_(rec, m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(sender); // the sender object
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this active object
QS_2U8_PRE_(e->poolId_, e->refCtr_); // pool Id & refCtr of the evt
QS_EQC_PRE_(0U); // number of free entries
QS_EQC_PRE_(margin); // margin requested
QS_END_NOCRIT_PRE_()
// callback to examine the posted event under the same conditions
// as producing the #QS_QF_ACTIVE_POST trace record, which are:
// the local filter for this AO ('me->prio') is set
//
if ((QS::priv_.locFilter[m_prio >> 3U]
& (1U << (m_prio & 7U))) != 0U)
{
QS::onTestPost(sender, this, e, status);
}
QF_CRIT_X_();
// recycle the event immediately, because it was not really posted
#if (QF_MAX_EPOOL > 0U)
QF::gc(e);
#endif
return status;
noexcept override
QS_TEST_PROBE_DEF(&QActive::postLIFO)
// test-probe#1 for faking queue overflow
QS_TEST_PROBE_ID(1,
// fake assertion Mod=qf_actq,Loc=210
Q_onAssert("qf_actq", 210);
)
QF_CRIT_STAT_
QF_CRIT_E_();
// is it a dynamic event?
if (e->poolId_ != 0U) {
QEvt_refCtr_inc_(e); // increment the reference counter
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_POST_LIFO, m_prio)
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(e->sig); // the signal of this event
QS_OBJ_PRE_(this); // this active object
QS_2U8_PRE_(e->poolId_, e->refCtr_); // pool Id & refCtr of the evt
QS_EQC_PRE_(0U); // number of free entries
QS_EQC_PRE_(0U); // min number of free entries
QS_END_NOCRIT_PRE_()
// callback to examine the posted event under the same conditions
// as producing the #QS_QF_ACTIVE_POST trace record, which are:
// the local filter for this AO ('me->prio') is set
//
if ((QS::priv_.locFilter[m_prio >> 3U]
& (1U << (m_prio & 7U))) != 0U)
{
QS::onTestPost(nullptr, this, e, true);
}
QF_CRIT_X_();
// recycle the event immediately, because it was not really posted
#if (QF_MAX_EPOOL > 0U)
QF::gc(e);
#endif
//! QV idle callback (customized in BSPs for QV)
//!
//! @attention
//! QV::onIdle() must be called with interrupts DISABLED because the
//! determination of the idle condition (no events in the queues) can
//! change at any time due to an interrupt posting events to a queue.
//! QV::onIdle() MUST enable interrupts internally, ideally **atomically**
//! with putting the CPU into a power-saving mode.
//! QF initialization for QV
#if (QF_MAX_EPOOL > 0U)
QF::maxPool_ = 0U;
#endif
bzero(&QTimeEvt::timeEvtHead_[0], sizeof(QTimeEvt::timeEvtHead_));
bzero(&QActive::registry_[0], sizeof(QActive::registry_));
bzero(&QF::readySet_, sizeof(QF::readySet_));
#ifdef QV_INIT
QV_INIT(); // port-specific initialization of the QV kernel
#endif
//! stop the QF customization for QV
//!
//! @sa QP::QF::onCleanup()
onCleanup(); // cleanup callback
// nothing else to do for the QV kernel
//! QF::run() customization for QV kernel
#ifdef Q_SPY
// produce the QS_QF_RUN trace record
QF_INT_DISABLE();
QS::beginRec_(QS_REC_NUM_(QS_QF_RUN));
QS::endRec_();
QF_INT_ENABLE();
#endif
onStartup(); // startup callback
QF_INT_DISABLE();
#ifdef QV_START
QV_START(); // port-specific startup of the QV kernel
#endif
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
std::uint_fast8_t pprev = 0U; // previous priority
#endif
for (;;) { // QV event loop...
// find the maximum priority AO ready to run
if (readySet_.notEmpty()) {
std::uint_fast8_t const p = readySet_.findMax();
QActive * const a = QActive::registry_[p];
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_NEXT, p)
QS_TIME_PRE_(); // timestamp
QS_2U8_PRE_(p, pprev); // scheduled prio & previous prio
QS_END_NOCRIT_PRE_()
#ifdef QF_ON_CONTEXT_SW
QF_onContextSw(((pprev != 0U)
? QActive::registry_[pprev]
: nullptr), a);
#endif // QF_ON_CONTEXT_SW
pprev = p; // update previous priority
#endif // (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
QF_INT_ENABLE();
// perform the run-to-completion (RTC) step...
// 1. retrieve the event from the AO's event queue, which by
// this time must be non-empty, and the QV kernel asserts it.
// 2. dispatch the event to the AO's state machine.
// 3. determine if event is garbage and collect it if so
//
QEvt const * const e = a->get_();
a->dispatch(e, a->m_prio);
#if (QF_MAX_EPOOL > 0U)
gc(e);
#endif
QF_INT_DISABLE();
if (a->m_eQueue.isEmpty()) { // empty queue?
readySet_.remove(p);
}
}
else { // no AO ready to run --> idle
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
if (pprev != 0U) {
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_IDLE, pprev)
QS_TIME_PRE_(); // timestamp
QS_U8_PRE_(pprev); // previous prio
QS_END_NOCRIT_PRE_()
#ifdef QF_ON_CONTEXT_SW
QF_onContextSw(QActive::registry_[pprev], nullptr);
#endif // QF_ON_CONTEXT_SW
pprev = 0U; // update previous prio
}
#endif // (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
// QV::onIdle() must be called with interrupts DISABLED because
// the determination of the idle condition (no events in the
// queues) can change at any time due to an interrupt posting events
// to a queue. QV::onIdle() MUST enable interrupts internally,
// perhaps at the same time as putting the CPU into a power-saving
// mode.
QV::onIdle();
QF_INT_DISABLE();
}
}
#ifdef __GNUC__ // GNU compiler?
return 0;
#endif
//! QActive active object class customization for QV
//! Starts execution of an active object and registers the object
//! with the framework customized for QV
Q_UNUSED_PAR(stkSto); // not needed in QV
Q_UNUSED_PAR(stkSize); // not needed in QV
//! @pre stack storage must not be provided because the QV kernel
//! does not need per-AO stacks.
//!
Q_REQUIRE_ID(500, stkSto == nullptr);
m_prio = static_cast<std::uint8_t>(prioSpec & 0xFFU); // QF-prio.
m_pthre = static_cast<std::uint8_t>(prioSpec >> 8U); // preemption-thre.
register_(); // make QF aware of this AO
m_eQueue.init(qSto, qLen); // initialize QEQueue of this AO
this->init(par, m_prio); // take the top-most initial tran. (virtual)
QS_FLUSH(); // flush the trace buffer to the host
//! QV scheduler lock status (not needed in QV)
//! QV scheduler locking (not needed in QV)
(static_cast<void>(0))
//! QV scheduler unlocking (not needed in QV)
(static_cast<void>(0))
//! QV native event queue waiting
\
Q_ASSERT_ID(110, (me_)->m_eQueue.m_frontEvt != nullptr)
//! QV native event queue signaling
\
(QF::readySet_.insert(static_cast<std::uint_fast8_t>((me_)->m_prio)))
//! QK idle callback (customized in BSPs for QK)
//!
//! @details
//! QK::onIdle() is called continuously by the QK idle loop. This callback
//! gives the application an opportunity to enter a power-saving CPU mode,
//! or perform some other idle processing.
//!
//! @note
//! QK::onIdle() is invoked with interrupts enabled and must also return
//! with interrupts enabled.
//!
//! @sa QV::onIdle(), QXK::onIdle()
noexcept
//! QK selective scheduler lock
//!
//! @details
//! This function locks the QK scheduler to the specified ceiling.
//!
//! @param[in] ceiling priority ceiling to which the QK scheduler
//! needs to be locked
//!
//! @returns
//! The previous QK Scheduler lock status, which is to be used to unlock
//! the scheduler by restoring its previous lock status in
//! QP::QK::schedUnlock().
//!
//! @note
//! QP::QK::schedLock() must always be followed by the corresponding
//! QP::QK::schedUnlock().
//!
//! @sa QK_schedUnlock()
//!
//! @usage
//! The following example shows how to lock and unlock the QK scheduler:
//! @include qk_lock.cpp
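//!
//! A minimal usage sketch (hypothetical application code, not the
//! qk_lock.cpp example referenced above; accessSharedResource() is an
//! assumed application function):
//! @code{.cpp}
//! QP::QSchedStatus lockStat = QP::QK::schedLock(3U); // ceiling = QF-prio 3
//! accessSharedResource(); // protected from AOs with QF-prio up to 3
//! QP::QK::schedUnlock(lockStat); // restore the previous lock status
//! @endcode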
QF_CRIT_STAT_
QF_CRIT_E_();
//! @pre The QK scheduler lock cannot be called from an ISR
Q_REQUIRE_ID(600, !QK_ISR_CONTEXT_());
// first store the previous lock prio
QSchedStatus stat;
if (ceiling > QK_attr_.lockCeil) { // raising the lock ceiling?
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_LOCK, 0U)
QS_TIME_PRE_(); // timestamp
// the previous lock ceiling & new lock ceiling
QS_2U8_PRE_(QK_attr_.lockCeil,
static_cast<std::uint8_t>(ceiling));
QS_END_NOCRIT_PRE_()
// previous status of the lock
stat = static_cast<QSchedStatus>(QK_attr_.lockHolder);
stat = stat | (static_cast<QSchedStatus>(QK_attr_.lockCeil) << 8U);
// new status of the lock
QK_attr_.lockHolder = QK_attr_.actPrio;
QK_attr_.lockCeil = static_cast<std::uint8_t>(ceiling);
}
else {
stat = 0xFFU; // scheduler not locked
}
QF_CRIT_X_();
return stat; // return the status to be saved in a stack variable
noexcept
//! QK selective scheduler unlock
//!
//! @details
//! This function unlocks the QK scheduler to the previous status.
//!
//! @param[in] stat previous QK Scheduler lock status returned from
//! QP::QK::schedLock()
//! @note
//! QP::QK::schedUnlock() must always follow the corresponding
//! QP::QK::schedLock().
//!
//! @sa QP::QK::schedLock()
//!
//! @usage
//! The following example shows how to lock and unlock the QK scheduler:
//! @include qk_lock.cpp
// has the scheduler been actually locked by the last QK_schedLock()?
if (stat != 0xFFU) {
std::uint8_t const lockCeil = QK_attr_.lockCeil;
std::uint8_t const prevCeil = static_cast<std::uint8_t>(stat >> 8U);
QF_CRIT_STAT_
QF_CRIT_E_();
//! @pre The scheduler cannot be unlocked:
//! - from the ISR context; and
//! - the current lock ceiling must be greater than the previous
Q_REQUIRE_ID(700, (!QK_ISR_CONTEXT_())
&& (lockCeil > prevCeil));
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_UNLOCK, 0U)
QS_TIME_PRE_(); // timestamp
// current lock ceiling (old), previous lock ceiling (new)
QS_2U8_PRE_(lockCeil, prevCeil);
QS_END_NOCRIT_PRE_()
// restore the previous lock ceiling and lock holder
QK_attr_.lockCeil = prevCeil;
QK_attr_.lockHolder = static_cast<std::uint8_t>(stat & 0xFFU);
// find if any AOs should be run after unlocking the scheduler
if (QK_sched_() != 0U) { // synchronous preemption needed?
QK_activate_(); // synchronously activate any unlocked AOs
}
QF_CRIT_X_();
}
//! QF initialization for QK
#if (QF_MAX_EPOOL > 0U)
QF::maxPool_ = 0U;
#endif
bzero(&QTimeEvt::timeEvtHead_[0], sizeof(QTimeEvt::timeEvtHead_));
bzero(&QActive::registry_[0], sizeof(QActive::registry_));
bzero(&QF::readySet_, sizeof(QF::readySet_));
bzero(&QK_attr_, sizeof(QK_attr_));
// setup the QK scheduler as initially locked and not running
QK_attr_.lockCeil = (QF_MAX_ACTIVE + 1U); // scheduler locked
// storage capable of holding a blank QActive object (const in ROM)
static void* const
idle_ao[((sizeof(QActive) + sizeof(void*)) - 1U) / sizeof(void*)]
= { nullptr };
// register the blank QActive object as the idle-AO (cast 'const' away)
QActive::registry_[0] = QF_CONST_CAST_(QActive*,
reinterpret_cast<QActive const*>(idle_ao));
#ifdef QK_INIT
QK_INIT(); // port-specific initialization of the QK kernel
#endif
//! stop the QF customization for QK
//!
//! @sa QF::onCleanup()
onCleanup(); // cleanup callback
// nothing else to do for the QK preemptive kernel
//! QF::run() customization for QK kernel
#ifdef Q_SPY
// produce the QS_QF_RUN trace record
QF_INT_DISABLE();
QS::beginRec_(QS_REC_NUM_(QS_QF_RUN));
QS::endRec_();
QF_INT_ENABLE();
#endif
onStartup(); // startup callback
QF_INT_DISABLE();
QK_attr_.lockCeil = 0U; // unlock the QK scheduler
// activate AOs to process events posted so far
if (QK_sched_() != 0U) {
QK_activate_();
}
#ifdef QK_START
QK_START(); // port-specific startup of the QK kernel
#endif
QF_INT_ENABLE();
for (;;) { // QK idle loop...
QK::onIdle(); // application-specific QK on-idle callback
}
#ifdef __GNUC__ // GNU compiler?
return 0;
#endif
//! QActive active object class customization for QK
//! Starts execution of an active object and registers the object
//! with the framework customized for QK
Q_UNUSED_PAR(stkSto); // not needed in QK
Q_UNUSED_PAR(stkSize); // not needed in QK
//! @pre
//! AO cannot be started from an ISR, and the stack storage must not
//! be provided, because the QK kernel does not need per-AO stacks.
Q_REQUIRE_ID(300, (!QK_ISR_CONTEXT_())
&& (stkSto == nullptr));
m_prio = static_cast<std::uint8_t>(prioSpec & 0xFFU); // QF-prio.
m_pthre = static_cast<std::uint8_t>(prioSpec >> 8U); // preemption-thre.
register_(); // make QF aware of this AO
m_eQueue.init(qSto, qLen); // initialize the built-in queue
this->init(par, m_prio); // take the top-most initial tran. (virtual)
QS_FLUSH(); // flush the trace buffer to the host
// See if this AO needs to be scheduled in case QK is already running
QF_CRIT_STAT_
QF_CRIT_E_();
if (QK_sched_() != 0U) { // synchronous preemption needed?
QK_activate_(); // synchronously activate AOs
}
QF_CRIT_X_();
//! attributes of the QK kernel (extern "C" for easy access in assembly)
{
std::uint8_t volatile actPrio; //!< prio of the active AO
std::uint8_t volatile nextPrio; //!< prio of the next AO to execute
std::uint8_t volatile actThre; //!< active preemption-threshold
std::uint8_t volatile lockCeil; //!< lock preemption-ceiling (0==no-lock)
std::uint8_t volatile lockHolder; //!< prio of the lock holder
};
//! attributes of the QK kernel (extern "C" to be accessible from C)
noexcept
//! QK scheduler finds the highest-priority thread ready to run
//!
//! @details
//! The QK scheduler finds out the priority of the highest-priority AO
//! that (1) has events to process and (2) has a priority above the
//! current active preemption-threshold.
//!
//! @returns
//! The QF-priority of the next active object to activate, or zero
//! if no activation of AO is needed.
//!
//! @attention
//! QK_sched_() must always be called with interrupts **disabled** and
//! returns with interrupts **disabled**.
std::uint_fast8_t p;
if (QP::QF::readySet_.isEmpty()) {
p = 0U; // no activation needed
}
else {
// find the highest-prio AO with non-empty event queue
p = QP::QF::readySet_.findMax();
// is the AO's priority below the active preemption-threshold?
if (p <= QK_attr_.actThre) {
p = 0U; // no activation needed
}
// is the AO's priority below the lock preemption-ceiling?
else if (p <= QK_attr_.lockCeil) {
p = 0U; // no activation needed
}
else {
QK_attr_.nextPrio = static_cast<std::uint8_t>(p);
}
}
return p;
noexcept
//! QK activator activates the next active object. The activated AO preempts
//! the currently executing AOs
//!
//! @details
//! QK_activate_() activates ready-to-run AOs that are above the initial
//! preemption-threshold.
//!
//! @note
//! The activator might enable interrupts internally, but always returns with
//! interrupts **disabled**.
std::uint8_t const prio_in = QK_attr_.actPrio; // saved initial priority
std::uint8_t p = QK_attr_.nextPrio; // next prio to run
QK_attr_.nextPrio = 0U; // clear for the next time
// QK_attr_.actPrio and QK_attr_.nextPrio must be in range
Q_REQUIRE_ID(500, (prio_in <= QF_MAX_ACTIVE)
&& (0U < p) && (p <= QF_MAX_ACTIVE));
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
std::uint8_t pprev = prio_in;
#endif // QF_ON_CONTEXT_SW || Q_SPY
// loop until no more ready-to-run AOs of higher prio than the initial
QP::QActive *a;
do {
a = QP::QActive::registry_[p]; // obtain the pointer to the AO
// set new active priority and preemption-ceiling
QK_attr_.actPrio = p;
QK_attr_.actThre = QP::QActive::registry_[p]->m_pthre;
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
if (p != pprev) { // changing threads?
QS_BEGIN_NOCRIT_PRE_(QP::QS_SCHED_NEXT, p)
QS_TIME_PRE_(); // timestamp
QS_2U8_PRE_(p, // priority of the scheduled AO
pprev); // previous priority
QS_END_NOCRIT_PRE_()
#ifdef QF_ON_CONTEXT_SW
// context-switch callback
QF_onContextSw(((pprev != 0U)
? QP::QActive::registry_[pprev]
: nullptr), a);
#endif // QF_ON_CONTEXT_SW
pprev = p; // update previous priority
}
#endif // QF_ON_CONTEXT_SW || Q_SPY
QF_INT_ENABLE(); // unconditionally enable interrupts
// perform the run-to-completion (RTC) step...
// 1. retrieve the event from the AO's event queue, which by this
// time must be non-empty and QActive::get_() asserts it.
// 2. dispatch the event to the AO's state machine.
// 3. determine if event is garbage and collect it if so
QP::QEvt const * const e = a->get_();
a->dispatch(e, a->m_prio);
#if (QF_MAX_EPOOL > 0U)
QP::QF::gc(e);
#endif
// determine the next highest-priority AO ready to run...
QF_INT_DISABLE(); // unconditionally disable interrupts
if (a->m_eQueue.isEmpty()) { // empty queue?
QP::QF::readySet_.remove(p);
}
if (QP::QF::readySet_.isEmpty()) {
p = 0U; // no activation needed
}
else {
// find new highest-prio AO ready to run...
p = static_cast<std::uint8_t>(QP::QF::readySet_.findMax());
// is the new priority below the initial preemption-threshold?
if (p <= QP::QActive::registry_[prio_in]->m_pthre) {
p = 0U; // no activation needed
}
// is the AO's priority below the lock preemption-ceiling?
else if (p <= QK_attr_.lockCeil) {
p = 0U; // no activation needed
}
else {
Q_ASSERT_ID(510, p <= QF_MAX_ACTIVE);
}
}
} while (p != 0U);
// restore the active priority and preemption-threshold
QK_attr_.actPrio = prio_in;
QK_attr_.actThre = QP::QActive::registry_[prio_in]->m_pthre;
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
if (prio_in != 0U) { // resuming an active object?
a = QP::QActive::registry_[prio_in]; // pointer to preempted AO
QS_BEGIN_NOCRIT_PRE_(QP::QS_SCHED_NEXT, prio_in)
QS_TIME_PRE_(); // timestamp
// priority of the resumed AO, previous priority
QS_2U8_PRE_(prio_in, pprev);
QS_END_NOCRIT_PRE_()
}
else { // resuming priority==0 --> idle
a = nullptr; // QK idle loop
QS_BEGIN_NOCRIT_PRE_(QP::QS_SCHED_IDLE, pprev)
QS_TIME_PRE_(); // timestamp
QS_U8_PRE_(pprev); // previous priority
QS_END_NOCRIT_PRE_()
}
#ifdef QF_ON_CONTEXT_SW
QF_onContextSw(QP::QActive::registry_[pprev], a);
#endif // QF_ON_CONTEXT_SW
#endif // QF_ON_CONTEXT_SW || Q_SPY
//! Internal port-specific macro that checks the execution context
//! (ISR vs. thread). Might be overridden in qk_port.hpp.
//!
//! @returns
//! 'true' if the code executes in the ISR context and 'false' otherwise.
(QF::intNest_ != 0U)
//! QK scheduler lock status
QSchedStatus lockStat_;
//! QK selective scheduler locking
do { \
if (QK_ISR_CONTEXT_()) { \
lockStat_ = 0xFFU; \
} else { \
lockStat_ = QK::schedLock((ceil_)); \
} \
} while (false)
//! QK selective scheduler unlocking
do { \
if (lockStat_ != 0xFFU) { \
QK::schedUnlock(lockStat_); \
} \
} while (false)
// QK native event queue waiting
\
Q_ASSERT_ID(110, (me_)->m_eQueue.m_frontEvt != nullptr)
// QK native event queue signaling
do { \
QF::readySet_.insert( \
static_cast<std::uint_fast8_t>((me_)->m_prio)); \
if (!QK_ISR_CONTEXT_()) { \
if (QK_sched_() != 0U) { \
QK_activate_(); \
} \
} \
} while (false)
//! QXK idle callback (customized in BSPs for QXK)
//!
//! @details
//! QXK::onIdle() is called continuously by the QXK idle loop. This
//! callback gives the application an opportunity to enter a power-saving
//! CPU mode, or perform some other idle processing.
//!
//! @note
//! QXK::onIdle() is invoked with interrupts enabled and must also return
//! with interrupts enabled.
//!
//! @sa
//! QV::onIdle(), QK::onIdle()
noexcept
//! QXK selective scheduler lock
//!
//! @details
//! This function locks the QXK scheduler to the specified ceiling.
//!
//! @param[in] ceiling priority ceiling to which the QXK scheduler
//! needs to be locked
//!
//! @returns
//! The previous QXK Scheduler lock status, which is to be used to unlock
//! the scheduler by restoring its previous lock status in
//! QXK::schedUnlock().
//!
//! @note
//! QXK::schedLock() must always be followed by the corresponding
//! QXK::schedUnlock().
//!
//! @sa QXK::schedUnlock()
//!
//! @usage
//! The following example shows how to lock and unlock the QXK scheduler:
//! @include qxk_lock.cpp
QF_CRIT_STAT_
QF_CRIT_E_();
//! @pre The QXK scheduler lock cannot be called from an ISR;
Q_REQUIRE_ID(400, !QXK_ISR_CONTEXT_());
QSchedStatus stat; // saved lock status to be returned
// is the lock ceiling being raised?
if (ceiling > static_cast<std::uint_fast8_t>(QXK_attr_.lockCeil)) {
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_LOCK, 0U)
QS_TIME_PRE_(); // timestamp
// the previous lock ceiling & new lock ceiling
QS_2U8_PRE_(QXK_attr_.lockCeil,
static_cast<std::uint8_t>(ceiling));
QS_END_NOCRIT_PRE_()
// previous status of the lock
stat = static_cast<QSchedStatus>(QXK_attr_.lockHolder);
stat |= static_cast<QSchedStatus>(QXK_attr_.lockCeil) << 8U;
// new status of the lock
QXK_attr_.lockHolder = (QXK_attr_.curr != nullptr)
? QXK_attr_.curr->m_prio
: 0U;
QXK_attr_.lockCeil = static_cast<std::uint8_t>(ceiling);
}
else {
stat = 0xFFU; // scheduler not locked
}
QF_CRIT_X_();
return stat; // return the status to be saved in a stack variable
noexcept
//! QXK selective scheduler unlock
//!
//! @details
//! This function unlocks the QXK scheduler to the previous status.
//!
//! @param[in] stat previous QXK Scheduler lock status returned
//! from QXK::schedLock()
//! @note
//! A QXK scheduler can be locked from both basic threads (AOs) and
//! extended threads and the scheduler locks can nest.
//!
//! @note
//! QXK::schedUnlock() must always follow the corresponding
//! QXK::schedLock().
//!
//! @sa QXK::schedLock()
//!
//! @usage
//! The following example shows how to lock and unlock the QXK scheduler:
//! @include qxk_lock.cpp
// has the scheduler been actually locked by the last QXK::schedLock()?
if (stat != 0xFFU) {
std::uint8_t const lockCeil = QXK_attr_.lockCeil;
std::uint8_t const prevCeil = static_cast<std::uint8_t>(stat >> 8U);
QF_CRIT_STAT_
QF_CRIT_E_();
//! @pre The scheduler cannot be unlocked:
//! - from the ISR context; and
//! - the current lock ceiling must be greater than the previous
Q_REQUIRE_ID(500, (!QXK_ISR_CONTEXT_())
&& (lockCeil > prevCeil));
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_UNLOCK, 0U)
QS_TIME_PRE_(); // timestamp
// ceiling before unlocking & prio after unlocking
QS_2U8_PRE_(lockCeil, prevCeil);
QS_END_NOCRIT_PRE_()
// restore the previous lock ceiling and lock holder
QXK_attr_.lockCeil = prevCeil;
QXK_attr_.lockHolder = static_cast<std::uint8_t>(stat & 0xFFU);
// find the highest-prio thread ready to run
if (QXK_sched_() != 0U) { // synchronous preemption needed?
QXK_activate_(); // synchronously activate unlocked AOs
}
QF_CRIT_X_();
}
//! timeout signals for extended threads
: enum_t {
DELAY_SIG = 1,
TIMEOUT_SIG
};
//! Extended (blocking) thread of the QXK preemptive kernel
//!
//! @details
//! QP::QXThread represents the extended (blocking) thread of the QXK kernel.
//! Each blocking thread in the application must be represented by the
//! corresponding QP::QXThread instance
//!
//! @note
//! Typically QP::QXThread is instantiated directly in the application code.
//! The customization of the thread occurs in the constructor, where you
//! provide the thread-handler function as the parameter.
//!
//! @sa QP::QActive
//!
//! @usage
//! The following example illustrates how to instantiate and use an extended
//! thread in your application.
//! @include qxk_thread.cpp
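//!
//! A minimal sketch of such an extended thread (hypothetical application
//! code, not the qxk_thread.cpp example referenced above; BSP_ledToggle()
//! is an assumed application function):
//! @code{.cpp}
//! void blinky_run(QP::QXThread * const me) { // thread-handler function
//!     Q_UNUSED_PAR(me);
//!     for (;;) { // typically an endless loop
//!         BSP_ledToggle();           // hypothetical BSP call
//!         QP::QXThread::delay(100U); // block for 100 clock ticks
//!     }
//! }
//! QP::QXThread blinky(&blinky_run, 0U); // thread instance, tick rate 0
//! @endcode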
//!
//! time event to handle blocking timeouts
// friends...
noexcept
//! public constructor
//!
//! @details
//! Performs the first step of QXThread initialization by assigning the
//! thread-handler function and the tick rate at which it will handle
//! the timeouts.
//!
//! @param[in] handler the thread-handler function
//! @param[in] tickRate the ticking rate associated with this thread
//! for timeouts in this thread (see QXThread::delay() and
//! TICK_X())
//! @note
//! Must be called only ONCE before QXThread::start().
: QActive(Q_STATE_CAST(handler)),
m_timeEvt(this, static_cast<enum_t>(QXK::DELAY_SIG),
static_cast<std::uint_fast8_t>(tickRate))
m_state.act = nullptr; // mark as extended thread
const noexcept
//! obtain the time event
return &m_timeEvt;
noexcept
//! delay (block) the current extended thread for a specified # ticks
//!
//! @details
//! Blocking delay for the given number of clock ticks at the associated
//! tick rate.
//!
//! @param[in] nTicks number of clock ticks (at the associated rate)
//! to wait for the event to arrive.
//! @returns
//! 'true' if the delay expired and 'false' if it was cancelled
//! by a call to QXThread::delayCancel()
//!
//! @note
//! For the delay to work, the TICK_X() macro needs to be called
//! periodically at the associated clock tick rate.
//!
//! @sa
//! QP::QXThread, QTimeEvt::tick_()
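//!
//! A minimal sketch (called from within a hypothetical extended-thread
//! handler):
//! @code{.cpp}
//! if (!QP::QXThread::delay(10U)) { // block for 10 clock ticks
//!     // the delay was cancelled with QXThread::delayCancel()
//! }
//! @endcode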
QF_CRIT_STAT_
QF_CRIT_E_();
QXThread * const thr = QXK_PTR_CAST_(QXThread*, QXK_attr_.curr);
//! @pre this function must:
//! - NOT be called from an ISR;
//! - the number of ticks cannot be zero;
//! - be called from an extended thread;
//! - the thread must NOT be already blocked on any object.
Q_REQUIRE_ID(800, (!QXK_ISR_CONTEXT_())
&& (nTicks != 0U)
&& (thr != nullptr)
&& (thr->m_temp.obj == nullptr));
//! @pre also: the thread must NOT be holding a scheduler lock
Q_REQUIRE_ID(801, QXK_attr_.lockHolder != thr->m_prio);
// remember the blocking object
thr->m_temp.obj = QXK_PTR_CAST_(QMState const*, &thr->m_timeEvt);
thr->teArm_(static_cast<enum_t>(QXK::DELAY_SIG), nTicks);
thr->block_();
QF_CRIT_X_();
QF_CRIT_EXIT_NOP(); // BLOCK here
QF_CRIT_E_();
// the blocking object must be the time event
Q_ENSURE_ID(890, thr->m_temp.obj
== QXK_PTR_CAST_(QMState*, &thr->m_timeEvt));
thr->m_temp.obj = nullptr; // clear
QF_CRIT_X_();
// signal of zero means that the time event was posted without
// being canceled.
return (thr->m_timeEvt.sig == 0U);
noexcept
//! cancel the delay
//!
//! @details
//! Cancel the blocking delay and cause return from the QXThread::delay()
//! function.
//!
//! @returns
//! "true" if the thread was actually blocked on QXThread::delay() and
//! "false" otherwise.
QF_CRIT_STAT_
QF_CRIT_E_();
bool wasArmed;
if (m_temp.obj == QXK_PTR_CAST_(QMState*, &m_timeEvt)) {
wasArmed = teDisarm_();
unblock_();
}
else {
wasArmed = false;
}
QF_CRIT_X_();
return wasArmed;
noexcept
//! Get a message from the private message queue (block if no messages)
//!
//! @details
//! The QXThread::queueGet() operation allows the calling extended thread
//! to receive QP events (see QP::QEvt) directly into its own built-in
//! event queue from an ISR, basic thread (AO), or another extended thread.
//!
//! If QXThread::queueGet() is called when no events are present in the
//! thread's private event queue, the operation blocks the current
//! extended thread until either an event is received, or a user-specified
//! timeout expires.
//!
//! @param[in] nTicks number of clock ticks (at the associated rate)
//! to wait for the event to arrive. The value of
//! QP::QXTHREAD_NO_TIMEOUT indicates that no timeout
//! will occur and the queue will block indefinitely.
//! @returns
//! A pointer to the event. If the pointer is not nullptr, the event
//! was delivered. Otherwise, a nullptr return indicates that
//! the wait for the event has timed out.
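//!
//! A minimal sketch (called from within a hypothetical extended-thread
//! handler; processEvt() and the event-recycling policy shown are
//! assumptions of this sketch):
//! @code{.cpp}
//! QP::QEvt const * const e = QP::QXThread::queueGet(100U); // <= 100 ticks
//! if (e != nullptr) {    // event received before the timeout?
//!     processEvt(e);     // hypothetical application processing
//!     QP::QF::gc(e);     // recycle the event when done with it
//! }
//! @endcode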
QF_CRIT_STAT_
QF_CRIT_E_();
QXThread * const thr = QXTHREAD_CAST_(QXK_attr_.curr);
//! @pre this function must:
//! - NOT be called from an ISR;
//! - be called from an extended thread;
//! - the thread must NOT be already blocked on any object.
Q_REQUIRE_ID(500, (!QXK_ISR_CONTEXT_())
&& (thr != nullptr)
&& (thr->m_temp.obj == nullptr));
//! @pre also: the thread must NOT be holding a scheduler lock.
Q_REQUIRE_ID(501, QXK_attr_.lockHolder != thr->m_prio);
// is the queue empty? -- block and wait for event(s)
if (thr->m_eQueue.m_frontEvt == nullptr) {
// remember the blocking object (the thread's queue)
thr->m_temp.obj = QXK_PTR_CAST_(QMState*, &thr->m_eQueue);
thr->teArm_(static_cast<enum_t>(QXK::TIMEOUT_SIG), nTicks);
QF::readySet_.remove(
static_cast<std::uint_fast8_t>(thr->m_prio));
static_cast<void>(QXK_sched_()); // synchronous scheduling
QF_CRIT_X_();
QF_CRIT_EXIT_NOP(); // BLOCK here
QF_CRIT_E_();
// the blocking object must be this queue
Q_ASSERT_ID(510, thr->m_temp.obj ==
QXK_PTR_CAST_(QMState *, &thr->m_eQueue));
thr->m_temp.obj = nullptr; // clear
}
// is the queue not empty?
QEvt const *e;
if (thr->m_eQueue.m_frontEvt != nullptr) {
e = thr->m_eQueue.m_frontEvt; // remove from the front
// volatile into tmp
QEQueueCtr const nFree = thr->m_eQueue.m_nFree + 1U;
thr->m_eQueue.m_nFree = nFree; // update the number of free
// any events in the ring buffer?
if (nFree <= thr->m_eQueue.m_end) {
// remove event from the tail
thr->m_eQueue.m_frontEvt =
thr->m_eQueue.m_ring[thr->m_eQueue.m_tail];
if (thr->m_eQueue.m_tail == 0U) {
thr->m_eQueue.m_tail = thr->m_eQueue.m_end; // wrap
}
// advance the tail (counter clockwise)
thr->m_eQueue.m_tail = (thr->m_eQueue.m_tail - 1U);
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_GET, thr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(e->sig); // the signal of this event
QS_OBJ_PRE_(&thr); // this active object
QS_2U8_PRE_(e->poolId_, e->refCtr_); // poolID & ref Count
QS_EQC_PRE_(nFree); // number of free entries
QS_END_NOCRIT_PRE_()
}
else {
thr->m_eQueue.m_frontEvt = nullptr; // the queue becomes empty
// all entries in the queue must be free (+1 for frontEvt)
Q_ASSERT_ID(520, nFree == (thr->m_eQueue.m_end + 1U));
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_GET_LAST, thr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_SIG_PRE_(e->sig); // the signal of this event
QS_OBJ_PRE_(&thr); // this active object
QS_2U8_PRE_(e->poolId_, e->refCtr_); // poolID & ref Count
QS_END_NOCRIT_PRE_()
}
}
else { // the queue is still empty -- the timeout must have fired
e = nullptr;
}
QF_CRIT_X_();
return e;
override
//! Overrides QHsm::init()
Q_UNUSED_PAR(e);
Q_UNUSED_PAR(qs_id);
Q_ERROR_ID(110);
override
//! Overrides QHsm::init()
Q_UNUSED_PAR(qs_id);
Q_ERROR_ID(111);
override
//! Overrides QHsm::dispatch()
Q_UNUSED_PAR(e);
Q_UNUSED_PAR(qs_id);
Q_ERROR_ID(120);
override
//! Starts execution of an extended thread and registers the thread
//! with the framework
//!
//! @details
//! Starts execution of an extended thread and registers it with the
//! framework. The extended thread becomes ready-to-run immediately and
//! is scheduled if the QXK is already running.
//!
//! @param[in] prioSpec priority specification at which to start the
//! extended thread
//! @param[in] qSto pointer to the storage for the ring buffer of
//! the event queue. This could be NULL if this
//! extended thread does not use the built-in
//! event queue.
//! @param[in] qLen length of the event queue [in events],
//! or zero if queue not used
//! @param[in] stkSto pointer to the stack storage (must be provided)
//! @param[in] stkSize stack size [in bytes] (must not be zero)
//! @param[in] par pointer to an extra parameter (might be NULL)
//!
//! @usage
//! The following example shows starting an extended thread:
//! @include qxk_start.cpp
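//!
//! A minimal sketch (hypothetical names, not the qxk_start.cpp example
//! referenced above; 'blinky' is an assumed QXThread instance):
//! @code{.cpp}
//! static QP::QEvt const *blinkyQSto[10]; // event-queue buffer (optional)
//! static std::uint64_t blinkyStack[64];  // stack storage (must be provided)
//! blinky.start(5U,                       // QF-priority 5 (no pre-thre)
//!              blinkyQSto, Q_DIM(blinkyQSto),
//!              blinkyStack, sizeof(blinkyStack));
//! @endcode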
Q_UNUSED_PAR(par);
//! @pre this function must:
//! - NOT be called from an ISR;
//! - the stack storage must be provided;
//! - the thread must be instantiated (see #QXThread).
//! - preemption-threshold is NOT provided (because QXK kernel
//! does not support preemption-threshold scheduling)
Q_REQUIRE_ID(200, (!QXK_ISR_CONTEXT_())
&& (stkSto != nullptr)
&& (stkSize != 0U)
&& (m_state.act == nullptr)
&& ((prioSpec & 0xFF00U) == 0U));
// is storage for the queue buffer provided?
if (qSto != nullptr) {
m_eQueue.init(qSto, qLen);
}
// extended threads provide their thread function in place of
// the top-most initial transition 'm_temp.act'
QXK_stackInit_(this, m_temp.thr, stkSto, stkSize);
m_prio = static_cast<std::uint8_t>(prioSpec & 0xFFU); // QF-prio.
m_pthre = 0U; // preemption-threshold NOT used
register_(); // make QF aware of this AO
// the new thread is not blocked on any object
m_temp.obj = nullptr;
QF_CRIT_STAT_
QF_CRIT_E_();
// extended-thread becomes ready immediately
QF::readySet_.insert(static_cast<std::uint_fast8_t>(m_prio));
// see if this thread needs to be scheduled in case QXK is running
if (QXK_attr_.lockCeil <= QF_MAX_ACTIVE) {
static_cast<void>(QXK_sched_()); // synchronous scheduling
}
QF_CRIT_X_();
override
//! Overloaded start function (no initialization event)
this->start(prioSpec, qSto, qLen, stkSto, stkSize, nullptr);
noexcept override
//! Posts an event `e` directly to the event queue of the extended
//! thread using the First-In-First-Out (FIFO) policy
//!
//! @details
//! Extended threads can be configured (in QXThread::start()) to have
//! a private event queue. In that case, QP events (see QP::QEvt) can
//! be asynchronously posted or published to the extended thread.
//! The thread can wait (and block) on its queue and then it can
//! process the delivered event.
//!
//! @param[in] e pointer to the event to be posted
//! @param[in] margin number of required free slots in the queue
//! after posting the event. The special value
//! QF::NO_MARGIN means that this function will
//! assert if posting fails.
//! @param[in] sender pointer to a sender object (used in QS only)
//!
//! @returns
//! 'true' (success) if the posting succeeded (with the provided margin)
//! and 'false' (failure) when the posting fails.
//!
//! @attention
//! Should be called only via the macro POST() or POST_X().
//!
//! @note
//! The QF::NO_MARGIN value of the `margin` parameter is special and
//! denotes the situation when the post() operation is assumed to succeed
//! (event delivery guarantee). An assertion fires when the event cannot
//! be delivered in this case.
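//!
//! A minimal sketch of posting through the macros mentioned above
//! (hypothetical sender code; MY_SIG, extThread and 'me' are assumed to
//! be defined by the application):
//! @code{.cpp}
//! extThread.POST(Q_NEW(QP::QEvt, MY_SIG), me); // assert if posting fails
//! bool const ok = extThread.POST_X(Q_NEW(QP::QEvt, MY_SIG),
//!                                  2U, me); // require >= 2 free slots
//! @endcode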
QF_CRIT_STAT_
QS_TEST_PROBE_DEF(&QXThread::post_)
// is it the private time event?
bool status;
if (e == &m_timeEvt) {
QF_CRIT_E_();
// the private time event is disarmed and not in any queue,
// so it is safe to change its signal. The signal of 0 means
// that the time event has expired.
m_timeEvt.sig = 0U;
unblock_();
QF_CRIT_X_();
status = true;
}
// is the event queue provided?
else if (m_eQueue.m_end != 0U) {
//! @pre event pointer must be valid
Q_REQUIRE_ID(300, e != nullptr);
QF_CRIT_E_();
QEQueueCtr nFree = m_eQueue.m_nFree; // get volatile into temporary
// test-probe#1 for faking queue overflow
QS_TEST_PROBE_ID(1,
nFree = 0U;
)
if (margin == QF::NO_MARGIN) {
if (nFree > 0U) {
status = true; // can post
}
else {
status = false; // cannot post
Q_ERROR_CRIT_(310); // must be able to post the event
}
}
else if (nFree > static_cast<QEQueueCtr>(margin)) {
status = true; // can post
}
else {
status = false; // cannot post, but don't assert
}
// is it a dynamic event?
if (e->poolId_ != 0U) {
QEvt_refCtr_inc_(e); // increment the reference counter
}
if (status) { // can post the event?
--nFree; // one free entry just used up
m_eQueue.m_nFree = nFree; // update the volatile
if (m_eQueue.m_nMin > nFree) {
m_eQueue.m_nMin = nFree; // update minimum so far
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_POST, m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(sender); // the sender object
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this active object
QS_2U8_PRE_(e->poolId_, e->refCtr_); // poolID & refCtr
QS_EQC_PRE_(nFree); // number of free entries
QS_EQC_PRE_(m_eQueue.m_nMin); // min number of free entries
QS_END_NOCRIT_PRE_()
// queue empty?
if (m_eQueue.m_frontEvt == nullptr) {
m_eQueue.m_frontEvt = e; // deliver event directly
// is this thread blocked on the queue?
if (m_temp.obj == QXK_PTR_CAST_(QMState*, &m_eQueue)) {
static_cast<void>(teDisarm_());
QF::readySet_.insert(
static_cast<std::uint_fast8_t>(m_prio));
if (!QXK_ISR_CONTEXT_()) {
static_cast<void>(QXK_sched_());
}
}
}
// queue is not empty, insert event into the ring-buffer
else {
// insert event into the ring buffer (FIFO)
m_eQueue.m_ring[m_eQueue.m_head] = e;
// need to wrap the head counter?
if (m_eQueue.m_head == 0U) {
m_eQueue.m_head = m_eQueue.m_end; // wrap around
}
// advance the head (counter clockwise)
m_eQueue.m_head = (m_eQueue.m_head - 1U);
}
QF_CRIT_X_();
}
else { // cannot post the event
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_POST_ATTEMPT, m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(sender); // the sender object
QS_SIG_PRE_(e->sig); // the signal of the event
QS_OBJ_PRE_(this); // this active object (recipient)
QS_2U8_PRE_(e->poolId_, e->refCtr_); // poolID & ref Count
QS_EQC_PRE_(nFree); // number of free entries
QS_EQC_PRE_(margin); // margin
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
#if (QF_MAX_EPOOL > 0U)
QF::gc(e); // recycle the event to avoid a leak
#endif
}
}
else { // the queue is not available
#if (QF_MAX_EPOOL > 0U)
QF::gc(e); // make sure the event is not leaked
#endif
status = false;
Q_ERROR_ID(320); // this extended thread cannot accept events
}
return status;
noexcept override
//! Posts an event directly to the event queue of the extended thread
//! using the Last-In-First-Out (LIFO) policy
//!
//! @details
//! Last-In-First-Out (LIFO) policy is not supported for extended threads.
//!
//! @param[in] e pointer to the event to post to the queue
//!
//! @sa
//! QXThread::post_(), QActive::postLIFO_()
Q_UNUSED_PAR(e);
Q_ERROR_ID(410);
const noexcept
//! Block the extended thread
//!
//! @details
//! Internal implementation of blocking the given extended thread.
//!
//! @note
//! Must be called from within a critical section
//! @pre the thread holding the lock cannot block!
Q_REQUIRE_ID(600, (QXK_attr_.lockHolder != m_prio));
QF::readySet_.remove(static_cast<std::uint_fast8_t>(m_prio));
static_cast<void>(QXK_sched_()); // synchronous scheduling
const noexcept
//! Unblock the extended thread
//!
//! @details
//! Internal implementation of unblocking the given extended thread.
//!
//! @note
//! must be called from within a critical section
QF::readySet_.insert(static_cast<std::uint_fast8_t>(m_prio));
if ((!QXK_ISR_CONTEXT_()) // not inside ISR?
&& (QActive::registry_[0] != nullptr)) // kernel started?
{
static_cast<void>(QXK_sched_()); // synchronous scheduling
}
noexcept
//! Arm the private time event
//!
//! @details
//! Internal implementation of arming the private time event for
//! a given timeout at a given system tick rate.
//!
//! @note
//! Must be called from within a critical section
//! @pre the time event must be unused
Q_REQUIRE_ID(700, m_timeEvt.m_ctr == 0U);
m_timeEvt.sig = static_cast<QSignal>(sig);
if (nTicks != QXTHREAD_NO_TIMEOUT) {
m_timeEvt.m_ctr = static_cast<QTimeEvtCtr>(nTicks);
m_timeEvt.m_interval = 0U;
// is the time event unlinked?
// NOTE: For the duration of a single clock tick of the specified tick
// rate a time event can be disarmed and yet still linked in the list,
// because un-linking is performed exclusively in QTimeEvt::tickX().
if (static_cast<std::uint8_t>(m_timeEvt.refCtr_ & TE_IS_LINKED) == 0U)
{
std::uint_fast8_t const tickRate =
static_cast<std::uint_fast8_t>(m_timeEvt.refCtr_);
// mark as linked
m_timeEvt.refCtr_ = static_cast<std::uint8_t>(
m_timeEvt.refCtr_ | TE_IS_LINKED);
// The time event is initially inserted into the separate
// "freshly armed" list based on timeEvtHead_[tickRate].act.
// Only later, inside QTimeEvt::tick_(), the "freshly armed"
// list is appended to the main list of armed time events based on
// timeEvtHead_[tickRate].next. Again, this is to keep any
// changes to the main list exclusively inside QTimeEvt::tick_().
//
m_timeEvt.m_next
= QXK_PTR_CAST_(QTimeEvt*,
QTimeEvt::timeEvtHead_[tickRate].m_act);
QTimeEvt::timeEvtHead_[tickRate].m_act = &m_timeEvt;
}
}
noexcept
//! Disarm the private time event
//!
//! @details
//! Internal implementation of disarming the private time event.
//!
//! @note
//! Must be called from within a critical section
bool wasArmed;
// is the time evt running?
if (m_timeEvt.m_ctr != 0U) {
wasArmed = true;
// schedule removal from list
m_timeEvt.m_ctr = 0U;
}
// the time event was already automatically disarmed
else {
wasArmed = false;
}
return wasArmed;
//! Counting Semaphore of the QXK preemptive kernel
//!
//! @details
//! QP::QXSemaphore is a blocking mechanism intended primarily for signaling
//! @ref QP::QXThread "extended threads". The semaphore is initialized with
//! the maximum count (see QP::QXSemaphore::init()), which allows you to
//! create a binary semaphore (when the maximum count is 1) or a
//! counting semaphore (when the maximum count is greater than 1).
//!
//! @usage
//! The following example illustrates how to instantiate and use the semaphore
//! in your application.
//! @include qxk_sema.cpp
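//!
//! A minimal sketch (hypothetical application code, not the qxk_sema.cpp
//! example referenced above):
//! @code{.cpp}
//! static QP::QXSemaphore sema;
//! sema.init(0U, 1U); // binary semaphore: count=0, max_count=1
//!
//! // in an extended thread:
//! if (sema.wait(100U)) { // wait up to 100 clock ticks
//!     // the semaphore was signaled before the timeout
//! }
//!
//! // in an ISR, basic thread (AO), or another extended thread:
//! static_cast<void>(sema.signal());
//! @endcode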
//!
//! set of extended threads waiting on this semaphore
//! semaphore up-down counter
//! maximum value of the semaphore counter
noexcept
//! initialize the counting semaphore
//!
//! @details
//! Initializes a semaphore with the specified count and maximum count.
//! If the semaphore is used for resource sharing, both the initial count
//! and maximum count should be set to the number of identical resources
//! guarded by the semaphore. If the semaphore is used as a signaling
//! mechanism, the initial count should be set to 0 and the maximum count to 1
//! (binary semaphore).
//!
//! @param[in] count initial value of the semaphore counter
//! @param[in] max_count maximum value of the semaphore counter.
//! The purpose of the max_count is to limit the counter
//! so that the semaphore cannot unblock more times than
//! the maximum.
//! @note
//! QXSemaphore::init() must be called **before** the semaphore can be
//! used (signaled or waited on).
//! @pre max_count must be greater than zero
Q_REQUIRE_ID(100, max_count > 0U);
m_count = static_cast<std::uint16_t>(count);
m_max_count = static_cast<std::uint16_t>(max_count);
m_waitSet.setEmpty();
noexcept
//! wait (block) on the semaphore
//!
//! @details
//! When an extended thread calls QXSemaphore::wait() and the value of the
//! semaphore counter is greater than 0, QXSemaphore::wait() decrements the
//! semaphore counter and returns (true) to its caller. However, if the
//! value of the semaphore counter is 0, the function places the calling
//! thread in the waiting list for the semaphore. The thread waits until
//! the semaphore is signaled by calling QXSemaphore::signal(), or the
//! specified timeout expires. If the semaphore is signaled before the
//! timeout expires, QXK resumes the highest-priority extended thread
//! waiting for the semaphore.
//!
//! @param[in] nTicks number of clock ticks (at the associated rate)
//! to wait for the semaphore. The value of
//! QP::QXTHREAD_NO_TIMEOUT indicates that no
//! timeout will occur and the semaphore will wait
//! indefinitely.
//! @returns
//! true if the semaphore has been signaled, and false if the timeout
//! occurred.
//!
//! @note
//! Multiple extended threads can wait for a given semaphore.
QF_CRIT_STAT_
QF_CRIT_E_();
// volatile into temp.
QXThread * const curr = QXK_PTR_CAST_(QXThread*, QXK_attr_.curr);
//! @pre this function must:
//! - NOT be called from an ISR;
//! - the semaphore must be initialized
//! - be called from an extended thread;
//! - the thread must NOT be already blocked on any object.
Q_REQUIRE_ID(200, (!QXK_ISR_CONTEXT_()) // can't wait inside an ISR
&& (m_max_count > 0U)
&& (curr != nullptr)
&& (curr->m_temp.obj == nullptr));
//! @pre also: the thread must NOT be holding a scheduler lock.
Q_REQUIRE_ID(201, QXK_attr_.lockHolder != curr->m_prio);
bool signaled = true; // assume that the semaphore will be signaled
if (m_count > 0U) {
m_count = m_count - 1U; // semaphore taken: decrement
QS_BEGIN_NOCRIT_PRE_(QS_SEM_TAKE, curr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this semaphore
QS_2U8_PRE_(curr->m_prio, m_count);
QS_END_NOCRIT_PRE_()
}
else {
std::uint_fast8_t const p =
static_cast<std::uint_fast8_t>(curr->m_prio);
// remove the curr prio from the ready set (will block)
// and insert to the waiting set on this semaphore
QF::readySet_.remove(p);
m_waitSet.insert(p);
// remember the blocking object (this semaphore)
curr->m_temp.obj = QXK_PTR_CAST_(QMState*, this);
curr->teArm_(static_cast<enum_t>(QXK::TIMEOUT_SIG), nTicks);
QS_BEGIN_NOCRIT_PRE_(QS_SEM_BLOCK, curr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this semaphore
QS_2U8_PRE_(curr->m_prio, m_count);
QS_END_NOCRIT_PRE_()
// schedule the next thread if multitasking started
static_cast<void>(QXK_sched_()); // synchronous scheduling
QF_CRIT_X_();
QF_CRIT_EXIT_NOP(); // BLOCK here !!!
QF_CRIT_E_(); // AFTER unblocking...
// the blocking object must be this semaphore
Q_ASSERT_ID(240, curr->m_temp.obj
== QXK_PTR_CAST_(QMState*, this));
// did the blocking time-out? (signal of zero means that it did)
if (curr->m_timeEvt.sig == 0U) {
if (m_waitSet.hasElement(p)) { // still waiting?
m_waitSet.remove(p); // remove unblocked thread
signaled = false; // the semaphore was NOT signaled
// semaphore NOT taken: do NOT decrement the count
}
else { // semaphore was both signaled and timed out
m_count = m_count - 1U; // semaphore taken: decrement
}
}
else { // blocking did NOT time out
// the thread must NOT be waiting on this semaphore
Q_ASSERT_ID(250, !m_waitSet.hasElement(p));
m_count = m_count - 1U; // semaphore taken: decrement
}
curr->m_temp.obj = nullptr; // clear blocking obj.
}
QF_CRIT_X_();
return signaled;
noexcept
//! try wait on the semaphore (non-blocking)
//!
//! @details
//! This operation checks if the semaphore counter is greater than 0,
//! in which case the counter is decremented.
//!
//! @returns
//! 'true' if the semaphore count was available and 'false' if it was NOT.
//!
//! @note
//! This function can be called from any context, including ISRs and
//! basic threads (active objects).
QF_CRIT_STAT_
QF_CRIT_E_();
//! @pre the semaphore must be initialized
Q_REQUIRE_ID(300, m_max_count > 0U);
#ifdef Q_SPY
// volatile into temp.
QActive const * const curr = QXK_PTR_CAST_(QActive*, QXK_attr_.curr);
#endif // Q_SPY
bool isAvailable;
// is the semaphore available?
if (m_count > 0U) {
m_count = m_count - 1U; // semaphore signaled: decrement
isAvailable = true;
QS_BEGIN_NOCRIT_PRE_(QS_SEM_TAKE, curr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this semaphore
QS_2U8_PRE_(curr->m_prio, m_count);
QS_END_NOCRIT_PRE_()
}
else { // the semaphore is NOT available (would block)
isAvailable = false;
QS_BEGIN_NOCRIT_PRE_(QS_SEM_BLOCK_ATTEMPT, curr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this semaphore
QS_2U8_PRE_(curr->m_prio,
m_count);
QS_END_NOCRIT_PRE_()
}
QF_CRIT_X_();
return isAvailable;
noexcept
//! signal (unblock) the semaphore
//!
//! @details
//! If the semaphore counter value is 0 or more, it is incremented, and
//! this function returns to its caller. If the extended threads are
//! waiting for the semaphore to be signaled, QXSemaphore::signal()
//! removes the highest-priority thread waiting for the semaphore from
//! the waiting list and makes this thread ready-to-run. The QXK
//! scheduler is then called to determine if the awakened thread is now
//! the highest-priority thread that is ready-to-run.
//!
//! @returns
//! 'true' when the semaphore gets signaled and 'false' when the
//! semaphore count exceeded the maximum.
//!
//! @note
//! A semaphore can be signaled from many places, including from ISRs,
//! basic threads (AOs), and extended threads.
//! @pre the semaphore must be initialized
Q_REQUIRE_ID(400, m_max_count > 0U);
QF_CRIT_STAT_
QF_CRIT_E_();
bool signaled = true; // assume that the semaphore will be signaled
if (m_count < m_max_count) {
m_count = m_count + 1U; // semaphore signaled: increment
#ifdef Q_SPY
// volatile into temp.
QActive const * const curr = QXK_PTR_CAST_(QActive*, QXK_attr_.curr);
#endif // Q_SPY
QS_BEGIN_NOCRIT_PRE_(QS_SEM_SIGNAL, curr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this semaphore
QS_2U8_PRE_(curr->m_prio,
m_count);
QS_END_NOCRIT_PRE_()
if (m_waitSet.notEmpty()) {
// find the highest-priority thread waiting on this semaphore
std::uint_fast8_t const p = m_waitSet.findMax();
QXThread * const thr =
QXK_PTR_CAST_(QXThread*, QActive::registry_[p]);
// assert that the thread:
// - must be registered in QF;
// - must be extended; and
// - must be blocked on this semaphore;
Q_ASSERT_ID(410, (thr != nullptr)
&& (thr->m_osObject != nullptr)
&& (thr->m_temp.obj
== QXK_PTR_CAST_(QMState*, this)));
// disarm the internal time event
static_cast<void>(thr->teDisarm_());
// make the thread ready to run and remove from the wait-list
QF::readySet_.insert(p);
m_waitSet.remove(p);
QS_BEGIN_NOCRIT_PRE_(QS_SEM_TAKE, thr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this semaphore
QS_2U8_PRE_(thr->m_prio, m_count);
QS_END_NOCRIT_PRE_()
if (!QXK_ISR_CONTEXT_()) { // not inside ISR?
static_cast<void>(QXK_sched_()); // synchronous scheduling
}
}
}
else {
signaled = false; // semaphore NOT signaled
}
QF_CRIT_X_();
return signaled;
//! Blocking, Priority-Ceiling Mutex of the QXK preemptive kernel
//!
//! @details
//! QP::QXMutex is a blocking mutual exclusion mechanism that can also apply
//! the **priority-ceiling protocol** to avoid unbounded priority inversion
//! (if initialized with a non-zero ceiling priority, see QXMutex::init()).
//! In that case, QP::QXMutex requires its own unique QP priority level,
//! which cannot be used by any thread or any other QP::QXMutex.
//! If initialized with a preemption-ceiling of zero, QXMutex does **not**
//! use the priority-ceiling protocol and does not require a unique QP
//! priority (see QXMutex::init()).
//! QP::QXMutex is **recursive** (re-entrant), which means that it can be
//! locked multiple times (up to 255 levels) by the *same* thread without
//! causing deadlock.<br>
//!
//! QP::QXMutex is primarily intended for the @ref QP::QXThread
//! "extended (blocking) threads", but can also be used by the
//! @ref QP::QActive "basic threads" through the non-blocking
//! QXMutex::tryLock() API.
//!
//! @note
//! QP::QXMutex should be used in situations when at least one of the extended
//! threads contending for the mutex blocks while holding the mutex (between
//! the QXMutex::lock() and QXMutex::unlock() operations). If no blocking is
//! needed while holding the mutex, the more efficient non-blocking mechanism
//! of @ref srs_qxk_schedLock() "selective QXK scheduler locking" should be
//! used instead. @ref srs_qxk_schedLock() "Selective scheduler locking" is
//! available for both @ref QP::QActive "basic threads" and @ref QP::QXThread
//! "extended threads", so it is applicable to situations where resources
//! are shared among all these threads.
//!
//! @usage
//! The following example illustrates how to instantiate and use the mutex
//! in your application.
//! @include qxk_mutex.cpp
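//!
//! A minimal sketch (hypothetical application code, not the qxk_mutex.cpp
//! example referenced above; accessSharedResource() is an assumed
//! application function):
//! @code{.cpp}
//! static QP::QXMutex mutex;
//! mutex.init(6U); // ceiling-priority 6 (unused by any thread or mutex)
//!
//! // in an extended thread:
//! if (mutex.lock(100U)) {     // lock, waiting up to 100 clock ticks
//!     accessSharedResource(); // hypothetical critical operation
//!     mutex.unlock();
//! }
//! @endcode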
//!
//! set of extended-threads waiting on this mutex
//! default constructor
: QActive(Q_STATE_CAST(0))
noexcept
//! initialize the QXK priority-ceiling mutex QP::QXMutex
//!
//! @details
//! Initialize the QXK priority ceiling mutex.
//!
//! @param[in] prioSpec the priority specification for the mutex
//! (See also QP::QPrioSpec). This value might
//! also be zero.
//! @note
//! `prioSpec == 0` means that the priority-ceiling protocol shall **not**
//! be used by this mutex. Such mutex will **not** change (boost) the
//! priority of the holding threads.<br>
//!
//! Conversely, `prioSpec != 0` means that the priority-ceiling protocol
//! shall be used by this mutex. Such mutex **will** temporarily boost
//! the priority and priority-threshold of the holding thread to the
//! priority specification in `prioSpec` (see QP::QPrioSpec).
//!
//! @usage
//! @include qxk_mutex.cpp
//! @pre preemption-threshold must not be used
Q_REQUIRE_ID(100, (prioSpec & 0xFF00U) == 0U);
m_prio = static_cast<std::uint8_t>(prioSpec & 0xFFU);
m_pthre = 0U; // preemption-threshold not used
if (prioSpec != 0U) { // priority-ceiling protocol used?
register_(); // register this mutex as AO
}
noexcept
//! try to lock the QXK priority-ceiling mutex QP::QXMutex
//!
//! @details
//! Try to lock the QXK priority ceiling mutex QP::QXMutex.
//!
//! @returns
//! 'true' if the mutex was successfully locked and 'false' if the mutex
//! was unavailable and was NOT locked.
//!
//! @note
//! This function **can** be called from both basic threads (active
//! objects) and extended threads.
//!
//! @note
//! The mutex locks are allowed to nest, meaning that the same extended
//! thread can lock the same mutex multiple times (< 255). However, each
//! successful call to QXMutex::tryLock() must be balanced by the
//! matching call to QXMutex::unlock().
QF_CRIT_STAT_
QF_CRIT_E_();
QActive *curr = QXK_attr_.curr;
if (curr == nullptr) { // called from a basic thread?
curr = registry_[QXK_attr_.actPrio];
}
//! @pre this function must:
//! - NOT be called from an ISR;
//! - the calling thread must be valid;
//! - the mutex-priority must be in range
Q_REQUIRE_ID(300, (!QXK_ISR_CONTEXT_()) // don't call from an ISR!
&& (curr != nullptr) // current thread must be valid
&& (m_prio <= QF_MAX_ACTIVE));
//! @pre also: the thread must NOT be holding a scheduler lock.
Q_REQUIRE_ID(301, QXK_attr_.lockHolder != curr->m_prio);
// is the mutex available?
if (m_eQueue.m_nFree == 0U) {
m_eQueue.m_nFree = 1U; // mutex lock nesting
//! @pre also: the newly locked mutex must have no holder yet
Q_REQUIRE_ID(302, m_thread == nullptr);
// set the new mutex holder to the curr thread and
// save the thread's prio/pthre in the mutex
// NOTE: reuse the otherwise unused eQueue data member.
m_thread = curr;
m_eQueue.m_head = static_cast<QEQueueCtr>(curr->m_prio);
m_eQueue.m_tail = static_cast<QEQueueCtr>(curr->m_pthre);
QS_BEGIN_NOCRIT_PRE_(QS_MTX_LOCK, curr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this mutex
QS_2U8_PRE_(static_cast<std::uint8_t>(m_eQueue.m_head),
static_cast<std::uint8_t>(m_eQueue.m_nFree));
QS_END_NOCRIT_PRE_()
if (m_prio != 0U) { // priority-ceiling protocol used?
// the holder priority must be lower than that of the mutex
// and the priority slot must be occupied by this mutex
Q_ASSERT_ID(310, (curr->m_prio < m_prio)
&& (registry_[m_prio] == this));
// remove the thread's original prio from the ready set
// and insert the mutex's prio into the ready set
QF::readySet_.remove(
static_cast<std::uint_fast8_t>(m_eQueue.m_head));
QF::readySet_.insert(
static_cast<std::uint_fast8_t>(m_prio));
// put the thread into the AO registry in place of the mutex
registry_[m_prio] = curr;
// set thread's prio/pthre to that of the mutex
curr->m_prio = m_prio;
curr->m_pthre = m_pthre;
}
}
// is the mutex locked by this thread already (nested locking)?
else if (m_thread == curr) {
// the nesting level must not exceed the specified limit
Q_ASSERT_ID(320, m_eQueue.m_nFree < 0xFFU);
m_eQueue.m_nFree = m_eQueue.m_nFree + 1U; // lock one more level
QS_BEGIN_NOCRIT_PRE_(QS_MTX_LOCK, curr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this mutex
QS_2U8_PRE_(static_cast<std::uint8_t>(m_eQueue.m_head),
static_cast<std::uint8_t>(m_eQueue.m_nFree));
QS_END_NOCRIT_PRE_()
}
else { // the mutex is already locked by a different thread
if (m_prio != 0U) { // priority-ceiling protocol used?
// the prio slot must be occupied by the thr. holding the mutex
Q_ASSERT_ID(340, registry_[m_prio]
== QXK_PTR_CAST_(QActive *, m_thread));
}
QS_BEGIN_NOCRIT_PRE_(QS_MTX_BLOCK_ATTEMPT, curr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this mutex
QS_2U8_PRE_(static_cast<std::uint8_t>(m_eQueue.m_head),
curr->m_prio); // trying thread prio
QS_END_NOCRIT_PRE_()
curr = nullptr; // means that mutex is NOT available
}
QF_CRIT_X_();
return curr != nullptr;
noexcept
//! lock the QXK priority-ceiling mutex QP::QXMutex
//!
//! @details
//! Lock the QXK priority ceiling mutex QP::QXMutex.
//!
//! @param[in] nTicks number of clock ticks (at the associated rate)
//! to wait for the mutex. The value of
//! QXTHREAD_NO_TIMEOUT indicates that no timeout will
//! occur and the mutex could block indefinitely.
//! @returns
//! 'true' if the mutex has been acquired and 'false' if a timeout
//! occurred.
//!
//! @note
//! The mutex locks are allowed to nest, meaning that the same extended
//! thread can lock the same mutex multiple times (< 255). However,
//! each call to QXMutex::lock() must be balanced by the matching call to
//! QXMutex::unlock().
//!
//! @usage
//! @include qxk_mutex.cpp
QF_CRIT_STAT_
QF_CRIT_E_();
QXThread * const curr = QXK_PTR_CAST_(QXThread*, QXK_attr_.curr);
//! @pre this function must:
//! - NOT be called from an ISR;
//! - be called from an extended thread;
//! - the mutex-priority must be in range
//! - the thread must NOT be already blocked on any object.
Q_REQUIRE_ID(200, (!QXK_ISR_CONTEXT_()) // don't call from an ISR!
&& (curr != nullptr) // current thread must be extended
&& (m_prio <= QF_MAX_ACTIVE)
&& (curr->m_temp.obj == nullptr)); // not blocked
//! @pre also: the thread must NOT be holding a scheduler lock
Q_REQUIRE_ID(201, QXK_attr_.lockHolder != curr->m_prio);
// is the mutex available?
bool locked = true; // assume that the mutex will be locked
if (m_eQueue.m_nFree == 0U) {
m_eQueue.m_nFree = 1U; // mutex lock nesting
//! @pre also: the newly locked mutex must have no holder yet
Q_REQUIRE_ID(202, m_thread == nullptr);
// set the new mutex holder to the curr thread and
// save the thread's prio/pthre in the mutex
// NOTE: reuse the otherwise unused eQueue data member.
m_thread = curr;
m_eQueue.m_head = static_cast<QEQueueCtr>(curr->m_prio);
m_eQueue.m_tail = static_cast<QEQueueCtr>(curr->m_pthre);
QS_BEGIN_NOCRIT_PRE_(QS_MTX_LOCK, curr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this mutex
QS_2U8_PRE_(static_cast<std::uint8_t>(m_eQueue.m_head),
static_cast<std::uint8_t>(m_eQueue.m_nFree));
QS_END_NOCRIT_PRE_()
if (m_prio != 0U) { // priority-ceiling protocol used?
// the holder priority must be lower than that of the mutex
// and the priority slot must be occupied by this mutex
Q_ASSERT_ID(210, (curr->m_prio < m_prio)
&& (registry_[m_prio] == this));
// remove the thread's original prio from the ready set
// and insert the mutex's prio into the ready set
QF::readySet_.remove(
static_cast<std::uint_fast8_t>(m_eQueue.m_head));
QF::readySet_.insert(static_cast<std::uint_fast8_t>(m_prio));
// put the thread into the AO registry in place of the mutex
registry_[m_prio] = curr;
// set thread's prio/pthre to that of the mutex
curr->m_prio = m_prio;
curr->m_pthre = m_pthre;
}
}
// is the mutex locked by this thread already (nested locking)?
else if (m_thread == curr) {
// a nesting level beyond this arbitrary but high limit
// most likely means cyclic or recursive locking of the mutex.
Q_ASSERT_ID(220, m_eQueue.m_nFree < 0xFFU);
m_eQueue.m_nFree = m_eQueue.m_nFree + 1U; // lock one more level
QS_BEGIN_NOCRIT_PRE_(QS_MTX_LOCK, curr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this mutex
QS_2U8_PRE_(static_cast<std::uint8_t>(m_eQueue.m_head),
static_cast<std::uint8_t>(m_eQueue.m_nFree));
QS_END_NOCRIT_PRE_()
}
else { // the mutex is already locked by a different thread
// the mutex holder must be valid
Q_ASSERT_ID(230, m_thread != nullptr);
if (m_prio != 0U) { // priority-ceiling protocol used?
// the prio slot must be occupied by the thr. holding the mutex
Q_ASSERT_ID(240, registry_[m_prio]
== QXK_PTR_CAST_(QActive *, m_thread));
}
// remove the curr thread's prio from the ready set (will block)
// and insert it to the waiting set on this mutex
std::uint_fast8_t const p =
static_cast<std::uint_fast8_t>(curr->m_prio);
QF::readySet_.remove(p);
m_waitSet.insert(p);
// set the blocking object (this mutex)
curr->m_temp.obj = QXK_PTR_CAST_(QMState*, this);
curr->teArm_(static_cast<enum_t>(QXK::TIMEOUT_SIG), nTicks);
QS_BEGIN_NOCRIT_PRE_(QS_MTX_BLOCK, curr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this mutex
QS_2U8_PRE_(static_cast<std::uint8_t>(m_eQueue.m_head),
curr->m_prio);
QS_END_NOCRIT_PRE_()
// schedule the next thread if multitasking started
static_cast<void>(QXK_sched_()); // synchronous scheduling
QF_CRIT_X_();
QF_CRIT_EXIT_NOP(); // BLOCK here !!!
// AFTER unblocking...
QF_CRIT_E_();
// the blocking object must be this mutex
Q_ASSERT_ID(240, curr->m_temp.obj
== QXK_PTR_CAST_(QMState*, this));
// did the blocking time-out? (signal of zero means that it did)
if (curr->m_timeEvt.sig == 0U) {
if (m_waitSet.hasElement(p)) { // still waiting?
m_waitSet.remove(p); // remove unblocked thread
locked = false; // the mutex was NOT locked
}
}
else { // blocking did NOT time out
// the thread must NOT be waiting on this mutex
Q_ASSERT_ID(250, !m_waitSet.hasElement(p));
}
curr->m_temp.obj = nullptr; // clear blocking obj.
}
QF_CRIT_X_();
return locked;
noexcept
//! unlock the QXK priority-ceiling mutex QP::QXMutex
//!
//! @details
//! Unlock the QXK priority ceiling mutex.
//!
//! @note
//! This function **can** be called from both basic threads (active
//! objects) and extended threads.
//!
//! @note
//! The mutex locks are allowed to nest, meaning that the same extended
//! thread can lock the same mutex multiple times (< 255). However, each
//! call to QXMutex::lock() or a *successful* call to QXMutex::tryLock()
//! must be balanced by the matching call to QXMutex::unlock().
//!
//! @usage
//! @include qxk_mutex.cpp
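//!
//! @par Nesting sketch (illustrative only, not part of QP)
//! Because locks nest, every lock must be balanced by an unlock performed
//! by the same extended thread. The mutex name `l_mutex` is an assumption,
//! and QP::QXTHREAD_NO_TIMEOUT is assumed to mean "block without a timeout":
//! @code{.cpp}
//! static_cast<void>(l_mutex.lock(QP::QXTHREAD_NO_TIMEOUT)); // 1st level
//! static_cast<void>(l_mutex.lock(QP::QXTHREAD_NO_TIMEOUT)); // nested lock
//! // ... critical section ...
//! l_mutex.unlock(); // balances the nested lock
//! l_mutex.unlock(); // balances the 1st lock; the mutex is released
//! @endcode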
QF_CRIT_STAT_
QF_CRIT_E_();
QActive *curr = QXK_attr_.curr;
if (curr == nullptr) { // called from a basic thread?
curr = registry_[QXK_attr_.actPrio];
}
//! @pre this function must:
//! - NOT be called from an ISR;
//! - be called from a valid thread;
Q_REQUIRE_ID(400, (!QXK_ISR_CONTEXT_()) // don't call from an ISR!
&& (curr != nullptr)); // current thread must be valid
//! @pre also: the mutex must be already locked at least once
Q_REQUIRE_ID(401, m_eQueue.m_nFree > 0U);
//! @pre also: the mutex must be held by this thread
Q_REQUIRE_ID(402, m_thread == curr);
// is this the last nesting level?
if (m_eQueue.m_nFree == 1U) {
if (m_prio != 0U) { // priority-ceiling protocol used?
// restore the holding thread's prio/pthre from the mutex
curr->m_prio = static_cast<std::uint8_t>(m_eQueue.m_head);
curr->m_pthre = static_cast<std::uint8_t>(m_eQueue.m_tail);
// put the mutex back into the AO registry
registry_[m_prio] = this;
// remove the mutex's prio from the ready set
// and insert the original thread's priority
QF::readySet_.remove(
static_cast<std::uint_fast8_t>(m_prio));
QF::readySet_.insert(
static_cast<std::uint_fast8_t>(m_eQueue.m_head));
}
QS_BEGIN_NOCRIT_PRE_(QS_MTX_UNLOCK, curr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this mutex
QS_2U8_PRE_(static_cast<std::uint8_t>(m_eQueue.m_head),
0U);
QS_END_NOCRIT_PRE_()
// are any other threads waiting on this mutex?
if (m_waitSet.notEmpty()) {
// find the highest-priority thread waiting on this mutex
std::uint_fast8_t const p = m_waitSet.findMax();
// remove this thread from waiting on the mutex
// and insert it into the ready set.
m_waitSet.remove(p);
QF::readySet_.insert(p);
QXThread * const thr = QXK_PTR_CAST_(QXThread*, registry_[p]);
// the waiting thread must:
// - be registered in QF
// - have the priority corresponding to the registration
// - be an extended thread
// - be blocked on this mutex
Q_ASSERT_ID(410, (thr != nullptr)
&& (thr->m_prio == static_cast<std::uint8_t>(p))
&& (thr->m_state.act == Q_ACTION_CAST(0))
&& (thr->m_temp.obj == QXK_PTR_CAST_(QMState*, this)));
// disarm the internal time event
static_cast<void>(thr->teDisarm_());
// set the new mutex holder to the curr thread and
// save the thread's prio/pthre in the mutex
// NOTE: reuse the otherwise unused eQueue data member.
m_thread = thr;
m_eQueue.m_head = static_cast<QEQueueCtr>(thr->m_prio);
m_eQueue.m_tail = static_cast<QEQueueCtr>(thr->m_pthre);
QS_BEGIN_NOCRIT_PRE_(QS_MTX_LOCK, thr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this mutex
QS_2U8_PRE_(static_cast<std::uint8_t>(m_eQueue.m_head),
static_cast<std::uint8_t>(m_eQueue.m_nFree));
QS_END_NOCRIT_PRE_()
if (m_prio != 0U) { // priority-ceiling protocol used?
// the holder priority must be lower than that of the mutex
Q_ASSERT_ID(410, thr->m_prio < m_prio);
// set thread's preemption-threshold to that of the mutex
thr->m_pthre = m_pthre;
// put the thread into AO registry in place of the mutex
registry_[m_prio] = thr;
}
}
else { // no threads are waiting for this mutex
m_eQueue.m_nFree = 0U; // free up the nesting count
// the mutex no longer held by any thread
m_thread = nullptr;
m_eQueue.m_head = 0U;
m_eQueue.m_tail = 0U;
if (m_prio != 0U) { // priority-ceiling protocol used?
// put the mutex back at the original mutex slot
registry_[m_prio] = QXK_PTR_CAST_(QActive*, this);
}
}
// schedule the next thread if multitasking started
if (QXK_sched_() != 0U) { // synchronous preemption needed?
QXK_activate_(); // synchronously activate basic threads
}
}
else { // releasing one level of nested mutex lock
Q_ASSERT_ID(420, m_eQueue.m_nFree > 0U);
m_eQueue.m_nFree = m_eQueue.m_nFree - 1U; // unlock one level
QS_BEGIN_NOCRIT_PRE_(QS_MTX_UNLOCK_ATTEMPT, curr->m_prio)
QS_TIME_PRE_(); // timestamp
QS_OBJ_PRE_(this); // this mutex
QS_2U8_PRE_(static_cast<std::uint8_t>(m_eQueue.m_head),
static_cast<std::uint8_t>(m_eQueue.m_nFree));
QS_END_NOCRIT_PRE_()
}
QF_CRIT_X_();
//! QF initialization for QXK
#if (QF_MAX_EPOOL > 0U)
QF::maxPool_ = 0U;
#endif
bzero(&QTimeEvt::timeEvtHead_[0], sizeof(QTimeEvt::timeEvtHead_));
bzero(&QActive::registry_[0], sizeof(QActive::registry_));
bzero(&QF::readySet_, sizeof(QF::readySet_));
bzero(&QXK_attr_, sizeof(QXK_attr_));
// setup the QXK scheduler as initially locked and not running
QXK_attr_.lockCeil = (QF_MAX_ACTIVE + 1U); // scheduler locked
// storage capable for holding a blank QActive object (const in ROM)
static void* const
idle_ao[((sizeof(QActive) + sizeof(void*)) - 1U) / sizeof(void*)]
= { nullptr };
// register the blank QActive object as the idle-AO (cast 'const' away)
QActive::registry_[0] = QF_CONST_CAST_(QActive*,
reinterpret_cast<QActive const*>(idle_ao));
#ifdef QXK_INIT
QXK_INIT(); // port-specific initialization of the QXK kernel
#endif
//! QF::stop() customization for QXK
//!
//! @sa QF::onCleanup()
onCleanup(); // cleanup callback
// nothing else to do for the QXK preemptive kernel
//! QF::run() customization for QXK kernel
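//!
//! A typical application startup sketch (illustrative only; BSP setup and
//! the application-specific AO/thread start() calls are omitted here):
//! @code{.cpp}
//! int main() {
//!     QP::QF::init();   // initialize the framework and the QXK kernel
//!     // ... instantiate and start active objects and extended threads ...
//!     return QP::QF::run(); // run the application; normally never returns
//! }
//! @endcode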
#ifdef Q_SPY
QS_SIG_DICTIONARY(QP::QXK::DELAY_SIG, nullptr);
QS_SIG_DICTIONARY(QP::QXK::TIMEOUT_SIG, nullptr);
// produce the QS_QF_RUN trace record
QF_INT_DISABLE();
QS::beginRec_(QS_REC_NUM_(QS_QF_RUN));
QS::endRec_();
QF_INT_ENABLE();
#endif
onStartup(); // startup callback
QF_INT_DISABLE();
QXK_attr_.lockCeil = 0U; // unlock the QXK scheduler
// activate AOs to process events posted so far
if (QXK_sched_() != 0U) {
QXK_activate_();
}
#ifdef QXK_START
QXK_START(); // port-specific startup of the QXK kernel
#endif
QF_INT_ENABLE();
for (;;) { // QXK idle loop...
QXK::onIdle(); // application-specific QXK idle callback
}
#ifdef __GNUC__ // GNU compiler?
return 0;
#endif
//! QP::QActive port for QXK
//! Starts execution of an active object and registers the object
//! with the framework customized for QXK
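//!
//! A usage sketch (illustrative only; the AO instance `AO_MyAo`, the queue
//! storage, and the priority value are assumptions, not part of QP):
//! @code{.cpp}
//! static QP::QEvt const *l_myAoQSto[10]; // event-queue storage for the AO
//! // ...
//! AO_MyAo->start(1U,                 // QF priority of the AO
//!                l_myAoQSto,         // event-queue storage
//!                Q_DIM(l_myAoQSto),  // queue length [events]
//!                nullptr, 0U);       // no private stack (basic thread)
//! @endcode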
Q_UNUSED_PAR(stkSto); // not needed in QXK
Q_UNUSED_PAR(stkSize); // not needed in QXK
//! @pre the AO cannot be started:
//! - from an ISR;
//! - with stack storage provided;
//! - with a preemption-threshold specified (because the QXK kernel
//!   does not support preemption-threshold scheduling)
Q_REQUIRE_ID(200, (!QXK_ISR_CONTEXT_())
&& (stkSto == nullptr)
&& ((prioSpec & 0xFF00U) == 0U));
m_prio = static_cast<std::uint8_t>(prioSpec & 0xFFU); // QF-prio.
m_pthre = 0U; // preemption-threshold NOT used
register_(); // make QF aware of this AO
m_eQueue.init(qSto, qLen); // initialize QEQueue of this AO
m_osObject = nullptr; // no private stack for AO
this->init(par, m_prio); // take the top-most initial tran. (virtual)
QS_FLUSH(); // flush the trace buffer to the host
// see if this AO needs to be scheduled in case QXK is running
QF_CRIT_STAT_
QF_CRIT_E_();
if (QXK_attr_.lockCeil <= QF_MAX_ACTIVE) { // scheduler running?
if (QXK_sched_() != 0U) { // synchronous preemption needed?
QXK_activate_(); // synchronously activate basic threads
}
}
QF_CRIT_X_();
//! No-timeout when blocking on semaphores, mutexes, and queues
{0U};
//! attributes of the QXK kernel (extern "C" for easy access in assembly)
{
QP::QActive * volatile curr; //!< currently executing thread
QP::QActive * volatile next; //!< next thread to execute
QP::QActive * volatile prev; //!< previous thread
std::uint8_t volatile actPrio; //!< QF-prio of the active AO
std::uint8_t volatile lockCeil; //!< lock preemption-ceiling (0==no-lock)
std::uint8_t volatile lockHolder; //!< prio of the lock holder
};
//! attributes of the QXK kernel (extern "C" to be accessible from C)
noexcept
//! QXK scheduler finds the highest-priority thread ready to run
//!
//! @details
//! The QXK scheduler finds the priority of the highest-priority thread
//! that is ready to run.
//!
//! @returns the 1-based priority of the active object to run next,
//! or zero if no eligible active object is found.
//!
//! @attention
//! QXK_sched_() must always be called with interrupts **disabled** and
//! returns with interrupts **disabled**.
std::uint_fast8_t p;
if (QP::QF::readySet_.isEmpty()) {
p = 0U; // no activation needed
}
else {
// find the highest-prio thread ready to run
p = QP::QF::readySet_.findMax();
if (p <= QXK_attr_.lockCeil) {
// priority of the thread holding the lock
p = static_cast<std::uint_fast8_t>(
QP::QActive::registry_[QXK_attr_.lockHolder]->m_prio);
if (p != 0U) {
Q_ASSERT_ID(610, QP::QF::readySet_.hasElement(p));
}
}
}
QP::QActive const * const curr = QXK_attr_.curr;
QP::QActive * const next = QP::QActive::registry_[p];
// the thread found must be registered in QF
Q_ASSERT_ID(620, next != nullptr);
// is the current thread a basic-thread?
if (curr == nullptr) {
// is the new priority above the active priority?
if (p > QXK_attr_.actPrio) {
QXK_attr_.next = next; // set the next AO to activate
if (next->m_osObject != nullptr) { // is next extended?
QXK_CONTEXT_SWITCH_();
p = 0U; // no activation needed
}
}
else { // not above the active priority
QXK_attr_.next = nullptr;
p = 0U; // no activation needed
}
}
else { // currently executing an extended-thread
// is the current thread different from the next?
if (curr != next) {
QXK_attr_.next = next;
QXK_CONTEXT_SWITCH_();
}
else { // next is the same as current
QXK_attr_.next = nullptr; // no need to context-switch
}
p = 0U; // no activation needed
}
return p;
noexcept
//! QXK activator activates the next active object. The activated AO preempts
//! the currently executing AOs
//!
//! @attention
//! QXK_activate_() must always be called with interrupts **disabled** and
//! returns with interrupts **disabled**.
//!
//! @note
//! The activate function might enable interrupts internally, but it always
//! returns with interrupts **disabled**.
std::uint8_t const prio_in = QXK_attr_.actPrio;
QP::QActive *next = QXK_attr_.next; // the next AO (basic-thread) to run
//! @pre QXK_attr_.next must be valid and the prio must be in range
Q_REQUIRE_ID(700, (next != nullptr) && (prio_in <= QF_MAX_ACTIVE));
// QXK Context switch callback defined or QS tracing enabled?
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
QXK_contextSw(next);
#endif // QF_ON_CONTEXT_SW || Q_SPY
QXK_attr_.next = nullptr; // clear the next AO
QXK_attr_.curr = nullptr; // current is basic-thread
// priority of the next thread
std::uint8_t p = next->m_prio;
// loop until no more ready-to-run AOs of higher prio than the initial
do {
QXK_attr_.actPrio = p; // next active prio
QF_INT_ENABLE(); // unconditionally enable interrupts
// perform the run-to-completion (RTC) step...
// 1. retrieve the event from the AO's event queue, which by this
//    time must be non-empty, as QActive::get_() asserts.
// 2. dispatch the event to the AO's state machine.
// 3. determine whether the event is garbage and collect it if so
//
QP::QEvt const * const e = next->get_();
next->dispatch(e, next->m_prio);
#if (QF_MAX_EPOOL > 0U)
QP::QF::gc(e);
#endif
QF_INT_DISABLE(); // unconditionally disable interrupts
if (next->m_eQueue.isEmpty()) { // empty queue?
QP::QF::readySet_.remove(p);
}
if (QP::QF::readySet_.isEmpty()) {
QXK_attr_.next = nullptr;
next = QP::QActive::registry_[0];
p = 0U; // no activation needed
}
else {
// find new highest-prio AO ready to run...
p = static_cast<std::uint8_t>(QP::QF::readySet_.findMax());
next = QP::QActive::registry_[p];
// the AO must be registered in QF
Q_ASSERT_ID(710, next != nullptr);
// is the new priority at or below the lock ceiling?
if (p <= QXK_attr_.lockCeil) {
p = QXK_attr_.lockHolder;
if (p != 0U) {
Q_ASSERT_ID(720, QP::QF::readySet_.hasElement(p));
}
}
// is the next a basic thread?
if (next->m_osObject == nullptr) {
// is the next priority above the initial priority?
if (p > QP::QActive::registry_[prio_in]->m_prio) {
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
if (p != QXK_attr_.actPrio) { // changing threads?
QXK_contextSw(next);
}
#endif // QF_ON_CONTEXT_SW || Q_SPY
QXK_attr_.next = next;
}
else {
QXK_attr_.next = nullptr;
p = 0U; // no activation needed
}
}
else { // next is the extended-thread
QXK_attr_.next = next;
QXK_CONTEXT_SWITCH_();
p = 0U; // no activation needed
}
}
} while (p != 0U); // while activation needed
// restore the active priority
QXK_attr_.actPrio = prio_in;
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
if (next->m_osObject == nullptr) {
QXK_contextSw((prio_in == 0U)
? nullptr
: QP::QActive::registry_[prio_in]);
}
#endif // QF_ON_CONTEXT_SW || Q_SPY
noexcept
//! obtain the currently executing active-object/thread
//!
//! @returns
//! pointer to the currently executing active-object/thread
//! @pre the QXK kernel must be running
Q_REQUIRE_ID(800, QXK_attr_.lockCeil <= QF_MAX_ACTIVE);
QF_CRIT_STAT_
QF_CRIT_E_();
QP::QActive *curr = QXK_attr_.curr;
if (curr == nullptr) { // basic thread?
curr = QP::QActive::registry_[QXK_attr_.actPrio];
}
QF_CRIT_X_();
//! @post the current thread must be valid
Q_ENSURE_ID(890, curr != nullptr);
return curr;
noexcept
//! initialize the private stack of a given AO
//! QXK context switch management
//!
//! @details
//! This function performs software tracing (if #Q_SPY is defined)
//! and calls QF_onContextSw() (if #QF_ON_CONTEXT_SW is defined).
//!
//! @param[in] next pointer to the next thread (NULL for basic-thread)
//!
//! @attention
//! QXK_contextSw() is invoked with interrupts **disabled** and must also
//! return with interrupts **disabled**.
#ifdef Q_SPY
std::uint8_t const prev_prio = (QXK_attr_.prev != nullptr)
? QXK_attr_.prev->m_prio
: 0U;
#endif // Q_SPY
if (next != nullptr) { // next is NOT idle?
QS_BEGIN_NOCRIT_PRE_(QP::QS_SCHED_NEXT, next->m_prio)
QS_TIME_PRE_(); // timestamp
QS_2U8_PRE_(next->m_prio, prev_prio);
QS_END_NOCRIT_PRE_()
}
else { // going to idle
QS_BEGIN_NOCRIT_PRE_(QP::QS_SCHED_IDLE, prev_prio)
QS_TIME_PRE_(); // timestamp
QS_U8_PRE_(prev_prio);
QS_END_NOCRIT_PRE_()
}
#ifdef QF_ON_CONTEXT_SW
QF_onContextSw(QXK_attr_.prev, next);
#endif // QF_ON_CONTEXT_SW
QXK_attr_.prev = next; // update the previous thread
//! called when a thread function exits
//!
//! @details
//! Called when the extended-thread handler function exits.
//!
//! @note
//! Most thread handler functions are structured as endless loops that never
//! exit. But it is also possible to structure threads as one-shot functions
//! that perform their job and exit. In that case this function performs
//! cleanup after the thread.
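//!
//! A sketch of a one-shot extended-thread handler (illustrative only; the
//! handler name is an assumption):
//! @code{.cpp}
//! static void oneShotThread(QP::QXThread * const me) {
//!     Q_UNUSED_PAR(me);
//!     // ... perform the one-time job of this thread ...
//!     // returning from the handler lets QXK clean up after the thread
//! }
//! @endcode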
QF_CRIT_STAT_
QF_CRIT_E_();
QP::QXThread const * const thr = QXTHREAD_CAST_(QXK_attr_.curr);
//! @pre this function must:
//! - NOT be called from an ISR;
//! - be called from an extended thread;
Q_REQUIRE_ID(900, (!QXK_ISR_CONTEXT_())
&& (thr != nullptr));
//! @pre also: the thread must NOT be holding a scheduler lock.
Q_REQUIRE_ID(901, QXK_attr_.lockHolder != thr->m_prio);
std::uint_fast8_t const p =
static_cast<std::uint_fast8_t>(thr->m_prio);
// remove this thread from the QF
QP::QActive::registry_[p] = nullptr;
QP::QF::readySet_.remove(p);
static_cast<void>(QXK_sched_()); // synchronous scheduling
QF_CRIT_X_();
<type_>
//! internal macro to encapsulate casting of pointers for MISRA deviations
//!
//! @details
//! This macro is specifically and exclusively used for casting pointers
//! that are never de-referenced, but only used for internal bookkeeping and
//! checking (via assertions) the correct operation of the QXK kernel.
//! Such pointer casting is not compliant with MISRA C++ Rule 5-2-7
//! and also triggers other diagnostic messages (e.g., PC-Lint-Plus
//! warning 826). Defining this specific macro for this purpose allows
//! the warnings to be selectively disabled for this particular case.
(reinterpret_cast<type_>(ptr_))
//! internal macro to encapsulate casting of pointers for MISRA deviations
//!
//! @details
//! This macro is specifically and exclusively used for downcasting pointers
//! to QActive to pointers to QXThread in situations when it is known
//! that such downcasting is correct.<br>
//!
//! However, such pointer casting is not compliant with MISRA C++
//! Rule 5-2-7 and also triggers other diagnostic messages (e.g.,
//! PC-Lint-Plus warning 826). Defining this specific macro for this
//! purpose allows the warnings to be selectively disabled for this
//! particular case.
(static_cast<QP::QXThread *>(ptr_))
//! Internal port-specific macro that checks the execution context
//! (ISR vs. thread). Might be overridden in qxk_port.hpp.
//!
//! @returns
//! 'true' if the code executes in the ISR context and 'false' otherwise.
(QF::intNest_ != 0U)
//! QXK scheduler lock status
QSchedStatus lockStat_;
//! QXK selective scheduler locking
do { \
if (QXK_ISR_CONTEXT_()) { \
lockStat_ = 0xFFU; \
} else { \
lockStat_ = QXK::schedLock((ceil_)); \
} \
} while (false)
//! QXK selective scheduler unlocking
do { \
if (lockStat_ != 0xFFU) { \
QXK::schedUnlock(lockStat_); \
} \
} while (false)
// QXK native event queue waiting
\
Q_ASSERT_ID(110, (me_)->m_eQueue.m_frontEvt != nullptr)
// QXK native event queue signalling
do { \
QF::readySet_.insert( \
static_cast<std::uint_fast8_t>((me_)->m_prio)); \
if (!QXK_ISR_CONTEXT_()) { \
if (QXK_sched_() != 0U) { \
QXK_activate_(); \
} \
} \
} while (false)
//! @file
//! @brief QEP/C++ platform-independent public interface.
//!
//! @tr{RQP001} @tr{RQP101}
#ifndef QP_INC_QEP_HPP_
#define QP_INC_QEP_HPP_
//============================================================================
//! The current QP version as an unsigned number
//
// @details
// ::QP_VERSION is a decimal constant of the form XXYZ, where XX is a
// 1-digit or 2-digit major version number, Y is a 1-digit minor version
// number, and Z is a 1-digit release number.
//
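//
// For example, an application can use ::QP_VERSION for a compile-time
// check (illustrative sketch; the required version is an assumption):
//
//   #if (QP_VERSION < 700U)
//   #error "QP/C++ version 7.0.0 or newer is required"
//   #endif
//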
#define QP_VERSION 720U
//! The current QP version as a zero terminated string literal.
//
// @details
// ::QP_VERSION_STR is of the form "XX.Y.Z", where XX is a 1-or 2-digit
// major version number, Y is a 1-digit minor version number, and Z is
// a 1-digit release number.
//
#define QP_VERSION_STR "7.2.0"
//! Encrypted current QP release (7.2.0) and date (2023-01-06)
#define QP_RELEASE 0x76D8998FU
//============================================================================
// Global namespace...
$declare ${glob-types}
$declare ${QEP-config}
//============================================================================
$declare ${QEP}
//============================================================================
$declare ${QEP-macros}
#endif // QP_INC_QEP_HPP_
//! @file
//! @brief QF/C++ platform-independent public interface.
#ifndef QP_INC_QF_HPP_
#define QP_INC_QF_HPP_
#ifdef Q_EVT_CTOR
#include <new> // for placement new
#include <cstdarg> // for va_list
#endif // Q_EVT_CTOR
//============================================================================
// Global namespace...
$declare ${QF-config}
//============================================================================
$declare ${QF-types}
$declare ${QF::QActive}
$declare ${QF::QMActive}
$declare ${QF::QTimeEvt}
$declare ${QF::QTicker}
$declare ${QF::QF-base}
$declare ${QF::QF-dyn}
//============================================================================
extern "C" {
$declare ${QF-extern-C}
} // extern "C"
//============================================================================
// Global namespace...
$declare ${QF-macros}
#endif // QP_INC_QF_HPP_
//! @file
//! @brief Internal (package scope) QF/C++ interface.
#ifndef QP_INC_QF_PKG_HPP_
#define QP_INC_QF_PKG_HPP_
//============================================================================
//! helper macro to cast const away from an event pointer
#define QF_CONST_CAST_(type_, ptr_) const_cast<type_>(ptr_)
$declare ${QF::QF-pkg}
//============================================================================
namespace QP {
//............................................................................
//! Structure representing a free block in the Native QF Memory Pool
//! @sa QP::QMPool
struct QFreeBlock {
QFreeBlock * volatile m_next; //!< link to the next free block
};
//............................................................................
// The following flags and bitmasks apply to the `QEvt.refCtr_` attribute
// of the QP::QTimeEvt class (a subclass of QP::QEvt).
// This attribute is NOT used for reference counting in time events
// because the `QEvt.poolId_` attribute is zero ("static event").
//
constexpr std::uint8_t TE_IS_LINKED = 1U << 7U; // flag
constexpr std::uint8_t TE_WAS_DISARMED = 1U << 6U; // flag
constexpr std::uint8_t TE_TICK_RATE = 0x0FU; // bitmask
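// Illustrative sketch (not part of QP): how these flags might be tested
// and updated on a time event's refCtr_ attribute; the pointer name `te`
// is an assumption:
//
//   if ((te->refCtr_ & TE_IS_LINKED) == 0U) { // not linked yet?
//       te->refCtr_ |= TE_IS_LINKED;          // mark the time event linked
//   }
//   std::uint8_t const tickRate = (te->refCtr_ & TE_TICK_RATE);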
// internal helper inline functions
//! increment the refCtr_ of an event `e`
inline void QEvt_refCtr_inc_(QEvt const * const e) noexcept {
(QF_CONST_CAST_(QEvt*, e))->refCtr_ = e->refCtr_ + 1U;
}
//! decrement the refCtr_ of an event `e`
inline void QEvt_refCtr_dec_(QEvt const * const e) noexcept {
(QF_CONST_CAST_(QEvt*, e))->refCtr_ = e->refCtr_ - 1U;
}
} // namespace QP
//============================================================================
// QF-specific critical section...
#ifndef QF_CRIT_STAT_TYPE
//! This is an internal macro for defining the critical section
//! status type.
//!
//! @details
//! The purpose of this macro is to enable writing the same code for the
//! case when critical section status type is defined and when it is not.
//! If the macro #QF_CRIT_STAT_TYPE is defined, this internal macro
//! provides the definition of the critical section status variable.
//! Otherwise this macro is empty.
//! @sa #QF_CRIT_STAT_TYPE
#define QF_CRIT_STAT_
//! This is an internal macro for entering a critical section.
//!
//! @details
//! The purpose of this macro is to enable writing the same code for the
//! case when critical section status type is defined and when it is not.
//! If the macro #QF_CRIT_STAT_TYPE is defined, this internal macro
//! invokes QF_CRIT_ENTRY() passing the key variable as the parameter.
//! Otherwise QF_CRIT_ENTRY() is invoked with a dummy parameter.
//! @sa QF_CRIT_ENTRY()
#define QF_CRIT_E_() QF_CRIT_ENTRY(dummy)
//! This is an internal macro for exiting a critical section.
//!
//! @details
//! The purpose of this macro is to enable writing the same code for the
//! case when critical section status type is defined and when it is not.
//! If the macro #QF_CRIT_STAT_TYPE is defined, this internal macro
//! invokes QF_CRIT_EXIT() passing the key variable as the parameter.
//! Otherwise QF_CRIT_EXIT() is invoked with a dummy parameter.
//! @sa QF_CRIT_EXIT()
//!
#define QF_CRIT_X_() QF_CRIT_EXIT(dummy)
#elif (!defined QF_CRIT_STAT_)
#define QF_CRIT_STAT_ QF_CRIT_STAT_TYPE critStat_;
#define QF_CRIT_E_() QF_CRIT_ENTRY(critStat_)
#define QF_CRIT_X_() QF_CRIT_EXIT(critStat_)
#endif // QF_CRIT_STAT_TYPE
// Assertions inside the critical section ------------------------------------
#ifdef Q_NASSERT // Q_NASSERT defined--assertion checking disabled
#define Q_ASSERT_CRIT_(id_, test_) static_cast<void>(0)
#define Q_REQUIRE_CRIT_(id_, test_) static_cast<void>(0)
#define Q_ERROR_CRIT_(id_) static_cast<void>(0)
#else // Q_NASSERT not defined--assertion checking enabled
#define Q_ASSERT_CRIT_(id_, test_) do { \
if ((test_)) {} else { \
QF_CRIT_X_(); \
Q_onAssert(&Q_this_module_[0], static_cast<int_t>(id_)); \
} \
} while (false)
#define Q_REQUIRE_CRIT_(id_, test_) Q_ASSERT_CRIT_((id_), (test_))
#define Q_ERROR_CRIT_(id_) do { \
QF_CRIT_X_(); \
Q_onAssert(&Q_this_module_[0], static_cast<int_t>(id_)); \
} while (false)
#endif // Q_NASSERT
//! helper macro to test that a pointer `x_` is in range between
//! `min_` and `max_`
//!
//! @details
//! This macro is specifically and exclusively used for checking the range
//! of a block pointer returned to the pool. Such a check must rely on
//! pointer arithmetic that is not compliant with AUTOSAR Rule M5-0-18.
//! Defining a specific macro for this purpose allows the warnings to be
//! selectively disabled for this particular case.
#define QF_PTR_RANGE_(x_, min_, max_) (((min_) <= (x_)) && ((x_) <= (max_)))
#endif // QP_INC_QF_PKG_HPP_
//! @file
//! @brief platform-independent fast "raw" thread-safe event queue interface
//!
//! @details
//! This header file must be included in all QF ports that use native QF
//! event queue for active objects. Also, this file needs to be included
//! in the QP/C++ library when the application uses QActive::defer() /
//! QActive::recall(). Finally, this file is also needed when the "raw"
//! thread-safe queues are used for communication between active objects
//! and non-framework entities, such as ISRs, device drivers, or legacy
//! code.
#ifndef QP_INC_QEQUEUE_HPP_
#define QP_INC_QEQUEUE_HPP_
#ifndef QF_EQUEUE_CTR_SIZE
//! The size (in bytes) of the ring-buffer counters used in the
//! native QF event queue implementation. Valid values: 1U, 2U, or 4U;
//! default 1U.
//! @details
//! This macro can be defined in the QF port file (qf_port.hpp) to
//! configure the QP::QEQueueCtr type. When the macro is not defined,
//! the default of 1 byte is applied.
#define QF_EQUEUE_CTR_SIZE 1U
#endif
namespace QP {
#if (QF_EQUEUE_CTR_SIZE == 1U)
//! The data type to store the ring-buffer counters based on
//! the macro #QF_EQUEUE_CTR_SIZE.
//! @details
//! The dynamic range of this data type determines the maximum length
//! of the ring buffer managed by the native QF event queue.
using QEQueueCtr = std::uint8_t;
#elif (QF_EQUEUE_CTR_SIZE == 2U)
using QEQueueCtr = std::uint16_t;
#elif (QF_EQUEUE_CTR_SIZE == 4U)
using QEQueueCtr = std::uint32_t;
#else
#error "QF_EQUEUE_CTR_SIZE defined incorrectly, expected 1U, 2U, or 4U"
#endif
} // namespace QP
//============================================================================
$declare ${QF::QEQueue}
#endif // QP_INC_QEQUEUE_HPP_
//! @file
//! @brief platform-independent memory pool QP::QMPool interface.
#ifndef QP_INC_QMPOOL_HPP_
#define QP_INC_QMPOOL_HPP_
#ifndef QF_MPOOL_SIZ_SIZE
//! macro to override the default QP::QMPoolSize size.
//! Valid values 1U, 2U, or 4U; default 2U
#define QF_MPOOL_SIZ_SIZE 2U
#endif
#ifndef QF_MPOOL_CTR_SIZE
//! macro to override the default QMPoolCtr size.
//! Valid values 1U, 2U, or 4U; default 2U
#define QF_MPOOL_CTR_SIZE 2U
#endif
//! Memory pool element to allocate correctly aligned storage for QP::QMPool
#define QF_MPOOL_EL(type_) \
struct { void *sto_[((sizeof(type_) - 1U)/sizeof(void*)) + 1U]; }
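// A usage sketch (illustrative only; the event type `MyEvt`, the storage
// name, and the pool size are assumptions): correctly aligned storage for
// an event pool of 10 events, initialized with QF::poolInit():
//
//   static QF_MPOOL_EL(MyEvt) l_myPoolSto[10];
//   // ...
//   QP::QF::poolInit(l_myPoolSto, sizeof(l_myPoolSto), sizeof(MyEvt));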
//============================================================================
namespace QP {
#if (QF_MPOOL_SIZ_SIZE == 1U)
using QMPoolSize = std::uint8_t;
#elif (QF_MPOOL_SIZ_SIZE == 2U)
//! The data type to store the block-size based on the macro
//! #QF_MPOOL_SIZ_SIZE
//!
//! @details
//! The dynamic range of this data type determines the maximum size
//! of blocks that can be managed by the native QF event pool.
using QMPoolSize = std::uint16_t;
#elif (QF_MPOOL_SIZ_SIZE == 4U)
using QMPoolSize = std::uint32_t;
#else
#error "QF_MPOOL_SIZ_SIZE defined incorrectly, expected 1U, 2U, or 4U"
#endif
#if (QF_MPOOL_CTR_SIZE == 1U)
using QMPoolCtr = std::uint8_t;
#elif (QF_MPOOL_CTR_SIZE == 2U)
//! The data type to store the block-counter based on the macro
//! #QF_MPOOL_CTR_SIZE
//!
//! @details
//! The dynamic range of this data type determines the maximum number
//! of blocks that can be stored in the pool.
using QMPoolCtr = std::uint16_t;
#elif (QF_MPOOL_CTR_SIZE == 4U)
using QMPoolCtr = std::uint32_t;
#else
#error "QF_MPOOL_CTR_SIZE defined incorrectly, expected 1U, 2U, or 4U"
#endif
} // namespace QP
//============================================================================
$declare ${QF::QMPool}
#endif // QP_INC_QMPOOL_HPP_
//! @file
//! @brief QV/C++ platform-independent public interface.
#ifndef QP_INC_QV_HPP_
#define QP_INC_QV_HPP_
//============================================================================
// QF customization for QV -- data members of the QActive class...
// QV event-queue used for AOs
#define QF_EQUEUE_TYPE QEQueue
//============================================================================
#include "qequeue.hpp" // QV kernel uses the native QF event queue
#include "qmpool.hpp" // QV kernel uses the native QF memory pool
#include "qf.hpp" // QF framework integrates directly with QV
//============================================================================
$declare ${QV::QV-base}
//============================================================================
// interface used only inside QF, but not in applications
#ifdef QP_IMPL
// QV-specific scheduler locking and event queue...
$declare ${QV-impl}
// Native QF event pool operations...
$declare ${QF-QMPool-impl}
#endif // QP_IMPL
#endif // QP_INC_QV_HPP_
//! @file
//! @brief QK/C++ platform-independent public interface.
#ifndef QP_INC_QK_HPP_
#define QP_INC_QK_HPP_
//============================================================================
// QF customization for QK -- data members of the QActive class...
// QK event-queue used for AOs
#define QF_EQUEUE_TYPE QEQueue
// QK thread type used for AOs
// QK uses this member to store the private Thread-Local Storage pointer.
#define QF_THREAD_TYPE void*
//============================================================================
#include "qequeue.hpp" // QK kernel uses the native QF event queue
#include "qmpool.hpp" // QK kernel uses the native QF memory pool
#include "qf.hpp" // QF framework integrates directly with QK
//============================================================================
$declare ${QK::QK-base}
//============================================================================
extern "C" {
$declare ${QK-extern-C}
} // extern "C"
//============================================================================
// interface used only inside QF, but not in applications
#ifdef QP_IMPL
// QK-specific scheduler locking and event queue...
$declare ${QK-impl}
// Native QF event pool operations...
$declare ${QF-QMPool-impl}
#endif // QP_IMPL
#endif // QP_INC_QK_HPP_
//! @file
//! @brief QXK/C++ preemptive extended (blocking) kernel, platform-independent
//! public interface.
#ifndef QP_INC_QXK_HPP_
#define QP_INC_QXK_HPP_
//============================================================================
// QF customization for QXK -- data members of the QActive class...
// QXK event-queue used for AOs
#define QF_EQUEUE_TYPE QEQueue
// QXK OS-object used to store the private stack pointer for extended threads.
// (The private stack pointer is NULL for basic-threads).
#define QF_OS_OBJECT_TYPE void*
// QXK thread type used to store the private Thread-Local Storage pointer.
#define QF_THREAD_TYPE void*
//! Access Thread-Local Storage (TLS) and cast it to the given `type_`
#define QXK_TLS(type_) (static_cast<type_>(QXK_current()->m_thread))
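// A usage sketch (illustrative only): accessing per-thread data from code
// running in the context of an extended thread, assuming the application
// has set up the thread's TLS pointer to point at a hypothetical `MyTLS`
// object:
//
//   MyTLS * const tls = QXK_TLS(MyTLS *);
//   tls->errorCode = 0U; // hypothetical member of MyTLS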
//============================================================================
#include "qequeue.hpp" // QXK kernel uses the native QF event queue
#include "qmpool.hpp" // QXK kernel uses the native QF memory pool
#include "qf.hpp" // QF framework integrates directly with QXK
//============================================================================
$declare ${QXK::QXTHREAD_NO_TIMEOUT}
$declare ${QXK::QXK-base}
$declare ${QXK::QXThread}
$declare ${QXK::QXSemaphore}
$declare ${QXK::QXMutex}
//============================================================================
extern "C" {
$declare ${QXK-extern-C}
} // extern "C"
//============================================================================
// interface used only inside QF, but not in applications
#ifdef QP_IMPL
// QXK implementation...
$declare ${QXK-impl}
// Native QF event pool operations...
$declare ${QF-QMPool-impl}
#endif // QP_IMPL
#endif // QP_INC_QXK_HPP_
//! @file
//! @brief QS/C++ platform-independent public interface.
#ifndef QP_INC_QS_HPP_
#define QP_INC_QS_HPP_
#ifndef Q_SPY
#error "Q_SPY must be defined to include qs.hpp"
#endif
//============================================================================
// Global namespace...
$declare ${QS-config}
//============================================================================
$declare ${QS}
//============================================================================
// Global namespace...
$declare ${QS-macros}
//============================================================================
// Facilities for QS critical section
// QS-specific critical section
#ifdef QS_CRIT_ENTRY // separate QS critical section defined?
#ifndef QS_CRIT_STAT_TYPE
#define QS_CRIT_STAT_
#define QS_CRIT_E_() QS_CRIT_ENTRY(dummy)
#define QS_CRIT_X_() QS_CRIT_EXIT(dummy); QS_REC_DONE()
#else
#define QS_CRIT_STAT_ QS_CRIT_STAT_TYPE critStat_;
#define QS_CRIT_E_() QS_CRIT_ENTRY(critStat_)
#define QS_CRIT_X_() QS_CRIT_EXIT(critStat_); QS_REC_DONE()
#endif // QS_CRIT_STAT_TYPE
#else // separate QS critical section not defined--use the QF definition
#ifndef QF_CRIT_STAT_TYPE
//! This is an internal macro for defining the critical section
//! status type
//!
//! @details
//! The purpose of this macro is to enable writing the same code for the
//! case when critical section status type is defined and when it is not.
//! If the macro #QF_CRIT_STAT_TYPE is defined, this internal macro
//! provides the definition of the critical section status variable.
//! Otherwise this macro is empty.
//! @sa #QF_CRIT_STAT_TYPE
#define QS_CRIT_STAT_
//! This is an internal macro for entering a critical section
//!
//! @details
//! The purpose of this macro is to enable writing the same code for the
//! case when critical section status type is defined and when it is not.
//! If the macro #QF_CRIT_STAT_TYPE is defined, this internal macro
//! invokes #QF_CRIT_ENTRY passing the key variable as the parameter.
//! Otherwise #QF_CRIT_ENTRY is invoked with a dummy parameter.
//! @sa #QF_CRIT_ENTRY
#define QS_CRIT_E_() QF_CRIT_ENTRY(dummy)
//! This is an internal macro for exiting a critical section
//!
//! @details
//! The purpose of this macro is to enable writing the same code for the
//! case when critical section status type is defined and when it is not.
//! If the macro #QF_CRIT_STAT_TYPE is defined, this internal macro
//! invokes #QF_CRIT_EXIT passing the key variable as the parameter.
//! Otherwise #QF_CRIT_EXIT is invoked with a dummy parameter.
//! @sa #QF_CRIT_EXIT
#define QS_CRIT_X_() QF_CRIT_EXIT(dummy); QS_REC_DONE()
#elif (!defined QS_CRIT_STAT_)
#define QS_CRIT_STAT_ QF_CRIT_STAT_TYPE critStat_;
#define QS_CRIT_E_() QF_CRIT_ENTRY(critStat_)
#define QS_CRIT_X_() QF_CRIT_EXIT(critStat_); QS_REC_DONE()
#endif // simple unconditional interrupt disabling used
#endif // separate QS critical section not defined
//============================================================================
// Macros for use in QUTest only
#ifdef Q_UTEST
$declare ${QUTest}
//----------------------------------------------------------------------------
// QP-stub for QUTest
// NOTE: The QP-stub is needed for unit testing QP applications,
// but might NOT be needed for testing QP itself.
//
#if Q_UTEST != 0
$declare ${QUTest-stub::QS}
$declare ${QUTest-stub::QHsmDummy}
$declare ${QUTest-stub::QActiveDummy}
#endif // Q_UTEST != 0
//! QS macro to define the Test-Probe for a given `fun_`
#define QS_TEST_PROBE_DEF(fun_) \
std::uint32_t const qs_tp_ = \
QP::QS::getTestProbe_(QP::QS::force_cast<void (*)(void)>(fun_));
//! QS macro to apply a Test-Probe
#define QS_TEST_PROBE(code_) \
if (qs_tp_ != 0U) { code_ }
//! QS macro to apply a Test-Probe
#define QS_TEST_PROBE_ID(id_, code_) \
if (qs_tp_ == static_cast<std::uint32_t>(id_)) { code_ }
//! QS macro to pause test execution and enter the test event loop
#define QS_TEST_PAUSE() (QP::QS::test_pause_())
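// A usage sketch (illustrative only) of a test probe inside a function
// under test; the function name `myFunction` is an assumption:
//
//   void myFunction(void) {
//       QS_TEST_PROBE_DEF(&myFunction)
//       QS_TEST_PROBE(
//           // code executed only when a test probe is set for myFunction
//       )
//       // ... normal code of myFunction ...
//   }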
#else
// dummy definitions when not building for QUTEST
#define QS_TEST_PROBE_DEF(fun_)
#define QS_TEST_PROBE(code_)
#define QS_TEST_PROBE_ID(id_, code_)
#define QS_TEST_PAUSE() ((void)0)
#endif // Q_UTEST
#endif // QP_INC_QS_HPP_
//! @file
//! @brief Dummy definitions of the QS macros that avoid code generation from
//! the QS instrumentation.
#ifndef QP_INC_QS_DUMMY_HPP_
#define QP_INC_QS_DUMMY_HPP_
#ifdef Q_SPY
#error "Q_SPY must NOT be defined to include qs_dummy.hpp"
#endif
#define QS_INIT(arg_) (true)
#define QS_EXIT() static_cast<void>(0)
#define QS_DUMP() static_cast<void>(0)
#define QS_GLB_FILTER(rec_) static_cast<void>(0)
#define QS_LOC_FILTER(qs_id_) static_cast<void>(0)
#define QS_GET_BYTE(pByte_) (0xFFFFU)
#define QS_GET_BLOCK(pSize_) (nullptr)
#define QS_BEGIN_ID(rec_, qs_id_) if (false) {
#define QS_END() }
#define QS_BEGIN_NOCRIT(rec_, qs_id_) if (false) {
#define QS_END_NOCRIT() }
#define QS_I8(width_, data_) static_cast<void>(0)
#define QS_U8(width_, data_) static_cast<void>(0)
#define QS_I16(width_, data_) static_cast<void>(0)
#define QS_U16(width_, data_) static_cast<void>(0)
#define QS_I32(width_, data_) static_cast<void>(0)
#define QS_U32(width_, data_) static_cast<void>(0)
#define QS_F32(width_, data_) static_cast<void>(0)
#define QS_F64(width_, data_) static_cast<void>(0)
#define QS_I64(width_, data_) static_cast<void>(0)
#define QS_U64(width_, data_) static_cast<void>(0)
#define QS_ENUM(group_, value_) static_cast<void>(0)
#define QS_STR(str_) static_cast<void>(0)
#define QS_MEM(mem_, size_) static_cast<void>(0)
#define QS_SIG(sig_, obj_) static_cast<void>(0)
#define QS_OBJ(obj_) static_cast<void>(0)
#define QS_FUN(fun_) static_cast<void>(0)
#define QS_SIG_DICTIONARY(sig_, obj_) static_cast<void>(0)
#define QS_OBJ_DICTIONARY(obj_) static_cast<void>(0)
#define QS_OBJ_ARR_DICTIONARY(obj_, idx_) static_cast<void>(0)
#define QS_FUN_DICTIONARY(fun_) static_cast<void>(0)
#define QS_USR_DICTIONARY(rec_) static_cast<void>(0)
#define QS_ENUM_DICTIONARY(value_, group_) static_cast<void>(0)
#define QS_ASSERTION(module_, loc_, delay_) static_cast<void>(0)
#define QS_FLUSH() static_cast<void>(0)
#define QS_TEST_PROBE_DEF(fun_)
#define QS_TEST_PROBE(code_)
#define QS_TEST_PROBE_ID(id_, code_)
#define QS_TEST_PAUSE() static_cast<void>(0)
#define QS_OUTPUT() static_cast<void>(0)
#define QS_RX_INPUT() static_cast<void>(0)
//============================================================================
$declare ${QS::QSpyIdOffsets}
$declare ${QS::QSpyIdGroups}
$declare ${QS::QSpyId}
//============================================================================
// internal QS macros used only in the QP components
#ifdef QP_IMPL
// predefined QS trace records
#define QS_BEGIN_PRE_(rec_, qs_id_) if (false) {
#define QS_END_PRE_() }
#define QS_BEGIN_NOCRIT_PRE_(rec_, qs_id_) if (false) {
#define QS_END_NOCRIT_PRE_() }
#define QS_U8_PRE_(data_) static_cast<void>(0)
#define QS_2U8_PRE_(data1_, data2_) static_cast<void>(0)
#define QS_U16_PRE_(data_) static_cast<void>(0)
#define QS_U32_PRE_(data_) static_cast<void>(0)
#define QS_TIME_PRE_() static_cast<void>(0)
#define QS_SIG_PRE_(sig_) static_cast<void>(0)
#define QS_EVS_PRE_(size_) static_cast<void>(0)
#define QS_OBJ_PRE_(obj_) static_cast<void>(0)
#define QS_FUN_PRE_(fun_) static_cast<void>(0)
#define QS_EQC_PRE_(ctr_) static_cast<void>(0)
#define QS_MPC_PRE_(ctr_) static_cast<void>(0)
#define QS_MPS_PRE_(size_) static_cast<void>(0)
#define QS_TEC_PRE_(ctr_) static_cast<void>(0)
#define QS_CRIT_STAT_
#define QF_QS_CRIT_ENTRY() static_cast<void>(0)
#define QF_QS_CRIT_EXIT() static_cast<void>(0)
#define QF_QS_ISR_ENTRY(isrnest_, prio_) static_cast<void>(0)
#define QF_QS_ISR_EXIT(isrnest_, prio_) static_cast<void>(0)
#define QF_QS_ACTION(act_) static_cast<void>(0)
#endif // QP_IMPL
#endif // QP_INC_QS_DUMMY_HPP_
//! @file
//! @brief Internal (package scope) QS/C++ interface.
#ifndef QP_INC_QS_PKG_HPP_
#define QP_INC_QS_PKG_HPP_
//============================================================================
namespace QP {
//! QS received record types (RX channel)
//!
//! @details
//! This enumeration specifies the record types for the QS receive channel
enum QSpyRxRecords : std::uint8_t {
QS_RX_INFO, //!< query Target info (ver, config, tstamp)
QS_RX_COMMAND, //!< execute a user-defined command in the Target
QS_RX_RESET, //!< reset the Target
QS_RX_TICK, //!< call QF_tick()
QS_RX_PEEK, //!< peek Target memory
QS_RX_POKE, //!< poke Target memory
QS_RX_FILL, //!< fill Target memory
QS_RX_TEST_SETUP, //!< test setup
QS_RX_TEST_TEARDOWN, //!< test teardown
QS_RX_TEST_PROBE, //!< set a Test-Probe in the Target
QS_RX_GLB_FILTER, //!< set global filters in the Target
QS_RX_LOC_FILTER, //!< set local filters in the Target
QS_RX_AO_FILTER, //!< set local AO filter in the Target
QS_RX_CURR_OBJ, //!< set the "current-object" in the Target
QS_RX_TEST_CONTINUE, //!< continue a test after QS_RX_TEST_WAIT()
QS_RX_QUERY_CURR, //!< query the "current object" in the Target
QS_RX_EVENT //!< inject an event to the Target (post/publish)
};
//! @brief Frame character of the QS output protocol
constexpr std::uint8_t QS_FRAME = 0x7EU;
//! @brief Escape character of the QS output protocol
constexpr std::uint8_t QS_ESC = 0x7DU;
//! @brief Escape modifier of the QS output protocol
//!
//! @details
//! The escaped byte is XOR-ed with the escape modifier before it is inserted
//! into the QS buffer.
constexpr std::uint8_t QS_ESC_XOR = 0x20U;
//! @brief Good checksum value of the QS output protocol
constexpr std::uint8_t QS_GOOD_CHKSUM = 0xFFU;
} // namespace QP
//============================================================================
// Macros for use inside other macros or internally in the QP code
//! Internal QS macro to insert an un-escaped byte into the QS buffer
#define QS_INSERT_BYTE_(b_) \
buf_[head_] = (b_); \
++head_; \
if (head_ == end_) { \
head_ = 0U; \
}
//! Internal QS macro to insert an escaped byte into the QS buffer
#define QS_INSERT_ESC_BYTE_(b_) \
chksum_ += (b_); \
if (((b_) != QS_FRAME) && ((b_) != QS_ESC)) { \
QS_INSERT_BYTE_(b_) \
} \
else { \
QS_INSERT_BYTE_(QS_ESC) \
QS_INSERT_BYTE_(static_cast<std::uint8_t>((b_) ^ QS_ESC_XOR)) \
priv_.used = (priv_.used + 1U); \
}
//! Internal QS macro to begin a predefined QS record with critical section.
//! @note
//! This macro is intended to use only inside QP components and NOT
//! at the application level.
//! @sa QS_BEGIN_ID()
//!
#define QS_BEGIN_PRE_(rec_, qs_id_) \
if (QS_GLB_CHECK_(rec_) && QS_LOC_CHECK_(qs_id_)) { \
QS_CRIT_E_(); \
QP::QS::beginRec_(static_cast<std::uint_fast8_t>(rec_));
//! Internal QS macro to end a predefined QS record with critical section.
//! @note
//! This macro is intended to use only inside QP components and NOT
//! at the application level.
//! @sa QS_END()
//!
#define QS_END_PRE_() \
QP::QS::endRec_(); \
QS_CRIT_X_(); \
}
//! Internal QS macro to begin a predefined QS record without critical section
//!
//! @note
//! This macro is intended to use only inside QP components and NOT
//! at the application level.
//! @sa QS_BEGIN_NOCRIT_PRE_()
#define QS_BEGIN_NOCRIT_PRE_(rec_, qs_id_) \
if (QS_GLB_CHECK_(rec_) && QS_LOC_CHECK_(qs_id_)) { \
QP::QS::beginRec_(static_cast<std::uint_fast8_t>(rec_));
//! Internal QS macro to end a predefined QS record without critical section
//!
//! @note
//! This macro is intended to use only inside QP components and NOT
//! at the application level. @sa #QS_END_NOCRIT
#define QS_END_NOCRIT_PRE_() \
QP::QS::endRec_(); \
}
#if (Q_SIGNAL_SIZE == 1U)
//! Internal QS macro to output an unformatted event signal data element
//! @note
//! The size of the signal depends on the macro #Q_SIGNAL_SIZE.
#define QS_SIG_PRE_(sig_) \
(QP::QS::u8_raw_(static_cast<std::uint8_t>(sig_)))
#elif (Q_SIGNAL_SIZE == 2U)
#define QS_SIG_PRE_(sig_) \
(QP::QS::u16_raw_(static_cast<std::uint16_t>(sig_)))
#elif (Q_SIGNAL_SIZE == 4U)
#define QS_SIG_PRE_(sig_) \
(QP::QS::u32_raw_(static_cast<std::uint32_t>(sig_)))
#endif
//! Internal QS macro to output an unformatted uint8_t data element
#define QS_U8_PRE_(data_) \
(QP::QS::u8_raw_(static_cast<std::uint8_t>(data_)))
//! Internal QS macro to output 2 unformatted uint8_t data elements
#define QS_2U8_PRE_(data1_, data2_) \
(QP::QS::u8u8_raw_(static_cast<std::uint8_t>(data1_), \
static_cast<std::uint8_t>(data2_)))
//! Internal QS macro to output an unformatted uint16_t data element
#define QS_U16_PRE_(data_) \
(QP::QS::u16_raw_(static_cast<std::uint16_t>(data_)))
//! Internal QS macro to output an unformatted uint32_t data element
#define QS_U32_PRE_(data_) \
(QP::QS::u32_raw_(static_cast<std::uint32_t>(data_)))
//! Internal QS macro to output a zero-terminated ASCII string
//! data element
#define QS_STR_PRE_(msg_) (QP::QS::str_raw_(msg_))
//! Internal QS macro to output object pointer data element
#define QS_OBJ_PRE_(obj_) (QP::QS::obj_raw_(obj_))
#if (QS_FUN_PTR_SIZE == 1U)
#define QS_FUN_PRE_(fun_) \
(QP::QS::u8_raw_(reinterpret_cast<std::uint8_t>(fun_)))
#elif (QS_FUN_PTR_SIZE == 2U)
#define QS_FUN_PRE_(fun_) \
(QP::QS::u16_raw_(reinterpret_cast<std::uint16_t>(fun_)))
#elif (QS_FUN_PTR_SIZE == 4U)
#define QS_FUN_PRE_(fun_) \
(QP::QS::u32_raw_(reinterpret_cast<std::uint32_t>(fun_)))
#elif (QS_FUN_PTR_SIZE == 8U)
#define QS_FUN_PRE_(fun_) \
(QP::QS::u64_raw_(reinterpret_cast<std::uint64_t>(fun_)))
#else
//! Internal QS macro to output an unformatted function pointer
//! data element
//!
//! @note
//! The size of the pointer depends on the macro #QS_FUN_PTR_SIZE.
//! If the size is not defined, the pointer size is assumed to be 4 bytes.
#define QS_FUN_PRE_(fun_) \
(QP::QS::u32_raw_(reinterpret_cast<std::uint32_t>(fun_)))
#endif
#if (QF_EQUEUE_CTR_SIZE == 1U)
//! Internal QS macro to output an unformatted event queue
//! counter data element
//! @note the counter size depends on the macro #QF_EQUEUE_CTR_SIZE.
#define QS_EQC_PRE_(ctr_) \
QS::u8_raw_(static_cast<std::uint8_t>(ctr_))
#elif (QF_EQUEUE_CTR_SIZE == 2U)
#define QS_EQC_PRE_(ctr_) \
QS::u16_raw_(static_cast<std::uint16_t>(ctr_))
#elif (QF_EQUEUE_CTR_SIZE == 4U)
#define QS_EQC_PRE_(ctr_) \
QS::u32_raw_(static_cast<std::uint32_t>(ctr_))
#else
#error "QF_EQUEUE_CTR_SIZE not defined"
#endif
#if (QF_EVENT_SIZ_SIZE == 1U)
//! Internal QS macro to output an unformatted event size
//! data element
//!
//! @note the event size depends on the macro #QF_EVENT_SIZ_SIZE.
#define QS_EVS_PRE_(size_) \
QS::u8_raw_(static_cast<std::uint8_t>(size_))
#elif (QF_EVENT_SIZ_SIZE == 2U)
#define QS_EVS_PRE_(size_) \
QS::u16_raw_(static_cast<std::uint16_t>(size_))
#elif (QF_EVENT_SIZ_SIZE == 4U)
#define QS_EVS_PRE_(size_) \
QS::u32_raw_(static_cast<std::uint32_t>(size_))
#endif
#if (QF_MPOOL_SIZ_SIZE == 1U)
//! Internal QS macro to output an unformatted memory pool
//! block-size data element
//! @note the block-size depends on the macro #QF_MPOOL_SIZ_SIZE.
#define QS_MPS_PRE_(size_) \
QS::u8_raw_(static_cast<std::uint8_t>(size_))
#elif (QF_MPOOL_SIZ_SIZE == 2U)
#define QS_MPS_PRE_(size_) \
QS::u16_raw_(static_cast<std::uint16_t>(size_))
#elif (QF_MPOOL_SIZ_SIZE == 4U)
#define QS_MPS_PRE_(size_) \
QS::u32_raw_(static_cast<std::uint32_t>(size_))
#endif
#if (QF_MPOOL_CTR_SIZE == 1U)
//! Internal QS macro to output an unformatted memory pool
//! block-counter data element
//! @note the counter size depends on the macro #QF_MPOOL_CTR_SIZE.
#define QS_MPC_PRE_(ctr_) \
QS::u8_raw_(static_cast<std::uint8_t>(ctr_))
#elif (QF_MPOOL_CTR_SIZE == 2U)
#define QS_MPC_PRE_(ctr_) \
QS::u16_raw_(static_cast<std::uint16_t>(ctr_))
#elif (QF_MPOOL_CTR_SIZE == 4U)
#define QS_MPC_PRE_(ctr_) \
QS::u32_raw_(static_cast<std::uint32_t>(ctr_))
#endif
#if (QF_TIMEEVT_CTR_SIZE == 1U)
//! Internal QS macro to output an unformatted time event
//! tick-counter data element
//! @note the counter size depends on the macro #QF_TIMEEVT_CTR_SIZE.
#define QS_TEC_PRE_(ctr_) \
QS::u8_raw_(static_cast<std::uint8_t>(ctr_))
#elif (QF_TIMEEVT_CTR_SIZE == 2U)
#define QS_TEC_PRE_(ctr_) \
QS::u16_raw_(static_cast<std::uint16_t>(ctr_))
#elif (QF_TIMEEVT_CTR_SIZE == 4U)
#define QS_TEC_PRE_(ctr_) \
QS::u32_raw_(static_cast<std::uint32_t>(ctr_))
#endif
//! Internal QS macro to cast enumerated QS record number to uint8_t
//!
//! @note Casting from an enum to an unsigned integer violates the MISRA-C++
//! 2008 rules 5-2-7, 5-2-8 and 5-2-9. Encapsulating this violation in a macro
//! allows this specific deviation to be selectively suppressed.
#define QS_REC_NUM_(enum_) (static_cast<std::uint_fast8_t>(enum_))
#endif // QP_INC_QS_PKG_HPP_
//! @file
//! @brief QP/C++ public interface including backwards-compatibility layer
//!
//! @details
//! This header file must be included directly or indirectly
//! in all application modules (*.cpp files) that use QP/C++.
#ifndef QP_INC_QPCPP_HPP_
#define QP_INC_QPCPP_HPP_
//============================================================================
#include "qf_port.hpp" // QF/C++ port from the port directory
#include "qassert.h" // QP assertions
#ifdef Q_SPY // software tracing enabled?
#include "qs_port.hpp" // QS/C++ port from the port directory
#else
#include "qs_dummy.hpp" // QS/C++ dummy (inactive) interface
#endif
//============================================================================
#ifndef QP_API_VERSION
//! Macro that specifies the backwards compatibility with the
//! QP/C++ API version.
//!
//! @details
//! For example, QP_API_VERSION=540 causes generation of the compatibility
//! layer for QP/C++ version 5.4.0 and newer, but not older than 5.4.0.
//! QP_API_VERSION=0 causes generation of the compatibility layer "from the
//! beginning of time", which is the maximum backwards compatibility. This is
//! the default.<br>
//!
//! Conversely, QP_API_VERSION=9999 means that no compatibility layer should
//! be generated. This setting is useful for checking if an application
//! complies with the latest QP/C++ API.
#define QP_API_VERSION 0
#endif // QP_API_VERSION
// QP/C++ API compatibility layer...
#if (QP_API_VERSION < 700)
//! @deprecated use QP::QF::NO_MARGIN instead
#define QF_NO_MARGIN QP::QF::NO_MARGIN
//! @deprecated plain 'char' is no longer forbidden in MISRA/AUTOSAR-C++
using char_t = char;
//============================================================================
#if (QP_API_VERSION < 691)
//! @deprecated enable the QS global filter
#define QS_FILTER_ON(rec_) QS_GLB_FILTER((rec_))
//! @deprecated disable the QS global filter
#define QS_FILTER_OFF(rec_) QS_GLB_FILTER(-(rec_))
//! @deprecated enable the QS local filter for SM (state machine) object
#define QS_FILTER_SM_OBJ(obj_) (static_cast<void>(0))
//! @deprecated enable the QS local filter for AO (active objects)
#define QS_FILTER_AO_OBJ(obj_) (static_cast<void>(0))
//! @deprecated enable the QS local filter for MP (memory pool) object
#define QS_FILTER_MP_OBJ(obj_) (static_cast<void>(0))
//! @deprecated enable the QS local filter for EQ (event queue) object
#define QS_FILTER_EQ_OBJ(obj_) (static_cast<void>(0))
//! @deprecated enable the QS local filter for TE (time event) object
#define QS_FILTER_TE_OBJ(obj_) (static_cast<void>(0))
#ifdef Q_SPY
//! @deprecated local Filter for a generic application object `obj_`.
#define QS_FILTER_AP_OBJ(obj_) \
(QP::QS::priv_.locFilter_AP = (obj_))
//! @deprecated begin of a user QS record, instead use QS_BEGIN_ID()
#define QS_BEGIN(rec_, obj_) \
if (QS_GLB_FILTER_(rec_) && \
((QP::QS::priv_.locFilter[QP::QS::AP_OBJ] == nullptr) \
|| (QP::QS::priv_.locFilter_AP == (obj_)))) \
{ \
QS_CRIT_STAT_ \
QS_CRIT_E_(); \
QP::QS::beginRec_(static_cast<std::uint_fast8_t>(rec_)); \
QS_TIME_PRE_();
//! @deprecated output hex-formatted std::uint32_t to the QS record
#define QS_U32_HEX(width_, data_) \
(QP::QS::u32_fmt_(static_cast<std::uint8_t>( \
(static_cast<std::uint8_t>((width_) << 4)) | QS_HEX_FMT), (data_)))
#else
#define QS_FILTER_AP_OBJ(obj_) (static_cast<void>(0))
#define QS_BEGIN(rec_, obj_) if (false) {
#define QS_U32_HEX(width_, data_) (Q_UNUSED_PAR(0))
#endif // def Q_SPY
//============================================================================
#if (QP_API_VERSION < 680)
//! @deprecated
//! Macro to specify a transition in the "me->" impl-strategy.
//! Instead use the new impl-strategy without the "me->" pointer, where
//! you call tran(Q_STATE_CAST(target_)).
#define Q_TRAN(target_) (me->tran(Q_STATE_CAST(target_)))
//! @deprecated
//! Macro to specify a tran-to-history in the "me->" impl-strategy.
//! Instead use the new impl-strategy without the "me->" pointer, where
//! you call tran_hist(Q_STATE_CAST(hist_)).
#define Q_TRAN_HIST(hist_) (me->tran_hist((hist_)))
//! @deprecated
//! Macro to specify the superstate in the "me->" impl-strategy.
//! Instead use the new impl-strategy without the "me->" pointer, where
//! you call super(state_)).
#define Q_SUPER(state_) (me->super(Q_STATE_CAST(state_)))
//! @deprecated
//! Macro to call in a QM state entry-handler. Applicable only to QMSMs.
//! Instead use the new impl-strategy without the "me->" pointer, where
//! the QM-generated code calls qm_entry(Q_STATE_CAST(state_)).
#define QM_ENTRY(state_) (me->qm_entry((state_)))
//! @deprecated
//! Macro to call in a QM state exit-handler. Applicable only to QMSMs.
//! Instead use the new impl-strategy without the "me->" pointer, where
//! the QM-generated code calls qm_exit(Q_STATE_CAST(state_)).
#define QM_EXIT(state_) (me->qm_exit((state_)))
//! @deprecated
//! Macro to call in a QM submachine exit-handler. Applicable only to QMSMs.
//! Instead use the new impl-strategy without the "me->" pointer, where
//! the QM-generated code calls qm_sm_exit(Q_STATE_CAST(state_)).
#define QM_SM_EXIT(state_) (me->qm_sm_exit((state_)))
//! @deprecated
//! Macro to call in a QM state-handler when it executes a transition.
//! Instead use the new impl-strategy without the "me->" pointer, where
//! the QM-generated code calls qm_tran((tatbl_)).
#define QM_TRAN(tatbl_) (me->qm_tran((tatbl_)))
//! @deprecated
//! Macro to call in a QM state-handler when it executes an initial tran.
//! Instead use the new impl-strategy without the "me->" pointer, where
//! the QM-generated code calls qm_tran_init((tatbl_)).
#define QM_TRAN_INIT(tatbl_) (me->qm_tran_init((tatbl_)))
//! @deprecated
//! Macro to call in a QM state-handler when it executes a tran-to-history.
//! Instead use the new impl-strategy without the "me->" pointer, where
//! the QM-generated code calls qm_tran_hist((history_), (tatbl_)).
#define QM_TRAN_HIST(history_, tatbl_) \
(me->qm_tran_hist((history_), (tatbl_)))
//! @deprecated
//! Macro to call in a QM state-handler when it executes an initial tran.
//! Instead use the new impl-strategy without the "me->" pointer, where
//! the QM-generated code calls qm_tran_ep((tatbl_)).
#define QM_TRAN_EP(tatbl_) (me->qm_tran_ep((tatbl_)))
//! @deprecated
//! Macro to call in a QM state-handler when it executes a tran-to-exit-point.
//! Instead use the new impl-strategy without the "me->" pointer, where
//! the QM-generated code calls qm_tran_xp((xp_), (tatbl_)).
#define QM_TRAN_XP(xp_, tatbl_) (me->qm_tran_xp((xp_), (tatbl_)))
//! @deprecated
//! Designates the superstate of a given state in a subclass of QP::QMsm.
//! Instead use the new impl-strategy without the "me->" pointer, where
//! the QM-generated code calls qm_super_sub((state_)).
#define QM_SUPER_SUB(state_) (me->qm_super_sub((state_)))
#endif // QP_API_VERSION < 680
#endif // QP_API_VERSION < 691
#endif // QP_API_VERSION < 700
#endif // QP_INC_QPCPP_HPP_
//! @file
//! @brief Application build time-stamp interface
#ifndef QP_INC_QSTAMP_HPP_
#define QP_INC_QSTAMP_HPP_
namespace QP {
extern char const BUILD_DATE[12];
extern char const BUILD_TIME[9];
} // namespace QP
#endif // QP_INC_QSTAMP_HPP_
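// A sketch of how an application typically defines the build time-stamp
// (e.g., in a qstamp.cpp translation unit); shown here for illustration only:
//
//   #include "qstamp.hpp"
//   namespace QP {
//   char const BUILD_DATE[12] = __DATE__; // "Mmm dd yyyy"
//   char const BUILD_TIME[9]  = __TIME__; // "hh:mm:ss"
//   } // namespace QP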
//! @file
//! @brief QP::QHsm implementation
//!
//! @tr{RQP103} @tr{RQP104} @tr{RQP120} @tr{RQP130}
#define QP_IMPL // this is QP implementation
#include "qep_port.hpp" // QEP port
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
#include "qassert.h" // QP embedded systems-friendly assertions
//============================================================================
$define ${QEP::versionStr[]}
//============================================================================
// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qep_hsm")
//----------------------------------------------------------------------------
//! Immutable events corresponding to the reserved signals.
//!
//! @details
//! Static, immutable reserved events that the QEP event processor sends
//! to state handler functions of a QHsm-style state machine to execute entry
//! actions, exit actions, and initial transitions.
//!
static QP::QEvt const l_reservedEvt_[4] {
#ifdef Q_EVT_CTOR // Is the QEvt constructor provided?
QP::QEvt(static_cast<QP::QSignal>(QP::QHsm::Q_EMPTY_SIG), 0U),
QP::QEvt(static_cast<QP::QSignal>(QP::QHsm::Q_ENTRY_SIG), 0U),
QP::QEvt(static_cast<QP::QSignal>(QP::QHsm::Q_EXIT_SIG), 0U),
QP::QEvt(static_cast<QP::QSignal>(QP::QHsm::Q_INIT_SIG), 0U)
#else // QEvt is a POD (Plain Old Datatype)
{ static_cast<QP::QSignal>(QP::QHsm::Q_EMPTY_SIG), 0U, 0U },
{ static_cast<QP::QSignal>(QP::QHsm::Q_ENTRY_SIG), 0U, 0U },
{ static_cast<QP::QSignal>(QP::QHsm::Q_EXIT_SIG), 0U, 0U },
{ static_cast<QP::QSignal>(QP::QHsm::Q_INIT_SIG), 0U, 0U }
#endif
};
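// NOTE: the l_reservedEvt_[] array is indexed directly by the reserved
// signal value (Q_EMPTY_SIG..Q_INIT_SIG), see hsm_reservedEvt_() below.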
//----------------------------------------------------------------------------
// inline helper functions
//............................................................................
//! Helper function to trigger a reserved event in a QHsm
//!
//! @param[in] state state handler function
//! @param[in] sig reserved signal to trigger
static inline QP::QState hsm_reservedEvt_(
QP::QHsm * const me,
QP::QStateHandler const state,
enum QP::QHsm::ReservedSig const sig)
{
return (*state)(me, &l_reservedEvt_[sig]);
}
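// Usage sketch (illustrative, hypothetical variable names): the QEP event
// processor triggers entry/exit/init actions through this helper, e.g.:
//   QP::QState r = hsm_reservedEvt_(me, s, QP::QHsm::Q_INIT_SIG);
// where the returned value 'r' indicates how the state handler 's'
// responded to the reserved Q_INIT_SIG event.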
//............................................................................
//! Helper function to execute entry into a given state in a
//! hierarchical state machine (HSM).
//!
//! @param[in] state state handler function
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
static inline void hsm_state_entry_(
QP::QHsm * const me,
QP::QStateHandler const state,
std::uint_fast8_t const qs_id)
{
#ifdef Q_SPY
if ((*state)(me, &l_reservedEvt_[QP::QHsm::Q_ENTRY_SIG])
== QP::QHsm::Q_RET_HANDLED)
{
QS_CRIT_STAT_
QS_BEGIN_PRE_(QP::QS_QEP_STATE_ENTRY, qs_id)
QS_OBJ_PRE_(me);
QS_FUN_PRE_(state);
QS_END_PRE_()
}
#else
Q_UNUSED_PAR(qs_id);
static_cast<void>((*state)(me, &l_reservedEvt_[QP::QHsm::Q_ENTRY_SIG]));
#endif // Q_SPY
}
//............................................................................
//! Helper function to execute exit from a given state in a
//! hierarchical state machine (HSM).
//!
//! @param[in] state state handler function
//! @param[in] qs_id QS-id of this state machine (for QS local filter)
//!
//! @returns
//! 'true' if the exit action has been found in the state and
//! 'false' otherwise.
static inline bool hsm_state_exit_(
QP::QHsm * const me,
QP::QStateHandler const state,
std::uint_fast8_t const qs_id)
{
#ifdef Q_SPY
bool isHandled;
if ((*state)(me, &l_reservedEvt_[QP::QHsm::Q_EXIT_SIG])
== QP::QHsm::Q_RET_HANDLED)
{
QS_CRIT_STAT_
QS_BEGIN_PRE_(QP::QS_QEP_STATE_EXIT, qs_id)
QS_OBJ_PRE_(me);
QS_FUN_PRE_(state);
QS_END_PRE_()
isHandled = true;
}
else {
isHandled = false;
}
return isHandled;
#else
Q_UNUSED_PAR(qs_id);
return ((*state)(me, &l_reservedEvt_[QP::QHsm::Q_EXIT_SIG])
== QP::QHsm::Q_RET_HANDLED);
#endif // Q_SPY
}
} // unnamed namespace
$define ${QEP::QHsm}
//! @file
//! @brief QP::QMsm implementation
#define QP_IMPL // this is QP implementation
#include "qep_port.hpp" // QEP port
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
#include "qassert.h" // QP embedded systems-friendly assertions
// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qep_msm")
} // unnamed namespace
//============================================================================
$define ${QEP::QMsm}
//! @file
//! @deprecated
//! Empty file kept only for backwards compatibility.
//! @sa qf_qact.cpp
extern char const dummy; // declaration
char const dummy = '\0'; // definition
//! @file
//! @brief QP::QActive native queue operations (based on QP::QEQueue)
//!
//! @attention
//! This qf_actq.cpp source file is included in the build only when the
//! macro #QF_EQUEUE_TYPE is defined as QEQueue, which means that the QP
//! port uses the native QP::QEQueue class for the event queues of active
//! objects, so this implementation applies to that port.
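//!
//! For example, a native QF port that builds this file typically provides
//! a definition like the following in its qf_port.hpp (illustrative; the
//! exact definition is port-specific):
//! `#define QF_EQUEUE_TYPE QEQueue`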
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope interface
#include "qassert.h" // QP embedded systems-friendly assertions
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
//============================================================================
// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qf_actq")
} // unnamed namespace
//============================================================================
$define ${QF::QActive::post_}
$define ${QF::QActive::postLIFO}
$define ${QF::QActive::get_}
$define ${QF::QF-base::getQueueMin}
//============================================================================
$define ${QF::QTicker}
//! @file
//! @brief QActive::defer(), QActive::recall(), and
//! QActive::flushDeferred() definitions.
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope interface
#include "qassert.h" // QP embedded systems-friendly assertions
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qf_defer")
} // unnamed namespace
$define ${QF::QActive::defer}
$define ${QF::QActive::recall}
$define ${QF::QActive::flushDeferred}
//! @file
//! @brief QF/C++ dynamic event management
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope interface
#include "qassert.h" // QP embedded systems-friendly assertions
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
#if (QF_MAX_EPOOL > 0U) // dynamic events configured?
// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qf_dyn")
} // unnamed namespace
//============================================================================
$define ${QF::QF-pkg::maxPool_}
$define ${QF::QF-pkg::ePool_[QF_MAX_EPOOL]}
//============================================================================
$define ${QF::QF-dyn}
#endif // (QF_MAX_EPOOL > 0U) dynamic events configured
//! @file
//! @brief QF/C++ memory management services
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope interface
#include "qassert.h" // QP embedded systems-friendly assertions
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qf_mem")
} // unnamed namespace
$define ${QF::QMPool}
//! @file
//! @brief QP::QActive services and QF support code
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope interface
#include "qassert.h" // QP embedded systems-friendly assertions
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
//============================================================================
namespace { // unnamed local namespace
Q_DEFINE_THIS_MODULE("qf_qact")
} // unnamed namespace
//============================================================================
$define ${QF::QActive::registry_[QF_MAX_ACTIVE + 1U]}
$define ${QF::QF-base::intLock_}
$define ${QF::QF-base::intNest_}
$define ${QF::QF-pkg::readySet_}
$define ${QF::QF-pkg::bzero}
//============================================================================
$define ${QF::QActive::QActive}
$define ${QF::QActive::register_}
$define ${QF::QActive::unregister_}
//============================================================================
$define ${QF-types::QPSet::QF_LOG2}
//! @file
//! @brief QMActive::QMActive() and virtual functions
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qassert.h" // QP embedded systems-friendly assertions
//! Internal macro to cast a QP::QMActive pointer `qact_` to QP::QMsm*
//! @note
//! Casting pointer to pointer violates MISRA-C++ 2008 Rule 5-2-7,
//! cast from pointer to pointer. Additionally, this cast violates
//! MISRA-C++ 2008 Rule 5-2-8, unusual pointer cast (incompatible indirect
//! types). Encapsulating these violations in a macro allows this specific
//! deviation to be suppressed selectively.
#define QF_QMACTIVE_TO_QMSM_CAST_(qact_) \
reinterpret_cast<QMsm *>((qact_))
//! Internal macro to cast a QP::QMActive pointer `qact_` to QP::QMsm const *
#define QF_QMACTIVE_TO_QMSM_CONST_CAST_(qact_) \
reinterpret_cast<QMsm const *>((qact_))
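// Usage sketch (illustrative): the QMActive virtual functions delegate to
// the corresponding QMsm implementations through these casts, e.g.:
//   QF_QMACTIVE_TO_QMSM_CAST_(this)->QMsm::init(e, qs_id);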
// unnamed namespace for local definitions with internal linkage
namespace {
//Q_DEFINE_THIS_MODULE("qf_qmact")
} // unnamed namespace
$define ${QF::QMActive}
//! @file
//! @brief QP::QEQueue implementation
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope interface
#include "qassert.h" // QP embedded systems-friendly assertions
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qf_qeq")
} // unnamed namespace
$define ${QF::QEQueue}
//! @file
//! @brief QF/C++ Publish-Subscribe services
//! definitions.
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope interface
#include "qassert.h" // QP embedded systems-friendly assertions
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qf_ps")
} // unnamed namespace
$define ${QF::QActive::subscrList_}
$define ${QF::QActive::maxPubSignal_}
$define ${QF::QActive::psInit}
$define ${QF::QActive::publish_}
$define ${QF::QActive::subscribe}
$define ${QF::QActive::unsubscribe}
$define ${QF::QActive::unsubscribeAll}
//! @file
//! @brief QF/C++ time events and time management services
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope interface
#include "qassert.h" // QP embedded systems-friendly assertions
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qf_time")
} // unnamed namespace
$define ${QF::QTimeEvt}
//! @file
//! @brief Cooperative QV kernel implementation.
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope internal interface
#include "qassert.h" // QP embedded systems-friendly assertions
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
// protection against including this source file in a wrong project
#ifndef QP_INC_QV_HPP_
#error "Source file included in a project NOT based on the QV kernel"
#endif // QP_INC_QV_HPP_
//============================================================================
namespace { // unnamed local namespace
Q_DEFINE_THIS_MODULE("qv")
} // unnamed namespace
//============================================================================
$define ${QV::QV-base}
$define ${QV::QF-cust}
$define ${QV::QActive}
//! @file
//! @brief QK/C++ preemptive kernel core functions
#define QP_IMPL // this is QF/QK implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope internal interface
#include "qassert.h" // QP assertions
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
// protection against including this source file in a wrong project
#ifndef QP_INC_QK_HPP_
#error "Source file included in a project NOT based on the QK kernel"
#endif // QP_INC_QK_HPP_
// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qk")
} // unnamed namespace
//============================================================================
$define ${QK::QK-base}
$define ${QK::QF-cust}
$define ${QK::QActive}
//============================================================================
extern "C" {
$define ${QK-extern-C}
} // extern "C"
//! @file
//! @brief QXK/C++ preemptive kernel core functions
//! public interface.
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope internal interface
#include "qassert.h" // QP embedded systems-friendly assertions
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
// protection against including this source file in a wrong project
#ifndef QP_INC_QXK_HPP_
#error "Source file included in a project NOT based on the QXK kernel"
#endif // QP_INC_QXK_HPP_
//============================================================================
namespace { // unnamed local namespace
Q_DEFINE_THIS_MODULE("qxk")
} // unnamed namespace
//============================================================================
$define ${QXK::QXK-base}
$define ${QXK::QF-cust}
$define ${QXK::QActive}
//============================================================================
extern "C" {
$define ${QXK-extern-C}
} // extern "C"
//! @file
//! @brief Priority-ceiling blocking mutex QP::QXMutex class definition
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope internal interface
#include "qassert.h" // QP embedded systems-friendly assertions
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
// protection against including this source file in a wrong project
#ifndef QP_INC_QXK_HPP_
#error "Source file included in a project NOT based on the QXK kernel"
#endif // QP_INC_QXK_HPP_
//============================================================================
namespace { // unnamed local namespace
Q_DEFINE_THIS_MODULE("qxk_mutex")
} // unnamed namespace
$define ${QXK::QXMutex}
//! @file
//! @brief QXK/C++ preemptive kernel counting semaphore implementation
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope internal interface
#include "qassert.h" // QP embedded systems-friendly assertions
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
// protection against including this source file in a wrong project
#ifndef QP_INC_QXK_HPP_
#error "Source file included in a project NOT based on the QXK kernel"
#endif // QP_INC_QXK_HPP_
namespace { // unnamed local namespace
Q_DEFINE_THIS_MODULE("qxk_sema")
} // unnamed namespace
//============================================================================
$define ${QXK::QXSemaphore}
//! @file
//! @brief QXK/C++ preemptive kernel extended (blocking) thread implementation
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope internal interface
#include "qassert.h" // QP embedded systems-friendly assertions
#ifdef Q_SPY // QS software tracing enabled?
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS facilities for pre-defined trace records
#else
#include "qs_dummy.hpp" // disable the QS software tracing
#endif // Q_SPY
// protection against including this source file in a wrong project
#ifndef QP_INC_QXK_HPP_
#error "Source file included in a project NOT based on the QXK kernel"
#endif // QP_INC_QXK_HPP_
//============================================================================
namespace { // unnamed local namespace
Q_DEFINE_THIS_MODULE("qxk_xthr")
} // unnamed namespace
//============================================================================
$define ${QXK::QXThread}
//! @file
//! @brief QS software tracing services
#define QP_IMPL // this is QP implementation
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS package-scope internal interface
#include "qstamp.hpp" // QP time-stamp
#include "qassert.h" // QP assertions
// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qs")
} // unnamed namespace
$define ${QS::QS-tx}
//! @file
//! @brief QS long-long (64-bit) output
#define QP_IMPL // this is QF/QK implementation
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS package-scope internal interface
$define ${QS::QS-tx-64bit}
//! @file
//! @brief QS floating point output implementation
#define QP_IMPL // this is QF/QK implementation
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS package-scope internal interface
$define ${QS::QS-tx-fp}
//! @file
//! @brief QS receive channel services
#define QP_IMPL // this is QP implementation
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS package-scope internal interface
#include "qf_pkg.hpp" // QF package-scope internal interface
#include "qassert.h" // QP assertions
static_assert(QP::QS::MAX_OBJ <= 8U, "QS::MAX_OBJ exceeds the limit of 8U");
//============================================================================
namespace { // unnamed local namespace
Q_DEFINE_THIS_MODULE("qs_rx")
//............................................................................
#if (QS_OBJ_PTR_SIZE == 1U)
using QSObj = std::uint8_t;
#elif (QS_OBJ_PTR_SIZE == 2U)
using QSObj = std::uint16_t;
#elif (QS_OBJ_PTR_SIZE == 4U)
using QSObj = std::uint32_t;
#elif (QS_OBJ_PTR_SIZE == 8U)
using QSObj = std::uint64_t;
#endif
//! @cond
//! Exclude the following internals from the Doxygen documentation
//! Extended-state variables used for parsing various QS-RX Records
struct CmdVar {
std::uint32_t param1;
std::uint32_t param2;
std::uint32_t param3;
std::uint8_t idx;
std::uint8_t cmdId;
};
struct TickVar {
std::uint_fast8_t rate;
};
struct PeekVar {
std::uint16_t offs;
std::uint8_t size;
std::uint8_t num;
std::uint8_t idx;
};
struct PokeVar {
std::uint32_t data;
std::uint16_t offs;
std::uint8_t size;
std::uint8_t num;
std::uint8_t idx;
std::uint8_t fill;
};
struct FltVar {
std::uint8_t data[16];
std::uint8_t idx;
std::uint8_t recId; // global/local
};
struct ObjVar {
QSObj addr;
std::uint8_t idx;
std::uint8_t kind; // see qs.hpp, enum QSpyObjKind
std::uint8_t recId;
};
struct EvtVar {
QP::QEvt *e;
std::uint8_t *p;
QP::QSignal sig;
std::uint16_t len;
std::uint8_t prio;
std::uint8_t idx;
};
// extended-state variables for the current QS-RX state
static struct ExtState {
union Variant {
CmdVar cmd;
TickVar tick;
PeekVar peek;
PokeVar poke;
FltVar flt;
ObjVar obj;
EvtVar evt;
#ifdef Q_UTEST
QP::QS::TProbe tp;
#endif // Q_UTEST
} var;
std::uint8_t state;
std::uint8_t esc;
std::uint8_t seq;
std::uint8_t chksum;
} l_rx;
enum RxStateEnum : std::uint8_t {
ERROR_STATE,
WAIT4_SEQ,
WAIT4_REC,
WAIT4_INFO_FRAME,
WAIT4_CMD_ID,
WAIT4_CMD_PARAM1,
WAIT4_CMD_PARAM2,
WAIT4_CMD_PARAM3,
WAIT4_CMD_FRAME,
WAIT4_RESET_FRAME,
WAIT4_TICK_RATE,
WAIT4_TICK_FRAME,
WAIT4_PEEK_OFFS,
WAIT4_PEEK_SIZE,
WAIT4_PEEK_NUM,
WAIT4_PEEK_FRAME,
WAIT4_POKE_OFFS,
WAIT4_POKE_SIZE,
WAIT4_POKE_NUM,
WAIT4_POKE_DATA,
WAIT4_POKE_FRAME,
WAIT4_FILL_DATA,
WAIT4_FILL_FRAME,
WAIT4_FILTER_LEN,
WAIT4_FILTER_DATA,
WAIT4_FILTER_FRAME,
WAIT4_OBJ_KIND,
WAIT4_OBJ_ADDR,
WAIT4_OBJ_FRAME,
WAIT4_QUERY_KIND,
WAIT4_QUERY_FRAME,
WAIT4_EVT_PRIO,
WAIT4_EVT_SIG,
WAIT4_EVT_LEN,
WAIT4_EVT_PAR,
WAIT4_EVT_FRAME,
#ifdef Q_UTEST
WAIT4_TEST_SETUP_FRAME,
WAIT4_TEST_TEARDOWN_FRAME,
WAIT4_TEST_PROBE_DATA,
WAIT4_TEST_PROBE_ADDR,
WAIT4_TEST_PROBE_FRAME,
WAIT4_TEST_CONTINUE_FRAME,
#endif // Q_UTEST
};
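// NOTE: rxParseData_() below parses the payload of the QS-RX records
// listed above, one byte at a time. The framing layer (defined in
// ${QS::QS-rx}) validates each frame and calls rxHandleBadFrame_() when a
// frame fails its integrity checks.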
// internal helper functions...
static void rxParseData_(std::uint8_t const b) noexcept;
static void rxHandleBadFrame_(std::uint8_t const state) noexcept;
static void rxReportAck_(enum QP::QSpyRxRecords const recId) noexcept;
static void rxReportError_(std::uint8_t const code) noexcept;
static void rxReportDone_(enum QP::QSpyRxRecords const recId) noexcept;
static void rxPoke_(void) noexcept;
//! Internal QS-RX function to take a transition in the QS-RX FSM
static inline void tran_(RxStateEnum const target) noexcept {
l_rx.state = static_cast<std::uint8_t>(target);
}
//! @endcond
} // unnamed namespace
//============================================================================
$define ${QS::QS-rx}
//============================================================================
namespace { // unnamed local namespace
//............................................................................
static void rxParseData_(std::uint8_t const b) noexcept {
switch (l_rx.state) {
case WAIT4_SEQ: {
++l_rx.seq;
if (l_rx.seq != b) { // not the expected sequence?
rxReportError_(0x42U);
l_rx.seq = b; // update the sequence
}
tran_(WAIT4_REC);
break;
}
case WAIT4_REC: {
switch (b) {
case QP::QS_RX_INFO:
tran_(WAIT4_INFO_FRAME);
break;
case QP::QS_RX_COMMAND:
tran_(WAIT4_CMD_ID);
break;
case QP::QS_RX_RESET:
tran_(WAIT4_RESET_FRAME);
break;
case QP::QS_RX_TICK:
tran_(WAIT4_TICK_RATE);
break;
case QP::QS_RX_PEEK:
if (QP::QS::rxPriv_.currObj[QP::QS::AP_OBJ] != nullptr) {
l_rx.var.peek.offs = 0U;
l_rx.var.peek.idx = 0U;
tran_(WAIT4_PEEK_OFFS);
}
else {
rxReportError_(
static_cast<std::uint8_t>(QP::QS_RX_PEEK));
tran_(ERROR_STATE);
}
break;
case QP::QS_RX_POKE:
case QP::QS_RX_FILL:
l_rx.var.poke.fill =
(b == static_cast<std::uint8_t>(QP::QS_RX_FILL))
? 1U
: 0U;
if (QP::QS::rxPriv_.currObj[QP::QS::AP_OBJ] != nullptr) {
l_rx.var.poke.offs = 0U;
l_rx.var.poke.idx = 0U;
tran_(WAIT4_POKE_OFFS);
}
else {
rxReportError_(
(l_rx.var.poke.fill != 0U)
? static_cast<std::uint8_t>(QP::QS_RX_FILL)
: static_cast<std::uint8_t>(QP::QS_RX_POKE));
tran_(ERROR_STATE);
}
break;
case QP::QS_RX_GLB_FILTER: // intentionally fall-through
case QP::QS_RX_LOC_FILTER:
l_rx.var.flt.recId = b;
tran_(WAIT4_FILTER_LEN);
break;
case QP::QS_RX_AO_FILTER: // intentionally fall-through
case QP::QS_RX_CURR_OBJ:
l_rx.var.obj.recId = b;
tran_(WAIT4_OBJ_KIND);
break;
case QP::QS_RX_QUERY_CURR:
l_rx.var.obj.recId =
static_cast<std::uint8_t>(QP::QS_RX_QUERY_CURR);
tran_(WAIT4_QUERY_KIND);
break;
case QP::QS_RX_EVENT:
tran_(WAIT4_EVT_PRIO);
break;
#ifdef Q_UTEST
case QP::QS_RX_TEST_SETUP:
tran_(WAIT4_TEST_SETUP_FRAME);
break;
case QP::QS_RX_TEST_TEARDOWN:
tran_(WAIT4_TEST_TEARDOWN_FRAME);
break;
case QP::QS_RX_TEST_CONTINUE:
tran_(WAIT4_TEST_CONTINUE_FRAME);
break;
case QP::QS_RX_TEST_PROBE:
if (QP::QS::testData.tpNum
< static_cast<std::uint8_t>(
(sizeof(QP::QS::testData.tpBuf)
/ sizeof(QP::QS::testData.tpBuf[0]))))
{
l_rx.var.tp.data = 0U;
l_rx.var.tp.idx = 0U;
tran_(WAIT4_TEST_PROBE_DATA);
}
else { // the number of Test-Probes exceeded
rxReportError_(
static_cast<std::uint8_t>(QP::QS_RX_TEST_PROBE));
tran_(ERROR_STATE);
}
break;
#endif // Q_UTEST
default:
rxReportError_(0x43U);
tran_(ERROR_STATE);
break;
}
break;
}
case WAIT4_INFO_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
case WAIT4_CMD_ID: {
l_rx.var.cmd.cmdId = b;
l_rx.var.cmd.idx = 0U;
l_rx.var.cmd.param1 = 0U;
l_rx.var.cmd.param2 = 0U;
l_rx.var.cmd.param3 = 0U;
tran_(WAIT4_CMD_PARAM1);
break;
}
case WAIT4_CMD_PARAM1: {
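// accumulate the 32-bit command parameter 1 in little-endian byte order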
l_rx.var.cmd.param1 |=
(static_cast<std::uint32_t>(b) << l_rx.var.cmd.idx);
l_rx.var.cmd.idx += 8U;
if (l_rx.var.cmd.idx == (8U*4U)) {
l_rx.var.cmd.idx = 0U;
tran_(WAIT4_CMD_PARAM2);
}
break;
}
case WAIT4_CMD_PARAM2: {
l_rx.var.cmd.param2 |=
static_cast<std::uint32_t>(b) << l_rx.var.cmd.idx;
l_rx.var.cmd.idx += 8U;
if (l_rx.var.cmd.idx == (8U*4U)) {
l_rx.var.cmd.idx = 0U;
tran_(WAIT4_CMD_PARAM3);
}
break;
}
case WAIT4_CMD_PARAM3: {
l_rx.var.cmd.param3 |=
static_cast<std::uint32_t>(b) << l_rx.var.cmd.idx;
l_rx.var.cmd.idx += 8U;
if (l_rx.var.cmd.idx == (8U*4U)) {
l_rx.var.cmd.idx = 0U;
tran_(WAIT4_CMD_FRAME);
}
break;
}
case WAIT4_CMD_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
case WAIT4_RESET_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
case WAIT4_TICK_RATE: {
l_rx.var.tick.rate = static_cast<std::uint_fast8_t>(b);
tran_(WAIT4_TICK_FRAME);
break;
}
case WAIT4_TICK_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
case WAIT4_PEEK_OFFS: {
if (l_rx.var.peek.idx == 0U) {
l_rx.var.peek.offs = static_cast<std::uint16_t>(b);
l_rx.var.peek.idx += 8U;
}
else {
l_rx.var.peek.offs |= static_cast<std::uint16_t>(
static_cast<std::uint16_t>(b) << 8U);
tran_(WAIT4_PEEK_SIZE);
}
break;
}
case WAIT4_PEEK_SIZE: {
if ((b == 1U) || (b == 2U) || (b == 4U)) {
l_rx.var.peek.size = b;
tran_(WAIT4_PEEK_NUM);
}
else {
rxReportError_(static_cast<std::uint8_t>(QP::QS_RX_PEEK));
tran_(ERROR_STATE);
}
break;
}
case WAIT4_PEEK_NUM: {
l_rx.var.peek.num = b;
tran_(WAIT4_PEEK_FRAME);
break;
}
case WAIT4_PEEK_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
case WAIT4_POKE_OFFS: {
if (l_rx.var.poke.idx == 0U) {
l_rx.var.poke.offs = static_cast<std::uint16_t>(b);
l_rx.var.poke.idx = 1U;
}
else {
l_rx.var.poke.offs |= static_cast<std::uint16_t>(
static_cast<std::uint16_t>(b) << 8U);
tran_(WAIT4_POKE_SIZE);
}
break;
}
case WAIT4_POKE_SIZE: {
if ((b == 1U)
|| (b == 2U)
|| (b == 4U))
{
l_rx.var.poke.size = b;
tran_(WAIT4_POKE_NUM);
}
else {
rxReportError_((l_rx.var.poke.fill != 0U)
? static_cast<std::uint8_t>(QP::QS_RX_FILL)
: static_cast<std::uint8_t>(QP::QS_RX_POKE));
tran_(ERROR_STATE);
}
break;
}
case WAIT4_POKE_NUM: {
if (b > 0U) {
l_rx.var.poke.num = b;
l_rx.var.poke.data = 0U;
l_rx.var.poke.idx = 0U;
tran_((l_rx.var.poke.fill != 0U)
? WAIT4_FILL_DATA
: WAIT4_POKE_DATA);
}
else {
rxReportError_((l_rx.var.poke.fill != 0U)
? static_cast<std::uint8_t>(QP::QS_RX_FILL)
: static_cast<std::uint8_t>(QP::QS_RX_POKE));
tran_(ERROR_STATE);
}
break;
}
case WAIT4_FILL_DATA: {
l_rx.var.poke.data |=
static_cast<std::uint32_t>(b) << l_rx.var.poke.idx;
l_rx.var.poke.idx += 8U;
if ((l_rx.var.poke.idx >> 3U) == l_rx.var.poke.size) {
tran_(WAIT4_FILL_FRAME);
}
break;
}
case WAIT4_POKE_DATA: {
l_rx.var.poke.data |=
static_cast<std::uint32_t>(b) << l_rx.var.poke.idx;
l_rx.var.poke.idx += 8U;
if ((l_rx.var.poke.idx >> 3U) == l_rx.var.poke.size) {
rxPoke_();
--l_rx.var.poke.num;
if (l_rx.var.poke.num == 0U) {
tran_(WAIT4_POKE_FRAME);
}
}
break;
}
case WAIT4_FILL_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
case WAIT4_POKE_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
case WAIT4_FILTER_LEN: {
if (b == static_cast<std::uint8_t>(sizeof(l_rx.var.flt.data))) {
l_rx.var.flt.idx = 0U;
tran_(WAIT4_FILTER_DATA);
}
else {
rxReportError_(l_rx.var.flt.recId);
tran_(ERROR_STATE);
}
break;
}
case WAIT4_FILTER_DATA: {
l_rx.var.flt.data[l_rx.var.flt.idx] = b;
++l_rx.var.flt.idx;
if (l_rx.var.flt.idx == sizeof(l_rx.var.flt.data)) {
tran_(WAIT4_FILTER_FRAME);
}
break;
}
case WAIT4_FILTER_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
case WAIT4_OBJ_KIND: {
if (b <= static_cast<std::uint8_t>(QP::QS::SM_AO_OBJ)) {
l_rx.var.obj.kind = b;
l_rx.var.obj.addr = 0U;
l_rx.var.obj.idx = 0U;
tran_(WAIT4_OBJ_ADDR);
}
else {
rxReportError_(l_rx.var.obj.recId);
tran_(ERROR_STATE);
}
break;
}
case WAIT4_OBJ_ADDR: {
l_rx.var.obj.addr |=
static_cast<QSObj>(b) << l_rx.var.obj.idx;
l_rx.var.obj.idx += 8U;
if (l_rx.var.obj.idx
== (8U * static_cast<unsigned>(QS_OBJ_PTR_SIZE)))
{
tran_(WAIT4_OBJ_FRAME);
}
break;
}
case WAIT4_OBJ_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
case WAIT4_QUERY_KIND: {
if (b < static_cast<std::uint8_t>(QP::QS::MAX_OBJ)) {
l_rx.var.obj.kind = b;
tran_(WAIT4_QUERY_FRAME);
}
else {
rxReportError_(l_rx.var.obj.recId);
tran_(ERROR_STATE);
}
break;
}
case WAIT4_QUERY_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
case WAIT4_EVT_PRIO: {
l_rx.var.evt.prio = b;
l_rx.var.evt.sig = 0U;
l_rx.var.evt.idx = 0U;
tran_(WAIT4_EVT_SIG);
break;
}
case WAIT4_EVT_SIG: {
l_rx.var.evt.sig |= static_cast<QP::QSignal>(
static_cast<std::uint32_t>(b) << l_rx.var.evt.idx);
l_rx.var.evt.idx += 8U;
if (l_rx.var.evt.idx
== (8U * static_cast<unsigned>(Q_SIGNAL_SIZE)))
{
l_rx.var.evt.len = 0U;
l_rx.var.evt.idx = 0U;
tran_(WAIT4_EVT_LEN);
}
break;
}
case WAIT4_EVT_LEN: {
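// accumulate the 16-bit event-parameter length in little-endian byte order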
l_rx.var.evt.len |= static_cast<std::uint16_t>(
static_cast<unsigned>(b) << l_rx.var.evt.idx);
l_rx.var.evt.idx += 8U;
if (l_rx.var.evt.idx == (8U * 2U)) {
if ((l_rx.var.evt.len + sizeof(QP::QEvt))
<= static_cast<std::uint16_t>(
QP::QF::poolGetMaxBlockSize()))
{
// report Ack before generating any other QS records
rxReportAck_(QP::QS_RX_EVENT);
l_rx.var.evt.e = QP::QF::newX_(
(static_cast<std::uint_fast16_t>(l_rx.var.evt.len)
+ sizeof(QP::QEvt)),
0U, // margin
static_cast<enum_t>(l_rx.var.evt.sig));
// event allocated?
if (l_rx.var.evt.e != nullptr) {
l_rx.var.evt.p =
reinterpret_cast<std::uint8_t *>(l_rx.var.evt.e);
l_rx.var.evt.p = &l_rx.var.evt.p[sizeof(QP::QEvt)];
if (l_rx.var.evt.len > 0U) {
tran_(WAIT4_EVT_PAR);
}
else {
tran_(WAIT4_EVT_FRAME);
}
}
else {
rxReportError_(
static_cast<std::uint8_t>(QP::QS_RX_EVENT));
tran_(ERROR_STATE);
}
}
else {
rxReportError_(
static_cast<std::uint8_t>(QP::QS_RX_EVENT));
tran_(ERROR_STATE);
}
}
break;
}
case WAIT4_EVT_PAR: { // event parameters
*l_rx.var.evt.p = b;
++l_rx.var.evt.p;
--l_rx.var.evt.len;
if (l_rx.var.evt.len == 0U) {
tran_(WAIT4_EVT_FRAME);
}
break;
}
case WAIT4_EVT_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
#ifdef Q_UTEST
case WAIT4_TEST_SETUP_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
case WAIT4_TEST_TEARDOWN_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
case WAIT4_TEST_CONTINUE_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
case WAIT4_TEST_PROBE_DATA: {
l_rx.var.tp.data |=
(static_cast<QP::QSFun>(b) << l_rx.var.tp.idx);
l_rx.var.tp.idx += 8U;
if (l_rx.var.tp.idx == (8U * sizeof(std::uint32_t))) {
l_rx.var.tp.addr = 0U;
l_rx.var.tp.idx = 0U;
tran_(WAIT4_TEST_PROBE_ADDR);
}
break;
}
case WAIT4_TEST_PROBE_ADDR: {
l_rx.var.tp.addr |=
(static_cast<std::uint32_t>(b) << l_rx.var.tp.idx);
l_rx.var.tp.idx += 8U;
if (l_rx.var.tp.idx
== (8U * static_cast<unsigned>(QS_FUN_PTR_SIZE)))
{
tran_(WAIT4_TEST_PROBE_FRAME);
}
break;
}
case WAIT4_TEST_PROBE_FRAME: {
// keep ignoring the data until a frame is collected
break;
}
#endif // Q_UTEST
case ERROR_STATE: {
// keep ignoring the data until a good frame is collected
break;
}
default: { // unexpected or unimplemented state
rxReportError_(0x45U);
tran_(ERROR_STATE);
break;
}
}
}
//............................................................................
static void rxHandleBadFrame_(std::uint8_t const state) noexcept {
rxReportError_(0x50U); // error for all bad frames
switch (state) {
case WAIT4_EVT_FRAME: {
Q_ASSERT_ID(910, l_rx.var.evt.e != nullptr);
#if (QF_MAX_EPOOL > 0U)
QP::QF::gc(l_rx.var.evt.e); // don't leak an allocated event
#endif
break;
}
default: {
break;
}
}
}
//............................................................................
static void rxReportAck_(enum QP::QSpyRxRecords const recId) noexcept {
QS_CRIT_STAT_
QS_CRIT_E_();
QP::QS::beginRec_(static_cast<std::uint_fast8_t>(QP::QS_RX_STATUS));
QS_U8_PRE_(recId); // record ID
QP::QS::endRec_();
QS_CRIT_X_();
QS_REC_DONE(); // user callback (if defined)
}
//............................................................................
static void rxReportError_(std::uint8_t const code) noexcept {
QS_CRIT_STAT_
QS_CRIT_E_();
QP::QS::beginRec_(static_cast<std::uint_fast8_t>(QP::QS_RX_STATUS));
QS_U8_PRE_(0x80U | code); // error code
QP::QS::endRec_();
QS_CRIT_X_();
QS_REC_DONE(); // user callback (if defined)
}
//............................................................................
static void rxReportDone_(enum QP::QSpyRxRecords const recId) noexcept {
QS_CRIT_STAT_
QS_CRIT_E_();
QP::QS::beginRec_(static_cast<std::uint_fast8_t>(QP::QS_TARGET_DONE));
QS_TIME_PRE_(); // timestamp
QS_U8_PRE_(recId); // record ID
QP::QS::endRec_();
QS_CRIT_X_();
QS_REC_DONE(); // user callback (if defined)
}
//............................................................................
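//! Internal QS-RX helper to write the accumulated poke data (1, 2, or 4
//! bytes) into the current Application-Object (AP_OBJ) at the current poke
//! offset and to advance the offset for the next data item.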
static void rxPoke_(void) noexcept {
std::uint8_t * ptr =
static_cast<std::uint8_t *>(QP::QS::rxPriv_.currObj[QP::QS::AP_OBJ]);
ptr = &ptr[l_rx.var.poke.offs];
switch (l_rx.var.poke.size) {
case 1:
*ptr = static_cast<std::uint8_t>(l_rx.var.poke.data);
break;
case 2:
*reinterpret_cast<std::uint16_t *>(ptr)
= static_cast<std::uint16_t>(l_rx.var.poke.data);
break;
case 4:
*reinterpret_cast<std::uint32_t *>(ptr) = l_rx.var.poke.data;
break;
default:
Q_ERROR_ID(900);
break;
}
l_rx.var.poke.data = 0U;
l_rx.var.poke.idx = 0U;
l_rx.var.poke.offs += static_cast<std::uint16_t>(l_rx.var.poke.size);
}
} // unnamed namespace
//! @file
//! @brief QUTest unit testing harness + QF/C++ stub for QUTest
// only build when Q_UTEST is defined
#ifdef Q_UTEST
#define QP_IMPL // this is QP implementation
#include "qf_port.hpp" // QF port
#include "qf_pkg.hpp" // QF package-scope internal interface
#include "qassert.h" // QP embedded systems-friendly assertions
#include "qs_port.hpp" // QS port
#include "qs_pkg.hpp" // QS package-scope internal interface
// unnamed namespace for local definitions with internal linkage
namespace {
Q_DEFINE_THIS_MODULE("qutest")
} // unnamed namespace
//============================================================================
// QUTest unit testing harness
$define ${QUTest}
//============================================================================
namespace QP {
QSTimeCtr QS::onGetTime() {
return (++testData.testTime);
}
} // namespace QP
//============================================================================
extern "C" {
Q_NORETURN Q_onAssert(char const * const module, int_t const location) {
QS_BEGIN_NOCRIT_PRE_(QP::QS_ASSERT_FAIL, 0U)
QS_TIME_PRE_();
QS_U16_PRE_(location);
QS_STR_PRE_((module != nullptr) ? module : "?");
QS_END_NOCRIT_PRE_()
QP::QS::onFlush(); // flush the assertion record to the host
QP::QS::onCleanup(); // cleanup after the failure
QP::QS::onReset(); // reset the target to prevent the code from continuing
for (;;) { // onReset() should not return, but to ensure no-return...
}
}
} // extern "C"
//============================================================================
// QP-stub for QUTest
// NOTE: The QP-stub is needed for unit testing QP applications, but might
// NOT be needed for testing QP itself. In that case, the build process
// can define Q_UTEST=0 to exclude the QP-stub from the build.
//
#if Q_UTEST != 0
$define ${QUTest-stub}
#endif // Q_UTEST != 0
#endif // def Q_UTEST
//! @file
//! @brief Application build time-stamp
//! @note
//! This module needs to be re-compiled in every new software build. To achieve
//! this, it is recommended to delete the object file (qstamp.o or qstamp.obj)
//! in the build directory before each build. (Most development tools allow
//! you to specify a pre-build action, which is the ideal place to delete
//! the qstamp object file.)
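//!
//! An illustrative pre-build action (toolchain-specific, shown only as an
//! example) that forces re-compilation of this module:
//! `rm -f build/qstamp.o`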
#include "qstamp.hpp"
namespace QP {
//! the calendar date of the last translation of the form: "Mmm dd yyyy"
char const BUILD_DATE[12] = __DATE__;
//! the time of the last translation of the form: "hh:mm:ss"
char const BUILD_TIME[9] = __TIME__;
} // namespace QP