QP/C Real-Time Embedded Framework (RTEF)
The model is used to generate the whole QP/C source code.
Copyright (C) 2005 Quantum Leaps, LLC <state-machine.com>.
SPDX-License-Identifier: GPL-3.0-or-later OR LicenseRef-QL-commercial
This software is dual-licensed under the terms of the open source GNU
General Public License version 3 (or any later version), or alternatively,
under the terms of one of the closed source Quantum Leaps commercial
licenses.
The terms of the open source GNU General Public License version 3
can be found at: <www.gnu.org/licenses/gpl-3.0>
The terms of the closed source Quantum Leaps commercial licenses
can be found at: <www.state-machine.com/licensing>
Redistributions in source code must retain this copyright notice.
Plagiarizing this software to sidestep the license obligations is illegal.
Contact information:
<www.state-machine.com/licensing>
<info@state-machine.com>
/*! No-return function specifier for the Q_onAssert() callback function.
*
* The `Q_NORETURN` macro is supposed to be defined in the QP/C port
* (file `qep_port.h`). If such a definition is NOT provided, the default
* definition assumes that Q_onAssert() returns `void`.
*
* @tr{PQA01_4}
*/
void
/*! Define the file name (with `__FILE__`) for assertions in this file
*
* @details
* Macro to be placed at the top of each C/C++ module to define the
* single instance of the file name string to be used in reporting
* assertions in this module.
*
* @note
* The file name string literal is defined by means of the standard
* preprocessor macro `__FILE__`. However, please note that, depending
* on the compiler, the `__FILE__` macro might contain the whole path name
* to the file, which might be inconvenient for logging assertions.
*
* @attention
* This macro should **not** be terminated by a semicolon.
*
* @sa Q_DEFINE_THIS_MODULE()
*/
Q_DEFINE_THIS_MODULE(__FILE__)
/*! Define the user-specified module name for assertions in this file.
*
* @details
* Macro to be placed at the top of each C/C++ module to define the
* single instance of the module name string to be used in reporting
* assertions in this module. This macro takes the user-supplied parameter
* `name_` instead of `__FILE__` to precisely control the name of the
* module.
*
* @param[in] name_ string constant representing the module name
*
* @note
* This macro should **not** be terminated by a semicolon.
*/
\
static char const Q_this_module_[] = name_;
/*! inactive version of Q_DEFINE_THIS_MODULE() */
/*! General purpose assertion with user-specified ID number.
*
* @details
* Makes sure the `expr_` expression is TRUE. Calls the Q_onAssert()
* callback if `expr_` evaluates to FALSE. This assertion takes the
* user-supplied parameter `id_` to identify the location of this
* assertion within the file. This avoids the volatility of using line
* numbers, which change whenever a line of code is added or removed
* upstream from the assertion.
*
* @param[in] id_ ID number (unique within the module) of the assertion
* @param[in] expr_ Boolean expression to check
*
* @note
* The `expr_` expression is **not** evaluated if assertions are
* disabled with the ::Q_NASSERT switch.
*/
((expr_) \
? ((void)0) : Q_onAssert(&Q_this_module_[0], (id_)))
/*! inactive version of Q_ASSERT_ID() */
((void)0)
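/*! @usage
* A minimal usage sketch of the customizable assertions in application code
* (the "table" module name, Table_seat() function and the ID numbers are
* hypothetical, illustrative only, and not part of QP/C):
* @code{.c}
* #include "qpc.h"  // QP/C public interface (includes qassert.h)
*
* Q_DEFINE_THIS_MODULE("table")  // single module-name string for this file
*
* static void Table_seat(uint8_t n) {
*     Q_REQUIRE_ID(100, n < 5U);  // precondition: seat number in range
*     // ... normal processing ...
*     Q_ASSERT_ID(110, n < 5U);   // general assertion with a stable ID
* }
* @endcode
*/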
/*! Assertion with user-specified ID for a wrong path through the code
*
* @details
* Calls the Q_onAssert() callback if ever executed. This assertion
* takes the user-supplied parameter `id_` to identify the location of
* this assertion within the file. This avoids the volatility of using
* line numbers, which change whenever a line of code is added or removed
* upstream from the assertion.
*
* @param[in] id_ ID number (unique within the module) of the assertion
*
* @note
* Does nothing if assertions are disabled with the ::Q_NASSERT switch.
*/
Q_onAssert(&Q_this_module_[0], (id_))
/*! inactive version of Q_ERROR_ID() */
((void)0)
/*! General purpose assertion with user-specified ID number that
* **always** evaluates the `expr_` expression.
*
* @details
* Like the Q_ASSERT_ID() macro, except it **always** evaluates the
* `expr_` expression even when assertions are disabled with the
* ::Q_NASSERT macro. However, when the ::Q_NASSERT macro is defined, the
* Q_onAssert() callback is **not** called, even if `expr_` evaluates
* to FALSE.
*
* @param[in] id_ ID number (unique within the module) of the assertion
* @param[in] expr_ Boolean expression to check
*/
Q_ASSERT_ID((id_), (expr_))
/*! inactive version of Q_ALLEGE_ID()
*
* @attention
* The expression `expr_` **is** executed.
*/
((void)(expr_))
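/*! @usage
* A short sketch of where Q_ALLEGE_ID() is preferable to Q_ASSERT_ID():
* the checked expression has a side effect that must execute even when
* assertions are disabled (BSP_initClock() is a hypothetical function;
* `qpc.h` is assumed to be included):
* @code{.c}
* Q_DEFINE_THIS_MODULE("bsp")  // module name used in assertion reports
*
* int BSP_initClock(void);     // hypothetical BSP function with a side effect
*
* void BSP_init(void) {
*     // the clock init runs even with Q_NASSERT defined; only the check
*     // (and the Q_onAssert() call) is suppressed in that configuration
*     Q_ALLEGE_ID(200, BSP_initClock() == 0);
* }
* @endcode
*/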
/*! Assertion for checking preconditions.
*
* @details
* This macro is equivalent to Q_ASSERT_ID(), except its name better documents
* the intention of this assertion.
*
* @param[in] id_ ID number (unique within the module) of the assertion
* @param[in] expr_ Boolean expression
*/
Q_ASSERT_ID((id_), (expr_))
/*! Assertion for checking preconditions (based on __LINE__).
*
* @details
* Equivalent to Q_ASSERT(), except its name better documents the intention
* of this assertion.
*
* @param[in] expr_ Boolean expression
*/
Q_REQUIRE_ID(__LINE__, (expr_))
/*! Assertion for checking postconditions.
*
* @details
* This macro is equivalent to Q_ASSERT_ID(), except its name better documents
* the intention of this assertion.
*
* @param[in] id_ ID number (unique within the module) of the assertion
* @param[in] expr_ Boolean expression
*/
Q_ASSERT_ID((id_), (expr_))
/*! Assertion for checking postconditions (based on __LINE__).
*
* @details
* Equivalent to Q_ASSERT(), except its name better documents the intention
* of this assertion.
*
* @param[in] expr_ Boolean expression
*/
Q_ENSURE_ID(__LINE__, (expr_))
/*! Assertion for checking invariants.
*
* @details
* Equivalent to Q_ASSERT_ID(), except its name better documents the intention
* of this assertion.
*
* @param[in] id_ ID number (unique within the module) of the assertion
* @param[in] expr_ Boolean expression
*/
Q_ASSERT_ID((id_), (expr_))
/*! Assertion for checking invariants (based on __LINE__).
*
* @details
* Equivalent to Q_ASSERT(), except its name better documents the intention
* of this assertion.
*
* @param[in] expr_ Boolean expression
*/
Q_INVARIANT_ID(__LINE__, (expr_))
/*! General purpose assertion (based on __LINE__)
*
* @details
* Calls the Q_onAssert() callback if the `expr_` expression evaluates
* to FALSE.
*
* @param[in] expr_ Boolean expression to check
*
* @note
* This macro identifies the problem location with the line number,
* which might change as the code is modified.
*
* @sa Q_ASSERT_ID()
*/
Q_ASSERT_ID(__LINE__, (expr_))
/*! Assertion for a wrong path through the code
*
* @details
* Calls the Q_onAssert() callback if ever executed.
*
* @note
* This macro identifies the problem location with the line number,
* which might change as the code is modified.
*
* @sa Q_ERROR_ID()
*/
Q_ERROR_ID(__LINE__)
/*! General purpose assertion (based on __LINE__) that **always**
* evaluates the `expr_` expression.
*
* @details
* Like the Q_ASSERT_ID() macro, except it **always** evaluates the
* `expr_` expression even when assertions are disabled with the
* ::Q_NASSERT macro. However, when the ::Q_NASSERT macro is defined, the
* Q_onAssert() callback is **not** called, even if `expr_` evaluates
* to FALSE.
*
* @param[in] expr_ Boolean expression to check
*
* @sa Q_ALLEGE_ID()
*/
Q_ALLEGE_ID(__LINE__, (expr_))
/*! Static (compile-time) assertion.
*
* @details
* This type of assertion deliberately causes a compile-time error when
* the `expr_` Boolean expression evaluates to FALSE. The macro exploits
* the fact that in C/C++ a dimension of an array cannot be negative.
* The compile-time assertion has no runtime side effects.
*
* @param[in] expr_ Compile-time Boolean expression
*
* @note
* The static assertion macro is provided for backwards compatibility with
* older C standards. The newer C11 standard provides `_Static_assert()`,
* which should be used instead of Q_ASSERT_STATIC().
* be used instead of Q_ASSERT_STATIC().
*/
extern char Q_static_assert_[(expr_) ? 1 : -1]
/*! Static (compile-time) assertion.
*
* @deprecated
* Use Q_ASSERT_STATIC() or better yet `_Static_assert()` instead.
*/
Q_ASSERT_STATIC(expr_)
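/*! @usage
* A short sketch of a compile-time check with Q_ASSERT_STATIC()
* (the checked condition is an arbitrary example):
* @code{.c}
* #include <stdint.h>
*
* // compilation fails with a negative array dimension if the condition
* // does not hold; no code or data is generated when it does
* Q_ASSERT_STATIC(sizeof(uint32_t) == 4U);
* @endcode
*/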
/*! Typedef for assertion IDs and line numbers in assertions.
*
* @details
* This typedef specifies the integer type for exclusive use in assertions.
* Use of this type, rather than plain 'int', complies with
* MISRA-C 2012 Dir 4.6 (advisory).
*/
/*! Callback function invoked in case of an assertion failure.
*
* @details
* This is an application-specific callback function that needs to be defined
* in the application to perform a clean system shutdown and perhaps a reset.
* The Q_onAssert() function is the last line of defense after a
* system failure and its implementation should be very **carefully**
* designed and **tested** under various fault conditions, including but
* not limited to: stack overflow, stack corruption, or calling Q_onAssert()
* from an interrupt.
* @param[in] module name of the file/module in which the assertion failed
* (constant, zero-terminated C string)
* @param[in] location location of the assertion within the module. This could
* be a line number or a user-specified ID-number.
*
* @returns
* This callback function should **not return** (see ::Q_NORETURN),
* as continuation after an assertion failure does not make sense.
*
* @note
* It is typically a **bad idea** to implement Q_onAssert() as an endless
* loop that ties up the CPU. During debugging, Q_onAssert() is an ideal
* place to put a breakpoint.
*
* @tr{PQA01_4}
*
* Called by the following: Q_ASSERT_ID(), Q_ERROR_ID(), Q_REQUIRE_ID(),
* Q_ENSURE_ID(), Q_INVARIANT_ID() and Q_ALLEGE_ID() as well as:
* Q_ASSERT(), Q_ERROR(), Q_REQUIRE(), Q_ENSURE(), Q_INVARIANT(),
* and Q_ALLEGE().
*/
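/*! @usage
* A minimal sketch of an application-level Q_onAssert() implementation for
* a bare-metal target. QF_INT_DISABLE() comes from the QP/C port and
* NVIC_SystemReset() is a CMSIS call; both are only examples of what a given
* port/BSP might provide, and `int_t` is assumed to be the name of the
* assertion-ID integer typedef described above:
* @code{.c}
* Q_NORETURN Q_onAssert(char const * const module, int_t const location) {
*     (void)module;     // unused in this sketch (could be logged/reported)
*     (void)location;
*     QF_INT_DISABLE();          // the system is in a fault state
* #ifndef NDEBUG
*     for (;;) {                 // debug build: spin, so a breakpoint can stop here
*     }
* #else
*     NVIC_SystemReset();        // release build: reset the MCU
* #endif
* }
* @endcode
*/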
/*! Helper macro to calculate static dimension of a 1-dim `array_`
*
* @param array_ 1-dimensional array
* @returns the length of the array (number of elements it can hold)
*/
(sizeof(array_) / sizeof((array_)[0U]))
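/*! @usage
* A short sketch of Q_DIM() (the buffer is a hypothetical example; `qpc.h`
* and `<stdint.h>` are assumed to be included):
* @code{.c}
* static uint32_t buffer[16];
* static uint_fast16_t const len = Q_DIM(buffer);  // == 16U, no hard-coded literal
* @endcode
*/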
/*! alias for line numbers in assertions and return from QF_run() */
/*! alias for enumerations used for event signals */
/*! alias for IEEE 754 32-bit floating point number,
* MISRA-C 2012 Dir 4.6(A)
*
* @note
* QP does not use floating-point types anywhere in the internal
* implementation, except in QS software tracing, where utilities for
* output of floating-point numbers are provided for application-specific
* trace records.
*/
/*! alias for IEEE 754 64-bit floating point number,
* MISRA-C 2012 Dir 4.6(A)
*
* @note
* QP does not use floating-point types anywhere in the internal
* implementation, except in QS software tracing, where utilities for
* output of floating-point numbers are provided for application-specific
* trace records.
*/
/*! The size (in bytes) of the signal of an event. Valid values:
* 1U, 2U, or 4U; default 2U
*
* @details
* This macro can be defined in the QEP port file (qep_port.h) to
* configure the ::QSignal type. When the macro is not defined, the
* default of 2 bytes is applied.
*/
2U
/*! Virtual call to the top-most initial transition in a HSM.
*
* @param[in,out] me_ current instance pointer (see @ref oop)
* @param[in] par_ pointer to the optional initialization parameter
* @param[in] qs_id_ QS local filter ID (used only when Q_SPY is defined)
*
* @note Must be called only ONCE after the SM "constructor".
*
* @tr{RQP102}
*
* @usage
* The following example illustrates how to initialize a SM, and dispatch
* events to it:
* @include qep_qhsm_use.c
*/
do { \
Q_ASSERT((me_)->vptr); \
(*(me_)->vptr->init)((me_), (par_), (qs_id_)); \
} while (false)
do { \
Q_ASSERT((me_)->vptr); \
(*(me_)->vptr->init)((me_), (par_), 0U); \
} while (false)
/*! Virtual call to dispatch an event to a HSM
*
* @details
* Processes one event at a time in Run-to-Completion fashion.
*
* @param[in,out] me_ current instance pointer (see @ref oop)
* @param[in] e_ constant pointer to the ::QEvt or a class
* derived from ::QEvt (see @ref oop)
* @param[in] qs_id_ QS local filter ID (used only when Q_SPY is defined)
*
* @note Must be called after the "constructor" and after QHSM_INIT().
*
* @tr{RQP102}
*/
\
((*(me_)->vptr->dispatch)((me_), (e_), (qs_id_)))
\
((*(me_)->vptr->dispatch)((me_), (e_), 0U))
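/*! @usage
* A minimal sketch of taking the top-most initial transition and then
* dispatching an event through the virtual calls above (the Calc type,
* Calc_ctor(), and CLR_SIG are hypothetical application code; QS ID 0U is
* used because Q_SPY instrumentation is not assumed here):
* @code{.c}
* #include "qpc.h"  // QP/C public interface
*
* Q_DEFINE_THIS_MODULE("main")
*
* static Calc calc;  // hypothetical SM instance derived from QHsm
*
* int main(void) {
*     Calc_ctor(&calc);                         // instantiate the SM
*     QHSM_INIT(&calc.super, (void *)0, 0U);    // top-most initial tran.
*
*     static QEvt const clrEvt = QEVT_INITIALIZER(CLR_SIG);
*     QHSM_DISPATCH(&calc.super, &clrEvt, 0U);  // one RTC step
*     return 0;
* }
* @endcode
*/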
/*! Perform upcast from a subclass of ::QHsm to the base class ::QHsm
*
* @details
* Upcasting from a subclass to superclass is a very frequent and **safe**
* operation in object-oriented programming and object-oriented languages
* (such as C++) perform such upcasting automatically. However, OOP is
* implemented in C just as a set of coding conventions (see @ref oop),
* and the C compiler does not "know" that certain types are related by
* inheritance. Therefore for C, the upcast must be performed explicitly.
* Unfortunately, pointer casting violates MISRA-C 2012
* Rule 11.3(req) "A cast shall not be performed between a pointer to object
* type and a pointer to a different object type". This macro encapsulates
* this deviation and provides a descriptive name for the reason of this cast.
*/
((QHsm *)(ptr_))
/*! Macro to call in a state-handler when it executes a regular transition
* to the `target_` state. Applicable only to ::QHsm subclasses.
*
* @param[in] target_ state-handler function of the target state
*
* @tr{RQP103}
*/
\
((Q_HSM_UPCAST(me))->temp.fun = Q_STATE_CAST(target_), Q_RET_TRAN)
/*! Macro to call in a state-handler when it executes a transition
* to history. Applicable only to HSMs.
*
* @tr{RQP103} @tr{RQP120H}
*
* @usage
* @include qep_qhist.c
*/
\
((Q_HSM_UPCAST(me))->temp.fun = (hist_), Q_RET_TRAN_HIST)
/*! Macro to call in a state-handler when it designates the superstate
* of a given state. Applicable only to ::QHsm subclasses.
*
* @tr{RQP103}
*
* @usage
* @include qep_qsuper.c
*/
\
((Q_HSM_UPCAST(me))->temp.fun = Q_STATE_CAST(super_), Q_RET_SUPER)
/*! Macro to call in a state-handler when it handles an event.
* @tr{RQP103} @tr{RQP120B} @tr{RQP120C}
*/
(Q_RET_HANDLED)
/*! Macro to call in a state-handler when it attempts to handle
* an event but a guard condition evaluates to 'false' and there is no other
* explicit way of handling the event. Applicable only to ::QHsm subclasses.
*/
(Q_RET_UNHANDLED)
/*! Macro to provide strictly-typed zero-action to terminate action lists
* in the transition-action-tables
*/
((QActionHandler)0)
/*! Perform downcast of an event onto a subclass of ::QEvt `class_`
*
* @details
* This macro encapsulates the downcast of ::QEvt pointers, which violates
* MISRA-C 2012 Rule 11.3(R) "A cast shall not be performed between a
* pointer to object type and a pointer to a different object type". This
* macro helps to localize this deviation.
*
* @param class_ a subclass of ::QEvt
*
* @tr{RQP003}
* @tr{PQA11_3}
*/
((class_ const *)(e))
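/*! @usage
* A minimal sketch of Q_EVT_CAST() inside a state-handler (the Keyboard and
* KeyEvt types, the key_code member and KEY_PRESSED_SIG are hypothetical;
* the generic event parameter must be named `e` for this macro to work):
* @code{.c}
* QState Keyboard_active(Keyboard * const me, QEvt const * const e) {
*     QState status_;
*     switch (e->sig) {
*         case KEY_PRESSED_SIG: {
*             // downcast the generic event pointer to the derived KeyEvt
*             uint8_t const key = Q_EVT_CAST(KeyEvt)->key_code;
*             (void)key;  // ... use the event parameter ...
*             status_ = Q_HANDLED();
*             break;
*         }
*         default: {
*             status_ = Q_SUPER(&QHsm_top);
*             break;
*         }
*     }
*     return status_;
* }
* @endcode
*/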
/*! Perform cast to ::QStateHandler.
* @details
* This macro encapsulates the cast of a specific state handler function
* pointer to ::QStateHandler, which violates MISRA-C 2012 Rule 11.1(req)
* "Conversions shall not be performed between a pointer to function and
* any other type". This macro helps to localize this deviation.
*
* @tr{PQP11_1} @tr{PQA11_1}
*
* @usage
* @include qep_qhsm_ctor.c
*/
((QStateHandler)(handler_))
/*! Perform cast to ::QActionHandler.
* @details
* This macro encapsulates the cast of a specific action handler function
* pointer to ::QActionHandler, which violates MISRA-C 2012 Rule 11.1(R)
* "Conversions shall not be performed between a pointer to function and
* any other type". This macro helps to localize this deviation.
*
* @tr{PQP11_1} @tr{PQA11_1}
*/
((QActionHandler)(action_))
/*! Helper macro to clearly mark unused parameters of functions. */
((void)(par_))
/*! Helper macro to calculate static dimension of a 1-dim `array_`
*
* @param array_ 1-dimensional array
* @returns the length of the array (number of elements it can hold)
*/
(sizeof(array_) / sizeof((array_)[0U]))
/*! Perform cast from unsigned integer `uint_` to pointer of type `type_`
*
* @details
* This macro encapsulates the cast to (type_ *), which QP ports or
* application might use to access embedded hardware registers.
* Such uses can trigger PC-Lint "Note 923: cast from int to pointer"
* and this macro helps to encapsulate this deviation.
*/
((type_ *)(uint_))
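/*! @usage
* A short sketch of accessing a memory-mapped register through
* Q_UINT2PTR_CAST() (the register address 0x40021018U and the bit position
* are arbitrary examples):
* @code{.c}
* static void BSP_clockEnable(void) {  // hypothetical BSP helper
*     uint32_t volatile * const reg =
*         Q_UINT2PTR_CAST(uint32_t volatile, 0x40021018U);
*     *reg |= (1U << 17U);  // set an example peripheral-clock-enable bit
* }
* @endcode
*/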
/*! Initializer of static constant QEvt instances
*
* @details
* This macro encapsulates the ugly casting of enumerated signals
* to QSignal and constants for QEvt.poolID and QEvt.refCtr_.
*/
{ (QSignal)(sig_), 0U, 0U }
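/*! @usage
* A short sketch of defining a static immutable event with
* QEVT_INITIALIZER() (TIMEOUT_SIG is a hypothetical user signal):
* @code{.c}
* enum { TIMEOUT_SIG = Q_USER_SIG };  // hypothetical user signal
*
* // pool ID of 0 marks the event as immutable; it is never recycled
* static QEvt const timeoutEvt = QEVT_INITIALIZER(TIMEOUT_SIG);
* @endcode
*/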
/*! Macro to call in a QM action-handler when it executes
* an entry action. Applicable only to ::QMsm subclasses.
*/
\
((Q_HSM_UPCAST(me))->temp.obj = (state_), Q_RET_ENTRY)
(Q_RET_ENTRY)
/*! Macro to call in a QM action-handler when it executes
* an exit action. Applicable only to ::QMsm subclasses.
*/
\
((Q_HSM_UPCAST(me))->temp.obj = (state_), Q_RET_EXIT)
(Q_RET_EXIT)
/*! Macro to call in a QM submachine exit-handler.
* Applicable only to subclasses of ::QMsm.
*/
\
((Q_HSM_UPCAST(me))->temp.obj = (state_), Q_RET_EXIT)
/*! Macro to call in a QM state-handler when it executes a regular
* transition. Applicable only to ::QMsm subclasses.
*/
((Q_HSM_UPCAST(me))->temp.tatbl \
= (struct QMTranActTable const *)(tatbl_), Q_RET_TRAN)
/*! Macro to call in a QM state-handler when it executes an initial
* transition. Applicable only to ::QMsm subclasses.
*/
((Q_HSM_UPCAST(me))->temp.tatbl \
= (struct QMTranActTable const *)(tatbl_), Q_RET_TRAN_INIT)
/*! Macro to call in a QM state-handler when it executes a transition
* to history. Applicable only to ::QMsm subclasses.
*/
\
((((Q_HSM_UPCAST(me))->state.obj = (history_)), \
((Q_HSM_UPCAST(me))->temp.tatbl = \
(struct QMTranActTable const *)(tatbl_))), \
Q_RET_TRAN_HIST)
/*! Macro to call in a QM state-handler when it executes a transition
* to the submachine via an entry point.
*/
((Q_HSM_UPCAST(me))->temp.tatbl \
= (struct QMTranActTable const *)(tatbl_), Q_RET_TRAN_EP)
/*! Macro to call in a QM state-handler when it executes a transition
* to exit point. Applicable only to ::QMsm subclasses.
*/
\
((((Q_HSM_UPCAST(me))->state.act = (xp_)), \
((Q_HSM_UPCAST(me))->temp.tatbl = \
(struct QMTranActTable const *)(tatbl_))), \
Q_RET_TRAN_XP)
/*! Macro to call in a QM state-handler when it handled an event.
* Applicable only to ::QMsm subclasses.
*/
(Q_RET_HANDLED)
/*! Macro to call in a QM state-handler when it attempts to
* handle an event but a guard condition evaluates to 'false' and there is
* no other explicit way of handling the event. Applicable only to
* ::QMsm subclasses.
*/
(Q_RET_UNHANDLED)
/*! Macro to call in a QM state-handler when it designates the
* superstate to handle an event. Applicable only to ::QMsm subclasses.
*/
(Q_RET_SUPER)
/*! Macro to call in a QM submachine-handler when it designates the
* host state to handle an event. Applicable only to subclasses of ::QMsm.
*/
\
((Q_HSM_UPCAST(me))->temp.obj = (host_), Q_RET_SUPER_SUB)
/*! Macro to provide strictly-typed zero-state to use for submachines.
* Applicable to subclasses of ::QMsm.
*/
((QMState *)0)
/*! the current QP version number string in ROM, based on #QP_VERSION_STR */
= QP_VERSION_STR;
/*! ::QSignal represents the signal of an event
*
* @details
* The relationship between an event and a signal is as follows. A signal
* in UML is the specification of an asynchronous stimulus that triggers
* reactions, and as such is an essential part of an event. (The signal
* conveys the type of the occurrence--what happened?) However, an event
* can also contain additional quantitative information about the
* occurrence in form of event parameters.
*/
/*! @brief Event class
* @class QEvt
*
* @details
* ::QEvt represents events without parameters and serves as the base class
* for derivation of events with parameters.
*
* @tr{RQP001} @tr{RQP004}
* @tr{AQP210}
*
* @usage
* The following example illustrates how to add an event parameter by
* derivation of the ::QEvt class. Please note that the ::QEvt member
* super is defined as the FIRST member of the derived struct.
* @include qep_qevt.c
*/
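/*! @usage
* A minimal sketch of deriving an event with a parameter from ::QEvt
* (the KeyEvt type and key_code member are hypothetical application code):
* @code{.c}
* typedef struct {
*     QEvt super;        // inherited QEvt instance (must be the FIRST member)
*     uint8_t key_code;  // event parameter
* } KeyEvt;
* @endcode
*/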
/*! Signal of the event.
* @public @memberof QEvt
*
* @tr{RQP002}
*/
/*! Pool ID (==0 for immutable event)
* @private @memberof QEvt
*
* @tr{RQP003}
*/
/*! Reference counter (for mutable events)
* @private @memberof QEvt
*
* @tr{RQP003}
*/
/*! Custom event constructor
* @public @memberof QEvt
* @note
* Available only when the macro #Q_EVT_CTOR is defined
* @tr{RQP005}
*/
/*! All possible values returned from state/action handlers
*
* @note
* The order matters for algorithmic correctness.
*/
{
/* unhandled and need to "bubble up" */
Q_RET_SUPER, /*!< event passed to superstate to handle */
Q_RET_SUPER_SUB, /*!< event passed to submachine superstate */
Q_RET_UNHANDLED, /*!< event unhandled due to a guard */
/* handled and do not need to "bubble up" */
Q_RET_HANDLED, /*!< event handled (internal transition) */
Q_RET_IGNORED, /*!< event silently ignored (bubbled up to top) */
/* entry/exit */
Q_RET_ENTRY, /*!< state entry action executed */
Q_RET_EXIT, /*!< state exit action executed */
/* no side effects */
Q_RET_NULL, /*!< return value without any effect */
/* transitions need to execute transition-action table in ::QMsm */
Q_RET_TRAN, /*!< regular transition */
Q_RET_TRAN_INIT, /*!< initial transition in a state or submachine */
Q_RET_TRAN_EP, /*!< entry-point transition into a submachine */
/* transitions that additionally clobber me->state */
Q_RET_TRAN_HIST, /*!< transition to history of a given state */
Q_RET_TRAN_XP /*!< exit-point transition out of a submachine */
};
/*! Type returned from state-handler functions */
/*! Pointer to a state-handler function. */
)(void * const me, QEvt const * const e);
/*! Pointer to an action-handler function. */
)(void * const me);
/* forward declaration */
/*! Pointer to an eXthended thread handler function */
)(struct QXThread * const me);
/*! @brief State object for the ::QMsm class (QM State Machine).
*
* @details
* This class groups together the attributes of a ::QMsm state, such as the
* parent state (state nesting), the associated state handler function and
* the exit action handler function. These attributes are used inside the
* QMsm_dispatch() and QMsm_init() functions.
*
* @tr{RQP104}
*
* @attention
* The ::QMState class is only intended for the QM code generator and should
* not be used in hand-crafted code.
*/
{
struct QMState const *superstate; /*!< superstate of this state */
QStateHandler const stateHandler; /*!< state handler function */
QActionHandler const entryAction; /*!< entry action handler function */
QActionHandler const exitAction; /*!< exit action handler function */
QActionHandler const initAction; /*!< init action handler function */
} QMState;
/*! @brief Transition-Action Table for the ::QMsm State Machine. */
{
QMState const *target; /*!< target of the transition */
QActionHandler const act[1]; /*!< array of actions */
} QMTranActTable;
/*! @brief Attribute of the ::QHsm class (Hierarchical State Machine).
*
* @details
* This union represents possible values stored in the 'state' and 'temp'
* attributes of the ::QHsm class.
*/
{
QStateHandler fun; /*!< @private pointer to a state-handler */
QActionHandler act; /*!< @private pointer to an action-handler */
QXThreadHandler thr; /*!< @private pointer to a thread-handler */
QMTranActTable const *tatbl; /*!< @private transition-action table */
struct QMState const *obj; /*!< @private pointer to QMState object */
};
/*! maximum depth of state nesting in a HSM (including the top level),
* must be >= 3
*/
= 6};
/*! Reserved signals by the HSM-style state machine
* implementation strategy.
*/
{
Q_EMPTY_SIG, /*!< signal to execute the default case */
Q_ENTRY_SIG, /*!< signal for coding entry actions */
Q_EXIT_SIG, /*!< signal for coding exit actions */
Q_INIT_SIG, /*!< signal for coding initial transitions */
Q_USER_SIG /*!< offset for the user signals (QP Application) */
};
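/*! @usage
* A short sketch of defining application-level signals, which must start at
* the Q_USER_SIG offset to stay clear of the reserved signals above (the
* signal names are hypothetical):
* @code{.c}
* enum AppSignals {
*     KEY_PRESSED_SIG = Q_USER_SIG,  // first signal available to the application
*     TIMEOUT_SIG,                   // further user signals follow
*     MAX_SIG                        // keep last (total number of signals)
* };
* @endcode
*/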
/*! @brief Hierarchical State Machine class
* @class QHsm
*
* @details
* QHsm represents a Hierarchical State Machine (HSM) with full support for
* hierarchical nesting of states, entry/exit actions, initial transitions,
* and transitions to history in any composite state. This class is designed
* for ease of manual coding of HSMs in C, but it is also supported by the
* QM modeling tool.<br>
*
* QHsm is also the base class for the QMsm state machine, which provides
* superior efficiency, but requires the use of the QM modeling tool to
* generate code.
*
* @note
* QHsm is not intended to be instantiated directly, but rather serves as the
* abstract base class for derivation of state machines in the QP application.
*
* @tr{RQP103}
* @tr{AQP211}
*
* @usage
* The following example illustrates how to derive a state machine class
* from QHsm. Please note that the QHsm member `super` is defined as the
* FIRST member of the derived class.
* @include qep_qhsm.c
*/
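/*! @usage
* A minimal sketch of deriving a state machine class from ::QHsm (the Calc
* type and its state-handlers are hypothetical application code):
* @code{.c}
* typedef struct {
*     QHsm super;       // inherited QHsm instance (must be the FIRST member)
*     double operand;   // extended state variable(s)
* } Calc;
*
* // state-handler functions of the hypothetical Calc HSM
* QState Calc_initial(Calc * const me, void const * const par);
* QState Calc_on     (Calc * const me, QEvt const * const e);
* @endcode
*/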
/*! Virtual pointer
* @private @memberof QHsm
*
* @tr{RQP102}
*/
/*! Current active state (state-variable).
* @private @memberof QHsm
*/
/*! Temporary: target/act-table, etc.
* @private @memberof QHsm
*/
/*! Tests if a given state is part of the current active state
* configuration in ::QHsm subclasses.
* @public @memberof QHsm
*
* @details
* Tests if a state machine derived from QHsm is-in a given state.
*
* @note For a HSM, to "be in a state" means also to be in a superstate
* of the state.
*
* @param[in] me current instance pointer (see @ref oop)
* @param[in] state pointer to the state-handler function to be tested
*
* @returns
* 'true' if the HSM "is in" the `state` and 'false' otherwise
*
* @tr{RQP103}
* @tr{RQP120S}
*/
/*! @pre the state configuration must be stable */
Q_REQUIRE_ID(600, me->temp.fun == me->state.fun);
bool inState = false; /* assume that this HSM is not in 'state' */
/* scan the state hierarchy bottom-up */
QState r;
do {
/* do the states match? */
if (me->temp.fun == state) {
inState = true; /* 'true' means that match found */
r = Q_RET_IGNORED; /* break out of the loop */
}
else {
r = QHsm_reservedEvt_(me, me->temp.fun, Q_EMPTY_SIG);
}
} while (r != Q_RET_IGNORED); /* QHsm_top() state not reached */
me->temp.fun = me->state.fun; /* restore the stable state configuration */
return inState; /* return the status */
/*! Obtain the current active state from a HSM (read only).
* @public @memberof QHsm
*
* @param[in] me current instance pointer (see @ref oop)
*
* @returns the current active state of the QHsm class
*
* @note
* This function is used in QM for auto-generating code for state history.
*/
return me->state.fun;
/*! Obtain the current active child state of a given parent in ::QHsm
* @public @memberof QHsm
*
* @details
* Finds the child state of the given `parent`, such that this child state
* is an ancestor of the currently active state. The main purpose of this
* function is to support **shallow history** transitions in state machines
* derived from QHsm.
*
* @param[in] me current instance pointer (see @ref oop)
* @param[in] parent pointer to the state-handler function
*
* @returns
* the child of a given `parent` state, which is an ancestor of the current
* active state. For the corner case when the currently active state is the
* given `parent` state, function returns the `parent` state.
*
* @note
* this function is designed to be called during state transitions, so it
* does not necessarily start in a stable state configuration.
* However, the function establishes stable state configuration upon exit.
*
* @tr{RQP103}
* @tr{RQP120H}
*/
QStateHandler child = me->state.fun; /* start with the current state */
bool isFound = false; /* start with the child not found */
/* establish stable state configuration */
me->temp.fun = me->state.fun;
QState r;
do {
/* is this the parent of the current child? */
if (me->temp.fun == parent) {
isFound = true; /* child is found */
r = Q_RET_IGNORED; /* break out of the loop */
}
else {
child = me->temp.fun;
r = QHsm_reservedEvt_(me, me->temp.fun, Q_EMPTY_SIG);
}
} while (r != Q_RET_IGNORED); /* QHsm_top() state not reached */
me->temp.fun = me->state.fun; /* establish stable state configuration */
/*! @post the child must be found */
Q_ENSURE_ID(810, isFound);
#ifdef Q_NASSERT
Q_UNUSED_PAR(isFound);
#endif
return child; /* return the child */
/*! Protected "constructor" of ::QHsm
* @protected @memberof QHsm
*
* @details
* Performs the first step of HSM initialization by assigning the initial
* pseudostate to the currently active state of the state machine.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] initial pointer to the top-most initial state-handler
* function in the derived state machine
*
* @note Must be called only by the constructors of the derived state
* machines.
*
* @note Must be called only ONCE before QHSM_INIT().
*
* @usage
* The following example illustrates how to invoke QHsm_ctor() in the
* "constructor" of a derived state machine:
* @include qep_qhsm_ctor.c
*
* @tr{RQP103}
*/
static struct QHsmVtable const vtable = { /* QHsm virtual table */
&QHsm_init_,
&QHsm_dispatch_
#ifdef Q_SPY
,&QHsm_getStateHandler_
#endif
};
me->vptr = &vtable;
me->state.fun = Q_STATE_CAST(&QHsm_top);
me->temp.fun = initial;
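/*! @usage
* A minimal sketch of calling QHsm_ctor() from the "constructor" of a
* derived state machine (the Calc type with its QHsm `super` member and the
* Calc_initial() handler are hypothetical, as in the derivation sketch
* shown earlier):
* @code{.c}
* void Calc_ctor(Calc * const me) {
*     // first construct the QHsm base class with the initial pseudostate...
*     QHsm_ctor(&me->super, Q_STATE_CAST(&Calc_initial));
*     // ...then initialize the extended state variables (if any)
*     me->operand = 0.0;
* }
* @endcode
*/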
const
/*! The top-state of QHsm.
* @protected @memberof QHsm
*
* @details
* QHsm_top() is the ultimate root of state hierarchy in all HSMs derived
* from ::QHsm.
*
* @param[in] me current instance pointer (see @ref oop)
* @param[in] e pointer to the event to be dispatched to the HSM
*
* @returns
* Always returns ::Q_RET_IGNORED, which means that the top state ignores
* all events.
*
* @note The parameters to this state handler are not used. They are provided
* for conformance with the state-handler function signature ::QStateHandler.
*
* @tr{RQP103} @tr{RQP120T}
*/
Q_UNUSED_PAR(me);
Q_UNUSED_PAR(e);
return Q_RET_IGNORED; /* the top state ignores all events */
/*! Implementation of the top-most initial tran. in ::QHsm.
* @protected @memberof QHsm
*
* @details
* Executes the top-most initial transition in a HSM.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] e pointer to an extra parameter (might be NULL)
* @param[in] qs_id QS-id of this state machine (for QS local filter)
*
* @note Must be called only ONCE after the QHsm_ctor().
*
* @tr{RQP103} @tr{RQP120I} @tr{RQP120D}
*/
#ifdef Q_SPY
if ((QS_priv_.flags & 0x01U) == 0U) {
QS_priv_.flags |= 0x01U;
QS_FUN_DICTIONARY(&QHsm_top);
}
#else
Q_UNUSED_PAR(qs_id);
#endif
QStateHandler t = me->state.fun;
/*! @pre the virtual pointer must be initialized, the top-most initial
* transition must be initialized, and the initial transition must not
* be taken yet.
*/
Q_REQUIRE_ID(200, (me->vptr != (struct QHsmVtable *)0)
&& (me->temp.fun != Q_STATE_CAST(0))
&& (t == Q_STATE_CAST(&QHsm_top)));
/* execute the top-most initial tran. */
QState r = (*me->temp.fun)(me, Q_EVT_CAST(QEvt));
/* the top-most initial transition must be taken */
Q_ASSERT_ID(210, r == Q_RET_TRAN);
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_STATE_INIT, qs_id)
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(t); /* the source state */
QS_FUN_PRE_(me->temp.fun); /* the target of the initial transition */
QS_END_PRE_()
/* drill down into the state hierarchy with initial transitions... */
do {
QStateHandler path[QHSM_MAX_NEST_DEPTH_]; /* tran entry path array */
int_fast8_t ip = 0; /* tran entry path index */
path[0] = me->temp.fun;
(void)QHsm_reservedEvt_(me, me->temp.fun, Q_EMPTY_SIG);
while (me->temp.fun != t) {
++ip;
Q_ASSERT_ID(220, ip < QHSM_MAX_NEST_DEPTH_);
path[ip] = me->temp.fun;
(void)QHsm_reservedEvt_(me, me->temp.fun, Q_EMPTY_SIG);
}
me->temp.fun = path[0];
/* nested initial transition, drill into the target hierarchy... */
do {
QHsm_state_entry_(me, path[ip], qs_id); /* enter path[ip] */
--ip;
} while (ip >= 0);
t = path[0]; /* current state becomes the new source */
r = QHsm_reservedEvt_(me, t, Q_INIT_SIG); /* execute initial transition */
#ifdef Q_SPY
if (r == Q_RET_TRAN) {
QS_BEGIN_PRE_(QS_QEP_STATE_INIT, qs_id)
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(t); /* the source state */
QS_FUN_PRE_(me->temp.fun); /* target of the initial tran. */
QS_END_PRE_()
}
#endif /* Q_SPY */
} while (r == Q_RET_TRAN);
QS_BEGIN_PRE_(QS_QEP_INIT_TRAN, qs_id)
QS_TIME_PRE_(); /* time stamp */
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(t); /* the new active state */
QS_END_PRE_()
me->state.fun = t; /* change the current active state */
me->temp.fun = t; /* mark the configuration as stable */
/*! Implementation of dispatching events to a ::QHsm
* @protected @memberof QHsm
*
* @details
* Dispatches an event for processing to a hierarchical state machine (HSM).
* The processing of an event represents one run-to-completion (RTC) step.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] e pointer to the event to be dispatched to the HSM
* @param[in] qs_id QS-id of this state machine (for QS local filter)
*
* @note
* This function should be called only via the virtual table (see
* QHSM_DISPATCH()) and should NOT be called directly in the applications.
*
* @tr{RQP103}
* @tr{RQP120A} @tr{RQP120B} @tr{RQP120C} @tr{RQP120D} @tr{RQP120E}
*/
#ifndef Q_SPY
Q_UNUSED_PAR(qs_id);
#endif
QStateHandler t = me->state.fun;
QS_CRIT_STAT_
/*! @pre the current state must be initialized and
* the state configuration must be stable
*/
Q_REQUIRE_ID(400, (t != Q_STATE_CAST(0))
&& (t == me->temp.fun));
QS_BEGIN_PRE_(QS_QEP_DISPATCH, qs_id)
QS_TIME_PRE_(); /* time stamp */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(t); /* the current state */
QS_END_PRE_()
QStateHandler s;
QState r;
/* process the event hierarchically... */
do {
s = me->temp.fun;
r = (*s)(me, e); /* invoke state handler s */
if (r == Q_RET_UNHANDLED) { /* unhandled due to a guard? */
QS_BEGIN_PRE_(QS_QEP_UNHANDLED, qs_id)
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(s); /* the current state */
QS_END_PRE_()
r = QHsm_reservedEvt_(me, s, Q_EMPTY_SIG); /* find superstate of s */
}
} while (r == Q_RET_SUPER);
/* regular transition taken? */
/*! @tr{RQP120E} */
if (r >= Q_RET_TRAN) {
QStateHandler path[QHSM_MAX_NEST_DEPTH_];
path[0] = me->temp.fun; /* save the target of the transition */
path[1] = t;
path[2] = s;
/* exit current state to transition source s... */
/*! @tr{RQP120C} */
for (; t != s; t = me->temp.fun) {
/* exit from t handled? */
if (QHsm_state_exit_(me, t, qs_id)) {
/* find superstate of t */
(void)QHsm_reservedEvt_(me, t, Q_EMPTY_SIG);
}
}
int_fast8_t ip = QHsm_tran_(me, path, qs_id); /* the HSM transition */
#ifdef Q_SPY
if (r == Q_RET_TRAN_HIST) {
QS_BEGIN_PRE_(QS_QEP_TRAN_HIST, qs_id)
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(t); /* the source of the transition */
QS_FUN_PRE_(path[0]); /* the target of tran. to history */
QS_END_PRE_()
}
#endif /* Q_SPY */
/* execute state entry actions in the desired order... */
/*! @tr{RQP120B} */
for (; ip >= 0; --ip) {
QHsm_state_entry_(me, path[ip], qs_id); /* enter path[ip] */
}
t = path[0]; /* stick the target into register */
me->temp.fun = t; /* update the next state */
/* while nested initial transition... */
/*! @tr{RQP120I} */
while (QHsm_reservedEvt_(me, t, Q_INIT_SIG) == Q_RET_TRAN) {
QS_BEGIN_PRE_(QS_QEP_STATE_INIT, qs_id)
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(t); /* the source (pseudo)state */
QS_FUN_PRE_(me->temp.fun); /* the target of the tran. */
QS_END_PRE_()
ip = 0;
path[0] = me->temp.fun;
/* find superstate */
(void)QHsm_reservedEvt_(me, me->temp.fun, Q_EMPTY_SIG);
while (me->temp.fun != t) {
++ip;
path[ip] = me->temp.fun;
/* find superstate */
(void)QHsm_reservedEvt_(me, me->temp.fun, Q_EMPTY_SIG);
}
me->temp.fun = path[0];
/* entry path must not overflow */
Q_ASSERT_ID(410, ip < QHSM_MAX_NEST_DEPTH_);
/* retrace the entry path in reverse (correct) order... */
do {
QHsm_state_entry_(me, path[ip], qs_id); /* enter path[ip] */
--ip;
} while (ip >= 0);
t = path[0]; /* current state becomes the new source */
}
QS_BEGIN_PRE_(QS_QEP_TRAN, qs_id)
QS_TIME_PRE_(); /* time stamp */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(s); /* the source of the transition */
QS_FUN_PRE_(t); /* the new active state */
QS_END_PRE_()
}
#ifdef Q_SPY
else if (r == Q_RET_HANDLED) {
QS_BEGIN_PRE_(QS_QEP_INTERN_TRAN, qs_id)
QS_TIME_PRE_(); /* time stamp */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(s); /* the source state */
QS_END_PRE_()
}
else {
QS_BEGIN_PRE_(QS_QEP_IGNORED, qs_id)
QS_TIME_PRE_(); /* time stamp */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(me->state.fun); /* the current state */
QS_END_PRE_()
}
#endif /* Q_SPY */
me->state.fun = t; /* change the current active state */
me->temp.fun = t; /* mark the configuration as stable */
/*! Implementation of getting the state handler in a ::QHsm subclass
* @private @memberof QHsm
*/
return me->state.fun;
/*! Helper function to execute transition sequence in a hierarchical state
* machine (HSM).
* @private @memberof QHsm
*
* @param[in,out] path array of pointers to state-handler functions
* to execute the entry actions
* @param[in] qs_id QS-id of this state machine (for QS local filter)
*
* @returns
* the depth of the entry path stored in the `path` parameter.
*
* @tr{RQP103}
* @tr{RQP120E} @tr{RQP120F}
*/
#ifndef Q_SPY
Q_UNUSED_PAR(qs_id);
#endif
int_fast8_t ip = -1; /* transition entry path index */
QStateHandler t = path[0];
QStateHandler const s = path[2];
/* (a) check source==target (transition to self)... */
if (s == t) {
(void)QHsm_state_exit_(me, s, qs_id); /* exit source */
ip = 0; /* enter the target */
}
else {
/* find superstate of target */
(void)QHsm_reservedEvt_(me, t, Q_EMPTY_SIG);
t = me->temp.fun;
/* (b) check source==target->super... */
if (s == t) {
ip = 0; /* enter the target */
}
else {
/* find superstate of src */
(void)QHsm_reservedEvt_(me, s, Q_EMPTY_SIG);
/* (c) check source->super==target->super... */
if (me->temp.fun == t) {
(void)QHsm_state_exit_(me, s, qs_id); /* exit source */
ip = 0; /* enter the target */
}
else {
/* (d) check source->super==target... */
if (me->temp.fun == path[0]) {
(void)QHsm_state_exit_(me, s, qs_id); /* exit source */
}
else {
/* (e) check rest of source==target->super->super..
* and store the entry path along the way
*/
int_fast8_t iq = 0; /* indicate that LCA not found */
ip = 1; /* enter target and its superstate */
path[1] = t; /* save the superstate of target */
t = me->temp.fun; /* save source->super */
/* find target->super->super... */
QState r = QHsm_reservedEvt_(me, path[1], Q_EMPTY_SIG);
while (r == Q_RET_SUPER) {
++ip;
path[ip] = me->temp.fun; /* store the entry path */
if (me->temp.fun == s) { /* is it the source? */
iq = 1; /* indicate that LCA found */
/* entry path must not overflow */
Q_ASSERT_ID(510,
ip < QHSM_MAX_NEST_DEPTH_);
--ip; /* do not enter the source */
r = Q_RET_HANDLED; /* terminate loop */
}
/* it is not the source, keep going up */
else {
r = QHsm_reservedEvt_(me, me->temp.fun,
Q_EMPTY_SIG);
}
}
/* the LCA not found yet? */
if (iq == 0) {
/* entry path must not overflow */
Q_ASSERT_ID(520, ip < QHSM_MAX_NEST_DEPTH_);
/* exit source */
(void)QHsm_state_exit_(me, s, qs_id);
/* (f) check the rest of source->super
* == target->super->super...
*/
iq = ip;
r = Q_RET_IGNORED; /* LCA NOT found */
do {
if (t == path[iq]) { /* is this the LCA? */
r = Q_RET_HANDLED; /* LCA found */
ip = iq - 1; /* do not enter LCA */
iq = -1; /* cause termination of the loop */
}
else {
--iq; /* try lower superstate of target */
}
} while (iq >= 0);
/* LCA not found? */
if (r != Q_RET_HANDLED) {
/* (g) check each source->super->...
* for each target->super...
*/
r = Q_RET_IGNORED; /* keep looping */
do {
/* exit from t handled? */
if (QHsm_state_exit_(me, t, qs_id)) {
/* find superstate of t */
(void)QHsm_reservedEvt_(me, t, Q_EMPTY_SIG);
}
t = me->temp.fun; /* set to super of t */
iq = ip;
do {
/* is this LCA? */
if (t == path[iq]) {
/* do not enter LCA */
ip = (int_fast8_t)(iq - 1);
iq = -1; /* break out of inner loop */
/* break out of outer loop */
r = Q_RET_HANDLED;
}
else {
--iq;
}
} while (iq >= 0);
} while (r != Q_RET_HANDLED);
}
}
}
}
}
}
return ip;
/*! Helper function to execute entry into a given state in a
* hierarchical state machine (HSM).
* @private @memberof QHsm
*
* @param[in] state state handler function
* @param[in] qs_id QS-id of this state machine (for QS local filter)
*/
#ifdef Q_SPY
if ((*state)(me, &l_reservedEvt_[Q_ENTRY_SIG]) == Q_RET_HANDLED) {
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_STATE_ENTRY, qs_id)
QS_OBJ_PRE_(me);
QS_FUN_PRE_(state);
QS_END_PRE_()
}
#else
Q_UNUSED_PAR(qs_id);
(void)(*state)(me, &l_reservedEvt_[Q_ENTRY_SIG]);
#endif /* Q_SPY */
/*! Helper function to execute exit from a given state in a
* hierarchical state machine (HSM).
* @private @memberof QHsm
*
* @param[in] state state handler function
* @param[in] qs_id QS-id of this state machine (for QS local filter)
*
* @returns
* 'true' if the exit action has been found in the state and
* 'false' otherwise.
*/
#ifdef Q_SPY
bool isHandled;
if ((*state)(me, &l_reservedEvt_[Q_EXIT_SIG]) == Q_RET_HANDLED) {
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_STATE_EXIT, qs_id)
QS_OBJ_PRE_(me);
QS_FUN_PRE_(state);
QS_END_PRE_()
isHandled = true;
}
else {
isHandled = false;
}
return isHandled;
#else
Q_UNUSED_PAR(qs_id);
return (*state)(me, &l_reservedEvt_[Q_EXIT_SIG]) == Q_RET_HANDLED;
#endif /* Q_SPY */
/*! @brief Virtual table for the ::QHsm class.
*
* @tr{RQP102}
*/
{
/*! Triggers the top-most initial transition in the HSM. */
void (*init)(QHsm * const me, void const * const e,
uint_fast8_t const qs_id);
/*! Dispatches an event to the HSM. */
void (*dispatch)(QHsm * const me, QEvt const * const e,
uint_fast8_t const qs_id);
#ifdef Q_SPY
/*! Get the current state handler of the HSM. */
QStateHandler (*getStateHandler)(QHsm * const me);
#endif /* Q_SPY */
};
/*! @brief QM state machine implementation strategy
* @class QMsm
* @extends QHsm
*
* @details
* QMsm (QM State Machine) provides a more efficient state machine
* implementation strategy than ::QHsm. It requires the use of the QM
* modeling tool, but is the fastest strategy and needs the least run-time
* support (the smallest event processor taking up the least code space).
*
* @note
* QMsm is not intended to be instantiated directly, but rather serves
* as the abstract base class for derivation of state machines in the
* application code.
*
* @tr{RQP104}
*
* @usage
* The following example illustrates how to derive a state machine class
* from QMsm. Please note that the QMsm member `super` is defined
* as the *first* member of the derived struct.
* @include qep_qmsm.c
*/
const
/*! Tests if a given state is part of the current active state
* configuration in a MSM.
* @public @memberof QMsm
*
* @details
* Tests if a state machine derived from QMsm is-in a given state.
*
* @note
* For a MSM, to "be-in" a state means also to "be-in" a superstate
* of the state.
*
* @param[in] me current instance pointer (see @ref oop)
* @param[in] state pointer to the QMState object that corresponds to the
* tested state.
* @returns
* 'true' if the MSM "is in" the `state` and 'false' otherwise
*/
bool inState = false; /* assume that this MSM is not in 'state' */
for (QMState const *s = me->super.state.obj;
s != (QMState *)0;
s = s->superstate)
{
if (s == state) {
inState = true; /* match found, return 'true' */
break;
}
}
return inState;
/*! Obtain the current active state from a MSM (read only)
* @public @memberof QMsm
*
* @param[in] me current instance pointer (see @ref oop)
*
* @returns the current active state-object
*
* @note
* This function is used in QM for auto-generating code for state history
*/
return me->state.obj;
/*! Obtain the current active child state of a given parent in ::QMsm
* @public @memberof QMsm
*
* @details
* Finds the child state of the given @c parent, such that this child state
* is an ancestor of the currently active state. The main purpose of this
* function is to support **shallow history** transitions in state machines
* derived from QMsm.
*
* @param[in] me current instance pointer (see @ref oop)
* @param[in] parent pointer to the state-handler object
*
* @returns
* the child of a given @c parent state, which is an ancestor of
* the currently active state. For the corner case when the currently active
* state is the given @c parent state, function returns the @c parent state.
*
* @sa QMsm_childStateObj()
*/
QMState const *child = me->state.obj;
bool isFound = false; /* start with the child not found */
QMState const *s;
for (s = me->state.obj; s != (QMState *)0; s = s->superstate) {
if (s == parent) {
isFound = true; /* child is found */
break;
}
else {
child = s;
}
}
if (!isFound) { /* still not found? */
for (s = me->temp.obj; s != (QMState *)0; s = s->superstate) {
if (s == parent) {
isFound = true; /* child is found */
break;
}
else {
child = s;
}
}
}
/*! @post the child must be found */
Q_ENSURE_ID(810, isFound);
#ifdef Q_NASSERT
Q_UNUSED_PAR(isFound);
#endif
return child; /* return the child */
/*! Constructor of ::QMsm
* @protected @memberof QMsm
*
* @details
* Performs the first step of QMsm initialization by assigning the initial
* pseudostate to the currently active state of the state machine.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] initial the top-most initial transition for the MSM.
*
* @note
* Must be called only ONCE before QHSM_INIT().
*
* @note
* QMsm inherits QHsm, so by the @ref oop convention it should call the
* constructor of the superclass, i.e., QHsm_ctor(). However, this would pull
* in the QHsmVtable, which in turn will pull in the code for the QHsm_init_()
* and QHsm_dispatch_() implementations. To avoid this code size penalty, in
* case ::QHsm is not used in a given project, QMsm_ctor() performs direct
* initialization of the Vtable, which avoids pulling in the code for ::QHsm.
*
* @usage
* The following example illustrates how to invoke QMsm_ctor() in the
* "constructor" of a derived state machine:
* @include qep_qmsm_ctor.c
*/
static struct QHsmVtable const vtable = { /* QHsm virtual table */
&QMsm_init_,
&QMsm_dispatch_
#ifdef Q_SPY
,&QMsm_getStateHandler_
#endif
};
/* do not call the QHsm_ctor() here */
me->super.vptr = &vtable;
me->super.state.obj = &l_msm_top_s; /* the current state (top) */
me->super.temp.fun = initial; /* the initial transition handler */
/*! Implementation of the top-most initial tran. in ::QMsm.
* @private @memberof QMsm
*
* @details
* Executes the top-most initial transition in a MSM.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] e pointer to an extra parameter (might be NULL)
* @param[in] qs_id QS-id of this state machine (for QS local filter)
*
* @note
* This function should be called only via the virtual table (see
* QHSM_INIT()) and should NOT be called directly in the applications.
*/
#ifndef Q_SPY
Q_UNUSED_PAR(qs_id);
#endif
/*! @pre the virtual pointer must be initialized, the top-most initial
* transition must be initialized, and the initial transition must not
* be taken yet.
*/
Q_REQUIRE_ID(200, (me->vptr != (struct QHsmVtable *)0)
&& (me->temp.fun != Q_STATE_CAST(0))
&& (me->state.obj == &l_msm_top_s));
/* execute the top-most initial tran. */
QState r = (*me->temp.fun)(me, Q_EVT_CAST(QEvt));
/* the top-most initial transition must be taken */
Q_ASSERT_ID(210, r == Q_RET_TRAN_INIT);
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_STATE_INIT, qs_id)
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(me->state.obj->stateHandler); /* source state */
QS_FUN_PRE_(me->temp.tatbl->target->stateHandler); /* target state */
QS_END_PRE_()
/* set state to the last tran. target */
me->state.obj = me->temp.tatbl->target;
/* drill down into the state hierarchy with initial transitions... */
/* execute the tran. table */
do {
r = QMsm_execTatbl_(me, me->temp.tatbl, qs_id);
} while (r >= Q_RET_TRAN_INIT);
QS_BEGIN_PRE_(QS_QEP_INIT_TRAN, qs_id)
QS_TIME_PRE_(); /* time stamp */
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(me->state.obj->stateHandler); /* the new current state */
QS_END_PRE_()
/*! Implementation of dispatching events to a ::QMsm
* @private @memberof QMsm
*
* @details
* Dispatches an event for processing to a meta state machine (MSM).
* The processing of an event represents one run-to-completion (RTC) step.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] e pointer to the event to be dispatched to the MSM
* @param[in] qs_id QS-id of this state machine (for QS local filter)
*
* @note
* This function should be called only via the virtual table (see
* QHSM_DISPATCH()) and should NOT be called directly in the applications.
*/
#ifndef Q_SPY
Q_UNUSED_PAR(qs_id);
#endif
QMState const *s = me->state.obj; /* store the current state */
QMState const *t = s;
/*! @pre current state must be initialized */
Q_REQUIRE_ID(300, s != (QMState *)0);
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_DISPATCH, qs_id)
QS_TIME_PRE_(); /* time stamp */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(s->stateHandler); /* the current state handler */
QS_END_PRE_()
/* scan the state hierarchy up to the top state... */
QState r;
do {
r = (*t->stateHandler)(me, e); /* call state handler function */
/* event handled? (the most frequent case) */
if (r >= Q_RET_HANDLED) {
break; /* done scanning the state hierarchy */
}
/* event unhandled and passed to the superstate? */
else if (r == Q_RET_SUPER) {
t = t->superstate; /* advance to the superstate */
}
/* event unhandled and passed to a submachine superstate? */
else if (r == Q_RET_SUPER_SUB) {
t = me->temp.obj; /* current host state of the submachine */
}
/* event unhandled due to a guard? */
else if (r == Q_RET_UNHANDLED) {
QS_BEGIN_PRE_(QS_QEP_UNHANDLED, qs_id)
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(t->stateHandler); /* the current state */
QS_END_PRE_()
t = t->superstate; /* advance to the superstate */
}
else {
/* no other return value should be produced */
Q_ERROR_ID(310);
}
} while (t != (QMState *)0);
/* any kind of transition taken? */
if (r >= Q_RET_TRAN) {
#ifdef Q_SPY
QMState const * const ts = t; /* transition source for QS tracing */
/* the transition source state must not be NULL */
Q_ASSERT_ID(320, ts != (QMState *)0);
#endif /* Q_SPY */
do {
/* save the transition-action table before it gets clobbered */
struct QMTranActTable const * const tatbl = me->temp.tatbl;
union QHsmAttr tmp; /* temporary to save intermediate values */
/* was TRAN, TRAN_INIT, or TRAN_EP taken? */
if (r <= Q_RET_TRAN_EP) {
me->temp.obj = (QMState *)0; /* clear */
QMsm_exitToTranSource_(me, s, t, qs_id);
r = QMsm_execTatbl_(me, tatbl, qs_id);
s = me->state.obj;
}
/* was a transition segment to history taken? */
else if (r == Q_RET_TRAN_HIST) {
tmp.obj = me->state.obj; /* save history */
me->state.obj = s; /* restore the original state */
QMsm_exitToTranSource_(me, s, t, qs_id);
(void)QMsm_execTatbl_(me, tatbl, qs_id);
r = QMsm_enterHistory_(me, tmp.obj, qs_id);
s = me->state.obj;
}
/* was a transition segment to an exit point taken? */
else if (r == Q_RET_TRAN_XP) {
tmp.act = me->state.act; /* save XP action */
me->state.obj = s; /* restore the original state */
r = (*tmp.act)(me); /* execute the XP action */
if (r == Q_RET_TRAN) { /* XP -> TRAN ? */
tmp.tatbl = me->temp.tatbl; /* save me->temp */
QMsm_exitToTranSource_(me, s, t, qs_id);
/* take the tran-to-XP segment inside submachine */
(void)QMsm_execTatbl_(me, tatbl, qs_id);
s = me->state.obj;
#ifdef Q_SPY
me->temp.tatbl = tmp.tatbl; /* restore me->temp */
#endif /* Q_SPY */
}
else if (r == Q_RET_TRAN_HIST) { /* XP -> HIST ? */
tmp.obj = me->state.obj; /* save the history */
me->state.obj = s; /* restore the original state */
s = me->temp.obj; /* save me->temp */
QMsm_exitToTranSource_(me, me->state.obj, t, qs_id);
/* take the tran-to-XP segment inside submachine */
(void)QMsm_execTatbl_(me, tatbl, qs_id);
#ifdef Q_SPY
me->temp.obj = s; /* restore me->temp */
#endif /* Q_SPY */
s = me->state.obj;
me->state.obj = tmp.obj; /* restore the history */
}
else {
/* TRAN_XP must NOT be followed by any other tran type */
Q_ASSERT_ID(330, r < Q_RET_TRAN);
}
}
else {
/* no other return value should be produced */
Q_ERROR_ID(340);
}
t = s; /* set target to the current state */
} while (r >= Q_RET_TRAN);
QS_BEGIN_PRE_(QS_QEP_TRAN, qs_id)
QS_TIME_PRE_(); /* time stamp */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(ts->stateHandler); /* the transition source */
QS_FUN_PRE_(s->stateHandler); /* the new active state */
QS_END_PRE_()
}
#ifdef Q_SPY
/* was the event handled? */
else if (r == Q_RET_HANDLED) {
/* internal tran. source can't be NULL */
Q_ASSERT_ID(340, t != (QMState *)0);
QS_BEGIN_PRE_(QS_QEP_INTERN_TRAN, qs_id)
QS_TIME_PRE_(); /* time stamp */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(t->stateHandler); /* the source state */
QS_END_PRE_()
}
/* event bubbled to the 'top' state? */
else if (t == (QMState *)0) {
QS_BEGIN_PRE_(QS_QEP_IGNORED, qs_id)
QS_TIME_PRE_(); /* time stamp */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(s->stateHandler); /* the current state */
QS_END_PRE_()
}
#endif /* Q_SPY */
else {
/* empty */
}
/*! Implementation of getting the state handler in a ::QMsm subclass
* @public @memberof QMsm
*/
return me->state.obj->stateHandler;
/*! Execute transition-action table
* @private @memberof QMsm
*
* @details
* Helper function to execute transition sequence in a transition-action table.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] tatbl pointer to the transition-action table
* @param[in] qs_id QS-id of this state machine (for QS local filter)
*
* @returns
* status of the last action from the transition-action table.
*
* @note
* This function is for internal use inside the QEP event processor and
* should **not** be called directly from the applications.
*/
#ifndef Q_SPY
Q_UNUSED_PAR(qs_id);
#endif
QState r = Q_RET_NULL;
QS_CRIT_STAT_
/*! @pre the transition-action table pointer must not be NULL */
Q_REQUIRE_ID(400, tatbl != (struct QMTranActTable *)0);
for (QActionHandler const *a = &tatbl->act[0];
*a != Q_ACTION_CAST(0);
++a)
{
r = (*(*a))(me); /* call the action through the 'a' pointer */
#ifdef Q_SPY
if (r == Q_RET_ENTRY) {
QS_BEGIN_PRE_(QS_QEP_STATE_ENTRY, qs_id)
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(me->temp.obj->stateHandler);/*entered state */
QS_END_PRE_()
}
else if (r == Q_RET_EXIT) {
QS_BEGIN_PRE_(QS_QEP_STATE_EXIT, qs_id)
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(me->temp.obj->stateHandler); /* exited state */
QS_END_PRE_()
}
else if (r == Q_RET_TRAN_INIT) {
QS_BEGIN_PRE_(QS_QEP_STATE_INIT, qs_id)
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(tatbl->target->stateHandler); /* source */
QS_FUN_PRE_(me->temp.tatbl->target->stateHandler);/* target */
QS_END_PRE_()
}
else if (r == Q_RET_TRAN_EP) {
QS_BEGIN_PRE_(QS_QEP_TRAN_EP, qs_id)
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(tatbl->target->stateHandler); /* source */
QS_FUN_PRE_(me->temp.tatbl->target->stateHandler);/* target */
QS_END_PRE_()
}
else if (r == Q_RET_TRAN_XP) {
QS_BEGIN_PRE_(QS_QEP_TRAN_XP, qs_id)
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(tatbl->target->stateHandler); /* source */
QS_FUN_PRE_(me->temp.tatbl->target->stateHandler);/* target */
QS_END_PRE_()
}
else {
/* empty */
}
#endif /* Q_SPY */
}
me->state.obj = (r >= Q_RET_TRAN)
? me->temp.tatbl->target
: tatbl->target;
return r;
/*! Exit the current state up to the explicit transition source
* @private @memberof QMsm
*
* @details
* Static helper function to exit the current state configuration to the
* transition source, which in a hierarchical state machine might be a
* superstate of the current state.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] cs pointer to the current state
* @param[in] ts pointer to the transition source state
* @param[in] qs_id QS-id of this state machine (for QS local filter)
*/
#ifndef Q_SPY
Q_UNUSED_PAR(qs_id);
#endif
/* exit states from the current state to the tran. source state */
QMState const *s = cs;
while (s != ts) {
/* exit action provided in state 's'? */
if (s->exitAction != Q_ACTION_CAST(0)) {
QS_CRIT_STAT_
(void)(*s->exitAction)(me); /* execute the exit action */
QS_BEGIN_PRE_(QS_QEP_STATE_EXIT, qs_id)
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(s->stateHandler); /* the exited state handler */
QS_END_PRE_()
}
s = s->superstate; /* advance to the superstate */
if (s == (QMState *)0) { /* reached the top of a submachine? */
s = me->temp.obj; /* the superstate from QM_SM_EXIT() */
Q_ASSERT_ID(510, s != (QMState *)0); /* must be valid */
}
}
/*! Enter history of a composite state
* @private @memberof QMsm
*
* @details
* Static helper function to execute the segment of transition to history
 * after entering the composite state.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] hist pointer to the history substate
* @param[in] qs_id QS-id of this state machine (for QS local filter)
*
* @returns
* #Q_RET_TRAN_INIT, if an initial transition has been executed in the last
* entered state or #Q_RET_NULL if no such transition was taken.
*/
#ifndef Q_SPY
Q_UNUSED_PAR(qs_id);
#endif
QMState const *s = hist;
QMState const *ts = me->state.obj; /* transition source */
QMState const *epath[QMSM_MAX_ENTRY_DEPTH_];
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_TRAN_HIST, qs_id)
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(ts->stateHandler); /* source state handler */
QS_FUN_PRE_(hist->stateHandler); /* target state handler */
QS_END_PRE_()
int_fast8_t i = 0; /* transition entry path index */
while (s != ts) {
if (s->entryAction != Q_ACTION_CAST(0)) {
Q_ASSERT_ID(620, i < QMSM_MAX_ENTRY_DEPTH_);
epath[i] = s;
++i;
}
s = s->superstate;
if (s == (QMState *)0) {
            ts = s; /* force exit from the while-loop */
}
}
/* retrace the entry path in reverse (desired) order... */
while (i > 0) {
--i;
(void)(*epath[i]->entryAction)(me); /* run entry action in epath[i] */
QS_BEGIN_PRE_(QS_QEP_STATE_ENTRY, qs_id)
QS_OBJ_PRE_(me);
QS_FUN_PRE_(epath[i]->stateHandler); /* entered state handler */
QS_END_PRE_()
}
me->state.obj = hist; /* set current state to the transition target */
/* initial tran. present? */
QState r;
if (hist->initAction != Q_ACTION_CAST(0)) {
r = (*hist->initAction)(me); /* execute the transition action */
}
else {
r = Q_RET_NULL;
}
return r;
/*! Maximum number of active objects (configurable value in qf_port.h)
* Valid values: [1U..64U]; default 32U
*/
32U
/*! Maximum number of clock rates (configurable value in qf_port.h)
* Valid values: [0U..15U]; default 1U
*/
1U
/*! Maximum number of event pools (configurable value in qf_port.h)
* Valid values: [0U..15U]; default 3U
*
* @note
* #QF_MAX_EPOOL set to zero means that dynamic events are NOT configured
* and should not be used in the application.
*/
3U
/*! Size of the QTimeEvt counter (configurable value in qf_port.h)
* Valid values: 1U, 2U, or 4U; default 4U
*/
4U
/*! Size of the event-size (configurable value in qf_port.h)
* Valid values: 1U, 2U, or 4U; default 2U
*/
2U
/*! bitmask for the internal representation of QPSet elements */
/*! Data type to store the time-event counter, defined based on the macro
 * #QF_TIMEEVT_CTR_SIZE.
 *
 * @details
 * The dynamic range of this data type determines the maximum number of
 * clock ticks that can be stored in the down-counter and interval of
 * a time event.
 */
const
/*! Log-base-2 calculation when hardware acceleration
* is NOT provided (#QF_LOG2 not defined).
*/
static uint8_t const log2LUT[16] = {
0U, 1U, 2U, 2U, 3U, 3U, 3U, 3U,
4U, 4U, 4U, 4U, 4U, 4U, 4U, 4U
};
uint_fast8_t n = 0U;
QPSetBits t;
#if (QF_MAX_ACTIVE > 16U)
t = (QPSetBits)(x >> 16U);
if (t != 0U) {
n += 16U;
x = t;
}
#endif
#if (QF_MAX_ACTIVE > 8U)
t = (x >> 8U);
if (t != 0U) {
n += 8U;
x = t;
}
#endif
t = (x >> 4U);
if (t != 0U) {
n += 4U;
x = t;
}
return n + log2LUT[x];
/*! Priority specification for Active Objects in QP
*
* @details
* Active Object priorities in QP are integer numbers in the range
* [1..#QF_MAX_ACTIVE], whereas the special priority number 0 is reserved
* for the lowest-priority idle thread. The QP framework uses the *direct*
* priority numbering, in which higher numerical values denote higher urgency.
* For example, an AO with priority 32 has higher urgency than an AO with
* priority 23.
*
* ::QPrioSpec allows an application developer to assign **two**
* priorities to a given AO (see also Q_PRIO()):
*
* 1. The "QF-priority", which resides in the least-significant byte
* of the ::QPrioSpec data type. The "QF-priority" must be **unique**
* for each thread in the system and higher numerical values represent
 * higher urgency (direct priority numbering).
*
* 2. The "preemption-threshold" priority, which resides in the most-
* significant byte of the ::QPrioSpec data type. The second priority
* cannot be lower than the "QF-priority", but does NOT need to be
 * unique.
*
* In the QP native preemptive kernels, like QK and QXK, the "preemption-
* threshold" priority is used as to implement the "preemption-threshold
* scheduling" (PTS). It determines the conditions under which a given
* thread can be *preempted* by other threads. Specifically, a given
* thread can be preempted only by another thread with a *higher*
* priority than the "preemption-threshold" of the original thread.
*
* ![QF-priority and preemption-threshold relations](qp-prio.png)
*
* @note
* For backwards-compatibility, ::QPrioSpec data type might contain only
* the "QF-priority" component (and the "preemption-threshold" component
* left at zero). In that case, the "preemption-threshold" will be assumed
* to be the same as the "QF-priority". This corresponds exactly to the
* previous semantics of AO priority.
*
* @remark
* When QP runs on top of 3rd-party kernels/RTOSes or general-purpose
 * operating systems, the second priority can have a different meaning,
* depending on the specific RTOS/GPOS used.
*/
/*! The scheduler lock status used in some real-time kernels */
/*! @brief Priority Set of up to #QF_MAX_ACTIVE elements
* @class QPSet
*
* @details
* The priority set represents the set of active objects that are ready to
* run and need to be considered by the scheduling algorithm. The set is
* capable of storing up to #QF_MAX_ACTIVE priority levels, which can be
 * configured in the range 1..64, inclusive.
*/
/*! bitmask with a bit for each element */
/*! bitmasks with a bit for each element */
/*! Make the priority set empty */
#if (QF_MAX_ACTIVE <= 32)
me->bits = 0U;
#else
me->bits[0] = 0U;
me->bits[1] = 0U;
#endif
const
/*! Return 'true' if the priority set is empty */
#if (QF_MAX_ACTIVE <= 32)
return (me->bits == 0U);
#else
return (me->bits[0] == 0U) ? (me->bits[1] == 0U) : false;
#endif
const
/*! Return 'true' if the priority set is NOT empty */
#if (QF_MAX_ACTIVE <= 32)
return (me->bits != 0U);
#else
return (me->bits[0] != 0U) ? true : (me->bits[1] != 0U);
#endif
const
/*! Return 'true' if the priority set has the element n. */
#if (QF_MAX_ACTIVE <= 32U)
return (me->bits & (1U << (n - 1U))) != 0U;
#else
return (n <= 32U)
? ((me->bits[0] & ((uint32_t)1U << (n - 1U))) != 0U)
: ((me->bits[1] & ((uint32_t)1U << (n - 33U))) != 0U);
#endif
/*! insert element `n` into the set (n = 1..::QF_MAX_ACTIVE) */
#if (QF_MAX_ACTIVE <= 32U)
me->bits = (me->bits | (1U << (n - 1U)));
#else
if (n <= 32U) {
me->bits[0] = (me->bits[0] | ((uint32_t)1U << (n - 1U)));
}
else {
me->bits[1] = (me->bits[1] | ((uint32_t)1U << (n - 33U)));
}
#endif
/*! Remove element `n` from the set (n = 1U..::QF_MAX_ACTIVE) */
#if (QF_MAX_ACTIVE <= 32U)
me->bits = (me->bits &
(QPSetBits)(~((QPSetBits)1U << (n - 1U))));
#else
if (n <= 32U) {
(me->bits[0] = (me->bits[0] & ~((uint32_t)1U << (n - 1U))));
}
else {
(me->bits[1] = (me->bits[1] & ~((uint32_t)1U << (n - 33U))));
}
#endif
const
/*! Find the maximum element in the set, returns zero if the set is empty */
#if (QF_MAX_ACTIVE <= 32)
return QF_LOG2(me->bits);
#else
return (me->bits[1] != 0U)
? (QF_LOG2(me->bits[1]) + 32U)
: (QF_LOG2(me->bits[0]));
#endif
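/* Usage sketch (illustrative only, not part of the generated model): a
 * "ready set" of AO priorities manipulated with the QPSet operations
 * documented above. QPSet_setEmpty() is assumed to be the name of the
 * "make empty" operation; QPSet_insert(), QPSet_remove() and
 * QPSet_findMax() appear later in this model.
 */
QPSet readySet;
QPSet_setEmpty(&readySet);        /* start with an empty set */
QPSet_insert(&readySet, 3U);      /* AO with QF-priority 3 becomes ready */
QPSet_insert(&readySet, 12U);     /* AO with QF-priority 12 becomes ready */
uint_fast8_t p = QPSet_findMax(&readySet); /* highest ready priority: 12 */
QPSet_remove(&readySet, p);       /* priority 12 has been serviced */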
/*! Subscriber List (for publish-subscribe)
*
* @details
* This data type represents a set of Active Objects that subscribe to
* a given signal. The set is represented as priority-set, where each
* bit corresponds to the unique QF-priority of an AO (see ::QPrioSpec).
*/
/*! Special value of margin that causes asserting failure in case
* event allocation or event posting fails
*/
((uint_fast16_t)0xFFFFU)
/*! Create a ::QPrioSpec object to specify priority of an AO or a thread */
((QPrioSpec)((prio_) | ((pthre_) << 8U)))
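/* Usage sketch (illustrative only, not part of the generated model):
 * composing a ::QPrioSpec value with Q_PRIO(). The numbers are hypothetical.
 */
QPrioSpec const ps = Q_PRIO(3U, 5U); /* QF-priority 3, preemption-threshold 5 */
/* Q_PRIO(3U, 0U) (or plain 3U) leaves the preemption-threshold at zero,
 * which the framework interprets as equal to the QF-priority (see the
 * ::QPrioSpec note above).
 */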
/*! Allocate a dynamic event (case when ::QEvt is a POD)
*
* @details
* The macro calls the internal QF function QF::newX_() with
* margin == ::QF_NO_MARGIN, which causes an assertion when the event
* cannot be successfully allocated.
*
* @param[in] evtT_ event type (class name) of the event to allocate
* @param[in] sig_ signal to assign to the newly allocated event
*
* @returns a valid event pointer cast to the type `evtT_`.
*
* @note
* If #Q_EVT_CTOR is defined, the Q_NEW() macro becomes variadic and
* takes all the arguments needed by the constructor of the event
* class being allocated. The constructor is then called by means
* of the placement-new operator.
*
* @usage
* The following example illustrates dynamic allocation of an event:
* @include qf_post.c
*/
((evtT_ *)QF_newX_((uint_fast16_t)sizeof(evtT_), \
QF_NO_MARGIN, (enum_t)(sig_)))
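/* Usage sketch (illustrative): asserting allocation of a dynamic event.
 * The event type MyEvt (with ::QEvt as its first member 'super'), the
 * signal MY_SIG, the recipient AO_Receiver and the sender 'me' are
 * hypothetical application names.
 */
MyEvt *pe = Q_NEW(MyEvt, MY_SIG);  /* cannot fail; asserts internally */
pe->data = 123U;                   /* fill the event parameter(s) */
QACTIVE_POST(AO_Receiver, &pe->super, me); /* post FIFO to the recipient */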
/*! Asserting allocate a dynamic event
* (case when ::QEvt is not a POD)
*/
\
(evtT_##_ctor((evtT_ *)QF_newX_((uint_fast16_t)sizeof(evtT_), \
QF_NO_MARGIN, (sig_)), (enum_t)(sig_), ##__VA_ARGS__))
/*! Non-asserting allocate a dynamic event (case when ::QEvt is a POD).
*
* @details
* This macro allocates a new event and sets the pointer `e_`, while
* leaving at least `margin_` of events still available in the pool
*
* @param[out] e_ pointer to the newly allocated event
* @param[in] evtT_ event type (class name) of the event to allocate
* @param[in] margin_ number of events that must remain available
* in the given pool after this allocation. The
* special value ::QF_NO_MARGIN causes asserting
* failure in case event allocation fails.
* @param[in] sig_ signal to assign to the newly allocated event
*
* @returns an event pointer cast to the type `evtT_` or NULL if the
* event cannot be allocated with the specified `margin`.
*
* @note
* If #Q_EVT_CTOR is defined, the Q_NEW_X() macro becomes variadic and
* takes all the arguments needed by the constructor of the event
* class being allocated. The constructor is then called by means
* of the placement-new operator.
*
* @usage
* The following example illustrates dynamic allocation of an event:
* @include qf_postx.c
*/
((e_) = \
(evtT_ *)QF_newX_((uint_fast16_t)sizeof(evtT_), \
(margin_), (enum_t)(sig_)))
/*! Non-asserting allocate a dynamic event
* (case when ::QEvt is not a POD)
*/
do { \
(e_) = (evtT_ *)QF_newX_((uint_fast16_t)sizeof(evtT_), \
(margin_), (enum_t)(sig_));\
if ((e_) != (evtT_ *)0) { \
evtT_##_ctor((e_), (enum_t)(sig_), ##__VA_ARGS__); \
} \
} while (false)
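/* Usage sketch (illustrative): non-asserting allocation with a margin of
 * 5 events left in the pool. MyEvt, MY_SIG, AO_Receiver and 'me' are
 * hypothetical application names.
 */
MyEvt *pe;
Q_NEW_X(pe, MyEvt, 5U, MY_SIG);    /* 'pe' might come back as NULL */
if (pe != (MyEvt *)0) {            /* allocation succeeded? */
    pe->data = 123U;
    QACTIVE_POST(AO_Receiver, &pe->super, me);
}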
/*! Create a new reference of the current event `e`
*
* @details
* The current event processed by an active object is available only for
* the duration of the run-to-completion (RTC) step. After that step, the
* current event is no longer available and the framework might recycle
* (garbage-collect) the event. The macro Q_NEW_REF() explicitly creates
* a new reference to the current event that can be stored and used beyond
* the current RTC step, until the reference is explicitly recycled by
* means of the macro Q_DELETE_REF().
*
* @param[in,out] evtRef_ event reference to create
* @param[in] evtT_ event type (class name) of the event reference
*
* @usage
* The example **defer** in the directory `examples/win32/defer` illustrates
* the use of Q_NEW_REF()
*
* @sa Q_DELETE_REF()
*/
\
((evtRef_) = (evtT_ const *)QF_newRef_(e, (evtRef_)))
/*! Delete the event reference
*
* @details
* Every event reference created with the macro Q_NEW_REF() needs to be
* eventually deleted by means of the macro Q_DELETE_REF() to avoid leaking
* the event.
*
* @param[in,out] evtRef_ event reference to delete
*
* @usage
* The example **defer** in the directory `examples/win32/defer` illustrates
* the use of Q_DELETE_REF()
*
* @sa Q_NEW_REF()
*/
do { \
QF_deleteRef_((evtRef_)); \
(evtRef_) = (void *)0; \
} while (false)
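/* Usage sketch (illustrative): keeping the current event 'e' beyond the
 * RTC step. The AO member me->savedRef (of type MyEvt const *) is a
 * hypothetical name.
 */
Q_NEW_REF(me->savedRef, MyEvt);    /* new reference to the current event */
/* ... in a later RTC step, when the stored event is no longer needed: */
Q_DELETE_REF(me->savedRef);        /* recycle the reference */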
/*! Virtual call to start an active object.
*
* @details
* Starts execution of the AO and registers the AO with the framework.
*
* @param[in,out] me_ current instance pointer (see @ref oop)
* @param[in] prioSpec_ priority specification for the Active Object
* @param[in] qSto_ pointer to the storage for the ring buffer of the
* event queue (used only with the built-in ::QEQueue)
* @param[in] qLen_ length of the event queue (in events)
* @param[in] stkSto_ pointer to the stack storage (used only when
* per-AO stack is needed)
* @param[in] stkSize_ stack size (in bytes)
* @param[in] par_ pointer to the additional port-specific parameter(s)
* (might be NULL).
* @usage
* @include qf_start.c
*/
do { \
Q_ASSERT((Q_HSM_UPCAST(me_))->vptr); \
(*((QActiveVtable const *)((Q_HSM_UPCAST(me_))->vptr))->start)( \
(QActive *)(me_), (prioSpec_), \
(qSto_), (qLen_), (stkSto_), (stkSize_), (par_)); \
} while (false)
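/* Usage sketch (illustrative): starting an AO with the built-in event
 * queue and no per-AO stack. The opaque AO pointer AO_Blinky, the queue
 * storage and the Q_DIM() array-dimension helper are assumed from the
 * application/QP API.
 */
static QEvt const *blinkyQSto[10]; /* event queue buffer for the AO */

QACTIVE_START(AO_Blinky,
    Q_PRIO(3U, 3U),                /* QF-prio 3, preemption-threshold 3 */
    blinkyQSto, Q_DIM(blinkyQSto), /* event queue buffer and its length */
    (void *)0, 0U,                 /* no per-AO stack */
    (void *)0);                    /* no initialization parameter */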
/*! Invoke the direct event posting facility QActive_post_()
*
* @details
* This macro asserts if the queue overflows and cannot accept the event.
*
* @param[in,out] me_ current instance pointer (see @ref oop)
* @param[in] e_ pointer to the event to post
* @param[in] sender_ pointer to the sender object.
*
* @note
 * The `sender_` parameter is actually only used when QS tracing
 * is enabled (macro #Q_SPY is defined). When QS software tracing is
 * disabled, the QACTIVE_POST() macro does not pass the `sender_`
* parameter, so the overhead of passing this extra parameter is entirely
* avoided.
*
* @note the pointer to the sender object is not necessarily a pointer
* to an active object. In fact, if QACTIVE_POST() is called from an
* interrupt or other context, you can create a unique object just to
* unambiguously identify the sender of the event.
*
* @sa QActive_post_()
*/
\
((void)(*((QActiveVtable const *)((Q_HSM_UPCAST(me_))->vptr))->post)(\
(me_), (e_), QF_NO_MARGIN, (sender_)))
\
((void)(*((QActiveVtable const *)((Q_HSM_UPCAST(me_))->vptr))->post)(\
(me_), (e_), QF_NO_MARGIN, (void *)0))
/*! Invoke the direct event posting facility QActive_post_()
* without delivery guarantee
*
* @details
* This macro does not assert if the queue overflows and cannot accept
* the event with the specified margin of free slots remaining.
*
* @param[in,out] me_ current instance pointer (see @ref oop)
* @param[in] e_ pointer to the event to post
* @param[in] margin_ the minimum free slots in the queue, which
* must still be available after posting the event.
* The special value ::QF_NO_MARGIN causes
* asserting failure in case event posting fails.
* @param[in] sender_ pointer to the sender object.
*
* @returns
* 'true' if the posting succeeded, and 'false' if the posting
* failed due to insufficient margin of free entries available in
* the queue.
*
* @note
* The `sender_` parameter is actually only used when QS tracing
* is enabled (macro #Q_SPY is defined). When QS software tracing is
* disabled, the POST_X() macro does not pass the `sender_` parameter,
* so the overhead of passing this extra parameter is entirely avoided.
*
* @note
* The pointer to the sender object is not necessarily a pointer
* to an active object. In fact, if POST_X() is called from an
* interrupt or other context, you can create a unique object just to
* unambiguously identify the sender of the event.
*
* @usage
* @include qf_postx.c
*/
\
((*((QActiveVtable const *)((Q_HSM_UPCAST(me_))->vptr))->post)((me_),\
(e_), (margin_), (sender_)))
\
((*((QActiveVtable const *)((Q_HSM_UPCAST(me_))->vptr))->post)((me_),\
(e_), (margin_), (void *)0))
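/* Usage sketch (illustrative): posting without delivery guarantee while
 * leaving at least 3 free slots in the recipient's queue. AO_Logger,
 * LOG_SIG and the sender 'me' are hypothetical application names.
 */
QEvt const *ev = Q_NEW(QEvt, LOG_SIG);
if (!QACTIVE_POST_X(AO_Logger, ev, 3U, me)) {
    /* posting failed; the framework recycles the event internally
     * (see QActive_post_() later in this model), so 'ev' must not be
     * referenced from here on.
     */
}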
/*! Virtual call to post an event to an active object using the
* Last-In-First-Out (LIFO) policy.
*
* @param[in,out] me_ current instance pointer (see @ref oop)
* @param[in] e_ pointer to the event to post
*/
\
((*((QActiveVtable const *)((Q_HSM_UPCAST(me_))->vptr))->postLIFO)( \
(me_), (e_)))
/*! Publish an event to all subscriber Active Objects.
*
* @details
* If #Q_SPY is defined, this macro calls QActive_publish_() with
* the `sender_` parameter to identify the publisher of the event.
* Otherwise, `sender_` is not used.
*
* @param[in] e_ pointer to the posted event
* @param[in] sender_ pointer to the sender object (actually used
* only when #Q_SPY is defined)
*
* @note
* The pointer to the `sender_` object is not necessarily a pointer
* to an active object. In fact, if QACTIVE_PUBLISH() is called from an
* interrupt or other context, you can create a unique object just to
* unambiguously identify the sender of the event.
*
* @sa QActive_publish_()
*/
\
(QActive_publish_((e_), (void const *)(sender_), (sender_)->prio))
(QActive_publish_((e_), (void *)0, 0U))
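/* Usage sketch (illustrative): publishing a dynamic event to all
 * subscribers from within an AO whose instance pointer is 'me'. TempEvt
 * (with ::QEvt as its first member 'super') and TEMP_READY_SIG are
 * hypothetical application names.
 */
TempEvt *te = Q_NEW(TempEvt, TEMP_READY_SIG);
te->celsius = 25;
QACTIVE_PUBLISH(&te->super, &me->super); /* multicast to all subscribers */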
/*! Invoke the system clock tick processing QTimeEvt_tick_()
*
* @details
* This macro is the recommended way of invoking clock tick processing,
* because it provides the vital information for software tracing and
* avoids any overhead when the tracing is disabled.
*
* @param[in] tickRate_ clock tick rate to be serviced through this call
* @param[in] sender_ pointer to the sender object. This parameter
* is actually only used when QS software tracing is enabled
* (macro #Q_SPY is defined)
* @note
* When QS software tracing is disabled, the macro calls QTimeEvt_tick_()
* without the `sender` parameter, so the overhead of passing this
* extra parameter is entirely avoided.
*
* @note
* The pointer to the sender object is not necessarily a pointer
* to an active object. In fact, when QTIMEEVT_TICK_X() is called from
* an interrupt, you would create a unique object just to unambiguously
* identify the ISR as the sender of the time events.
*
* @sa QTimeEvt_tick_()
*/
\
(QTimeEvt_tick_((tickRate_), (sender_)))
\
(QTimeEvt_tick_((tickRate_), (void *)0))
/*! Invoke the system clock tick processing
* for tick rate 0
*/
QTIMEEVT_TICK_X(0U, (sender_))
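/* Usage sketch (illustrative): servicing tick rate 0 from the system
 * clock tick ISR. SysTick_Handler() and the dummy sender object
 * l_SysTick are application-specific (hypothetical here).
 */
static uint8_t const l_SysTick = 0U; /* identifies this ISR as the sender */

void SysTick_Handler(void) {
    QTIMEEVT_TICK_X(0U, &l_SysTick); /* process time events at rate 0 */
}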
/*! No-operation for exiting a critical section
*
* @details
* In some QF ports the critical section exit takes effect only on the
* next machine instruction. If this next instruction is another entry
* to a critical section, the critical section won't be really exited,
 * but rather the two adjacent critical sections would be merged.
 * The QF_CRIT_EXIT_NOP() macro contains the minimal code required to
 * prevent such merging of critical sections in the QF ports in which
 * it can occur.
*/
((void)0)
/*! Invoke the system clock tick processing
*
* @deprecated
* superseded by QTIMEEVT_TICK_X()
*/
QTIMEEVT_TICK_X((tickRate_), (sender_))
/*! Invoke the system clock tick processing for tick rate 0
*
* @deprecated
* superseded by QTIMEEVT_TICK()
*/
QTIMEEVT_TICK(sender_)
/*! Publish an event to all subscriber Active Objects.
*
* @deprecated
* superseded by QACTIVE_PUBLISH()
*/
QACTIVE_PUBLISH((e_), (sender_))
/*! @brief Active object class (based on the QHsm implementation strategy)
* @class QActive
* @extends QHsm
*
* @details
* Active objects are encapsulated tasks (each containing an event queue and
* a state machine) that communicate with one another asynchronously by
* sending and receiving events. Within an active object, events are
* processed in a run-to-completion (RTC) fashion, while QF encapsulates
* all the details of thread-safe event exchange and queuing.<br>
*
* QActive represents an active object that uses the QHsm-style
* implementation strategy for state machines. This strategy is tailored
* to manual coding, but it is also supported by the QM modeling tool.
* The resulting code is slower than in the ::QMsm-style implementation
* strategy.
*
* @note
* QActive is not intended to be instantiated directly, but rather serves
* as the abstract base class for derivation of active objects in the
* applications.
*
* @sa QMActive
*
* @usage
* The following example illustrates how to derive an active object from
* QActive.
* @include qf_qactive.c
*/
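/* Derivation sketch (illustrative, in the spirit of the @include example
 * above): an application AO inherits ::QActive by placing it as the FIRST
 * member of the derived struct. The Blinky members are hypothetical.
 */
typedef struct {
    QActive super;     /* inherited ::QActive (must be the first member) */
    QTimeEvt timeEvt;  /* private time event for periodic timeouts */
    uint8_t count;     /* private AO data */
} Blinky;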
/*! OS-dependent event-queue type
* @private @memberof QActive
*
* @details
* The type of the queue depends on the underlying operating system or
* a kernel. Many kernels support "message queues" that can be adapted
* to deliver QF events to the active object. Alternatively, QF provides
* a native event queue implementation that can be used as well.
*
* @note
* The native QF event queue is configured by defining the macro
* #QF_EQUEUE_TYPE as ::QEQueue.
*/
/*! OS-dependent per-thread object
* @private @memberof QActive
*
* @details
* This data might be used in various ways, depending on the QF port.
* In some ports me->osObject is used to block the calling thread when
* the native QF queue is empty. In other QF ports the OS-dependent
* object might be used differently.
*/
/*! OS-dependent representation of the thread of the active object
* @private @memberof QActive
*
* @details
* This data might be used in various ways, depending on the QF port.
 * In some ports me->thread is used to store the thread handle. In other ports
* me->thread can be a pointer to the Thread-Local-Storage (TLS).
*/
/*! QF-priority [1..#QF_MAX_ACTIVE] of this AO.
* @private @memberof QActive
* @sa ::QPrioSpec
*/
/*! preemption-threshold [1..#QF_MAX_ACTIVE] of this AO.
* @private @memberof QActive
* @sa ::QPrioSpec
*/
/*! Internal array of registered active objects
* @static @private @memberof QActive
*/
/*! pointer to the array of all subscriber AOs for a given event signal.
* @static @private @memberof QActive
*/
/*! The maximum published signal (the size of the subscrList_ array)
* @static @private @memberof QActive
*/
/*! Internal array of registered active objects
* @static @private @memberof QActive
*/
/*! ::QActive constructor (abstract base class)
* @protected @memberof QActive
*/
static QActiveVtable const vtable = { /* QActive virtual table */
{ &QHsm_init_,
&QHsm_dispatch_
#ifdef Q_SPY
,&QHsm_getStateHandler_
#endif
},
&QActive_start_,
&QActive_post_,
&QActive_postLIFO_
};
/* clear the whole QActive object, so that the framework can start
* correctly even if the startup code fails to clear the uninitialized
* data (as is required by the C Standard).
*/
QF_bzero(me, sizeof(*me));
QHsm_ctor(&me->super, initial); /* explicitly call superclass' ctor */
me->super.vptr = &vtable.super; /* hook the vptr to QActive vtable */
/*! Starts execution of an active object and registers the object
* with the framework
* @private @memberof QActive
*
* @details
* Starts execution of the AO and registers the AO with the framework.
*
* @param[in] prioSpec priority specification for the AO (See ::QPrioSpec)
* @param[in] qSto pointer to the storage for the ring buffer of the
* event queue
* @param[in] qLen length of the event queue [# ::QEvt* pointers]
* @param[in] stkSto pointer to the stack storage (might be NULL)
* @param[in] stkSize stack size [bytes]
* @param[in] par pointer to an extra parameter (might be NULL)
*
* @usage
* The following example shows starting an AO when a per-task stack
* is needed:
* @include qf_start.c
*/
/*! Stops execution of an active object and removes it from the
* framework's supervision
* @protected @memberof QActive
*
* @attention
* QActive_stop() must be called only from the AO that is about
* to stop its execution. By that time, any pointers or references
* to the AO are considered invalid (dangling) and it becomes
* illegal for the rest of the application to post events to the AO.
*/
/*! Posts an event `e` directly to the event queue of the active object
* using the First-In-First-Out (FIFO) policy.
* @private @memberof QActive
*
* @details
* Direct event posting is the simplest asynchronous communication
* method available in QF.
*
* @param[in] e pointer to the event to be posted
* @param[in] margin number of required free slots in the queue
* after posting the event or ::QF_NO_MARGIN.
* @param[in] sender pointer to a sender object (used in QS only)
*
* @returns
* 'true' (success) if the posting succeeded (with the provided margin)
* and 'false' (failure) when the posting fails.
*
* @attention
* For `margin` == ::QF_NO_MARGIN, this function will assert internally
* if the event posting fails. In that case, it is unnecessary to check
 * the return value from this function.
*
* @note
 * This function might be implemented differently in various QP/C
* ports. The provided implementation assumes that the ::QEQueue
* class is used for the ::QActive event queue.
*
* @usage
 * @include qf_post.c
*
* @sa
* QActive_postLIFO()
*/
#ifndef Q_SPY
Q_UNUSED_PAR(sender);
#endif
/*! @pre event pointer must be valid */
Q_REQUIRE_ID(100, e != (QEvt *)0);
QF_CRIT_STAT_
QF_CRIT_E_();
QEQueueCtr nFree = me->eQueue.nFree; /* get volatile into the temporary */
/* test-probe#1 for faking queue overflow */
QS_TEST_PROBE_DEF(&QActive_post_)
QS_TEST_PROBE_ID(1,
nFree = 0U;
)
bool status;
if (margin == QF_NO_MARGIN) {
if (nFree > 0U) {
status = true; /* can post */
}
else {
status = false; /* cannot post */
Q_ERROR_CRIT_(110); /* must be able to post the event */
}
}
else if (nFree > (QEQueueCtr)margin) {
status = true; /* can post */
}
else {
status = false; /* cannot post, but don't assert */
}
/* is it a dynamic event? */
if (e->poolId_ != 0U) {
QEvt_refCtr_inc_(e); /* increment the reference counter */
}
if (status) { /* can post the event? */
--nFree; /* one free entry just used up */
me->eQueue.nFree = nFree; /* update the volatile */
if (me->eQueue.nMin > nFree) {
            me->eQueue.nMin = nFree; /* update minimum so far */
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_POST, me->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(sender); /* the sender object */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this active object (recipient) */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_EQC_PRE_(nFree); /* number of free entries */
QS_EQC_PRE_(me->eQueue.nMin); /* min number of free entries */
QS_END_NOCRIT_PRE_()
#ifdef Q_UTEST
/* callback to examine the posted event under the same conditions
* as producing the #QS_QF_ACTIVE_POST trace record, which are:
* the local filter for this AO ('me->prio') is set
*/
if (QS_LOC_CHECK_(me->prio)) {
/* callback to examine the posted event */
QS_onTestPost(sender, me, e, status);
}
#endif
/* empty queue? */
if (me->eQueue.frontEvt == (QEvt *)0) {
me->eQueue.frontEvt = e; /* deliver event directly */
QACTIVE_EQUEUE_SIGNAL_(me); /* signal the event queue */
}
/* queue is not empty, insert event into the ring-buffer */
else {
/* insert event into the ring buffer (FIFO) */
me->eQueue.ring[me->eQueue.head] = e;
if (me->eQueue.head == 0U) { /* need to wrap head? */
me->eQueue.head = me->eQueue.end; /* wrap around */
}
--me->eQueue.head; /* advance the head (counter clockwise) */
}
QF_CRIT_X_();
}
else { /* cannot post the event */
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_POST_ATTEMPT, me->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(sender); /* the sender object */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this active object (recipient) */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_EQC_PRE_(nFree); /* number of free entries */
QS_EQC_PRE_(margin); /* margin requested */
QS_END_NOCRIT_PRE_()
#ifdef Q_UTEST
/* callback to examine the posted event under the same conditions
* as producing the #QS_QF_ACTIVE_POST trace record, which are:
* the local filter for this AO ('me->prio') is set
*/
if (QS_LOC_CHECK_(me->prio)) {
QS_onTestPost(sender, me, e, status);
}
#endif
QF_CRIT_X_();
#if (QF_MAX_EPOOL > 0U)
QF_gc(e); /* recycle the event to avoid a leak */
#endif
}
return status;
/*! Posts an event `e` directly to the event queue of the active object
* using the Last-In-First-Out (LIFO) policy.
* @private @memberof QActive
*
* @details
* The LIFO policy should be used only for self-posting and with caution,
* because it alters order of events in the queue.
*
* @param[in] e pointer to the event to be posted
*
* @attention
* This function asserts internally if the posting fails.
*
* @note
 * This function might be implemented differently in various QP/C
* ports. The provided implementation assumes that the ::QEQueue
* class is used for the QActive event queue.
*
* @sa
* QActive_post()
*/
QF_CRIT_STAT_
QF_CRIT_E_();
QEQueueCtr nFree = me->eQueue.nFree; /* get volatile into the temporary */
/* test-probe#1 for faking queue overflow */
QS_TEST_PROBE_DEF(&QActive_postLIFO_)
QS_TEST_PROBE_ID(1,
nFree = 0U;
)
/* the queue must be able to accept the event (cannot overflow) */
Q_ASSERT_CRIT_(210, nFree != 0U);
/* is it a dynamic event? */
if (e->poolId_ != 0U) {
QEvt_refCtr_inc_(e); /* increment the reference counter */
}
--nFree; /* one free entry just used up */
me->eQueue.nFree = nFree; /* update the volatile */
if (me->eQueue.nMin > nFree) {
me->eQueue.nMin = nFree; /* update minimum so far */
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_POST_LIFO, me->prio)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of this event */
QS_OBJ_PRE_(me); /* this active object */
QS_2U8_PRE_(e->poolId_, e->refCtr_);/* pool Id & ref Count */
QS_EQC_PRE_(nFree); /* # free entries */
QS_EQC_PRE_(me->eQueue.nMin); /* min number of free entries */
QS_END_NOCRIT_PRE_()
#ifdef Q_UTEST
/* callback to examine the posted event under the same conditions
* as producing the #QS_QF_ACTIVE_POST trace record, which are:
* the local filter for this AO ('me->prio') is set
*/
if (QS_LOC_CHECK_(me->prio)) {
QS_onTestPost((QActive *)0, me, e, true);
}
#endif
/* temporary to avoid UB for volatile access */
QEvt const * const frontEvt = me->eQueue.frontEvt;
me->eQueue.frontEvt = e; /* deliver the event directly to the front */
/* was the queue empty? */
if (frontEvt == (QEvt *)0) {
QACTIVE_EQUEUE_SIGNAL_(me); /* signal the event queue */
}
/* queue was not empty, leave the event in the ring-buffer */
else {
++me->eQueue.tail;
/* need to wrap the tail? */
if (me->eQueue.tail == me->eQueue.end) {
me->eQueue.tail = 0U; /* wrap around */
}
me->eQueue.ring[me->eQueue.tail] = frontEvt;
}
QF_CRIT_X_();
/*! Get an event from the event queue of an active object
* @private @memberof QActive
*
* @details
* The behavior of this function depends on the kernel used in the
* QF port. For built-in kernels (Vanilla or QK) the function can be
* called only when the queue is not empty, so it doesn't block. For
* a blocking kernel/OS the function can block and wait for delivery
* of an event.
*
* @returns
* A pointer to the received event. The returned pointer is guaranteed
* to be valid (can't be NULL).
*
* @note
 * This function might be implemented differently in various QP/C
* ports. The provided implementation assumes that the ::QEQueue
* class is used for the QActive event queue.
*/
QF_CRIT_STAT_
QF_CRIT_E_();
QACTIVE_EQUEUE_WAIT_(me); /* wait for event to arrive directly */
/* always remove event from the front */
QEvt const * const e = me->eQueue.frontEvt;
QEQueueCtr const nFree = me->eQueue.nFree + 1U; /* volatile into tmp */
me->eQueue.nFree = nFree; /* update the number of free */
/* any events in the ring buffer? */
if (nFree <= me->eQueue.end) {
/* remove event from the tail */
me->eQueue.frontEvt = me->eQueue.ring[me->eQueue.tail];
if (me->eQueue.tail == 0U) { /* need to wrap the tail? */
me->eQueue.tail = me->eQueue.end; /* wrap around */
}
--me->eQueue.tail;
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_GET, me->prio)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of this event */
QS_OBJ_PRE_(me); /* this active object */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_EQC_PRE_(nFree); /* # free entries */
QS_END_NOCRIT_PRE_()
}
else {
me->eQueue.frontEvt = (QEvt *)0; /* queue becomes empty */
        /* all entries in the queue must be free (+1 for frontEvt) */
Q_ASSERT_CRIT_(310, nFree == (me->eQueue.end + 1U));
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_GET_LAST, me->prio)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of this event */
QS_OBJ_PRE_(me); /* this active object */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_END_NOCRIT_PRE_()
}
QF_CRIT_X_();
return e;
const
/*! Subscribes for delivery of signal `sig` to the active object
* @public @memberof QActive
*
* @details
* This function is part of the Publish-Subscribe event delivery
* mechanism available in QF. Subscribing to an event means that the
* framework will start posting all published events with a given signal
* `sig` to the event queue of the active object.
*
* @param[in] sig event signal to subscribe
*
 * @usage
 * The following example shows how the Table active object subscribes
 * to three signals in the initial transition:
 * @include qf_subscribe.c
*
* @sa
* QActive_publish_(), QActive_unsubscribe(), and
* QActive_unsubscribeAll()
*/
uint_fast8_t const p = (uint_fast8_t)me->prio;
Q_REQUIRE_ID(300, ((enum_t)Q_USER_SIG <= sig)
&& (sig < QActive_maxPubSignal_)
&& (0U < p) && (p <= QF_MAX_ACTIVE)
&& (QActive_registry_[p] == me));
QF_CRIT_STAT_
QF_CRIT_E_();
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_SUBSCRIBE, me->prio)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(sig); /* the signal of this event */
QS_OBJ_PRE_(me); /* this active object */
QS_END_NOCRIT_PRE_()
/* set the priority bit */
QPSet_insert(&QActive_subscrList_[sig], p);
QF_CRIT_X_();
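/* Usage sketch (illustrative): subscribing to application-defined signals
 * in the initial transition of a hypothetical Table AO ('me' is a Table*,
 * whose first member 'super' is the inherited ::QActive).
 */
QActive_subscribe(&me->super, HUNGRY_SIG);
QActive_subscribe(&me->super, DONE_SIG);
QActive_subscribe(&me->super, PAUSE_SIG);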
const
/*! Unsubscribes from the delivery of signal `sig` to the active object
* @public @memberof QActive
*
* @details
* This function is part of the Publish-Subscribe event delivery
* mechanism available in QF. Un-subscribing from an event means that
* the framework will stop posting published events with a given signal
* `sig` to the event queue of the active object.
*
* @param[in] sig event signal to unsubscribe
*
* @note
* Due to the latency of event queues, an active object should NOT
* assume that a given signal `sig` will never be dispatched to the
* state machine of the active object after un-subscribing from that
* signal. The event might be already in the queue, or just about to
* be posted and the un-subscribe operation will not flush such events.
*
* @note
* Un-subscribing from a signal that has never been subscribed in the
* first place is considered an error and QF will raise an assertion.
*
* @sa
* QActive_publish_(), QActive_subscribe(), and
* QActive_unsubscribeAll()
*/
uint_fast8_t const p = (uint_fast8_t)me->prio;
    /*! @pre the signal and the priority must be in range; the AO must also
* be registered with the framework
*/
Q_REQUIRE_ID(400, ((enum_t)Q_USER_SIG <= sig)
&& (sig < QActive_maxPubSignal_)
&& (0U < p) && (p <= QF_MAX_ACTIVE)
&& (QActive_registry_[p] == me));
QF_CRIT_STAT_
QF_CRIT_E_();
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_UNSUBSCRIBE, me->prio)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(sig); /* the signal of this event */
QS_OBJ_PRE_(me); /* this active object */
QS_END_NOCRIT_PRE_()
/* clear priority bit */
QPSet_remove(&QActive_subscrList_[sig], p);
QF_CRIT_X_();
const
/*! Unsubscribes from the delivery of all signals to the active object
* @public @memberof QActive
*
* @details
* This function is part of the Publish-Subscribe event delivery
* mechanism available in QF. Un-subscribing from all events means that
* the framework will stop posting any published events to the event
* queue of the active object.
*
* @note
* Due to the latency of event queues, an active object should NOT
* assume that no events will ever be dispatched to the state machine of
* the active object after un-subscribing from all events.
* The events might be already in the queue, or just about to be posted
* and the un-subscribe operation will not flush such events. Also, the
* alternative event-delivery mechanisms, such as direct event posting or
* time events, can be still delivered to the event queue of the active
* object.
*
* @sa
* QActive_publish_(), QActive_subscribe(), and QActive_unsubscribe()
*/
uint_fast8_t const p = (uint_fast8_t)me->prio;
Q_REQUIRE_ID(500, (0U < p) && (p <= QF_MAX_ACTIVE)
&& (QActive_registry_[p] == me));
for (enum_t sig = (enum_t)Q_USER_SIG; sig < QActive_maxPubSignal_; ++sig) {
QF_CRIT_STAT_
QF_CRIT_E_();
if (QPSet_hasElement(&QActive_subscrList_[sig], p)) {
QPSet_remove(&QActive_subscrList_[sig], p);
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_UNSUBSCRIBE, me->prio)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(sig); /* the signal of this event */
QS_OBJ_PRE_(me); /* this active object */
QS_END_NOCRIT_PRE_()
}
QF_CRIT_X_();
/* prevent merging critical sections */
QF_CRIT_EXIT_NOP();
}
/*! Initialize the publish-subscribe event delivery mechanism
 * @static @public @memberof QActive
 *
 * @details
 * This function initializes the publish-subscribe facilities of QF by
 * providing the storage for the subscriber lists. It must be called
 * exactly once, before any subscriptions or publications take place
 * in the application.
 *
 * @param[in] subscrSto pointer to the storage for the subscriber lists
 * @param[in] maxSignal the dimension of the subscriber-list array and
 *                      also the maximum published signal that AOs can
 *                      subscribe to
 */
QActive_subscrList_ = subscrSto;
QActive_maxPubSignal_ = maxSignal;
/* zero the subscriber list, so that the framework can start correctly
* even if the startup code fails to clear the uninitialized data
* (as is required by the C Standard).
*/
QF_bzero(subscrSto, (uint_fast16_t)maxSignal * sizeof(QSubscrList));
/*! Publish event to all subscribers of a given signal `e->sig`
* @static @private @memberof QActive
*
* @details
* This function posts (using the FIFO policy) the event @a e to **all**
* active objects that have subscribed to the signal @a e->sig, which is
* called _multicasting_. The multicasting performed in this function is
* very efficient based on reference-counting inside the published event
* ("zero-copy" event multicasting). This function is designed to be
* callable from any part of the system, including ISRs, device drivers,
* and active objects.
*
* @note
* To avoid any unexpected re-ordering of events posted into AO queues,
* the event multicasting is performed with scheduler **locked**.
* However, the scheduler is locked only up to the priority level of
* the highest-priority subscriber, so any AOs of even higher priority,
* which did not subscribe to this event are *not* affected.
*/
#ifndef Q_SPY
Q_UNUSED_PAR(sender);
Q_UNUSED_PAR(qs_id);
#endif
/*! @pre the published signal must be within the configured range */
Q_REQUIRE_ID(200, e->sig < (QSignal)QActive_maxPubSignal_);
QF_CRIT_STAT_
QF_CRIT_E_();
QS_BEGIN_NOCRIT_PRE_(QS_QF_PUBLISH, qs_id)
QS_TIME_PRE_(); /* the timestamp */
QS_OBJ_PRE_(sender); /* the sender object */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_2U8_PRE_(e->poolId_, e->refCtr_);/* pool Id & ref Count */
QS_END_NOCRIT_PRE_()
/* is it a dynamic event? */
if (e->poolId_ != 0U) {
/* NOTE: The reference counter of a dynamic event is incremented to
* prevent premature recycling of the event while the multicasting
* is still in progress. At the end of the function, the garbage
* collector step (QF_gc()) decrements the reference counter and
* recycles the event if the counter drops to zero. This covers the
* case when the event was published without any subscribers.
*/
QEvt_refCtr_inc_(e);
}
/* make a local, modifiable copy of the subscriber list */
QPSet subscrList = QActive_subscrList_[e->sig];
QF_CRIT_X_();
if (QPSet_notEmpty(&subscrList)) { /* any subscribers? */
/* the highest-prio subscriber */
uint_fast8_t p = QPSet_findMax(&subscrList);
QActive *a = QActive_registry_[p];
QF_SCHED_STAT_
QF_SCHED_LOCK_(a->prio); /* lock the scheduler up to AO's prio */
do { /* loop over all subscribers */
/* the prio of the AO must be registered with the framework */
Q_ASSERT_ID(210, a != (QActive *)0);
/* QACTIVE_POST() asserts internally if the queue overflows */
QACTIVE_POST(a, e, sender);
QPSet_remove(&subscrList, p); /* remove the handled subscriber */
if (QPSet_notEmpty(&subscrList)) { /* still more subscribers? */
/* highest-prio subscriber */
p = QPSet_findMax(&subscrList);
a = QActive_registry_[p];
}
else {
p = 0U; /* no more subscribers */
}
} while (p != 0U);
QF_SCHED_UNLOCK_(); /* unlock the scheduler */
}
/* The following garbage collection step decrements the reference counter
* and recycles the event if the counter drops to zero. This covers both
* cases when the event was published with or without any subscribers.
*/
#if (QF_MAX_EPOOL > 0U)
QF_gc(e); /* recycle the event to avoid a leak */
#endif
const
/*! Defer an event to a given separate event queue
* @protected @memberof QActive
*
* @details
* This function is part of the event deferral support. An active object
* uses this function to defer an event `e` to the QF-supported native
* event queue `eq`. QF correctly accounts for another outstanding
* reference to the event and will not recycle the event at the end of
* the RTC step. Later, the active object might recall one event at a
* time from the event queue.
*
 * @param[in]     eq  pointer to a "raw" thread-safe queue to defer
 *                    the event to.
* @param[in] e pointer to the event to be deferred
*
* @returns
* 'true' (success) when the event could be deferred and 'false'
* (failure) if event deferral failed due to overflowing the queue.
*
* An active object can use multiple event queues to defer events of
* different kinds.
*
* @sa
* QActive_recall(), ::QEQueue, QActive_flushDeferred()
*/
bool const status = QEQueue_post(eq, e, 0U, me->prio);
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QF_ACTIVE_DEFER, me->prio)
QS_TIME_PRE_(); /* time stamp */
QS_OBJ_PRE_(me); /* this active object */
QS_OBJ_PRE_(eq); /* the deferred queue */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_END_PRE_()
return status;
/*! Recall a deferred event from a given event queue
* @protected @memberof QActive
*
* @details
* This function is part of the event deferral support. An active object
* uses this function to recall a deferred event from a given QF
* event queue. Recalling an event means that it is removed from the
* deferred event queue `eq` and posted (LIFO) to the event queue of
* the active object.
*
* @param[in] eq pointer to a "raw" thread-safe queue to recall
* an event from.
*
* @returns
* 'true' if an event has been recalled and 'false' if not.
*
* @note
* An active object can use multiple event queues to defer events of
* different kinds.
*
* @sa
 * QActive_defer(), QActive_postLIFO_(), ::QEQueue
*/
QEvt const * const e = QEQueue_get(eq, me->prio);
bool recalled;
/* event available? */
if (e != (QEvt *)0) {
QF_CRIT_STAT_
QACTIVE_POST_LIFO(me, e); /* post it to the front of the AO's queue */
QF_CRIT_E_();
/* is it a dynamic event? */
if (e->poolId_ != 0U) {
/* after posting to the AO's queue the event must be referenced
             * at least twice: once in the deferred event queue (QEQueue_get()
* did NOT decrement the reference counter) and once in the
* AO's event queue.
*/
Q_ASSERT_CRIT_(210, e->refCtr_ >= 2U);
/* we need to decrement the reference counter once, to account
* for removing the event from the deferred event queue.
*/
QEvt_refCtr_dec_(e); /* decrement the reference counter */
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_RECALL, me->prio)
QS_TIME_PRE_(); /* time stamp */
QS_OBJ_PRE_(me); /* this active object */
QS_OBJ_PRE_(eq); /* the deferred queue */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
recalled = true;
}
else {
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QF_ACTIVE_RECALL_ATTEMPT, me->prio)
QS_TIME_PRE_(); /* time stamp */
QS_OBJ_PRE_(me); /* this active object */
QS_OBJ_PRE_(eq); /* the deferred queue */
QS_END_PRE_()
recalled = false;
}
return recalled;
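/* Usage sketch (illustrative): the defer/recall pattern with a private
 * "raw" ::QEQueue member of the AO (me->deferQueue is a hypothetical name).
 */
/* in a "busy" state: defer a request that cannot be handled right now */
if (!QActive_defer(&me->super, &me->deferQueue, e)) {
    /* deferral failed (queue full); handle per the application policy */
}
/* ... later, e.g. on entry to the "idle" state: recall one deferred event */
(void)QActive_recall(&me->super, &me->deferQueue);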
const
/*! Flush the specified deferred queue 'eq'
* @protected @memberof QActive
*
* @details
* This function is part of the event deferral support. An active object
* can use this function to flush a given QF event queue. The function
* makes sure that the events are not leaked.
*
* @param[in] eq pointer to a "raw" thread-safe queue to flush.
*
* @returns
* the number of events actually flushed from the queue.
*
* @sa
* QActive_defer(), QActive_recall(), ::QEQueue
*/
uint_fast16_t n = 0U;
for (QEvt const *e = QEQueue_get(eq, me->prio);
e != (QEvt *)0;
e = QEQueue_get(eq, me->prio))
{
++n; /* count the flushed event */
#if (QF_MAX_EPOOL > 0U)
QF_gc(e); /* garbage collect */
#endif
}
return n;
/*! Generic setting of additional attributes (useful in QP ports)
* @public @memberof QActive
*/
/*! Thread routine for executing an active object `act`
* @private @memberof QActive
*/
/*! Register this active object to be managed by the framework
* @protected @memberof QActive
*
* @details
* This function adds a given active object to the active objects
* managed by the QF framework. It should not be called by the
 * application directly, only through the function QActive_start_().
*
* @note
 * The priority of the active object should be set before calling
* this function.
*
* @sa QActive_unregister_()
*/
if (me->pthre == 0U) { /* preemption-threshold not defined? */
me->pthre = me->prio; /* apply the default */
}
#ifndef Q_NASSERT
/*! @pre
* 1. the "QF-priority" of the AO must be in range
* 2. the "QF-priority" must not be already in use (unique priority)
* 3. the "QF-priority" must not exceed the "preemption-threshold"
*/
Q_REQUIRE_ID(100, (0U < me->prio) && (me->prio <= QF_MAX_ACTIVE)
&& (QActive_registry_[me->prio] == (QActive *)0)
&& (me->prio <= me->pthre));
uint8_t prev_thre = me->pthre;
uint8_t next_thre = me->pthre;
uint_fast8_t p;
for (p = (uint_fast8_t)me->prio - 1U; p > 0U; --p) {
if (QActive_registry_[p] != (QActive *)0) {
prev_thre = QActive_registry_[p]->pthre;
break;
}
}
for (p = (uint_fast8_t)me->prio + 1U; p <= QF_MAX_ACTIVE; ++p) {
if (QActive_registry_[p] != (QActive *)0) {
next_thre = QActive_registry_[p]->pthre;
break;
}
}
/*! @post
     * 1. the preceding preemption-threshold must not exceed this AO's
     *    preemption-threshold
     * 2. this AO's preemption-threshold must not exceed the next
     *    preemption-threshold
*/
Q_ENSURE_ID(101, (prev_thre <= me->pthre) && (me->pthre <= next_thre));
#endif /* Q_NASSERT */
QF_CRIT_STAT_
QF_CRIT_E_();
/* register the AO at the "QF-priority" */
QActive_registry_[me->prio] = me;
QF_CRIT_X_();
/*! Un-register the active object from the framework
* @protected @memberof QActive
*
* @details
* This function un-registers a given active object from the active objects
 * managed by the QF framework. It should not be called by the application
 * directly, only from the QP ports (e.g., in QActive_stop() implementations).
*
* @param[in] me pointer to the active object to remove from the
* framework.
*
* @note
* The active object that is removed from the framework can no longer
* participate in any event exchange.
*
* @sa QActive_register_()
*/
uint_fast8_t const p = (uint_fast8_t)me->prio;
/*! @pre the priority of the active object must not be zero and cannot
* exceed the maximum #QF_MAX_ACTIVE. Also, the priority of the active
* object must be already registered with the framework.
*/
Q_REQUIRE_ID(200, (0U < p) && (p <= QF_MAX_ACTIVE)
&& (QActive_registry_[p] == me));
QF_CRIT_STAT_
QF_CRIT_E_();
QActive_registry_[p] = (QActive *)0; /* free-up the priority level */
me->super.state.fun = Q_STATE_CAST(0); /* invalidate the state */
QF_CRIT_X_();
/*! the "FromISR" variant used in the QP port to "FreeRTOS"
* @private @memberof QActive
*/
/*! the "FromISR" variant used in the QP port to "FreeRTOS"
* @private @memberof QActive
*/
/*! @brief Virtual table for the QActive class */
{
struct QHsmVtable super; /*!< @protected inherits ::QHsmVtable */
/*! @private virtual function to start the AO/thread
* @sa QACTIVE_START()
*/
void (*start)(QActive * const me, QPrioSpec prio,
QEvt const * * const qSto, uint_fast16_t const qLen,
void * const stkSto, uint_fast16_t const stkSize,
void const * const par);
/*! @private virtual function to asynchronously post (FIFO)
* an event to the AO
* @sa QACTIVE_POST() and QACTIVE_POST_X()
*/
bool (*post)(QActive * const me, QEvt const * const e,
uint_fast16_t const margin, void const * const sender);
/*! @private virtual function to asynchronously post (LIFO)
* an event to the AO
* @sa QACTIVE_POST_LIFO()
*/
void (*postLIFO)(QActive * const me, QEvt const * const e);
} QActiveVtable;
/*! @brief Active object class (based on QMsm implementation strategy)
* @class QMActive
* @extends QActive
*
* @details
* ::QMActive represents an active object that uses the ::QMsm style state
* machine implementation strategy. This strategy requires the use of the
* QM modeling tool to generate state machine code automatically, but the
* code is faster than in the ::QHsm style implementation strategy and needs
* less run-time support (smaller event-processor).
*
* @note
* ::QMActive is not intended to be instantiated directly, but rather serves
* as the base class for derivation of active objects in the application.
*
* @tr{AQP214}
*
* @usage
* The following example illustrates how to derive an active object from
* ::QMActive. Please note that the ::QActive member @c super is defined as
* the **first** member of the derived struct (see @ref oop).
* @include qf_qmactive.c
*/
/*! Constructor of ::QMActive class.
* @protected @memberof QMActive
*
* @details
* Performs the first step of active object initialization by assigning
* the virtual pointer and calling the superclass constructor.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] initial pointer to the event to be dispatched to the MSM
*
* @note Must be called only ONCE before QHSM_INIT().
*
* @sa QHsm_ctor()
*/
static QMActiveVtable const vtable = { /* QMActive virtual table */
{ &QMsm_init_,
&QMsm_dispatch_
#ifdef Q_SPY
,&QMsm_getStateHandler_
#endif
},
&QActive_start_,
&QActive_post_,
&QActive_postLIFO_
};
/* clear the whole QMActive object, so that the framework can start
* correctly even if the startup code fails to clear the uninitialized
* data (as is required by the C Standard).
*/
QF_bzero(me, sizeof(*me));
/*!
* @note
* ::QMActive inherits ::QActive, so by the @ref oop convention
* it should call the constructor of the superclass, i.e., QActive_ctor().
* However, this would pull in the QActiveVtable, which in turn will pull
 * in the code for QHsm_init_() and QHsm_dispatch_() implementations,
* which is expensive. To avoid this code size penalty, in case QHsm is
* not used in a given project, the call to QMsm_ctor() avoids pulling
* in the code for QHsm.
*/
QMsm_ctor(QMSM_CAST_(&me->super.super), initial);
me->super.super.vptr = &vtable.super; /* hook vptr to QMActive vtable */
/*! @brief Virtual Table for the ::QMActive class (inherited
* from ::QActiveVtable)
*
* @note
* ::QMActive inherits ::QActive exactly, without adding any new virtual
* functions and therefore, ::QMActiveVtable is typedef'ed as ::QActiveVtable.
*/
/*! @brief Time Event class
* @class QTimeEvt
* @extends QEvt
*
* @details
* Time events are special QF events equipped with the notion of time passage.
* The basic usage model of the time events is as follows. An active object
* allocates one or more ::QTimeEvt objects (provides the storage for them).
* When the active object needs to arrange for a timeout, it arms one of its
* time events to fire either just once (one-shot) or periodically. Each time
* event times out independently from the others, so a QF application can make
* multiple parallel timeout requests (from the same or different active
* objects). When QF detects that the appropriate moment has arrived, it
* inserts the time event directly into the recipient's event queue. The
* recipient then processes the time event just like any other event.
*
 * Time events, like any other QF events, derive from the ::QEvt base class.
* Typically, you will use a time event as-is, but you can also further
* derive more specialized time events from it by adding some more data
* members and/or specialized functions that operate on the specialized
* time events.
*
* Internally, the armed time events are organized into linked lists--one
* list for every supported ticking rate. These linked lists are scanned in
* every invocation of the QTIMEEVT_TICK_X() macro. Only armed (timing out)
* time events are in the list, so only armed time events consume CPU cycles.
*
* @sa ::QTimeEvt for the description of the data members
*
* @tr{AQP215}
*
* @note
* QF manages the time events in the QTIMEEVT_TICK_X() macro, which must
 * be called periodically, from the clock tick ISR or from another
 * periodic source. You might also use the special ::QTicker active object.
*
* @note
* Even though ::QTimeEvt is a subclass of ::QEvt, ::QTimeEvt instances can NOT
* be allocated dynamically from event pools. In other words, it is illegal to
* allocate ::QTimeEvt instances with the Q_NEW() or Q_NEW_X() macros.
*/
/*! link to the next time event in the list
* @private @memberof QTimeEvt
*/
/*! The active object that receives the time events
* @private @memberof QTimeEvt
*/
/*! Internal down-counter of the time event.
* @private @memberof QTimeEvt
*
* @details
* The down-counter is decremented by 1 in every QTimeEvt_tick_() call.
* The time event fires (gets posted or published) when the down-counter
* reaches zero.
*/
/*! Interval for periodic time event (zero for one-shot time event)
* @private @memberof QTimeEvt
*
* @details
* The value of the interval is re-loaded to the internal down-counter
* when the time event expires, so that the time event keeps timing out
* periodically.
*/
/*! heads of linked lists of time events, one for every clock tick rate */
/*! The extended "constructor" to initialize a Time Event.
* @public @memberof QTimeEvt
*
* @details
* When creating a time event, you must commit it to a specific active object
* `act`, tick rate `tickRate` and event signal `sig`. You cannot change
* these attributes later.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] act pointer to the active object associated with this
* time event. The time event will post itself to this AO.
* @param[in] sig signal to associate with this time event.
* @param[in] tickRate system clock tick rate to associate with this
* time event in the range [0..15].
*
* @note You should call the constructor exactly once for every Time Event
* object **before** arming the Time Event. The ideal place for initializing
* the time event(s) associated with a given AO is the AO's constructor.
*/
/*! @pre The signal must be valid and the tick rate in range */
Q_REQUIRE_ID(300, (sig != 0)
&& (tickRate < QF_MAX_TICK_RATE));
me->next = (QTimeEvt *)0;
me->ctr = 0U;
me->interval = 0U;
me->super.sig = (QSignal)sig;
/* For backwards compatibility with QTimeEvt_ctor(), the active object
* pointer can be uninitialized (NULL) and is NOT validated in the
* precondition. The active object pointer is validated in preconditions
* to QTimeEvt_arm_() and QTimeEvt_rearm().
*/
me->act = act;
/* Setting the POOL_ID event attribute to zero is correct only for
* events not allocated from event pools, which must be the case
* for Time Events.
*/
me->super.poolId_ = 0U;
/* The refCtr_ attribute is not used in time events, so it is
* reused to hold the tickRate as well as other information
*/
me->super.refCtr_ = (uint8_t)tickRate;
/*! Arm a time event (one shot or periodic) for direct event posting.
* @public @memberof QTimeEvt
*
* @details
* Arms a time event to fire in a specified number of clock ticks and with
* a specified interval. If the interval is zero, the time event is armed for
* one shot ('one-shot' time event). When the timeout expires, the time event
* gets directly posted (using the FIFO policy) into the event queue of the
* host active object. After posting, a one-shot time event gets automatically
* disarmed while a periodic time event (interval != 0) is automatically
* re-armed.
*
* A time event can be disarmed at any time by calling QTimeEvt_disarm().
* Also, a time event can be re-armed to fire in a different number of clock
* ticks by calling the QTimeEvt_rearm().
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] nTicks number of clock ticks (at the associated rate)
* to rearm the time event with.
* @param[in] interval interval (in clock ticks) for periodic time event.
*
* @attention
* Arming an already armed time event is __not__ allowed and is considered
* a programming error. The QP/C framework will assert if it detects an
* attempt to arm an already armed time event.
*
* @usage
* The following example shows how to arm a periodic time event as well as
 * a one-shot time event from a state machine of an active object:
* @include qf_tevt.c
*/
uint_fast8_t const tickRate
= ((uint_fast8_t)me->super.refCtr_ & QTE_TICK_RATE);
QTimeEvtCtr const ctr = me->ctr;
#ifdef Q_SPY
uint_fast8_t const qs_id = ((QActive *)(me->act))->prio;
#endif
    /*! @pre the host AO must be valid, time event must be disarmed,
* number of clock ticks cannot be zero, and the signal must be valid.
*/
Q_REQUIRE_ID(400, (me->act != (void *)0)
&& (ctr == 0U)
&& (nTicks != 0U)
&& (tickRate < (uint_fast8_t)QF_MAX_TICK_RATE)
&& (me->super.sig >= (QSignal)Q_USER_SIG));
#ifdef Q_NASSERT
Q_UNUSED_PAR(ctr);
#endif
QF_CRIT_STAT_
QF_CRIT_E_();
me->ctr = nTicks;
me->interval = interval;
/* is the time event unlinked?
* NOTE: For the duration of a single clock tick of the specified tick
* rate a time event can be disarmed and yet still linked into the list,
* because un-linking is performed exclusively in QTimeEvt_tick_().
*/
if ((me->super.refCtr_ & QTE_IS_LINKED) == 0U) {
me->super.refCtr_ |= QTE_IS_LINKED; /* mark as linked */
/* The time event is initially inserted into the separate
* "freshly armed" link list based on QTimeEvt_timeEvtHead_[tickRate].act.
* Only later, inside the QTimeEvt_tick_() function, the "freshly armed"
* list is appended to the main list of armed time events based on
* QTimeEvt_timeEvtHead_[tickRate].next. Again, this is to keep any
* changes to the main list exclusively inside the QTimeEvt_tick_()
* function.
*/
me->next = (QTimeEvt *)QTimeEvt_timeEvtHead_[tickRate].act;
QTimeEvt_timeEvtHead_[tickRate].act = me;
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_ARM, qs_id)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this time event object */
QS_OBJ_PRE_(me->act); /* the active object */
QS_TEC_PRE_(nTicks); /* the number of ticks */
QS_TEC_PRE_(interval); /* the interval */
QS_U8_PRE_(tickRate); /* tick rate */
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
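/*! A minimal usage sketch of arming one-shot and periodic time events
 * (the `me->timeEvt` and `me->periodicTEvt` members as well as the
 * `BSP_TICKS_PER_SEC` constant are application-level assumptions; not
 * part of the QP/C sources):
 * @code
 * // one-shot: fire once after 10 clock ticks (interval == 0)
 * QTimeEvt_armX(&me->timeEvt, 10U, 0U);
 *
 * // periodic: first timeout after 2 seconds, then every second
 * QTimeEvt_armX(&me->periodicTEvt,
 *               2U * BSP_TICKS_PER_SEC, BSP_TICKS_PER_SEC);
 * @endcode
 */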
/*! Disarm a time event.
* @public @memberof QTimeEvt
*
* @details
* Disarm the time event so it can be safely reused.
*
* @param[in,out] me current instance pointer (see @ref oop)
*
* @returns
* 'true' if the time event was truly disarmed, that is, it was running.
* The return of 'false' means that the time event was not truly disarmed,
* because it was not running. The 'false' return is only possible for one-
* shot time events that have been automatically disarmed upon expiration.
* In this case the 'false' return means that the time event has already
* been posted or published and should be expected in the active object's
* state machine.
*
* @note
* there is no harm in disarming an already disarmed time event
*/
#ifdef Q_SPY
uint_fast8_t const qs_id = QACTIVE_CAST_(me->act)->prio;
#endif
QF_CRIT_STAT_
QF_CRIT_E_();
/* is the time event actually armed? */
bool wasArmed;
if (me->ctr != 0U) {
wasArmed = true;
me->super.refCtr_ |= QTE_WAS_DISARMED;
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_DISARM, qs_id)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this time event object */
QS_OBJ_PRE_(me->act); /* the target AO */
QS_TEC_PRE_(me->ctr); /* the number of ticks */
QS_TEC_PRE_(me->interval); /* the interval */
QS_U8_PRE_(me->super.refCtr_ & QTE_TICK_RATE);
QS_END_NOCRIT_PRE_()
me->ctr = 0U; /* schedule removal from the list */
}
else { /* the time event was already disarmed automatically */
wasArmed = false;
me->super.refCtr_ &= (uint8_t)(~QTE_WAS_DISARMED & 0xFFU);
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_DISARM_ATTEMPT, qs_id)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this time event object */
QS_OBJ_PRE_(me->act); /* the target AO */
QS_U8_PRE_(me->super.refCtr_ & QTE_TICK_RATE);
QS_END_NOCRIT_PRE_()
}
QF_CRIT_X_();
return wasArmed;
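/*! A minimal usage sketch of interpreting the QTimeEvt_disarm() return
 * value (the `me->timeEvt` member is an application-level assumption;
 * not part of the QP/C sources):
 * @code
 * if (!QTimeEvt_disarm(&me->timeEvt)) {
 *     // the one-shot time event has already expired, so its event is
 *     // either in this AO's queue or has just been dispatched; the
 *     // state machine should still be prepared to handle it
 * }
 * @endcode
 */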
/*! Rearm a time event.
* @public @memberof QTimeEvt
*
* @details
* Rearms a time event with a new number of clock ticks. This function can
* be used to adjust the current period of a periodic time event or to
* prevent a one-shot time event from expiring (e.g., a watchdog time event).
* Rearming a periodic timer leaves the interval unchanged and is a convenient
* method to adjust the phasing of a periodic time event.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] nTicks number of clock ticks (at the associated rate)
* to rearm the time event with.
*
* @returns
* 'true' if the time event was running as it was re-armed. The 'false'
* return means that the time event was not truly rearmed because it was
* not running. The 'false' return is only possible for one-shot time events
* that have been automatically disarmed upon expiration. In this case the
* 'false' return means that the time event has already been posted or
* published and should be expected in the active object's state machine.
*/
uint_fast8_t const tickRate
= (uint_fast8_t)me->super.refCtr_ & QTE_TICK_RATE;
#ifdef Q_SPY
uint_fast8_t const qs_id = ((QActive *)(me->act))->prio;
#endif
/*! @pre AO must be valid, tick rate must be in range, nTicks must not
* be zero, and the signal of this time event must be valid
*/
Q_REQUIRE_ID(600, (me->act != (void *)0)
&& (tickRate < QF_MAX_TICK_RATE)
&& (nTicks != 0U)
&& (me->super.sig >= (QSignal)Q_USER_SIG));
QF_CRIT_STAT_
QF_CRIT_E_();
/* is the time evt not running? */
bool wasArmed;
if (me->ctr == 0U) {
wasArmed = false;
/* NOTE: For the duration of a single clock tick of the specified
* tick rate a time event can be disarmed and yet still linked into
* the list, because unlinking is performed exclusively in the
* QTimeEvt_tick_() function.
*/
/* is the time event linked yet? */
if ((me->super.refCtr_ & QTE_IS_LINKED) == 0U) {
me->super.refCtr_ |= QTE_IS_LINKED; /* mark as linked */
/* The time event is initially inserted into the separate
* "freshly armed" list based on QTimeEvt_timeEvtHead_[tickRate].act.
* Only later, inside the QTimeEvt_tick_() function, the "freshly
* armed" list is appended to the main list of armed time events
* based on QTimeEvt_timeEvtHead_[tickRate].next. Again, this is
* to keep any changes to the main list exclusively inside the
* QTimeEvt_tick_() function.
*/
me->next = (QTimeEvt *)QTimeEvt_timeEvtHead_[tickRate].act;
QTimeEvt_timeEvtHead_[tickRate].act = me;
}
}
else { /* the time event was armed */
wasArmed = true;
}
me->ctr = nTicks; /* re-load the tick counter (shift the phasing) */
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_REARM, qs_id)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this time event object */
QS_OBJ_PRE_(me->act); /* the target AO */
QS_TEC_PRE_(me->ctr); /* the number of ticks */
QS_TEC_PRE_(me->interval); /* the interval */
QS_2U8_PRE_(tickRate, (wasArmed ? 1U : 0U));
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
return wasArmed;
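/*! A minimal usage sketch of QTimeEvt_rearm() used as a software
 * watchdog, which repeatedly pushes a one-shot timeout into the future
 * (the `me->wdogTEvt` member and `WDOG_TOUT_TICKS` constant are
 * application-level assumptions; not part of the QP/C sources):
 * @code
 * // called whenever the monitored activity shows signs of life;
 * // if it is never called again, the one-shot watchdog timeout fires
 * (void)QTimeEvt_rearm(&me->wdogTEvt, WDOG_TOUT_TICKS);
 * @endcode
 */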
/*! Check the "was disarmed" status of a time event.
* @public @memberof QTimeEvt
*
* @details
* Useful for checking whether a one-shot time event was disarmed in the
* QTimeEvt_disarm() operation.
*
* @param[in,out] me current instance pointer (see @ref oop)
*
* @returns
* 'true' if the time event was truly disarmed in the last QTimeEvt_disarm()
* operation. The 'false' return means that the time event was not truly
* disarmed, because it was not running at that time. The 'false' return is
* only possible for one-shot time events that have been automatically disarmed
* upon expiration. In this case the 'false' return means that the time event
* has already been posted or published and should be expected in the active
* object's event queue.
*
* @note
* This function has a **side effect** of setting the "was disarmed" status,
* which means that the second and subsequent times this function is called
* the function will return 'true'.
*/
uint8_t const wasDisarmed = (me->super.refCtr_ & QTE_WAS_DISARMED);
me->super.refCtr_ |= QTE_WAS_DISARMED; /* mark as disarmed */
return wasDisarmed != 0U;
const
/*! Get the current value of the down-counter of a time event.
* @public @memberof QTimeEvt
*
* @details
* Useful for checking how many clock ticks (at the tick rate associated
* with the time event) remain until the time event expires.
*
* @param[in,out] me current instance pointer (see @ref oop)
*
* @returns
* For an armed time event, the function returns the current value of the
* down-counter of the given time event. If the time event is not armed,
* the function returns 0.
*
* @note
* The function is thread-safe.
*/
QF_CRIT_STAT_
QF_CRIT_E_();
QTimeEvtCtr const ret = me->ctr;
QF_CRIT_X_();
return ret;
/*! Processes all armed time events at every clock tick.
* @static @private @memberof QTimeEvt
*
* @details
* This internal helper function processes all armed ::QTimeEvt objects
 * associated with the tick rate `tickRate`.
*
* This function must be called periodically from a time-tick ISR or from
* a task so that QF can manage the timeout events assigned to the given
* system clock tick rate.
*
* @param[in] tickRate clock tick rate serviced in this call [1..15].
* @param[in] sender pointer to a sender object (only for QS tracing)
*
* @note
* this function should be called only via the macro QTIMEEVT_TICK_X()
*
* @note
 * the calls to QTimeEvt_tick_() with different `tickRate` parameters can
* preempt each other. For example, higher clock tick rates might be
* serviced from interrupts while others from tasks (active objects).
*
* @sa ::QTimeEvt.
*/
#ifndef Q_SPY
Q_UNUSED_PAR(sender);
#endif
QTimeEvt *prev = &QTimeEvt_timeEvtHead_[tickRate];
QF_CRIT_STAT_
QF_CRIT_E_();
QS_BEGIN_NOCRIT_PRE_(QS_QF_TICK, 0U)
++prev->ctr;
QS_TEC_PRE_(prev->ctr); /* tick ctr */
QS_U8_PRE_(tickRate); /* tick rate */
QS_END_NOCRIT_PRE_()
/* scan the linked-list of time events at this rate... */
for (;;) {
QTimeEvt *t = prev->next; /* advance down the time evt. list */
/* end of the list? */
if (t == (QTimeEvt *)0) {
/* any new time events armed since the last QTimeEvt_tick_()? */
if (QTimeEvt_timeEvtHead_[tickRate].act != (void *)0) {
/* sanity check */
Q_ASSERT_CRIT_(110, prev != (QTimeEvt *)0);
prev->next = (QTimeEvt *)QTimeEvt_timeEvtHead_[tickRate].act;
QTimeEvt_timeEvtHead_[tickRate].act = (void *)0;
t = prev->next; /* switch to the new list */
}
else {
break; /* all currently armed time evts. processed */
}
}
/* time event scheduled for removal? */
if (t->ctr == 0U) {
prev->next = t->next;
/* mark time event 't' as NOT linked */
t->super.refCtr_ &= (uint8_t)(~QTE_IS_LINKED & 0xFFU);
/* do NOT advance the prev pointer */
QF_CRIT_X_(); /* exit crit. section to reduce latency */
/* prevent merging critical sections, see NOTE1 below */
QF_CRIT_EXIT_NOP();
}
else {
--t->ctr;
/* is time event about to expire? */
if (t->ctr == 0U) {
/* temporary for volatile */
QActive * const act = (QActive *)t->act;
/* periodic time evt? */
if (t->interval != 0U) {
t->ctr = t->interval; /* rearm the time event */
prev = t; /* advance to this time event */
}
/* one-shot time event: automatically disarm */
else {
prev->next = t->next;
/* mark time event 't' as NOT linked */
t->super.refCtr_ &= (uint8_t)(~QTE_IS_LINKED & 0xFFU);
/* do NOT advance the prev pointer */
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_AUTO_DISARM, act->prio)
QS_OBJ_PRE_(t); /* this time event object */
QS_OBJ_PRE_(act); /* the target AO */
QS_U8_PRE_(tickRate); /* tick rate */
QS_END_NOCRIT_PRE_()
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_POST, act->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(t); /* the time event object */
QS_SIG_PRE_(t->super.sig); /* signal of this time event */
QS_OBJ_PRE_(act); /* the target AO */
QS_U8_PRE_(tickRate); /* tick rate */
QS_END_NOCRIT_PRE_()
QF_CRIT_X_(); /* exit critical section before posting */
/* QACTIVE_POST() asserts internally if the queue overflows */
QACTIVE_POST(act, &t->super, sender);
}
else {
prev = t; /* advance to this time event */
QF_CRIT_X_(); /* exit crit. section to reduce latency */
            /* prevent merging critical sections
             * In some QF ports the critical section exit takes effect only
             * on the next machine instruction. If, in such a case, the next
             * instruction happens to be another critical section entry, the
             * critical section would not really be exited, but rather the
             * two adjacent critical sections would be merged. The
             * QF_CRIT_EXIT_NOP() macro contains the minimal code required
             * to prevent such merging of critical sections in the QF ports
             * where it can occur.
             */
QF_CRIT_EXIT_NOP();
}
}
QF_CRIT_E_(); /* re-enter crit. section to continue */
}
QF_CRIT_X_();
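/*! A minimal usage sketch of servicing tick rate 0 from the system
 * clock tick ISR via the QTIMEEVT_TICK_X() macro (the `SysTick_Handler`
 * ISR name and the QK ISR entry/exit macros are port-level assumptions;
 * not part of the QP/C sources):
 * @code
 * void SysTick_Handler(void) {
 *     QK_ISR_ENTRY(); // inform the QK kernel about entering the ISR
 *     // process time events at rate 0; the second argument is a
 *     // sender pointer used only for QS software tracing
 *     QTIMEEVT_TICK_X(0U, (void *)0);
 *     QK_ISR_EXIT();  // inform the QK kernel about exiting the ISR
 * }
 * @endcode
 */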
/*! Processes one clock tick for QUTest */
/*! Returns 'true' if there are no armed time events at a given tick rate.
* @static @public @memberof QTimeEvt
*
* @details
* Find out if any time events are armed at the given clock tick rate.
*
* @param[in] tickRate system clock tick rate to find out about.
*
* @returns
* 'true' if no time events are armed at the given tick rate and
* 'false' otherwise.
*
* @note
 * This function should be called from within a critical section.
*/
/*! @pre the tick rate must be in range */
Q_REQUIRE_ID(200, tickRate < QF_MAX_TICK_RATE);
bool inactive;
if (QTimeEvt_timeEvtHead_[tickRate].next != (QTimeEvt *)0) {
inactive = false;
}
else if ((QTimeEvt_timeEvtHead_[tickRate].act != (void *)0)) {
inactive = false;
}
else {
inactive = true;
}
return inactive;
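/*! A minimal usage sketch of consulting QTimeEvt_noActive() before
 * entering a low-power mode (the decision logic is an application-level
 * assumption; note that the check is performed inside a critical
 * section, as required):
 * @code
 * QF_INT_DISABLE();
 * if (QTimeEvt_noActive(0U)) {
 *     // no time events are armed at tick rate 0, so the clock-tick
 *     // interrupt can be suppressed while the CPU sleeps
 * }
 * QF_INT_ENABLE();
 * @endcode
 */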
/*! @brief "Ticker" Active Object class
* @class QTicker
* @extends QActive
*
* @details
* QTicker is an efficient active object specialized to process QF system
* clock tick at a specified tick rate [0..#QF_MAX_TICK_RATE].
* Placing system clock tick processing in an active object allows you
* to remove the non-deterministic QTIMEEVT_TICK_X() processing from the
* interrupt level and move it into the thread-level, where you can prioritize
* it as low as you wish.
*
* @usage
* The following example illustrates use of QTicker active objects:
* @include qf_ticker.c
*/
/*! Constructor of the QTicker Active Object class
* @public @memberof QTicker
*/
static QActiveVtable const vtable = { /* QActive virtual table */
{ &QTicker_init_,
&QTicker_dispatch_
#ifdef Q_SPY
,&QHsm_getStateHandler_
#endif
},
&QActive_start_,
&QTicker_post_,
&QTicker_postLIFO_
};
QActive_ctor(&me->super, Q_STATE_CAST(0)); /* superclass' ctor */
me->super.super.vptr = &vtable.super; /* hook the vptr */
/* reuse eQueue.head for tick-rate */
me->super.eQueue.head = (QEQueueCtr)tickRate;
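/*! A minimal usage sketch of a QTicker for tick rate 0 (the priority,
 * the `BSP_startTicker0()` function and the `SysTick_Handler` ISR name
 * are application-level assumptions; not part of the QP/C sources).
 * The ticker is started without queue storage, because its overridden
 * post/dispatch operations do not use the ring buffer, and the tick ISR
 * can post any event to it, because the QTicker post override ignores
 * the event parameter:
 * @code
 * static QTicker ticker0; // ticker active object for tick rate 0
 *
 * void BSP_startTicker0(void) { // called once during system startup
 *     QTicker_ctor(&ticker0, 0U);
 *     QACTIVE_START(&ticker0.super, 1U,   // lowest AO priority
 *                   (QEvt const **)0, 0U, // no event queue storage
 *                   (void *)0, 0U,        // no per-AO stack
 *                   (void *)0);           // no initialization parameter
 * }
 *
 * void SysTick_Handler(void) { // alternative to QTIMEEVT_TICK_X() in ISR
 *     static QEvt const tickTrig = { 0U, 0U, 0U };
 *     QACTIVE_POST(&ticker0.super, &tickTrig, (void *)0);
 * }
 * @endcode
 */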
/*! initialization (override)
* @private @memberof QTicker
*/
Q_UNUSED_PAR(me);
Q_UNUSED_PAR(par);
Q_UNUSED_PAR(qs_id);
QTICKER_CAST_(me)->eQueue.tail = 0U;
/*! dispatching (override)
* @private @memberof QTicker
*/
Q_UNUSED_PAR(e);
Q_UNUSED_PAR(qs_id);
QF_CRIT_STAT_
QF_CRIT_E_();
QEQueueCtr nTicks = QTICKER_CAST_(me)->eQueue.tail; /* save # of ticks */
QTICKER_CAST_(me)->eQueue.tail = 0U; /* clear # ticks */
QF_CRIT_X_();
for (; nTicks > 0U; --nTicks) {
QTimeEvt_tick_((uint_fast8_t)QTICKER_CAST_(me)->eQueue.head, me);
}
/*! post (override)
* @private @memberof QTicker
*/
Q_UNUSED_PAR(e);
Q_UNUSED_PAR(margin);
#ifndef Q_SPY
Q_UNUSED_PAR(sender);
#endif
QF_CRIT_STAT_
QF_CRIT_E_();
if (me->eQueue.frontEvt == (QEvt *)0) {
static QEvt const tickEvt = { 0U, 0U, 0U };
me->eQueue.frontEvt = &tickEvt; /* deliver event directly */
--me->eQueue.nFree; /* one less free event */
QACTIVE_EQUEUE_SIGNAL_(me); /* signal the event queue */
}
++me->eQueue.tail; /* account for one more tick event */
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_POST, me->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(sender); /* the sender object */
QS_SIG_PRE_(0U); /* the signal of the event */
QS_OBJ_PRE_(me); /* this active object */
QS_2U8_PRE_(0U, 0U); /* pool Id & refCtr of the evt */
QS_EQC_PRE_(0U); /* number of free entries */
QS_EQC_PRE_(0U); /* min number of free entries */
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
return true; /* the event is always posted correctly */
/*! post-LIFO (override)
* @private @memberof QTicker
*/
Q_UNUSED_PAR(me);
Q_UNUSED_PAR(e);
Q_ERROR_ID(900);
/*! @brief Native QF Event Queue
* @class QEQueue
*
* @details
* This class describes the native QF event queue, which can be used as
* the event queue for active objects, or as a simple "raw" event queue for
* thread-safe event passing among non-framework entities, such as ISRs,
* device drivers, or other third-party components.<br>
*
* The native QF event queue is configured by defining the macro
* #QF_EQUEUE_TYPE as ::QEQueue in the specific QF port header file.<br>
* <br>
* The ::QEQueue structure contains only data members for managing an event
* queue, but does not contain the storage for the queue buffer, which must
* be provided externally during the queue initialization.<br>
* <br>
* The event queue can store only event pointers, not the whole events. The
* internal implementation uses the standard ring-buffer plus one external
* location that optimizes the queue operation for the most frequent case
* of empty queue.<br>
* <br>
* The ::QEQueue structure is used with two sets of functions. One set is for
* the active object event queue, which might need to block the active object
* task when the event queue is empty and might need to unblock it when
* events are posted to the queue. The interface for the native active object
* event queue consists of the following functions: QActive_post(),
* QActive_postLIFO(), and QActive_get_(). Additionally the function
* QEQueue_init() is used to initialize the queue.<br>
* <br>
* The other set of functions, uses ::QEQueue as a simple "raw" event
* queue to pass events between entities other than active objects, such as
* ISRs. The "raw" event queue is not capable of blocking on the get()
* operation, but is still thread-safe because it uses QF critical section
* to protect its integrity. The interface for the "raw" thread-safe queue
* consists of the following functions: QEQueue_post(),
* QEQueue_postLIFO(), and QEQueue_get(). Additionally the function
* QEQueue_init() is used to initialize the queue.
*
 * @note
 * Most event queue operations (both the active object queues and
* the "raw" queues) internally use the QF critical section. You should be
* careful not to invoke those operations from other critical sections when
* nesting of critical sections is not supported.
*
* @sa ::QEQueue for the description of the data members
*/
/*! pointer to event at the front of the queue.
* @private @memberof QEQueue
*
* @details
* All incoming and outgoing events pass through the frontEvt location.
* When the queue is empty (which is most of the time), the extra
 * frontEvt location allows bypassing the ring buffer altogether,
* greatly optimizing the performance of the queue. Only bursts of events
* engage the ring buffer.
*
 * @note
 * The additional role of this attribute is to indicate the empty
* status of the queue. The queue is empty when frontEvt is NULL.
*/
/*! pointer to the start of the ring buffer
* @private @memberof QEQueue
*/
/*! offset of the end of the ring buffer from the start of the buffer
* @private @memberof QEQueue
*/
/*! offset to where next event will be inserted into the buffer
* @private @memberof QEQueue
*/
/*! offset of where next event will be extracted from the buffer
* @private @memberof QEQueue
*/
/*! number of free events in the ring buffer
* @private @memberof QEQueue
*/
/*! Minimum number of free events ever in the ring buffer.
* @private @memberof QEQueue
*
* @details
* This attribute remembers the low-watermark of the ring buffer,
 * which provides valuable information for sizing event queues.
* @sa QF_getQueueMargin().
*/
/*! Initialize the native QF event queue.
* @public @memberof QEQueue
*
* @details
* Initialize the event queue by giving it the storage for the ring buffer.
*
* @param[in,out] me current instance pointer (see @ref oop)
 * @param[in]     qSto    an array of pointers to ::QEvt to serve as the
* ring buffer for the event queue
* @param[in] qLen the length of the `qSto` buffer (in ::QEvt pointers)
*
* @note The actual capacity of the queue is qLen + 1, because of the extra
 * location frontEvt.
*
* @note
 * This function is also used to initialize the event queues of active
 * objects in the built-in QV and QK kernels, as well as in other
 * QP ports to OSes/RTOSes that do not provide a suitable message queue.
*/
me->frontEvt = (QEvt *)0; /* no events in the queue */
me->ring = qSto; /* the beginning of the ring buffer */
me->end = (QEQueueCtr)qLen;
if (qLen != 0U) {
me->head = 0U;
me->tail = 0U;
}
me->nFree = (QEQueueCtr)(qLen + 1U); /* +1 for frontEvt */
me->nMin = me->nFree;
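/*! A minimal usage sketch of setting up a "raw" thread-safe queue with
 * static storage (the `rawQueue` object, its depth and the
 * `App_initQueues()` function are application-level assumptions; not
 * part of the QP/C sources):
 * @code
 * static QEQueue rawQueue;       // "raw" queue, e.g. ISR -> task
 * static QEvt const *rawQSto[8]; // ring-buffer storage (8 entries)
 *
 * void App_initQueues(void) {
 *     // total capacity is Q_DIM(rawQSto) + 1 (the extra frontEvt slot)
 *     QEQueue_init(&rawQueue, rawQSto, Q_DIM(rawQSto));
 * }
 * @endcode
 */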
/*! Post an event to the "raw" thread-safe event queue (FIFO).
* @public @memberof QEQueue
*
* @details
* Post an event to the "raw" thread-safe event queue using the
* First-In-First-Out (FIFO) order.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] e pointer to the event to be posted to the queue
 * @param[in]     margin  number of required free slots in the queue after
 *                  posting the event. The special value #QF_NO_MARGIN
 *                  means that this function will assert if posting fails.
* @note
* The #QF_NO_MARGIN value of the `margin` parameter is special and
* denotes situation when the post() operation is assumed to succeed (event
* delivery guarantee). An assertion fires, when the event cannot be
* delivered in this case.
*
* @returns 'true' (success) when the posting succeeded with the provided
* margin and 'false' (failure) when the posting fails.
*
* @note This function can be called from any task context or ISR context.
*
* @sa QEQueue_postLIFO(), QEQueue_get()
*/
#ifndef Q_SPY
Q_UNUSED_PAR(qs_id);
#endif
/* @pre event must be valid */
Q_REQUIRE_ID(200, e != (QEvt *)0);
QF_CRIT_STAT_
QF_CRIT_E_();
QEQueueCtr nFree = me->nFree; /* get volatile into the temporary */
/* required margin available? */
bool status;
if (((margin == QF_NO_MARGIN) && (nFree > 0U))
|| (nFree > (QEQueueCtr)margin))
{
/* is it a dynamic event? */
if (e->poolId_ != 0U) {
QEvt_refCtr_inc_(e); /* increment the reference counter */
}
--nFree; /* one free entry just used up */
me->nFree = nFree; /* update the volatile */
if (me->nMin > nFree) {
me->nMin = nFree; /* update minimum so far */
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_EQUEUE_POST, qs_id)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of this event */
QS_OBJ_PRE_(me); /* this queue object */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_EQC_PRE_(nFree); /* number of free entries */
QS_EQC_PRE_(me->nMin); /* min number of free entries */
QS_END_NOCRIT_PRE_()
/* was the queue empty? */
if (me->frontEvt == (QEvt *)0) {
me->frontEvt = e; /* deliver event directly */
}
/* queue was not empty, insert event into the ring-buffer */
else {
/* insert event into the ring buffer (FIFO)... */
me->ring[me->head] = e; /* insert e into buffer */
/* need to wrap the head? */
if (me->head == 0U) {
me->head = me->end; /* wrap around */
}
--me->head;
}
status = true; /* event posted successfully */
}
else {
/*! @note assert if event cannot be posted and dropping events is
* not acceptable
*/
Q_ASSERT_CRIT_(210, margin != QF_NO_MARGIN);
QS_BEGIN_NOCRIT_PRE_(QS_QF_EQUEUE_POST_ATTEMPT, qs_id)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of this event */
QS_OBJ_PRE_(me); /* this queue object */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_EQC_PRE_(nFree); /* number of free entries */
QS_EQC_PRE_(margin); /* margin requested */
QS_END_NOCRIT_PRE_()
status = false;
}
QF_CRIT_X_();
return status;
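/*! A minimal usage sketch of posting to the "raw" queue from the sketch
 * above, with and without the delivery guarantee (the producer functions
 * are application-level assumptions; 0U is passed as the QS id):
 * @code
 * // guaranteed delivery: QEQueue_post() asserts on queue overflow
 * void App_postMust(QEvt const *evt) {
 *     (void)QEQueue_post(&rawQueue, evt, QF_NO_MARGIN, 0U);
 * }
 *
 * // best-effort delivery: require 2 free slots to remain after posting
 * void App_postMaybe(QEvt const *evt) {
 *     if (!QEQueue_post(&rawQueue, evt, 2U, 0U)) {
 *         QF_gc(evt); // not delivered: recycle if evt is a dynamic event
 *     }
 * }
 * @endcode
 */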
/*! Post an event to the "raw" thread-safe event queue (LIFO).
* @public @memberof QEQueue
*
* @details
* Post an event to the "raw" thread-safe event queue using the
* Last-In-First-Out (LIFO) order.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] e pointer to the event to be posted to the queue
*
* @attention
* The LIFO policy should be used only with great __caution__, because
* it alters the order of events in the queue.
*
* @note
* This function can be called from any task context or ISR context.
*
* @note
* this function is used for the "raw" thread-safe queues and __not__
* for the queues of active objects.
*
* @sa
* QEQueue_post(), QEQueue_get(), QActive_defer()
*/
#ifndef Q_SPY
Q_UNUSED_PAR(qs_id);
#endif
QF_CRIT_STAT_
QF_CRIT_E_();
QEQueueCtr nFree = me->nFree; /* get volatile into the temporary */
/*! @pre the queue must be able to accept the event (cannot overflow) */
Q_REQUIRE_CRIT_(300, nFree != 0U);
/* is it a dynamic event? */
if (e->poolId_ != 0U) {
QEvt_refCtr_inc_(e); /* increment the reference counter */
}
--nFree; /* one free entry just used up */
me->nFree = nFree; /* update the volatile */
if (me->nMin > nFree) {
me->nMin = nFree; /* update minimum so far */
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_EQUEUE_POST_LIFO, qs_id)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of this event */
QS_OBJ_PRE_(me); /* this queue object */
QS_2U8_PRE_(e->poolId_, e->refCtr_);/* pool Id & ref Count of event */
QS_EQC_PRE_(nFree); /* number of free entries */
QS_EQC_PRE_(me->nMin); /* min number of free entries */
QS_END_NOCRIT_PRE_()
QEvt const * const frontEvt = me->frontEvt; /* read volatile into temp */
me->frontEvt = e; /* deliver event directly to the front of the queue */
/* was the queue not empty? */
if (frontEvt != (QEvt *)0) {
++me->tail;
if (me->tail == me->end) { /* need to wrap the tail? */
me->tail = 0U; /* wrap around */
}
me->ring[me->tail] = frontEvt; /* save old front evt */
}
QF_CRIT_X_();
/*! Obtain an event from the "raw" thread-safe queue.
* @public @memberof QEQueue
*
* @details
* Retrieves an event from the front of the "raw" thread-safe queue and
* returns a pointer to this event to the caller.
*
* @param[in,out] me current instance pointer (see @ref oop)
*
* @returns
* pointer to event at the front of the queue, if the queue is
* not empty and NULL if the queue is empty.
*
* @note
* this function is used for the "raw" thread-safe queues and __not__
* for the queues of active objects.
*
* @sa
* QEQueue_post(), QEQueue_postLIFO(), QActive_recall()
*/
#ifndef Q_SPY
Q_UNUSED_PAR(qs_id);
#endif
QF_CRIT_STAT_
QF_CRIT_E_();
QEvt const * const e = me->frontEvt; /* remove event from the front */
/* was the queue not empty? */
if (e != (QEvt *)0) {
/* use a temporary variable to increment volatile me->nFree */
QEQueueCtr const nFree = me->nFree + 1U;
me->nFree = nFree; /* update the number of free */
/* any events in the ring buffer? */
if (nFree <= me->end) {
me->frontEvt = me->ring[me->tail]; /* get from tail */
if (me->tail == 0U) { /* need to wrap the tail? */
me->tail = me->end; /* wrap around */
}
--me->tail;
QS_BEGIN_NOCRIT_PRE_(QS_QF_EQUEUE_GET, qs_id)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of this event */
QS_OBJ_PRE_(me); /* this queue object */
QS_2U8_PRE_(e->poolId_, e->refCtr_);/* pool Id & ref Count */
QS_EQC_PRE_(nFree); /* number of free entries */
QS_END_NOCRIT_PRE_()
}
else {
me->frontEvt = (QEvt *)0; /* queue becomes empty */
            /* all entries in the queue must be free (+1 for frontEvt) */
Q_ASSERT_CRIT_(410, nFree == (me->end + 1U));
QS_BEGIN_NOCRIT_PRE_(QS_QF_EQUEUE_GET_LAST, qs_id)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of this event */
QS_OBJ_PRE_(me); /* this queue object */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_END_NOCRIT_PRE_()
}
}
QF_CRIT_X_();
return e;
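/*! A minimal usage sketch of draining the "raw" queue from the sketches
 * above (the consumer function is an application-level assumption). As
 * noted for QF_gc(), events taken from "raw" queues are NOT garbage
 * collected automatically, so dynamic events must be recycled explicitly:
 * @code
 * void App_drainRawQueue(void) {
 *     QEvt const *e = QEQueue_get(&rawQueue, 0U);
 *     while (e != (QEvt *)0) {  // as long as the queue is not empty
 *         // ... process the event e ...
 *         QF_gc(e);             // explicitly recycle dynamic events
 *         e = QEQueue_get(&rawQueue, 0U);
 *     }
 * }
 * @endcode
 */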
const
/*! "raw" thread-safe QF event queue operation for obtaining the number
* of free entries still available in the queue.
* @public @memberof QEQueue
*
* @details
* This operation needs to be used with caution because the number of free
* entries can change unexpectedly. The main intent for using this operation
* is in conjunction with event deferral. In this case the queue is accessed
* only from a single thread (by a single AO), so the number of free
* entries cannot change unexpectedly.
*
* @param[in] me current instance pointer (see @ref oop)
*
* @returns the current number of free slots in the queue.
*/
return me->nFree;
const
/*! "raw" thread-safe QF event queue operation for obtaining the minimum
* number of free entries ever in the queue (a.k.a. "low-watermark").
* @public @memberof QEQueue
*
* @details
* This operation needs to be used with caution because the "low-watermark"
* can change unexpectedly. The main intent for using this operation is to
* get an idea of queue usage to size the queue adequately.
*
* @param[in] me current instance pointer (see @ref oop)
*
* @returns the minimum number of free entries ever in the queue since init.
*/
return me->nMin;
const
/*! "raw" thread-safe QF event queue operation to find out if the queue
* is empty.
* @public @memberof QEQueue
*
* @details
* This operation needs to be used with caution because the queue status
* can change unexpectedly. The main intent for using this operation is in
* conjunction with event deferral. In this case the queue is accessed only
* from a single thread (by a single AO), so no other entity can post
* events to the queue.
*
* @param[in] me_ current instance pointer (see @ref oop)
*
 * @returns 'true' if the queue is currently empty and 'false' otherwise.
*/
return me->frontEvt == (QEvt *)0;
/*! @brief Native QF Memory Pool
* @class QMPool
*
* @details
* A fixed block-size memory pool is a very fast and efficient data
* structure for dynamic allocation of fixed block-size chunks of memory.
* A memory pool offers fast and deterministic allocation and recycling of
 * memory blocks and is not subject to fragmentation.<br>
*
* The ::QMPool class describes the native QF memory pool, which can be used as
* the event pool for dynamic event allocation, or as a fast, deterministic
* fixed block-size heap for any other objects in your application.
*
* @note
* ::QMPool contains only data members for managing a memory pool, but
* does not contain the pool storage, which must be provided externally
* during the pool initialization.
*
* @note
* The native QF event pool is configured by defining the macro
* #QF_EPOOL_TYPE_ as ::QMPool in the specific QF port header file.
*/
/*! start of the memory managed by this memory pool
* @private @memberof QMPool
*/
/*! end of the memory managed by this memory pool
* @private @memberof QMPool
*/
/*! head of linked list of free blocks
* @private @memberof QMPool
*/
/*! maximum block size (in bytes)
* @private @memberof QMPool
*/
/*! total number of blocks
* @private @memberof QMPool
*/
/*! number of free blocks remaining
* @private @memberof QMPool
*/
/*! minimum number of free blocks ever present in this pool
* @private @memberof QMPool
*
* @details
* this attribute remembers the low watermark of the pool, which
 * provides valuable information for sizing event pools.
* @sa QF_getPoolMin().
*/
/*! Initializes the native QF memory pool
* @public @memberof QMPool
*
* @details
* Initialize a fixed block-size memory pool by providing it with the pool
* memory to manage, size of this memory, and the block size.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] poolSto pointer to the memory buffer for pool storage
* @param[in] poolSize size of the storage buffer in bytes
* @param[in] blockSize fixed-size of the memory blocks in bytes
*
* @attention
* The caller of QMPool::init() must make sure that the `poolSto`
* pointer is properly **aligned**. In particular, it must be possible to
* efficiently store a pointer at the location pointed to by `poolSto`.
* Internally, the QMPool_init() function rounds up the block size
* `blockSize` so that it can fit an integer number of pointers.
* This is done to achieve proper alignment of the blocks within the pool.
*
* @note
* Due to the rounding of block size the actual capacity of the pool might
* be less than (`poolSize` / `blockSize`). You can check the capacity
* of the pool by calling the QF_getPoolMin() function.
*
* @note
* This function is **not** protected by a critical section, because
* it is intended to be called only during the initialization of the system,
* when interrupts are not allowed yet.
*
* @note
* Many QF ports use memory pools to implement the event pools.
*
* @usage
* The following example illustrates how to invoke QMPool_init():
* @include qmp_init.c
*/
/*! @pre The memory block must be valid
* and the poolSize must fit at least one free block
* and the blockSize must not be too close to the top of the dynamic range
*/
Q_REQUIRE_ID(100, (poolSto != (void *)0)
&& (poolSize >= (uint_fast32_t)sizeof(QFreeBlock))
&& ((uint_fast16_t)(blockSize + sizeof(QFreeBlock)) > blockSize));
me->free_head = poolSto;
/* round up the blockSize to fit an integer # free blocks, no division */
me->blockSize = (QMPoolSize)sizeof(QFreeBlock); /* start with just one */
/* #free blocks that fit in one memory block */
uint_fast16_t nblocks = 1U;
while (me->blockSize < (QMPoolSize)blockSize) {
me->blockSize += (QMPoolSize)sizeof(QFreeBlock);
++nblocks;
}
blockSize = (uint_fast16_t)me->blockSize; /* round-up to nearest block */
/* the pool buffer must fit at least one rounded-up block */
Q_ASSERT_ID(110, poolSize >= blockSize);
/* chain all blocks together in a free-list... */
poolSize -= (uint_fast32_t)blockSize; /* don't count the last block */
me->nTot = 1U; /* the last block already in the pool */
/* start at the head of the free list */
QFreeBlock *fb = (QFreeBlock *)me->free_head;
/* chain all blocks together in a free-list... */
while (poolSize >= (uint_fast32_t)blockSize) {
fb->next = &fb[nblocks]; /* point next link to next block */
fb = fb->next; /* advance to the next block */
poolSize -= (uint_fast32_t)blockSize; /* reduce available pool size */
++me->nTot; /* increment the number of blocks so far */
}
fb->next = (QFreeBlock *)0; /* the last link points to NULL */
me->nFree = me->nTot; /* all blocks are free */
me->nMin = me->nTot; /* the minimum number of free blocks */
    me->start = poolSto; /* the original start of this pool buffer */
me->end = fb; /* the last block in this pool */
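/*! A minimal usage sketch of initializing a memory pool with properly
 * aligned static storage (the `MyBlock` type, the pool depth and the
 * `App_initPool()` function are application-level assumptions; not part
 * of the QP/C sources):
 * @code
 * typedef struct { uint32_t payload[4]; } MyBlock; // example block type
 *
 * static QMPool myPool;         // the pool object
 * static MyBlock myPoolSto[16]; // naturally aligned pool storage
 *
 * void App_initPool(void) {
 *     QMPool_init(&myPool, myPoolSto,
 *                 sizeof(myPoolSto), sizeof(MyBlock));
 * }
 * @endcode
 */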
/*! Obtains a memory block from a memory pool.
* @public @memberof QMPool
*
* @details
* The function allocates a memory block from the pool and returns a pointer
* to the block back to the caller.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] margin the minimum number of unused blocks still available
* in the pool after the allocation.
*
* @returns
* A pointer to a memory block or NULL if no more blocks are available in
* the memory pool.
*
* @note
* This function can be called from any task level or ISR level.
*
* @note
* The memory pool `me` must be initialized before any events can
* be requested from it. Also, the QMPool_get() function uses internally a
* QF critical section, so you should be careful not to call it from within
* a critical section when nesting of critical section is not supported.
*
* @attention
* An allocated block must be later returned back to the **same** pool
* from which it has been allocated.
*
* @sa QMPool_put()
*
* @usage
* The following example illustrates how to use QMPool_get():
* @include qmp_use.c
*/
#ifndef Q_SPY
Q_UNUSED_PAR(qs_id);
#endif
QF_CRIT_STAT_
QF_CRIT_E_();
/* have more free blocks than the requested margin? */
QFreeBlock *fb;
if (me->nFree > (QMPoolCtr)margin) {
void *fb_next;
fb = (QFreeBlock *)me->free_head; /* get a free block */
/* the pool has some free blocks, so a free block must be available */
Q_ASSERT_CRIT_(310, fb != (QFreeBlock *)0);
fb_next = fb->next; /* put volatile to a temporary to avoid UB */
/* is the pool becoming empty? */
--me->nFree; /* one less free block */
if (me->nFree == 0U) {
/* pool is becoming empty, so the next free block must be NULL */
Q_ASSERT_CRIT_(320, fb_next == (QFreeBlock *)0);
me->nMin = 0U; /* remember that the pool got empty */
}
else {
            /*! @invariant
             * The pool is not empty, so the next free-block pointer
             * must be in range.
             *
             * @tr{PQP18_3}
             */
/* NOTE: The next free block pointer can fall out of range
* when the client code writes past the memory block, thus
* corrupting the next block.
*/
Q_ASSERT_CRIT_(330,
(me->start <= fb_next) && (fb_next <= me->end));
/* is the number of free blocks the new minimum so far? */
if (me->nMin > me->nFree) {
me->nMin = me->nFree; /* remember the new minimum */
}
}
me->free_head = fb_next; /* set the head to the next free block */
QS_BEGIN_NOCRIT_PRE_(QS_QF_MPOOL_GET, qs_id)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this memory pool */
QS_MPC_PRE_(me->nFree); /* # of free blocks in the pool */
QS_MPC_PRE_(me->nMin); /* min # free blocks ever in the pool */
QS_END_NOCRIT_PRE_()
}
/* don't have enough free blocks at this point */
else {
fb = (QFreeBlock *)0;
QS_BEGIN_NOCRIT_PRE_(QS_QF_MPOOL_GET_ATTEMPT, qs_id)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this memory pool */
QS_MPC_PRE_(me->nFree); /* # of free blocks in the pool */
QS_MPC_PRE_(margin); /* the requested margin */
QS_END_NOCRIT_PRE_()
}
QF_CRIT_X_();
return fb; /* return the block or NULL pointer to the caller */
/*! Recycles a memory block back to a memory pool.
* @public @memberof QMPool
*
* @details
* Recycle a memory block to the fixed block-size memory pool.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] b pointer to the memory block that is being recycled
*
* @attention
* The recycled block must be allocated from the **same** memory pool
* to which it is returned.
*
* @note
* This function can be called from any task level or ISR level.
*
* @sa
* QMPool_get()
*
* @usage
* The following example illustrates how to use QMPool_put():
* @include qmp_use.c
*/
#ifndef Q_SPY
Q_UNUSED_PAR(qs_id);
#endif
/*! @pre number of free blocks cannot exceed the total # blocks and
* the block pointer must be in range for this pool.
*/
Q_REQUIRE_ID(200, (me->nFree < me->nTot)
&& (me->start <= b) && (b <= me->end));
QF_CRIT_STAT_
QF_CRIT_E_();
((QFreeBlock *)b)->next = (QFreeBlock *)me->free_head;/* link into list */
me->free_head = b; /* set as new head of the free list */
++me->nFree; /* one more free block in this pool */
QS_BEGIN_NOCRIT_PRE_(QS_QF_MPOOL_PUT, qs_id)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this memory pool */
QS_MPC_PRE_(me->nFree); /* the number of free blocks in the pool */
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
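/*! A minimal usage sketch of a get/put round trip on the pool from the
 * sketch above (the `App_usePoolBlock()` function is an application-level
 * assumption; 0U is passed as the QS id):
 * @code
 * void App_usePoolBlock(void) {
 *     // margin 0U: QMPool_get() returns NULL only when the pool is empty
 *     MyBlock *blk = (MyBlock *)QMPool_get(&myPool, 0U, 0U);
 *     if (blk != (MyBlock *)0) {
 *         // ... use the block ...
 *         QMPool_put(&myPool, blk, 0U); // return it to the SAME pool
 *     }
 * }
 * @endcode
 */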
/*! @brief QF active object framework
* @class QF
*/
{
    uint8_t dummy; /*!< dummy attribute */
} QF;
/*! Interrupt lock up-down counter (used in some QF ports )
* @static @private @memberof QF
*/
/*! Interrupt nesting up-down counter (used in some QF ports )
* @static @private @memberof QF
*/
/*! QF initialization
* @static @public @memberof QF
*
* @details
* Initializes QF and must be called exactly once before any other QF
 * function. Typically, QF_init() is called from main() even before
* initializing the Board Support Package (BSP).
*
* @note
* QF_init() clears the internal QF variables, so that the framework
* can start correctly even if the startup code fails to clear the
* uninitialized data (as is required by the C Standard).
*/
/*! Function invoked by the application layer to stop the QF
* application and return control to the OS/Kernel.
* @static @public @memberof QF
*
* @details
* This function stops the QF application. After calling this function,
* QF attempts to gracefully stop the application. This graceful shutdown
* might take some time to complete. The typical use of this function is
* for terminating the QF application to return back to the operating
* system or for handling fatal errors that require shutting down
 * (and possibly resetting) the system.
*
* @attention
* After calling QF_stop() the application must terminate and cannot
* continue. In particular, QF_stop() is **not** intended to be followed
* by a call to QF_init() to "resurrect" the application.
*
* @sa QF_onCleanup()
*/
/*! Transfers control to QF to run the application.
* @static @public @memberof QF
*
* @details
* QF_run() is typically called from your startup code after you initialize
* the QF and start at least one active object with QACTIVE_START().
*
* @returns
* In QK, the QF_run() does not return.
*/
/*! initialization of publish-subscribe
*
* @deprecated
* @sa QActive_psInit()
*/
QActive_psInit(subscrSto, maxSignal);
/*! This function returns the minimum number of free entries of
 * the given event queue.
* @static @public @memberof QF
*
* @details
 * Queries the minimum number of free entries ever present in the given
 * event queue of an active object with priority `prio`, since the active
 * object was started.
*
* @note
* This function is available only when the native QF event queue
* implementation is used. Requesting the queue minimum of an unused
* priority level raises an assertion in the QF. (A priority level becomes
* used in QF after the call to the QActive_register_() function.)
*
* @param[in] prio Priority of the active object, whose queue is queried
*
* @returns
 * the minimum number of free entries ever present in the given event queue
 * of an active object with priority `prio`, since the active object was
 * started.
*/
Q_REQUIRE_ID(400, (prio <= QF_MAX_ACTIVE)
&& (QActive_registry_[prio] != (QActive *)0));
QF_CRIT_STAT_
QF_CRIT_E_();
uint_fast16_t const min =
(uint_fast16_t)QActive_registry_[prio]->eQueue.nMin;
QF_CRIT_X_();
return min;
/*! Startup QF callback.
* @static @public @memberof QF
*
* @details
* The purpose of the QF_onStartup() callback is to configure and enable
* hardware interrupts. The callback is invoked from QF_run(), right before
* starting the underlying real-time kernel. By that time, the application
* is considered ready to receive and service interrupts.
*
* This function is application-specific and is not implemented in QF, but
* rather in the Board Support Package (BSP) for the given application.
*/
/*! Cleanup QF callback.
* @static @public @memberof QF
*
* @details
* QF_onCleanup() is called in some QF ports before QF returns to the
* underlying real-time kernel or operating system.
*
* This function is strongly platform-specific and is not implemented in
* the QF, but either in the QF port or in the Board Support Package (BSP)
* for the given application. Some QF ports might not require implementing
* QF_onCleanup() at all, because many embedded applications don't have
* anything to exit to.
*
* @sa QF_stop()
*/
/*! Event pool initialization for dynamic allocation of events.
* @static @public @memberof QF
*
* @details
* This function initializes one event pool at a time and must be called
* exactly once for each event pool before the pool can be used.
*
* @param[in] poolSto pointer to the storage for the event pool
* @param[in] poolSize size of the storage for the pool in bytes
* @param[in] evtSize the block-size of the pool in bytes, which determines
* the maximum size of events that can be allocated from the pool.
*
* @attention
* You might initialize many event pools by making many consecutive calls
* to the QF_poolInit() function. However, for the simplicity of the internal
* implementation, you must initialize event pools in the **ascending order**
* of the event size.
*
* Many RTOSes provide fixed block-size heaps, a.k.a. memory pools that can
* be adapted for QF event pools. In case such support is missing, QF provides
* a native QF event pool implementation. The macro #QF_EPOOL_TYPE_ determines
* the type of event pool used by a particular QF port. See structure ::QMPool
* for more information.
*
 * @note The actual number of events available in the pool might be
* less than (`poolSize` / `evtSize`) due to the internal alignment
* of the blocks that the pool might perform. You can always check the
* capacity of the pool by calling QF_getPoolMin().
*
* @note The dynamic allocation of events is optional, meaning that you
* might choose not to use dynamic events. In that case calling QF_poolInit()
* and using up memory for the memory blocks is unnecessary.
*
* @sa QF initialization example for QF_init()
*/
/*! @pre cannot exceed the number of available memory pools */
Q_REQUIRE_ID(200, QF_maxPool_ < QF_MAX_EPOOL);
/*! @pre please initialize event pools in ascending order of evtSize: */
Q_REQUIRE_ID(201, (QF_maxPool_ == 0U)
|| (QF_EPOOL_EVENT_SIZE_(QF_ePool_[QF_maxPool_ - 1U])
< evtSize));
/* perform the platform-dependent initialization of the pool */
QF_EPOOL_INIT_(QF_ePool_[QF_maxPool_], poolSto, poolSize, evtSize);
++QF_maxPool_; /* one more pool */
#ifdef Q_SPY
/* generate the object-dictionary entry for the initialized pool */
{
uint8_t obj_name[9] = "EvtPool?";
obj_name[7] = (uint8_t)(((uint8_t)'0' + QF_maxPool_) & 0x7FU);
QS_obj_dict_pre_(&QF_ePool_[QF_maxPool_ - 1U], (char const *)obj_name);
}
#endif /* Q_SPY*/
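/*! A minimal usage sketch of initializing event pools in the required
 * ascending order of event size (the `MediumEvt`/`LargeEvt` types and
 * the pool depths are application-level assumptions; not part of the
 * QP/C sources):
 * @code
 * typedef struct { QEvt super; uint16_t x; } MediumEvt;
 * typedef struct { QEvt super; uint8_t buf[32]; } LargeEvt;
 *
 * static QF_MPOOL_EL(QEvt)      smlPoolSto[20]; // smallest events
 * static QF_MPOOL_EL(MediumEvt) medPoolSto[10]; // medium-size events
 * static QF_MPOOL_EL(LargeEvt)  lrgPoolSto[5];  // largest events
 *
 * int main(void) {
 *     QF_init();
 *     QF_poolInit(smlPoolSto, sizeof(smlPoolSto), sizeof(smlPoolSto[0]));
 *     QF_poolInit(medPoolSto, sizeof(medPoolSto), sizeof(medPoolSto[0]));
 *     QF_poolInit(lrgPoolSto, sizeof(lrgPoolSto), sizeof(lrgPoolSto[0]));
 *     // ... start the active objects ...
 *     return QF_run();
 * }
 * @endcode
 */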
/*! Obtain the block size of any registered event pools.
* @static @public @memberof QF
*
* @details
* Obtain the block size of any registered event pools
*/
return QF_EPOOL_EVENT_SIZE_(QF_ePool_[QF_maxPool_ - 1U]);
/*! Obtain the minimum of free entries of the given event pool.
* @static @public @memberof QF
*
* @details
* This function obtains the minimum number of free blocks in the given
* event pool since this pool has been initialized by a call to QF_poolInit().
*
* @param[in] poolId event pool ID in the range 1..QF_maxPool_, where
* QF_maxPool_ is the number of event pools initialized
* with the function QF_poolInit().
*
* @returns
* the minimum number of unused blocks in the given event pool.
*/
/*! @pre the poolId must be in range */
Q_REQUIRE_ID(400, (poolId <= QF_MAX_EPOOL)
&& (0U < poolId) && (poolId <= QF_maxPool_));
QF_CRIT_STAT_
QF_CRIT_E_();
uint_fast16_t const min = (uint_fast16_t)QF_ePool_[poolId - 1U].nMin;
QF_CRIT_X_();
return min;
/*! Internal QF implementation of creating new dynamic event.
* @static @private @memberof QF
*
* @details
* Allocates an event dynamically from one of the QF event pools.
*
* @param[in] evtSize the size (in bytes) of the event to allocate
* @param[in] margin the number of un-allocated events still available
* in a given event pool after the allocation completes.
* The special value ::QF_NO_MARGIN means that this function
* will assert if allocation fails.
* @param[in] sig the signal to be assigned to the allocated event
*
* @returns
* pointer to the newly allocated event. This pointer can be NULL only if
* margin != #QF_NO_MARGIN and the event cannot be allocated with the
* specified margin still available in the given pool.
*
* @note
* The internal QF function QF_newX_() raises an assertion when the
* `margin` parameter is #QF_NO_MARGIN and allocation of the event turns
* out to be impossible due to event pool depletion, or incorrect (too big)
* size of the requested event.
*
* @note
* The application code should not call this function directly.
 * The only allowed use is through the macros Q_NEW() or Q_NEW_X().
*/
uint_fast8_t idx;
/* find the pool index that fits the requested event size ... */
for (idx = 0U; idx < QF_maxPool_; ++idx) {
if (evtSize <= QF_EPOOL_EVENT_SIZE_(QF_ePool_[idx])) {
break;
}
}
/* cannot run out of registered pools */
Q_ASSERT_ID(310, idx < QF_maxPool_);
/* get e -- platform-dependent */
QEvt *e;
#ifdef Q_SPY
QF_EPOOL_GET_(QF_ePool_[idx], e,
((margin != QF_NO_MARGIN) ? margin : 0U),
(uint_fast8_t)QS_EP_ID + idx + 1U);
#else
QF_EPOOL_GET_(QF_ePool_[idx], e,
((margin != QF_NO_MARGIN) ? margin : 0U), 0U);
#endif
/* was e allocated correctly? */
QS_CRIT_STAT_
if (e != (QEvt *)0) {
e->sig = (QSignal)sig; /* set signal for this event */
e->poolId_ = (uint8_t)(idx + 1U); /* store the pool ID */
e->refCtr_ = 0U; /* set the reference counter to 0 */
QS_BEGIN_PRE_(QS_QF_NEW, (uint_fast8_t)QS_EP_ID + e->poolId_)
QS_TIME_PRE_(); /* timestamp */
QS_EVS_PRE_(evtSize); /* the size of the event */
QS_SIG_PRE_(sig); /* the signal of the event */
QS_END_PRE_()
}
/* event cannot be allocated */
else {
/* This assertion means that the event allocation failed,
* and this failure cannot be tolerated. The most frequent
* reason is an event leak in the application.
*/
Q_ASSERT_ID(320, margin != QF_NO_MARGIN);
QS_BEGIN_PRE_(QS_QF_NEW_ATTEMPT, (uint_fast8_t)QS_EP_ID + idx + 1U)
QS_TIME_PRE_(); /* timestamp */
QS_EVS_PRE_(evtSize); /* the size of the event */
QS_SIG_PRE_(sig); /* the signal of the event */
QS_END_PRE_()
}
return e; /* can't be NULL if we can't tolerate failed allocation */
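/*! A minimal usage sketch of dynamic event allocation through the
 * Q_NEW() and Q_NEW_X() macros, the only sanctioned interfaces to this
 * function (a fragment assuming an application event type `MyEvt` with
 * ::QEvt as its first member, a `MY_SIG` signal, an `AO_Receiver`
 * pointer and `me` as the posting AO; not part of the QP/C sources):
 * @code
 * MyEvt *e = Q_NEW(MyEvt, MY_SIG); // asserts on event-pool depletion
 * e->param = 123U;                 // fill the event parameter(s)
 * QACTIVE_POST(AO_Receiver, &e->super, me);
 *
 * MyEvt *e2 = Q_NEW_X(MyEvt, 3U, MY_SIG); // leave a margin of 3 events
 * if (e2 != (MyEvt *)0) { // allocation succeeded?
 *     // ... fill and post e2, or handle the failed allocation ...
 * }
 * @endcode
 */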
/*! Recycle a dynamic event
* @static @private @memberof QF
*
* @details
* This function implements a simple garbage collector for the dynamic events.
* Only dynamic events are candidates for recycling. (A dynamic event is one
* that is allocated from an event pool, which is determined as non-zero
* e->poolId_ attribute.) Next, the function decrements the reference counter
* of the event (e->refCtr_), and recycles the event only if the counter drops
* to zero (meaning that no more references are outstanding for this event).
* The dynamic event is recycled by returning it to the pool from which
* it was originally allocated.
*
* @param[in] e pointer to the event to recycle
*
* @note
* QF invokes the garbage collector at all appropriate contexts, when
* an event can become garbage (automatic garbage collection), so the
* application code should have no need to call QF_gc() directly. The QF_gc()
* function is exposed only for special cases when your application sends
* dynamic events to the "raw" thread-safe queues (see ::QEQueue). Such
* queues are processed outside of QF and the automatic garbage collection
* is **NOT** performed for these events. In this case you need to call
* QF_gc() explicitly.
*/
/* is it a dynamic event? */
if (e->poolId_ != 0U) {
QF_CRIT_STAT_
QF_CRIT_E_();
/* isn't this the last reference? */
if (e->refCtr_ > 1U) {
QS_BEGIN_NOCRIT_PRE_(QS_QF_GC_ATTEMPT,
(uint_fast8_t)QS_EP_ID + e->poolId_)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_END_NOCRIT_PRE_()
QEvt_refCtr_dec_(e); /* decrement the ref counter */
QF_CRIT_X_();
}
/* this is the last reference to this event, recycle it */
else {
uint_fast8_t const idx = (uint_fast8_t)e->poolId_ - 1U;
QS_BEGIN_NOCRIT_PRE_(QS_QF_GC,
(uint_fast8_t)QS_EP_ID + e->poolId_)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
/* pool ID must be in range */
Q_ASSERT_ID(410, idx < QF_maxPool_);
/* cast 'const' away, which is OK, because it's a pool event */
#ifdef Q_SPY
QF_EPOOL_PUT_(QF_ePool_[idx], (QEvt *)e,
(uint_fast8_t)QS_EP_ID + e->poolId_);
#else
QF_EPOOL_PUT_(QF_ePool_[idx], (QEvt *)e, 0U);
#endif
}
}
/*! Internal QF implementation of creating new event reference.
* @static @private @memberof QF
*
* @details
* Creates and returns a new reference to the current event e
*
* @param[in] e pointer to the current event
* @param[in] evtRef the event reference
*
* @returns
* the newly created reference to the event `e`
*
* @note
* The application code should not call this function directly.
 * The only allowed use is through the macro Q_NEW_REF().
*/
#ifdef Q_NASSERT
Q_UNUSED_PAR(evtRef);
#endif
/*! @pre the event must be dynamic and the provided event reference
* must not be already in use */
Q_REQUIRE_ID(500,
(e->poolId_ != 0U)
&& (evtRef == (void *)0));
QF_CRIT_STAT_
QF_CRIT_E_();
QEvt_refCtr_inc_(e); /* increments the ref counter */
QS_BEGIN_NOCRIT_PRE_(QS_QF_NEW_REF,
(uint_fast8_t)QS_EP_ID + e->poolId_)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
return e;
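/*! A minimal usage sketch of keeping an event beyond the current
 * run-to-completion step by means of Q_NEW_REF()/Q_DELETE_REF(), the
 * only sanctioned interfaces to this function (a fragment from a
 * hypothetical state handler; the `me->savedReq` member, the `ReqEvt`
 * type and the signals are application-level assumptions; Q_NEW_REF()
 * implicitly refers to the handler's current event `e`):
 * @code
 * case REQUEST_SIG: {
 *     // store a reference to the current event for later processing
 *     Q_NEW_REF(me->savedReq, ReqEvt);
 *     status_ = Q_TRAN(&MyAO_busy);
 *     break;
 * }
 * case DONE_SIG: {
 *     // ... use me->savedReq ...
 *     Q_DELETE_REF(me->savedReq); // release the reference
 *     status_ = Q_TRAN(&MyAO_idle);
 *     break;
 * }
 * @endcode
 */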
/*! Internal QF implementation of deleting event reference.
* @static @private @memberof QF
*
* @details
* Deletes an existing reference to the event e
*
* @param[in] evtRef the event reference
*
* @note
* The application code should not call this function directly.
 * The only allowed use is through the macro Q_DELETE_REF().
*/
QS_CRIT_STAT_
QEvt const * const e = (QEvt const *)evtRef;
QS_BEGIN_PRE_(QS_QF_DELETE_REF,
(uint_fast8_t)QS_EP_ID + e->poolId_)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_END_PRE_()
#if (QF_MAX_EPOOL > 0U)
QF_gc(e);
#endif
/*! array of event pools managed by QF */
/*! number of initialized event pools */
/*! "Ready-set" of all threads used in the built-in kernels
* @static @private @memberof QF
*/
/*! Clear a specified region of memory to zero.
* @static @public @memberof QF
*
* @details
* Clears a memory buffer by writing zeros byte-by-byte.
*
* @param[in] start pointer to the beginning of a memory buffer.
* @param[in] len length of the memory buffer to clear (in bytes)
*
* @note The main application of this function is clearing the internal QF
* variables upon startup. This is done to avoid problems with non-standard
* startup code provided with some compilers and toolsets (e.g., TI DSPs or
* Microchip MPLAB), which does not zero the uninitialized variables, as
* required by the ANSI C standard.
*/
uint8_t *ptr = (uint8_t *)start;
for (uint_fast16_t n = len; n > 0U; --n) {
*ptr = 0U;
++ptr;
}
/*! QF context switch callback used in built-in kernels (QV, QK, QXK)
* @static @public @memberof QF
*
* @details
* This callback function provides a mechanism to perform additional
* custom operations when one of the built-in kernels switches context
* from one thread to another.
*
* @param[in] prev pointer to the previous thread (active object)
* (prev==0 means that `prev` was the idle loop)
 * @param[in] next   pointer to the next thread (active object)
 *                   (next==0 means that `next` is the idle loop)
* @attention
* QF_onContextSw() is invoked with interrupts **disabled** and must also
* return with interrupts **disabled**.
*
* @note
* This callback is enabled by defining the macro #QF_ON_CONTEXT_SW.
*
* @include qf_oncontextsw.c
*/
/*! Native QF event pool */
QMPool
/*! Native QF event pool initialization */
\
(QMPool_init(&(p_), (poolSto_), (poolSize_), (evtSize_)))
/*! Native QF event pool event-size getter */
((uint_fast16_t)(p_).blockSize)
/*! Native QF event pool get-event */
\
((e_) = (QEvt *)QMPool_get(&(p_), (m_), (qs_id_)))
/*! Native QF event pool put-event */
\
(QMPool_put(&(p_), (e_), (qs_id_)))
/*! The size [bytes] of the internal QS buffer-counters. Valid values: 2U or 4U;
* default 2U.
*
* @details
* This macro can be defined in the QS port file (qs_port.h) to
* configure the ::QSCtr type. Here the macro is not defined so the
 * default of 2 bytes is chosen.
*/
2U
/*! The size [bytes] of the QS time stamp. Valid values: 1U, 2U, or 4U;
* default 4U.
*
* @details
* This macro can be defined in the QS port file (qs_port.h) to
* configure the ::QSTimeCtr type. Here the macro is not defined so the
 * default of 4 bytes is chosen.
*/
4U
/*! Initialize the QS facility
*
* @details
* This macro provides an indirection layer to invoke the QS initialization
* routine if #Q_SPY is defined, or do nothing if #Q_SPY is not defined.
* @sa QS_onStartup(), example of setting up a QS filter in
* QS_GLB_FILTER()
*/
(QS_onStartup(arg_))
/*! Cleanup the QS facility
*
* @details
* This macro provides an indirection layer to invoke the QS cleanup
* routine if #Q_SPY is defined, or do nothing if #Q_SPY is not defined.
* @sa QS_onCleanup()
*/
(QS_onCleanup())
/*! macro to handle the QS output from the application
*
* @note
* If this macro is used, the application must define QS_output().
*/
(QS_output())
/*! macro to handle the QS-RX input to the application
*
* @note
 * If this macro is used, the application must define QS_rx_input().
*/
(QS_rx_input())
/*! Global Filter ON for a given record type `rec_`
*
* @details
* This macro provides an indirection layer to call QS_filterOn()
* if #Q_SPY is defined, or do nothing if #Q_SPY is not defined.
*
* @sa
* - enum QSpyGroups - QS record groups that can be used as `rec_`
* - enum QSpyPre - predefined QS records that can be used as `rec_`
*
* @usage
* The following example shows how to use QS filters:
* @include qs_filter.c
*/
(QS_glbFilter_((int_fast16_t)(rec_)))
/*! Local Filter for a given state machine object `qs_id`
*
* @details
* This macro provides an indirection layer to call QS_locFilter_()
* if #Q_SPY is defined, or do nothing if #Q_SPY is not defined.
*
* @sa
* - enum QSpyIdGroups - QS ID groups that can be used as `qs_id_`
* - enum QSpyIdOffsets - QS ID offsets for `qs_id_` (e.g., QS_AP_IDS + 5)
*
* The following example shows how to use QS filters:
* @include qs_filter.c
*/
(QS_locFilter_((int_fast16_t)(qs_id_)))
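/*! A minimal usage sketch of a typical QS filter setup during startup
 * (the `App_setupQS()` function and the specific filter choices are
 * application-level assumptions; the group names follow enum QSpyGroups
 * and enum QSpyIdGroups):
 * @code
 * void App_setupQS(void) { // called after QS initialization
 *     QS_GLB_FILTER(QS_ALL_RECORDS); // enable all record types...
 *     QS_GLB_FILTER(-QS_QF_TICK);    // ...but suppress clock-tick records
 *     QS_LOC_FILTER(QS_AO_IDS);      // enable the active-object QS IDs
 * }
 * @endcode
 */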
/*! Begin an application-specific QS record with entering critical section
*
* @details
* The following example shows how to build a user QS record using the
* macros QS_BEGIN_ID(), QS_END(), and the formatted output macros:
* QS_U8(), QS_STR(), etc.
*
* @note
* Must always be used in pair with QS_END()
*
* @include qs_ap.c
*/
\
if (QS_GLB_CHECK_(rec_) && QS_LOC_CHECK_(qs_id_)) { \
QS_CRIT_STAT_ \
QS_CRIT_E_(); \
QS_beginRec_((uint_fast8_t)(rec_)); \
QS_TIME_PRE_(); {
/*! End an application-specific QS record with exiting critical section.
*
* @sa example for QS_BEGIN_ID()
* @note Must always be used in pair with QS_BEGIN_ID()
*/
} \
QS_endRec_(); \
QS_CRIT_X_(); \
}
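/* Illustrative sketch (not part of the generated model), similar in spirit
 * to the qs_ap.c example referenced above. MY_STAT, report_stat() and the
 * output values are assumed application-specific names.
 */
enum { MY_STAT = QS_USER }; /* application-specific record ID */

void report_stat(uint8_t level, char const *msg) {
    QS_BEGIN_ID(MY_STAT, 0U)  /* record ID, QS-ID of the producer */
        QS_U8(0U, level);     /* formatted uint8_t (default width) */
        QS_STR(msg);          /* formatted zero-terminated string */
    QS_END()
}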
/*! Flush the QS trace data to the host
*
* @details
* This macro invokes the QS_flush() platform-dependent callback
* function to flush the QS trace buffer to the host. The function
* typically busy-waits until all the data in the buffer is sent to
* the host. This is acceptable only in the initial transient.
*/
(QS_onFlush())
/*! Begin an application-specific QS record WITHOUT entering critical section */
\
if (QS_GLB_CHECK_(rec_) && QS_LOC_CHECK_(qs_id_)) { \
QS_beginRec_((uint_fast8_t)(rec_)); \
QS_TIME_PRE_(); {
/*! End an application-specific QS record WITHOUT exiting critical section */
} \
QS_endRec_();\
}
/*! Helper macro for checking the global QS filter */
\
(((uint_fast8_t)QS_priv_.glbFilter[(uint_fast8_t)(rec_) >> 3U] \
& ((uint_fast8_t)1U << ((uint_fast8_t)(rec_) & 7U))) != 0U)
/*! Helper macro for checking the local QS filter */
\
(((uint_fast8_t)QS_priv_.locFilter[(uint_fast8_t)(qs_id_) >> 3U] \
& ((uint_fast8_t)1U << ((uint_fast8_t)(qs_id_) & 7U))) != 0U)
/*! Macro to execute user code when a QS record is produced
*
* @note
* This is a dummy definition in case this macro is undefined.
*/
((void)0)
/*! Output formatted int8_t to the QS record */
\
(QS_u8_fmt_((uint8_t)(((width_) << 4U) & 0x7FU) | (uint8_t)QS_I8_ENUM_T, \
(data_)))
/*! Output formatted uint8_t to the QS record */
\
(QS_u8_fmt_((uint8_t)(((width_) << 4)) | (uint8_t)QS_U8_T, (data_)))
/*! Output formatted int16_t to the QS record */
\
(QS_u16_fmt_((uint8_t)(((width_) << 4)) | (uint8_t)QS_I16_T, (data_)))
/*! Output formatted uint16_t to the QS record */
\
(QS_u16_fmt_((uint8_t)(((width_) << 4)) | (uint8_t)QS_U16_T, (data_)))
/*! Output formatted int32_t to the QS record */
\
(QS_u32_fmt_((uint8_t)(((width_) << 4)) | (uint8_t)QS_I32_T, (data_)))
/*! Output formatted uint32_t to the QS record */
\
(QS_u32_fmt_((uint8_t)(((width_) << 4)) | (uint8_t)QS_U32_T, (data_)))
/*! Output formatted int64_t to the QS record */
\
(QS_u64_fmt_((uint8_t)(((width_) << 4)) | (uint8_t)QS_I64_T, (data_)))
/*! Output formatted uint64_t to the QS record */
\
(QS_u64_fmt_((uint8_t)(((width_) << 4)) | (uint8_t)QS_U64_T, (data_)))
/*! Output formatted 32-bit floating point number to the QS record */
\
(QS_f32_fmt_((uint8_t)(((width_) << 4)) | (uint8_t)QS_F32_T, (data_)))
/*! Output formatted 64-bit floating point number to the QS record */
\
(QS_f64_fmt_((uint8_t)(((width_) << 4)) | (uint8_t)QS_F64_T, (data_)))
/*! Output formatted zero-terminated ASCII string to the QS record */
(QS_str_fmt_((str_)))
/*! Output formatted memory block of up to 255 bytes to the QS record */
(QS_mem_fmt_((mem_), (size_)))
/*! Output formatted enumeration to the QS record */
\
(QS_u8_fmt_((uint8_t)(0x80U | ((group_) << 4U)) | (uint8_t)QS_I8_ENUM_T,\
(uint8_t)(value_)))
/*! Output time stamp to a QS record (used in predefined
* and application-specific trace records)
*/
(QS_u32_raw_(QS_onGetTime()))
(QS_u16_raw_(QS_onGetTime()))
(QS_u8_raw_(QS_onGetTime()))
/*! Output formatted object pointer to the QS record */
(QS_u32_fmt_(QS_OBJ_T, (uint32_t)(obj_)))
(QS_u16_fmt_(QS_OBJ_T, (uint16_t)(obj_)))
(QS_u8_fmt_(QS_OBJ_T, (uint8_t)(obj_)))
(QS_u64_fmt_(QS_OBJ_T, (uint64_t)(obj_)))
/* Output formatted function pointer to the QS record */
(QS_u32_fmt_(QS_FUN_T, (uint32_t)(fun_)))
(QS_u16_fmt_(QS_FUN_T, (uint16_t)(fun_)))
(QS_u8_fmt_(QS_FUN_T, (uint8_t)(fun_)))
(QS_u64_fmt_(QS_FUN_T, (uint64_t)(fun_)))
/*! Output formatted event signal (of type ::QSignal) and
* the state machine object to the user QS record
*/
\
QS_u32_fmt_(QS_SIG_T, (sig_)); \
QS_obj_raw_(obj_)
\
QS_u16_fmt_(QS_SIG_T, (sig_)); \
QS_obj_raw_(obj_)
\
QS_u8_fmt_(QS_SIG_T, (sig_)); \
QS_obj_raw_(obj_)
/*! Output QS signal dictionary record
*
* @details
* A signal dictionary record associates the numerical value of the signal
* and the binary address of the state machine that consumes that signal
* with the human-readable name of the signal.
*
* @param[in] sig_ event signal (typically enumerated, e.g. `TIMEOUT_SIG`)
* @param[in] obj_ pointer to the associated state machine object
* (might be `(void*)0` for globally recognized signals)
*
* A signal dictionary entry is associated with both the signal value `sig_`
* and the state machine `obj_`, because signals are required to be unique
* only within a given state machine and therefore the same numerical values
* can represent different signals in different state machines.
*
* For the "global" signals that have the same meaning in many state machines
* (such as globally published signals), you can specify a signal dictionary
* entry with the `obj_` parameter set to `(void*)0`.
*
* The following example shows the definition of signal dictionary entries
* in the initial transition of the Table active object. Please note that
* signals HUNGRY_SIG and DONE_SIG are associated with the Table state
* machine only ("me" `obj_` pointer). The EAT_SIG signal, on the other
* hand, is global (0 `obj_` pointer):
* @include qs_sigDic.c
*
* The following QSpy log example shows the signal dictionary records
* generated from the Table initial transition and subsequent records that
* show human-readable names of the signals:
* @include qs_sigLog.txt
*/
\
(QS_sig_dict_pre_((sig_), (obj_), #sig_))
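/* Illustrative sketch (not part of the generated model), similar in spirit
 * to the qs_sigDic.c example referenced above; HUNGRY_SIG, DONE_SIG and
 * EAT_SIG are assumed application signals.
 */
/* inside the top-most initial transition of the Table AO ... */
QS_SIG_DICTIONARY(HUNGRY_SIG, me);        /* signal local to Table */
QS_SIG_DICTIONARY(DONE_SIG,   me);        /* signal local to Table */
QS_SIG_DICTIONARY(EAT_SIG,    (void *)0); /* globally published signal */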
/*! Output object dictionary record
*
* @details
* An object dictionary record associates the binary address of an object
* in the target's memory with the human-readable name of the object.
*
* @param[in] obj_ pointer to the object (any object)
*
* The following example shows the definition of object dictionary entry
* for the Table active object:
* @include qs_objDic.c
*/
\
(QS_obj_dict_pre_((obj_), #obj_))
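/* Illustrative sketch (not part of the generated model), similar in spirit
 * to the qs_objDic.c example referenced above; AO_Table and Table_inst are
 * assumed application objects.
 */
QS_OBJ_DICTIONARY(AO_Table);            /* the Table active object */
QS_OBJ_DICTIONARY(&Table_inst.timeEvt); /* a time event inside Table */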
/*! Output object-array dictionary record
*
* @details
* An object array dictionary record associates the binary address of the
* object element in the target's memory with the human-readable name
* of the object.
*
* @param[in] obj_ pointer to the object (any object)
* @param[in] idx_ array index
*
* The following example shows the definition of object array dictionary
* for `Philo::inst[n]` and `Philo::inst[n].m_timeEvt`:
* @include qs_objArrDic.c
*/
\
(QS_obj_arr_dict_pre_((obj_), (idx_), #obj_))
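/* Illustrative sketch (not part of the generated model), similar in spirit
 * to the qs_objArrDic.c example referenced above; Philo_inst[] and N_PHILO
 * are assumed application names.
 */
for (uint8_t n = 0U; n < N_PHILO; ++n) {
    QS_OBJ_ARR_DICTIONARY(&Philo_inst[n], n);
    QS_OBJ_ARR_DICTIONARY(&Philo_inst[n].timeEvt, n);
}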
/*! Output function dictionary record
*
* @details
* A function dictionary record associates the binary address of a function
* in the target's memory with the human-readable name of the function.
*
* Providing a function dictionary QS record can vastly improve readability
* of the QS log, because instead of dealing with cryptic machine addresses
* the QSpy host utility can display human-readable function names.
*
* The example from #QS_SIG_DICTIONARY shows the definition of a function
* dictionary.
*/
\
(QS_fun_dict_pre_((void (*)(void))(fun_), #fun_))
/*! Output user QS record dictionary record
*
* @details
* A user QS record dictionary record associates the numerical value of a
* user record with the human-readable identifier.
*/
\
(QS_usr_dict_pre_((rec_), #rec_))
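/* Illustrative sketch (not part of the generated model): MY_STAT is the
 * same assumed user record as in the earlier application-record sketch.
 */
enum { MY_STAT = QS_USER }; /* application-specific record ID */

/* typically in the QS setup code, e.g. right after QS_INIT() ... */
QS_USR_DICTIONARY(MY_STAT); /* let QSPY show "MY_STAT" instead of 100 */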
/*! Output enumeration dictionary record
*
* @details
* An enum QS record dictionary record associates the numerical value of
* an enumeration with the human-readable identifier.
*/
\
(QS_enum_dict_pre_((value_), (group_), #value_))
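/* Illustrative sketch (not part of the generated model): MyMode and the
 * enumeration group number 0 are assumed application choices.
 */
enum MyMode { MODE_OFF, MODE_ON, MODE_AUTO };

QS_ENUM_DICTIONARY(MODE_OFF,  0U); /* group 0, value 0 */
QS_ENUM_DICTIONARY(MODE_ON,   0U); /* group 0, value 1 */
QS_ENUM_DICTIONARY(MODE_AUTO, 0U); /* group 0, value 2 */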
/*! Output the critical section entry record */
/*! Output the critical section exit record */
/*! Output the interrupt entry record */
/*! Output the ISR entry trace record */
QS_BEGIN_NOCRIT_PRE_(QS_QF_ISR_ENTRY, 0U)
QS_TIME_PRE_();
QS_2u8_raw_(isrnest, prio);
QS_END_NOCRIT_PRE_()
/*! Output the ISR exit trace record */
QS_BEGIN_NOCRIT_PRE_(QS_QF_ISR_EXIT, 0U)
QS_TIME_PRE_();
QS_2u8_raw_(isrnest, prio);
QS_END_NOCRIT_PRE_()
/*! Execute an action that is only necessary for QS output */
(act_)
/*! Constant representing End-Of-Data condition returned from the
* QS_getByte() function.
*/
((uint16_t)0xFFFFU)
/*! Constant representing command enumeration group
* in QS_ENUM_DICTIONARY() and QS_ENUM()
* @sa QS_onCommand()
*/
((uint8_t)7U)
/*! Constant representing HEX format for the "width" field
* in QS_U8(), QS_U16(), QS_U32(), and QS_U64().
*/
((uint8_t)0x0FU)
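/* Illustrative sketch (not part of the generated model): passing the HEX
 * constant above (known in QP/C as QS_HEX_FMT, an assumption here) as the
 * "width" argument requests hexadecimal display by the QSPY host; MY_STAT
 * and reg_value are assumed application names.
 */
QS_BEGIN_ID(MY_STAT, 0U)
    QS_U32(QS_HEX_FMT, reg_value); /* shown as hex by QSPY */
QS_END()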
/*! QS ring buffer counter and offset type */
/*! QS time stamp type, which determines the dynamic range of QS time stamps */
/*! QS function pointer type (for serializing function pointers) */
/*! QS pre-defined record types (TX channel)
* @static @public @memberof QS_tx
*
* @details
* This enumeration specifies the record types used in the QP components.
* You can specify your own record types starting from ::QS_USER offset.
* Currently, the total number of all records cannot exceed 125.
*
* @note
* The QS records labeled as "not maskable" are always enabled and cannot
* be turned off with the QS_GLB_FILTER() macro. Other QS trace records
* can be disabled by means of the "global filters".
*
* @sa QS_GLB_FILTER() macro
*/
{
/* [0] QS session (not maskable) */
QS_EMPTY, /*!< QS record for cleanly starting a session */
/* [1] SM records */
QS_QEP_STATE_ENTRY, /*!< a state was entered */
QS_QEP_STATE_EXIT, /*!< a state was exited */
QS_QEP_STATE_INIT, /*!< an initial transition was taken in a state */
QS_QEP_INIT_TRAN, /*!< the top-most initial transition was taken */
QS_QEP_INTERN_TRAN, /*!< an internal transition was taken */
QS_QEP_TRAN, /*!< a regular transition was taken */
QS_QEP_IGNORED, /*!< an event was ignored (silently discarded) */
QS_QEP_DISPATCH, /*!< an event was dispatched (begin of RTC step) */
QS_QEP_UNHANDLED, /*!< an event was un-handled due to a guard */
/* [10] Active Object (AO) records */
QS_QF_ACTIVE_DEFER, /*!< AO deferred an event */
QS_QF_ACTIVE_RECALL, /*!< AO recalled an event */
QS_QF_ACTIVE_SUBSCRIBE, /*!< an AO subscribed to an event */
QS_QF_ACTIVE_UNSUBSCRIBE, /*!< an AO unsubscribed to an event */
QS_QF_ACTIVE_POST, /*!< an event was posted (FIFO) directly to AO */
QS_QF_ACTIVE_POST_LIFO, /*!< an event was posted (LIFO) directly to AO */
QS_QF_ACTIVE_GET, /*!< AO got an event and its queue is not empty */
QS_QF_ACTIVE_GET_LAST,/*!< AO got an event and its queue is empty */
QS_QF_ACTIVE_RECALL_ATTEMPT, /*!< AO attempted to recall an event */
/* [19] Event Queue (EQ) records */
QS_QF_EQUEUE_POST, /*!< an event was posted (FIFO) to a raw queue */
QS_QF_EQUEUE_POST_LIFO, /*!< an event was posted (LIFO) to a raw queue */
QS_QF_EQUEUE_GET, /*!< get an event and queue still not empty */
QS_QF_EQUEUE_GET_LAST,/*!< get the last event from the queue */
/* [23] Framework (QF) records */
QS_QF_NEW_ATTEMPT, /*!< an attempt to allocate an event failed */
/* [24] Memory Pool (MP) records */
QS_QF_MPOOL_GET, /*!< a memory block was removed from memory pool */
QS_QF_MPOOL_PUT, /*!< a memory block was returned to memory pool */
/* [26] Additional Framework (QF) records */
QS_QF_PUBLISH, /*!< an event was published to active objects */
QS_QF_NEW_REF, /*!< new event reference was created */
QS_QF_NEW, /*!< new event was created */
QS_QF_GC_ATTEMPT, /*!< garbage collection attempt */
QS_QF_GC, /*!< garbage collection */
QS_QF_TICK, /*!< QTimeEvt_tick_() was called */
/* [32] Time Event (TE) records */
QS_QF_TIMEEVT_ARM, /*!< a time event was armed */
QS_QF_TIMEEVT_AUTO_DISARM, /*!< a time event expired and was disarmed */
QS_QF_TIMEEVT_DISARM_ATTEMPT,/*!< attempt to disarm a disarmed QTimeEvt */
QS_QF_TIMEEVT_DISARM, /*!< true disarming of an armed time event */
QS_QF_TIMEEVT_REARM, /*!< rearming of a time event */
QS_QF_TIMEEVT_POST, /*!< a time event posted itself directly to an AO */
/* [38] Additional Framework (QF) records */
QS_QF_DELETE_REF, /*!< an event reference is about to be deleted */
QS_QF_CRIT_ENTRY, /*!< critical section was entered */
QS_QF_CRIT_EXIT, /*!< critical section was exited */
QS_QF_ISR_ENTRY, /*!< an ISR was entered */
QS_QF_ISR_EXIT, /*!< an ISR was exited */
QS_QF_INT_DISABLE, /*!< interrupts were disabled */
QS_QF_INT_ENABLE, /*!< interrupts were enabled */
/* [45] Additional Active Object (AO) records */
QS_QF_ACTIVE_POST_ATTEMPT,/*!< attempt to post an evt to AO failed */
/* [46] Additional Event Queue (EQ) records */
QS_QF_EQUEUE_POST_ATTEMPT,/*!< attempt to post evt to QEQueue failed */
/* [47] Additional Memory Pool (MP) records */
QS_QF_MPOOL_GET_ATTEMPT, /*!< attempt to get a memory block failed */
/* [48] Scheduler (SC) records */
QS_SCHED_PREEMPT, /*!< scheduler asynchronously preempted a task */
QS_SCHED_RESTORE, /*!< scheduler restored preempted task */
QS_SCHED_LOCK, /*!< scheduler was locked */
QS_SCHED_UNLOCK, /*!< scheduler was unlocked */
QS_SCHED_NEXT, /*!< scheduler started new task */
QS_SCHED_IDLE, /*!< scheduler restored the idle task */
/* [54] Miscellaneous QS records (not maskable) */
QS_ENUM_DICT, /*!< enumeration dictionary entry */
/* [55] Additional QEP records */
QS_QEP_TRAN_HIST, /*!< a tran to history was taken */
QS_QEP_TRAN_EP, /*!< a tran to entry point into a submachine */
QS_QEP_TRAN_XP, /*!< a tran to exit point out of a submachine */
/* [58] Miscellaneous QS records (not maskable) */
QS_TEST_PAUSED, /*!< test has been paused */
QS_TEST_PROBE_GET, /*!< reports that Test-Probe has been used */
QS_SIG_DICT, /*!< signal dictionary entry */
QS_OBJ_DICT, /*!< object dictionary entry */
QS_FUN_DICT, /*!< function dictionary entry */
QS_USR_DICT, /*!< user QS record dictionary entry */
QS_TARGET_INFO, /*!< reports the Target information */
QS_TARGET_DONE, /*!< reports completion of a user callback */
QS_RX_STATUS, /*!< reports QS data receive status */
QS_QUERY_DATA, /*!< reports the data from "current object" query */
QS_PEEK_DATA, /*!< reports the data from the PEEK query */
QS_ASSERT_FAIL, /*!< assertion failed in the code */
QS_QF_RUN, /*!< QF_run() was entered */
/* [71] Semaphore (SEM) records */
QS_SEM_TAKE, /*!< a semaphore was taken by a thread */
QS_SEM_BLOCK, /*!< a semaphore blocked a thread */
QS_SEM_SIGNAL, /*!< a semaphore was signaled */
QS_SEM_BLOCK_ATTEMPT, /*!< a semaphore blocking was attempted */
/* [75] Mutex (MTX) records */
QS_MTX_LOCK, /*!< a mutex was locked */
QS_MTX_BLOCK, /*!< a mutex blocked a thread */
QS_MTX_UNLOCK, /*!< a mutex was unlocked */
QS_MTX_LOCK_ATTEMPT, /*!< a mutex lock was attempted */
QS_MTX_BLOCK_ATTEMPT, /*!< a mutex blocking was attempted */
QS_MTX_UNLOCK_ATTEMPT,/*!< a mutex unlock was attempted */
/* [81] */
QS_PRE_MAX /*!< the number of predefined records */
};
/*! QS record groups for QS_GLB_FILTER()
* @static @public @memberof QS_tx
*/
{
QS_ALL_RECORDS = 0xF0,/*!< all maskable QS records */
QS_SM_RECORDS, /*!< State Machine QS records */
QS_AO_RECORDS, /*!< Active Object QS records */
QS_EQ_RECORDS, /*!< Event Queues QS records */
QS_MP_RECORDS, /*!< Memory Pools QS records */
QS_TE_RECORDS, /*!< Time Events QS records */
QS_QF_RECORDS, /*!< QF QS records */
QS_SC_RECORDS, /*!< Scheduler QS records */
QS_SEM_RECORDS, /*!< Semaphore QS records */
QS_MTX_RECORDS, /*!< Mutex QS records */
QS_U0_RECORDS, /*!< User Group 100-104 records */
QS_U1_RECORDS, /*!< User Group 105-109 records */
QS_U2_RECORDS, /*!< User Group 110-114 records */
QS_U3_RECORDS, /*!< User Group 115-119 records */
QS_U4_RECORDS, /*!< User Group 120-124 records */
QS_UA_RECORDS /*!< All User records */
};
/*! QS user record group offsets for QS_GLB_FILTER()
* @static @public @memberof QS_tx
*/
{
QS_USER = 100, /*!< the first record available to QS users */
QS_USER0 = (enum_t)QS_USER, /*!< offset for User Group 0 */
QS_USER1 = (enum_t)QS_USER0 + 5, /*!< offset for User Group 1 */
QS_USER2 = (enum_t)QS_USER1 + 5, /*!< offset for User Group 2 */
QS_USER3 = (enum_t)QS_USER2 + 5, /*!< offset for User Group 3 */
QS_USER4 = (enum_t)QS_USER3 + 5 /*!< offset for User Group 4 */
};
/*! QS ID offsets for QS_LOC_FILTER()
* @static @public @memberof QS_tx
*/
{
QS_AO_ID = 0, /*!< offset for AO priorities */
QS_EP_ID = 64, /*!< offset for event-pool IDs */
QS_EQ_ID = 80, /*!< offset for event-queue IDs */
QS_AP_ID = 96 /*!< offset for Application-specific IDs */
};
/*! QS ID groups for QS_LOC_FILTER()
* @static @public @memberof QS_tx
*/
{
QS_ALL_IDS = 0xF0, /*!< all QS IDs */
QS_AO_IDS = (0x80 + (enum_t)QS_AO_ID), /*!< AO IDs (priorities) */
QS_EP_IDS = (0x80 + (enum_t)QS_EP_ID), /*!< event-pool IDs */
QS_EQ_IDS = (0x80 + (enum_t)QS_EQ_ID), /*!< event-queue IDs */
QS_AP_IDS = (0x80 + (enum_t)QS_AP_ID) /*!< Application-specific IDs */
};
/*! function pointer type for QS_fun_dict_pre_()
* @static @private @memberof QS_tx
*/
)(void);
/*! @brief QS ID type for applying local filtering
* @static @public @memberof QS_tx
*/
/*! @brief Software tracing, output QS-TX
*
* @details
* This class groups together QS services.
*/
/*! global on/off QS filter */
/*! local on/off QS filter */
/*! @deprecated old local QS filter */
/*! pointer to the start of the QS-TX ring buffer */
/*! offset of the end of the ring buffer */
/*! offset to where next byte will be inserted */
/*! offset of where next record will be extracted */
/*! number of bytes currently in the ring buffer */
/*! sequence number of the last inserted QS record */
/*! checksum of the currently inserted record */
/*! critical section nesting level */
/* flags for internal use */
/*! Enumerates data elements for app-specific trace records */
{
QS_I8_ENUM_T, /*!< signed 8-bit integer or enum format */
QS_U8_T, /*!< unsigned 8-bit integer format */
QS_I16_T, /*!< signed 16-bit integer format */
QS_U16_T, /*!< unsigned 16-bit integer format */
QS_I32_T, /*!< signed 32-bit integer format */
QS_U32_T, /*!< unsigned 32-bit integer format */
QS_F32_T, /*!< 32-bit floating point format */
QS_F64_T, /*!< 64-bit floating point format */
QS_STR_T, /*!< zero-terminated ASCII string format */
QS_MEM_T, /*!< up to 255-bytes memory block format */
QS_SIG_T, /*!< event signal format */
QS_OBJ_T, /*!< object pointer format */
QS_FUN_T, /*!< function pointer format */
QS_I64_T, /*!< signed 64-bit integer format */
QS_U64_T /*!< unsigned 64-bit integer format */
};
/*! the only instance of the QS-TX object (Singleton) */
/*! Initialize the QS-TX data buffer
* @static @public @memberof QS_tx
*
* @details
* This function should be called from QS_onStartup() to provide
* QS with the data buffer. The first argument `sto` is the address
* of the memory block, and the second argument `stoSize` is the size
* of this block [in bytes]. Currently the size of the QS buffer cannot
* exceed 64KB.
*
* @param[in] sto pointer to the storage for the transmit buffer
* @param[in] stoSize size in [bytes] of the storage buffer
*
* @remark
* QS can work with quite small data buffers, but you will start losing
* data if the buffer is too small for the bursts of tracing activity.
* The right size of the buffer depends on the data production rate and
* the data output rate. QS offers flexible filtering to reduce the data
* production rate.
*
* @note
* If the data output rate cannot keep up with the production rate,
* QS will start overwriting the older data with newer data. This is
* consistent with the "last-is-best" QS policy. The record sequence
* counters and check sums on each record allow the QSPY host utility
* to easily detect any data loss.
*/
/* the provided buffer must be longer than 8 bytes */
Q_REQUIRE_ID(100, stoSize > 8U);
QS_priv_.buf = &sto[0];
QS_priv_.end = (QSCtr)stoSize;
QS_priv_.head = 0U;
QS_priv_.tail = 0U;
QS_priv_.used = 0U;
QS_priv_.seq = 0U;
QS_priv_.chksum = 0U;
QS_priv_.critNest = 0U;
QS_glbFilter_(-(int_fast16_t)QS_ALL_RECORDS); /* all global filters OFF */
QS_locFilter_((int_fast16_t)QS_ALL_IDS); /* all local filters ON */
QS_priv_.locFilter_AP = (void *)0; /* deprecated "AP-filter" */
/* produce an empty record to "flush" the QS trace buffer */
QS_beginRec_((uint_fast8_t)QS_EMPTY);
QS_endRec_();
/* produce the reset record to inform QSPY of a new session */
QS_target_info_pre_(0xFFU); /* send Reset and Target info */
/* hold off flushing after successful initialization (see QS_INIT()) */
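/* Illustrative sketch (not part of the generated model): a minimal
 * QS_onStartup() callback providing the QS-TX buffer via QS_initBuf();
 * the 2KB buffer size and the port-specific output-channel setup are
 * assumptions.
 */
uint8_t QS_onStartup(void const *arg) {
    static uint8_t qsTxBuf[2U * 1024U]; /* buffer for the QS-TX channel */
    (void)arg;                          /* unused in this sketch */
    QS_initBuf(qsTxBuf, sizeof(qsTxBuf));
    /* ... configure the QS output channel (e.g., a UART) here ... */
    return 1U; /* indicate successful startup */
}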
/*! Byte-oriented interface to the QS-TX data buffer
* @static @public @memberof QS_tx
*
* @details
* This function delivers one byte at a time from the QS data buffer.
*
* @returns
* the byte in the least-significant 8-bits of the 16-bit return
* value if the byte is available. If no more data is available at the
* time, the function returns ::QS_EOD (End-Of-Data).
*
* @note
* QS_getByte() is NOT protected with a critical section.
*/
uint16_t ret;
if (QS_priv_.used == 0U) {
ret = QS_EOD; /* set End-Of-Data */
}
else {
uint8_t const * const buf = QS_priv_.buf; /* put in a temporary */
QSCtr tail = QS_priv_.tail; /* put in a temporary (register) */
ret = (uint16_t)buf[tail]; /* set the byte to return */
++tail; /* advance the tail */
if (tail == QS_priv_.end) { /* tail wrap around? */
tail = 0U;
}
QS_priv_.tail = tail; /* update the tail */
--QS_priv_.used; /* one less byte used */
}
return ret; /* return the byte or EOD */
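/* Illustrative sketch (not part of the generated model): draining the
 * QS-TX buffer one byte at a time, e.g. from the idle loop. UART_TX_READY()
 * and UART_TX_PUT() are assumed port-specific helpers; QF_INT_DISABLE()/
 * QF_INT_ENABLE() are the interrupt-control macros assumed from the QP port.
 */
void drain_qs_tx(void) {
    while (UART_TX_READY()) {            /* room in the UART TX register? */
        QF_INT_DISABLE();                /* QS_getByte() is not protected */
        uint16_t const b = QS_getByte();
        QF_INT_ENABLE();
        if (b == QS_EOD) {               /* QS-TX buffer empty? */
            break;
        }
        UART_TX_PUT((uint8_t)b);         /* send one byte to the host */
    }
}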
/*! Block-oriented interface to the QS-TX data buffer
* @static @public @memberof QS_tx
*
* @details
* This function delivers a contiguous block of data from the QS data
* buffer. The function returns the pointer to the beginning of the
* block, and writes the number of bytes in the block to the location
* pointed to by `pNbytes`. The argument `pNbytes` is also used as
* input to provide the maximum size of the data block that the caller
* can accept.
*
* @param[in,out] pNbytes pointer to the number of bytes to send.
* On input, `pNbytes` specifies the maximum number
* of bytes that the function can provide.
* On output, `pNbytes` contains the actual number
* of bytes available.
* @returns
* if data is available, the function returns pointer to the
* contiguous block of data and sets the value pointed to by `pNbytes`
* to the number of available bytes. If no data is available at the time
* the function is called, the function returns a NULL pointer and sets
* the value pointed to by `pNbytes` to zero.
*
* @note
* Only the NULL return from QS_getBlock() indicates that the QS
* buffer is empty at the time of the call. The non-NULL return often
* means that the block is at the end of the buffer and you need to call
* QS_getBlock() again to obtain the rest of the data that
* "wrapped around" to the beginning of the QS data buffer.
*
* @note QS_getBlock() is **not** protected with a critical section.
*/
QSCtr const used = QS_priv_.used; /* put in a temporary (register) */
uint8_t const *buf;
/* any bytes used in the ring buffer? */
if (used != 0U) {
QSCtr tail = QS_priv_.tail; /* put in a temporary (register) */
QSCtr const end = QS_priv_.end; /* put in a temporary (register) */
QSCtr n = (QSCtr)(end - tail);
if (n > used) {
n = used;
}
if (n > (QSCtr)(*pNbytes)) {
n = (QSCtr)(*pNbytes);
}
*pNbytes = (uint16_t)n; /* n-bytes available */
buf = &QS_priv_.buf[tail]; /* the bytes are at the tail */
QS_priv_.used = (QSCtr)(used - n);
tail += n;
if (tail == end) {
tail = 0U;
}
QS_priv_.tail = tail;
}
else { /* no bytes available */
*pNbytes = 0U; /* no bytes available right now */
buf = (uint8_t *)0; /* no bytes available right now */
}
return buf;
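/* Illustrative sketch (not part of the generated model): sending QS-TX data
 * in contiguous blocks (e.g., to a DMA-capable UART). uart_send_block() is
 * an assumed port-specific helper; QF_INT_DISABLE()/QF_INT_ENABLE() are the
 * interrupt-control macros assumed from the QP port.
 */
void drain_qs_tx_blocks(void) {
    uint16_t nBytes = 64U;              /* max block size to accept */
    uint8_t const *block;
    QF_INT_DISABLE();                   /* QS_getBlock() is not protected */
    block = QS_getBlock(&nBytes);
    QF_INT_ENABLE();
    while (block != (uint8_t *)0) {     /* NULL means the buffer is empty */
        uart_send_block(block, nBytes);
        nBytes = 64U;                   /* the data might have wrapped around */
        QF_INT_DISABLE();
        block = QS_getBlock(&nBytes);
        QF_INT_ENABLE();
    }
}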
/*! Set/clear the global Filter for a given QS record or a group
* of records
* @static @public @memberof QS_tx
*
* @details
* This function sets up the QS filter to enable record types specified
* in the `filter` parameter. The value #QS_ALL_RECORDS specifies to
* filter-in all records. This function should be called indirectly
* through the macro QS_GLB_FILTER()
*
* @param[in] filter the QS record-id or group to enable in the filter
* if positive, or disable if negative. The record-id
* numbers must be in the range -127..127.
* @note
* Filtering based on the record-type is only the first layer of
* filtering. The second layer is based on the object-type. Both filter
* layers must be enabled for the QS record to be inserted in the
* QS buffer.
*
* @sa QS_locFilter_()
*/
bool const isRemove = (filter < 0);
uint8_t const rec = isRemove ? (uint8_t)(-filter) : (uint8_t)filter;
switch (rec) {
case QS_ALL_RECORDS: {
uint8_t const tmp = (isRemove ? 0x00U : 0xFFU);
/* set all global filters (partially unrolled loop) */
for (uint_fast8_t i = 0U;
i < Q_DIM(QS_priv_.glbFilter);
i += 4U)
{
QS_priv_.glbFilter[i ] = tmp;
QS_priv_.glbFilter[i + 1U] = tmp;
QS_priv_.glbFilter[i + 2U] = tmp;
QS_priv_.glbFilter[i + 3U] = tmp;
}
if (isRemove) {
/* leave the "not maskable" filters enabled,
* see qs.h, Miscellaneous QS records (not maskable)
*/
QS_priv_.glbFilter[0] = 0x01U;
QS_priv_.glbFilter[6] = 0x40U;
QS_priv_.glbFilter[7] = 0xFCU;
QS_priv_.glbFilter[8] = 0x7FU;
}
else {
/* never turn the last 3 records on (0x7D, 0x7E, 0x7F) */
QS_priv_.glbFilter[15] = 0x1FU;
}
break;
}
case QS_SM_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[0] &= (uint8_t)(~0xFEU & 0xFFU);
QS_priv_.glbFilter[1] &= (uint8_t)(~0x03U & 0xFFU);
QS_priv_.glbFilter[6] &= (uint8_t)(~0x80U & 0xFFU);
QS_priv_.glbFilter[7] &= (uint8_t)(~0x03U & 0xFFU);
}
else {
QS_priv_.glbFilter[0] |= 0xFEU;
QS_priv_.glbFilter[1] |= 0x03U;
QS_priv_.glbFilter[6] |= 0x80U;
QS_priv_.glbFilter[7] |= 0x03U;
}
break;
case QS_AO_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[1] &= (uint8_t)(~0xFCU & 0xFFU);
QS_priv_.glbFilter[2] &= (uint8_t)(~0x07U & 0xFFU);
QS_priv_.glbFilter[5] &= (uint8_t)(~0x20U & 0xFFU);
}
else {
QS_priv_.glbFilter[1] |= 0xFCU;
QS_priv_.glbFilter[2] |= 0x07U;
QS_priv_.glbFilter[5] |= 0x20U;
}
break;
case QS_EQ_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[2] &= (uint8_t)(~0x78U & 0xFFU);
QS_priv_.glbFilter[5] &= (uint8_t)(~0x40U & 0xFFU);
}
else {
QS_priv_.glbFilter[2] |= 0x78U;
QS_priv_.glbFilter[5] |= 0x40U;
}
break;
case QS_MP_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[3] &= (uint8_t)(~0x03U & 0xFFU);
QS_priv_.glbFilter[5] &= (uint8_t)(~0x80U & 0xFFU);
}
else {
QS_priv_.glbFilter[3] |= 0x03U;
QS_priv_.glbFilter[5] |= 0x80U;
}
break;
case QS_QF_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[2] &= (uint8_t)(~0x80U & 0xFFU);
QS_priv_.glbFilter[3] &= (uint8_t)(~0xFCU & 0xFFU);
QS_priv_.glbFilter[4] &= (uint8_t)(~0xC0U & 0xFFU);
QS_priv_.glbFilter[5] &= (uint8_t)(~0x1FU & 0xFFU);
}
else {
QS_priv_.glbFilter[2] |= 0x80U;
QS_priv_.glbFilter[3] |= 0xFCU;
QS_priv_.glbFilter[4] |= 0xC0U;
QS_priv_.glbFilter[5] |= 0x1FU;
}
break;
case QS_TE_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[4] &= (uint8_t)(~0x3FU & 0xFFU);
}
else {
QS_priv_.glbFilter[4] |= 0x3FU;
}
break;
case QS_SC_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[6] &= (uint8_t)(~0x3FU & 0xFFU);
}
else {
QS_priv_.glbFilter[6] |= 0x3FU;
}
break;
case QS_SEM_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[8] &= (uint8_t)(~0x80U & 0xFFU);
QS_priv_.glbFilter[9] &= (uint8_t)(~0x07U & 0xFFU);
}
else {
QS_priv_.glbFilter[8] |= 0x80U;
QS_priv_.glbFilter[9] |= 0x07U;
}
break;
case QS_MTX_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[9] &= (uint8_t)(~0xF8U & 0xFFU);
QS_priv_.glbFilter[10] &= (uint8_t)(~0x01U & 0xFFU);
}
else {
QS_priv_.glbFilter[9] |= 0xF8U;
QS_priv_.glbFilter[10] |= 0x01U;
}
break;
case QS_U0_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[12] &= (uint8_t)(~0xF0U & 0xFFU);
QS_priv_.glbFilter[13] &= (uint8_t)(~0x01U & 0xFFU);
}
else {
QS_priv_.glbFilter[12] |= 0xF0U;
QS_priv_.glbFilter[13] |= 0x01U;
}
break;
case QS_U1_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[13] &= (uint8_t)(~0x3EU & 0xFFU);
}
else {
QS_priv_.glbFilter[13] |= 0x3EU;
}
break;
case QS_U2_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[13] &= (uint8_t)(~0xC0U & 0xFFU);
QS_priv_.glbFilter[14] &= (uint8_t)(~0x07U & 0xFFU);
}
else {
QS_priv_.glbFilter[13] |= 0xC0U;
QS_priv_.glbFilter[14] |= 0x07U;
}
break;
case QS_U3_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[14] &= (uint8_t)(~0xF8U & 0xFFU);
}
else {
QS_priv_.glbFilter[14] |= 0xF8U;
}
break;
case QS_U4_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[15] &= (uint8_t)(~0x1FU & 0xFFU);
}
else {
QS_priv_.glbFilter[15] |= 0x1FU;
}
break;
case QS_UA_RECORDS:
if (isRemove) {
QS_priv_.glbFilter[12] &= (uint8_t)(~0xF0U & 0xFFU);
QS_priv_.glbFilter[13] = 0U;
QS_priv_.glbFilter[14] = 0U;
QS_priv_.glbFilter[15] &= (uint8_t)(~0x1FU & 0xFFU);
}
else {
QS_priv_.glbFilter[12] |= 0xF0U;
QS_priv_.glbFilter[13] |= 0xFFU;
QS_priv_.glbFilter[14] |= 0xFFU;
QS_priv_.glbFilter[15] |= 0x1FU;
}
break;
default:
/* QS rec number must be below 0x7D, so no need for escaping */
Q_ASSERT_ID(210, rec < 0x7DU);
if (isRemove) {
QS_priv_.glbFilter[rec >> 3U]
&= (uint8_t)(~(1U << (rec & 7U)) & 0xFFU);
}
else {
QS_priv_.glbFilter[rec >> 3U]
|= (1U << (rec & 7U));
/* never turn the last 3 records on (0x7D, 0x7E, 0x7F) */
QS_priv_.glbFilter[15] &= 0x1FU;
}
break;
}
/*! Set/clear the local Filter for a given object-id
* or a group of object-ids
* @static @public @memberof QS_tx
*
* @details
* This function sets up the local QS filter to enable or disable the
* given QS object-id or a group of object-ids @a filter.
* This function should be called indirectly through the macro
* QS_LOC_FILTER()
*
* @param[in] filter the QS object-id or group to enable in the filter
* if positive, or disable if negative. The qs_id numbers
* must be in the range 1..127.
* @note
* Filtering based on the object-id (local filter) is the second layer
* of filtering. The first layer is based on the QS record-type (global
* filter). Both filter layers must be enabled for the QS record to be
* inserted into the QS buffer.
*
* @sa QS_glbFilter_()
*/
bool const isRemove = (filter < 0);
uint8_t const qs_id = isRemove ? (uint8_t)(-filter) : (uint8_t)filter;
uint8_t const tmp = (isRemove ? 0x00U : 0xFFU);
uint_fast8_t i;
switch (qs_id) {
case QS_ALL_IDS:
/* set all local filters (partially unrolled loop) */
for (i = 0U; i < Q_DIM(QS_priv_.locFilter); i += 4U) {
QS_priv_.locFilter[i ] = tmp;
QS_priv_.locFilter[i + 1U] = tmp;
QS_priv_.locFilter[i + 2U] = tmp;
QS_priv_.locFilter[i + 3U] = tmp;
}
break;
case QS_AO_IDS:
for (i = 0U; i < 8U; i += 4U) {
QS_priv_.locFilter[i ] = tmp;
QS_priv_.locFilter[i + 1U] = tmp;
QS_priv_.locFilter[i + 2U] = tmp;
QS_priv_.locFilter[i + 3U] = tmp;
}
break;
case QS_EP_IDS:
i = 8U;
QS_priv_.locFilter[i ] = tmp;
QS_priv_.locFilter[i + 1U] = tmp;
break;
case QS_AP_IDS:
i = 12U;
QS_priv_.locFilter[i ] = tmp;
QS_priv_.locFilter[i + 1U] = tmp;
QS_priv_.locFilter[i + 2U] = tmp;
QS_priv_.locFilter[i + 3U] = tmp;
break;
default:
if (qs_id < 0x7FU) {
if (isRemove) {
QS_priv_.locFilter[qs_id >> 3U]
&= (uint8_t)(~(1U << (qs_id & 7U)) & 0xFFU);
}
else {
QS_priv_.locFilter[qs_id >> 3U]
|= (1U << (qs_id & 7U));
}
}
else {
Q_ERROR_ID(310); /* incorrect qs_id */
}
break;
}
QS_priv_.locFilter[0] |= 0x01U; /* leave QS_ID == 0 always on */
/*! Perform the QS-TX output (implemented in some QS ports)
* @static @public @memberof QS_tx
*/
/*! Mark the begin of a QS record `rec`
* @static @private @memberof QS_tx
*
* @details
* This function must be called at the beginning of each QS record.
* This function should be called indirectly through the macro QS_BEGIN_ID(),
* or QS_BEGIN_NOCRIT(), depending on whether it is called from normal
* code or from within a critical section.
*/
uint8_t const b = (uint8_t)(QS_priv_.seq + 1U);
uint8_t chksum = 0U; /* reset the checksum */
uint8_t * const buf = QS_priv_.buf; /* put in a temporary (register) */
QSCtr head = QS_priv_.head; /* put in a temporary (register) */
QSCtr const end = QS_priv_.end; /* put in a temporary (register) */
QS_priv_.seq = b; /* store the incremented sequence num */
QS_priv_.used += 2U; /* 2 bytes about to be added */
QS_INSERT_ESC_BYTE_(b)
chksum = (uint8_t)(chksum + rec); /* update checksum */
QS_INSERT_BYTE_((uint8_t)rec) /* rec byte does not need escaping */
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
/*! Mark the end of a QS record `rec`
* @static @private @memberof QS_tx
*
* @details
* This function must be called at the end of each QS record.
* This function should be called indirectly through the macro QS_END(),
* or QS_END_NOCRIT(), depending on whether it is called from normal
* code or from within a critical section.
*/
uint8_t * const buf = QS_priv_.buf; /* put in a temporary (register) */
QSCtr head = QS_priv_.head;
QSCtr const end = QS_priv_.end;
uint8_t b = QS_priv_.chksum;
b ^= 0xFFU; /* invert the bits in the checksum */
QS_priv_.used += 2U; /* 2 bytes about to be added */
if ((b != QS_FRAME) && (b != QS_ESC)) {
QS_INSERT_BYTE_(b)
}
else {
QS_INSERT_BYTE_(QS_ESC)
QS_INSERT_BYTE_(b ^ QS_ESC_XOR)
++QS_priv_.used; /* account for the ESC byte */
}
QS_INSERT_BYTE_(QS_FRAME) /* do not escape this QS_FRAME */
QS_priv_.head = head; /* save the head */
/* overrun over the old data? */
if (QS_priv_.used > end) {
QS_priv_.used = end; /* the whole buffer is used */
QS_priv_.tail = head; /* shift the tail to the old data */
}
/*! output uint8_t data element without format information
* @static @private @memberof QS_tx
*/
uint8_t chksum = QS_priv_.chksum; /* put in a temporary (register) */
uint8_t * const buf = QS_priv_.buf; /* put in a temporary (register) */
QSCtr head = QS_priv_.head; /* put in a temporary (register) */
QSCtr const end = QS_priv_.end; /* put in a temporary (register) */
QS_priv_.used += 1U; /* 1 byte about to be added */
QS_INSERT_ESC_BYTE_(d)
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
/*! output two uint8_t data elements without format information
* @static @private @memberof QS_tx
*/
uint8_t chksum = QS_priv_.chksum; /* put in a temporary (register) */
uint8_t * const buf = QS_priv_.buf; /* put in a temporary (register) */
QSCtr head = QS_priv_.head; /* put in a temporary (register) */
QSCtr const end = QS_priv_.end; /* put in a temporary (register) */
QS_priv_.used += 2U; /* 2 bytes are about to be added */
QS_INSERT_ESC_BYTE_(d1)
QS_INSERT_ESC_BYTE_(d2)
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
/*! output uint16_t data element without format information
* @static @private @memberof QS_tx
*/
uint8_t chksum = QS_priv_.chksum; /* put in a temporary (register) */
uint8_t * const buf = QS_priv_.buf; /* put in a temporary (register) */
QSCtr head = QS_priv_.head; /* put in a temporary (register) */
QSCtr const end = QS_priv_.end; /* put in a temporary (register) */
uint16_t x = d;
QS_priv_.used += 2U; /* 2 bytes are about to be added */
QS_INSERT_ESC_BYTE_((uint8_t)x)
x >>= 8U;
QS_INSERT_ESC_BYTE_((uint8_t)x)
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
/*! output uint32_t data element without format information
* @static @private @memberof QS_tx
*/
uint8_t chksum = QS_priv_.chksum; /* put in a temporary (register) */
uint8_t * const buf = QS_priv_.buf; /* put in a temporary (register) */
QSCtr head = QS_priv_.head; /* put in a temporary (register) */
QSCtr const end = QS_priv_.end; /* put in a temporary (register) */
uint32_t x = d;
QS_priv_.used += 4U; /* 4 bytes are about to be added */
for (uint_fast8_t i = 4U; i != 0U; --i) {
QS_INSERT_ESC_BYTE_((uint8_t)x)
x >>= 8U;
}
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
/*! Output obj pointer data element without format information
* @static @private @memberof QS_tx
*
* @note This function is only to be used through macros, never in the
* client code directly.
*/
#if (QS_OBJ_PTR_SIZE == 1U)
QS_u8_raw_((uint8_t)obj);
#elif (QS_OBJ_PTR_SIZE == 2U)
QS_u16_raw_((uint16_t)obj);
#elif (QS_OBJ_PTR_SIZE == 4U)
QS_u32_raw_((uint32_t)obj);
#elif (QS_OBJ_PTR_SIZE == 8U)
QS_u64_raw_((uint64_t)obj);
#else
QS_u32_raw_((uint32_t)obj);
#endif
/*! Output raw zero-terminated string element (without format information)
* @static @private @memberof QS_tx
*
* @note This function is only to be used through macros, never in the
* client code directly.
*/
uint8_t chksum = QS_priv_.chksum; /* put in a temporary (register) */
uint8_t * const buf = QS_priv_.buf; /* put in a temporary (register) */
QSCtr head = QS_priv_.head; /* put in a temporary (register) */
QSCtr const end = QS_priv_.end; /* put in a temporary (register) */
QSCtr used = QS_priv_.used; /* put in a temporary (register) */
for (char const *s = str; *s != '\0'; ++s) {
chksum += (uint8_t)*s; /* update checksum */
QS_INSERT_BYTE_((uint8_t)*s) /* ASCII char doesn't need escaping */
++used;
}
QS_INSERT_BYTE_((uint8_t)'\0') /* zero-terminate the string */
++used;
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
QS_priv_.used = used; /* save # of used buffer space */
/*! Output uint8_t data element with format information
* @static @private @memberof QS_tx
*
* @note This function is only to be used through macros, never in the
* client code directly.
*/
uint8_t chksum = QS_priv_.chksum; /* put in a temporary (register) */
uint8_t * const buf = QS_priv_.buf; /* put in a temporary (register) */
QSCtr head = QS_priv_.head; /* put in a temporary (register) */
QSCtr const end = QS_priv_.end; /* put in a temporary (register) */
QS_priv_.used += 2U; /* 2 bytes about to be added */
QS_INSERT_ESC_BYTE_(format)
QS_INSERT_ESC_BYTE_(d)
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
/*! output uint16_t data element with format information
* @static @private @memberof QS_tx
*
* @details
* This function is only to be used through macros, never in the
* client code directly.
*/
uint8_t chksum = QS_priv_.chksum; /* put in a temporary (register) */
uint8_t * const buf = QS_priv_.buf; /* put in a temporary (register) */
QSCtr head = QS_priv_.head; /* put in a temporary (register) */
QSCtr const end = QS_priv_.end; /* put in a temporary (register) */
uint8_t b = (uint8_t)d;
QS_priv_.used += 3U; /* 3 bytes about to be added */
QS_INSERT_ESC_BYTE_(format)
QS_INSERT_ESC_BYTE_(b)
b = (uint8_t)(d >> 8U);
QS_INSERT_ESC_BYTE_(b)
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
/*! Output uint32_t data element with format information
* @static @private @memberof QS_tx
*
* @note This function is only to be used through macros, never in the
* client code directly.
*/
uint8_t chksum = QS_priv_.chksum; /* put in a temporary (register) */
uint8_t * const buf = QS_priv_.buf; /* put in a temporary (register) */
QSCtr head = QS_priv_.head; /* put in a temporary (register) */
QSCtr const end = QS_priv_.end; /* put in a temporary (register) */
uint32_t x = d;
QS_priv_.used += 5U; /* 5 bytes about to be added */
QS_INSERT_ESC_BYTE_(format) /* insert the format byte */
/* insert 4 bytes... */
for (uint_fast8_t i = 4U; i != 0U; --i) {
QS_INSERT_ESC_BYTE_((uint8_t)x)
x >>= 8U;
}
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
/*! Output formatted zero-terminated ASCII string to the QS record
* @static @private @memberof QS_tx
*/
uint8_t chksum = QS_priv_.chksum;
uint8_t * const buf = QS_priv_.buf; /* put in a temporary (register) */
QSCtr head = QS_priv_.head; /* put in a temporary (register) */
QSCtr const end = QS_priv_.end; /* put in a temporary (register) */
QSCtr used = QS_priv_.used; /* put in a temporary (register) */
used += 2U; /* account for the format byte and the terminating-0 */
QS_INSERT_BYTE_((uint8_t)QS_STR_T)
chksum += (uint8_t)QS_STR_T;
for (char const *s = str; *s != '\0'; ++s) {
QS_INSERT_BYTE_((uint8_t)*s) /* ASCII char doesn't need escaping */
chksum += (uint8_t)*s; /* update checksum */
++used;
}
QS_INSERT_BYTE_(0U) /* zero-terminate the string */
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
QS_priv_.used = used; /* save # of used buffer space */
/*! Output formatted memory block of up to 255 bytes to the QS record
* @static @private @memberof QS_tx
*/
uint8_t chksum = QS_priv_.chksum;
uint8_t * const buf = QS_priv_.buf; /* put in a temporary (register) */
QSCtr head = QS_priv_.head; /* put in a temporary (register) */
QSCtr const end = QS_priv_.end; /* put in a temporary (register) */
uint8_t const *pb = blk;
QS_priv_.used += ((QSCtr)size + 2U); /* size+2 bytes to be added */
QS_INSERT_BYTE_((uint8_t)QS_MEM_T)
chksum += (uint8_t)QS_MEM_T;
QS_INSERT_ESC_BYTE_(size)
/* output the 'size' number of bytes */
for (uint8_t len = size; len > 0U; --len) {
QS_INSERT_ESC_BYTE_(*pb)
++pb;
}
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
/*! Output predefined signal-dictionary record
* @static @private @memberof QS_tx
*
* @note This function is only to be used through macro QS_SIG_DICTIONARY()
*/
QS_CRIT_STAT_
QS_CRIT_E_();
QS_beginRec_((uint_fast8_t)QS_SIG_DICT);
QS_SIG_PRE_(sig);
QS_OBJ_PRE_(obj);
QS_str_raw_((*name == '&') ? &name[1] : name);
QS_endRec_();
QS_CRIT_X_();
QS_onFlush();
/*! Output predefined object-dictionary record
* @static @private @memberof QS_tx
*
* @note This function is only to be used through macro QS_OBJ_DICTIONARY()
*/
QS_CRIT_STAT_
QS_CRIT_E_();
QS_beginRec_((uint_fast8_t)QS_OBJ_DICT);
QS_OBJ_PRE_(obj);
QS_str_raw_((*name == '&') ? &name[1] : name);
QS_endRec_();
QS_CRIT_X_();
QS_onFlush();
/*! Output predefined object-array dictionary record
* @static @private @memberof QS_tx
*
* @note This function is only to be used through macro QS_OBJ_ARR_DICTIONARY()
*/
Q_REQUIRE_ID(400, idx < 1000U);
/* format idx into a char buffer as "xxx\0" */
uint8_t idx_str[4];
uint_fast16_t tmp = idx;
uint8_t i;
idx_str[3] = 0U; /* zero-terminate */
idx_str[2] = (uint8_t)((uint8_t)'0' + (tmp % 10U));
tmp /= 10U;
idx_str[1] = (uint8_t)((uint8_t)'0' + (tmp % 10U));
if (idx_str[1] == (uint8_t)'0') {
i = 2U;
}
else {
tmp /= 10U;
idx_str[0] = (uint8_t)((uint8_t)'0' + (tmp % 10U));
if (idx_str[0] == (uint8_t)'0') {
i = 1U;
}
else {
i = 0U;
}
}
QS_CRIT_STAT_
uint8_t j = ((*name == '&') ? 1U : 0U);
QS_CRIT_E_();
QS_beginRec_((uint_fast8_t)QS_OBJ_DICT);
QS_OBJ_PRE_(obj);
for (; name[j] != '\0'; ++j) {
QS_U8_PRE_(name[j]);
if (name[j] == '[') {
++j;
break;
}
}
for (; idx_str[i] != 0U; ++i) {
QS_U8_PRE_(idx_str[i]);
}
/* skip chars until ']' */
for (; name[j] != '\0'; ++j) {
if (name[j] == ']') {
break;
}
}
for (; name[j] != '\0'; ++j) {
QS_U8_PRE_(name[j]);
}
QS_U8_PRE_(0U); /* zero-terminate */
QS_endRec_();
QS_CRIT_X_();
QS_onFlush();
/*! Output predefined function-dictionary record
* @static @private @memberof QS_tx
*
* @note This function is only to be used through macro QS_FUN_DICTIONARY()
*/
QS_CRIT_STAT_
QS_CRIT_E_();
QS_beginRec_((uint_fast8_t)QS_FUN_DICT);
QS_FUN_PRE_(fun);
QS_str_raw_((*name == '&') ? &name[1] : name);
QS_endRec_();
QS_CRIT_X_();
QS_onFlush();
/*! Output predefined user-dictionary record
* @static @private @memberof QS_tx
*
* @note This function is only to be used through macro QS_USR_DICTIONARY()
*/
QS_CRIT_STAT_
QS_CRIT_E_();
QS_beginRec_((uint_fast8_t)QS_USR_DICT);
QS_u8_raw_((uint8_t)rec);
QS_str_raw_(name);
QS_endRec_();
QS_CRIT_X_();
QS_onFlush();
/*! Output predefined enum-dictionary record
* @static @private @memberof QS_tx
*
* @note This function is only to be used through macro QS_ENUM_DICTIONARY()
*/
QS_CRIT_STAT_
QS_CRIT_E_();
QS_beginRec_((uint_fast8_t)QS_ENUM_DICT);
QS_2U8_PRE_((uint8_t)value, group);
QS_str_raw_(name);
QS_endRec_();
QS_CRIT_X_();
QS_onFlush();
/*! Output the predefined assertion failure trace record
* @static @public @memberof QS_tx
*
* @details
* This trace record is intended to use from the Q_onAssert() callback.
*/
QS_BEGIN_NOCRIT_PRE_(QS_ASSERT_FAIL, 0U)
QS_TIME_PRE_();
QS_U16_PRE_(loc);
QS_STR_PRE_((module != (char *)0) ? module : "?");
QS_END_NOCRIT_PRE_()
QS_onFlush();
for (uint32_t volatile delay_ctr = delay; delay_ctr > 0U; --delay_ctr) {
}
QS_onCleanup();
/*! Helper function to output the predefined Target-info trace record
* @static @private @memberof QS_tx
*/
static uint8_t const ZERO = (uint8_t)'0';
static uint8_t const * const TIME = (uint8_t const *)&Q_BUILD_TIME[0];
static uint8_t const * const DATE = (uint8_t const *)&Q_BUILD_DATE[0];
static union {
uint16_t u16;
uint8_t u8[2];
} endian_test;
endian_test.u16 = 0x0102U;
QS_beginRec_((uint_fast8_t)QS_TARGET_INFO);
QS_U8_PRE_(isReset);
QS_U16_PRE_(((endian_test.u8[0] == 0x01U) /* big endian? */
? (0x8000U | QP_VERSION)
: QP_VERSION)); /* target endianness + version number */
/* send the object sizes... */
QS_U8_PRE_(Q_SIGNAL_SIZE | (QF_EVENT_SIZ_SIZE << 4U));
#ifdef QF_EQUEUE_CTR_SIZE
QS_U8_PRE_(QF_EQUEUE_CTR_SIZE | (QF_TIMEEVT_CTR_SIZE << 4U));
#else
QS_U8_PRE_(QF_TIMEEVT_CTR_SIZE << 4U);
#endif /* QF_EQUEUE_CTR_SIZE */
#ifdef QF_MPOOL_CTR_SIZE
QS_U8_PRE_(QF_MPOOL_SIZ_SIZE | (QF_MPOOL_CTR_SIZE << 4U));
#else
QS_U8_PRE_(0U);
#endif /* QF_MPOOL_CTR_SIZE */
QS_U8_PRE_(QS_OBJ_PTR_SIZE | (QS_FUN_PTR_SIZE << 4U));
QS_U8_PRE_(QS_TIME_SIZE);
/* send the limits... */
QS_U8_PRE_(QF_MAX_ACTIVE);
QS_U8_PRE_(QF_MAX_EPOOL | (QF_MAX_TICK_RATE << 4U));
/* send the build time in three bytes (sec, min, hour)... */
QS_U8_PRE_((10U * (uint8_t)(TIME[6] - ZERO))
+ (uint8_t)(TIME[7] - ZERO));
QS_U8_PRE_((10U * (uint8_t)(TIME[3] - ZERO))
+ (uint8_t)(TIME[4] - ZERO));
if (Q_BUILD_TIME[0] == ' ') {
QS_U8_PRE_(TIME[1] - ZERO);
}
else {
QS_U8_PRE_((10U * (uint8_t)(TIME[0] - ZERO))
+ (uint8_t)(TIME[1] - ZERO));
}
/* send the build date in three bytes (day, month, year) ... */
if (Q_BUILD_DATE[4] == ' ') {
QS_U8_PRE_(DATE[5] - ZERO);
}
else {
QS_U8_PRE_((10U * (uint8_t)(DATE[4] - ZERO))
+ (uint8_t)(DATE[5] - ZERO));
}
/* convert the 3-letter month to a number 1-12 ... */
uint8_t b;
switch ((int_t)DATE[0] + (int_t)DATE[1] + (int_t)DATE[2]) {
case (int_t)'J' + (int_t)'a' + (int_t)'n':
b = 1U;
break;
case (int_t)'F' + (int_t)'e' + (int_t)'b':
b = 2U;
break;
case (int_t)'M' + (int_t)'a' + (int_t)'r':
b = 3U;
break;
case (int_t)'A' + (int_t)'p' + (int_t)'r':
b = 4U;
break;
case (int_t)'M' + (int_t)'a' + (int_t)'y':
b = 5U;
break;
case (int_t)'J' + (int_t)'u' + (int_t)'n':
b = 6U;
break;
case (int_t)'J' + (int_t)'u' + (int_t)'l':
b = 7U;
break;
case (int_t)'A' + (int_t)'u' + (int_t)'g':
b = 8U;
break;
case (int_t)'S' + (int_t)'e' + (int_t)'p':
b = 9U;
break;
case (int_t)'O' + (int_t)'c' + (int_t)'t':
b = 10U;
break;
case (int_t)'N' + (int_t)'o' + (int_t)'v':
b = 11U;
break;
case (int_t)'D' + (int_t)'e' + (int_t)'c':
b = 12U;
break;
default:
b = 0U;
break;
}
QS_U8_PRE_(b); /* store the month */
QS_U8_PRE_((10U * (uint8_t)(DATE[9] - ZERO))
+ (uint8_t)(DATE[10] - ZERO));
QS_endRec_();
/*! Callback to startup the QS facility
* @static @public @memberof QS_tx
*/
/*! Callback to cleanup the QS facility
* @static @public @memberof QS_tx
*/
/*! Callback to flush the QS trace data to the host
* @static @public @memberof QS_tx
*/
/*! Callback to obtain a timestamp for a QS record
* @static @public @memberof QS_tx
*/
/*! Output uint64_t data element without format information
* @static @private @memberof QS_tx
*/
uint8_t chksum = QS_priv_.chksum;
uint8_t * const buf = QS_priv_.buf;
QSCtr head = QS_priv_.head;
QSCtr const end = QS_priv_.end;
QS_priv_.used += 8U; /* 8 bytes are about to be added */
uint_fast8_t i;
for (i = 8U; i != 0U; --i) {
uint8_t const b = (uint8_t)d;
QS_INSERT_ESC_BYTE_(b)
d >>= 8U;
}
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
/*! Output uint64_t data element with format information
* @static @private @memberof QS_tx
*
* @sa QS_U64(), QS_I64()
*/
uint8_t chksum = QS_priv_.chksum;
uint8_t * const buf = QS_priv_.buf;
QSCtr head = QS_priv_.head;
QSCtr const end = QS_priv_.end;
QS_priv_.used += 9U; /* 9 bytes are about to be added */
QS_INSERT_ESC_BYTE_(format) /* insert the format byte */
/* output 8 bytes of data... */
uint_fast8_t i;
for (i = 8U; i != 0U; --i) {
format = (uint8_t)d;
QS_INSERT_ESC_BYTE_(format)
d >>= 8U;
}
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
/*! Output 32-bit floating point data element with format information
* @static @private @memberof QS_tx
*
* @sa QS_F32()
*/
union F32Rep {
float32_t f;
uint32_t u;
} fu32; /* the internal binary representation */
uint8_t chksum = QS_priv_.chksum; /* put in a temporary (register) */
uint8_t * const buf = QS_priv_.buf;
QSCtr head = QS_priv_.head;
QSCtr const end = QS_priv_.end;
uint_fast8_t i;
fu32.f = d; /* assign the binary representation */
QS_priv_.used += 5U; /* 5 bytes about to be added */
QS_INSERT_ESC_BYTE_(format) /* insert the format byte */
/* insert 4 bytes... */
for (i = 4U; i != 0U; --i) {
QS_INSERT_ESC_BYTE_((uint8_t)fu32.u)
fu32.u >>= 8U;
}
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
/*! Output 64-bit floating point data element with format information
* @static @private @memberof QS_tx
*
* @sa QS_F64()
*/
union F64Rep {
float64_t d;
uint32_t u[2];
} fu64; /* the internal binary representation */
uint8_t chksum = QS_priv_.chksum;
uint8_t * const buf = QS_priv_.buf;
QSCtr head = QS_priv_.head;
QSCtr const end = QS_priv_.end;
uint32_t i;
/* static constant union to detect endianness of the machine */
static union U32Rep {
uint32_t u32;
uint8_t u8;
} const endian = { 1U };
fu64.d = d; /* assign the binary representation */
/* is this a big-endian machine? */
if (endian.u8 == 0U) {
/* swap fu64.u[0] <-> fu64.u[1]... */
i = fu64.u[0];
fu64.u[0] = fu64.u[1];
fu64.u[1] = i;
}
QS_priv_.used += 9U; /* 9 bytes about to be added */
QS_INSERT_ESC_BYTE_(format) /* insert the format byte */
/* output 4 bytes from fu64.u[0]... */
for (i = 4U; i != 0U; --i) {
QS_INSERT_ESC_BYTE_((uint8_t)fu64.u[0])
fu64.u[0] >>= 8U;
}
/* output 4 bytes from fu64.u[1]... */
for (i = 4U; i != 0U; --i) {
QS_INSERT_ESC_BYTE_((uint8_t)fu64.u[1])
fu64.u[1] >>= 8U;
}
QS_priv_.head = head; /* save the head */
QS_priv_.chksum = chksum; /* save the checksum */
/*! @brief QS software tracing parameters for QS input (QS-RX) */
/*! current objects */
/*! pointer to the start of the ring buffer */
/*! offset of the end of the ring buffer */
/*! offset to where next byte will be inserted */
/*! offset of where next byte will be extracted */
/*! QUTest event loop is running */
/*! the only instance of the QS-RX object (Singleton)
* @static @private @memberof QS_rx
*/
/*! Kinds of objects used in QS_setCurrObj() and QS_queryCurrObj()
* @static @public @memberof QS_rx
*/
{
SM_OBJ, /*!< state machine object */
AO_OBJ, /*!< active object */
MP_OBJ, /*!< event pool object */
EQ_OBJ, /*!< raw queue object */
TE_OBJ, /*!< time event object */
AP_OBJ, /*!< generic Application-specific object */
MAX_OBJ
};
/*! Object combinations for QS_setCurrObj() and QS_queryCurrObj()
* @static @public @memberof QS_rx
*/
{
SM_AO_OBJ = (enum_t)MAX_OBJ /*!< combination of SM and AO */
};
/*! Initialize the QS-RX data buffer
* @static @public @memberof QS_rx
*
* @details
* This function should be called from QS::onStartup() to provide QS-RX
* with the receive data buffer.
*
* @param[in] sto pointer to the memory block for input buffer
* @param[in] stoSize the size of this block [bytes]. The size of the
* QS-RX buffer cannot exceed 64KB.
*
* @note
* QS-RX can work with quite small data buffers, but you will start
* losing data if the buffer is not drained fast enough (e.g., in the
* idle task).
*
* @note
* If the data input rate exceeds the QS-RX processing rate, the data
* will be lost, but the QS protocol will notice that:
* (1) the checksum in the incomplete QS records will fail; and
* (2) the sequence counter in QS records will show discontinuities.
*
* The QS-RX channel will report any data errors by sending the
* QS_RX_DATA_ERROR trace record.
*/
QS_rxPriv_.buf = &sto[0];
QS_rxPriv_.end = (QSCtr)stoSize;
QS_rxPriv_.head = 0U;
QS_rxPriv_.tail = 0U;
QS_rxPriv_.currObj[SM_OBJ] = (void *)0;
QS_rxPriv_.currObj[AO_OBJ] = (void *)0;
QS_rxPriv_.currObj[MP_OBJ] = (void *)0;
QS_rxPriv_.currObj[EQ_OBJ] = (void *)0;
QS_rxPriv_.currObj[TE_OBJ] = (void *)0;
QS_rxPriv_.currObj[AP_OBJ] = (void *)0;
QS_RX_TRAN_(WAIT4_SEQ);
l_rx.esc = 0U;
l_rx.seq = 0U;
l_rx.chksum = 0U;
QS_beginRec_((uint_fast8_t)QS_OBJ_DICT);
QS_OBJ_PRE_(&QS_rxPriv_);
QS_STR_PRE_("QS_RX");
QS_endRec_();
/* no QS_REC_DONE(), because QS is not running yet */
#ifdef Q_UTEST
QS_testData.tpNum = 0U;
QS_testData.testTime = 0U;
#endif /* Q_UTEST */
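/* Illustrative sketch (not part of the generated model): the QS-RX buffer
 * is typically provided in the same QS_onStartup() callback that provides
 * the QS-TX buffer; the 256-byte size is an example value only.
 */
/* inside QS_onStartup() ... */
static uint8_t qsRxBuf[256U]; /* buffer for the QS-RX channel */
QS_rxInitBuf(qsRxBuf, sizeof(qsRxBuf));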
/*! Put one byte into the QS-RX lock-free buffer
* @static @public @memberof QS_rx
*/
QSCtr head = QS_rxPriv_.head + 1U;
if (head == QS_rxPriv_.end) {
head = 0U;
}
if (head != QS_rxPriv_.tail) { /* buffer NOT full? */
QS_rxPriv_.buf[QS_rxPriv_.head] = b;
QS_rxPriv_.head = head; /* update the head to a *valid* index */
return true; /* byte placed in the buffer */
}
else {
return false; /* byte NOT placed in the buffer */
}
/*! Obtain the number of free bytes in the QS-RX data buffer
* @static @public @memberof QS_rx
*
* @details
* This function is intended to be called from the ISR that reads the
* QS-RX bytes from the QSPY application. The function returns the
* conservative number of free bytes currently available in the buffer,
* assuming that the head pointer is not being moved concurrently.
* The tail pointer might be moving, meaning that bytes can be
* concurrently removed from the buffer.
*/
QSCtr const head = QS_rxPriv_.head;
if (head == QS_rxPriv_.tail) { /* buffer empty? */
return (uint16_t)(QS_rxPriv_.end - 1U);
}
else if (head < QS_rxPriv_.tail) {
return (uint16_t)(QS_rxPriv_.tail - (head + 1U));
}
else {
return (uint16_t)(QS_rxPriv_.end + QS_rxPriv_.tail - (head + 1U));
}
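/* Illustrative sketch (not part of the generated model): an assumed UART
 * receive ISR feeding the QS-RX channel byte by byte; UART_RX_AVAILABLE()
 * and UART_RX_GET() are assumed port-specific helpers. The buffered bytes
 * are then processed by calling QS_rxParse() periodically, e.g. from the
 * idle loop; QS_rxGetNfree() can additionally be used for flow control.
 */
void UART_rx_ISR(void) {
    while (UART_RX_AVAILABLE()) {        /* any received bytes pending? */
        uint8_t const b = UART_RX_GET(); /* read one byte from the UART */
        if (!QS_rxPut(b)) {              /* QS-RX buffer full? */
            /* the byte is dropped; QS-RX reports the resulting error */
            break;
        }
    }
}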
/*! Perform the QS-RX input (implemented in some QS ports)
* @static @public @memberof QS_rx
*/
/*! Set the "current object" in the Target
* @static @public @memberof QS_rx
*
* @details
* This function sets the "current object" in the Target.
*/
Q_REQUIRE_ID(100, obj_kind < Q_DIM(QS_rxPriv_.currObj));
QS_rxPriv_.currObj[obj_kind] = obj_ptr;
/*! Query the "current object" in the Target
* @static @public @memberof QS_rx
*
* @details
* This function programmatically generates the response to the query for
* a "current object".
*/
if (QS_rxPriv_.currObj[obj_kind] != (void *)0) {
QS_CRIT_STAT_
QS_CRIT_E_();
QS_beginRec_((uint_fast8_t)QS_QUERY_DATA);
QS_TIME_PRE_(); /* timestamp */
QS_U8_PRE_(obj_kind); /* object kind */
QS_OBJ_PRE_(QS_rxPriv_.currObj[obj_kind]);
switch (obj_kind) {
case SM_OBJ: /* intentionally fall through */
case AO_OBJ:
QS_FUN_PRE_((*((QHsm *)QS_rxPriv_.currObj[obj_kind])->vptr
->getStateHandler)(
((QHsm *)QS_rxPriv_.currObj[obj_kind])));
break;
case MP_OBJ:
QS_MPC_PRE_(((QMPool *)QS_rxPriv_.currObj[obj_kind])
->nFree);
QS_MPC_PRE_(((QMPool *)QS_rxPriv_.currObj[obj_kind])
->nMin);
break;
case EQ_OBJ:
QS_EQC_PRE_(((QEQueue *)QS_rxPriv_.currObj[obj_kind])
->nFree);
QS_EQC_PRE_(((QEQueue *)QS_rxPriv_.currObj[obj_kind])
->nMin);
break;
case TE_OBJ:
QS_OBJ_PRE_(((QTimeEvt *)QS_rxPriv_.currObj[obj_kind])
->act);
QS_TEC_PRE_(((QTimeEvt *)QS_rxPriv_.currObj[obj_kind])
->ctr);
QS_TEC_PRE_(((QTimeEvt *)QS_rxPriv_.currObj[obj_kind])
->interval);
QS_SIG_PRE_(((QTimeEvt *)QS_rxPriv_.currObj[obj_kind])
->super.sig);
QS_U8_PRE_ (((QTimeEvt *)QS_rxPriv_.currObj[obj_kind])
->super.refCtr_);
break;
default:
/* intentionally empty */
break;
}
QS_endRec_();
QS_CRIT_X_();
QS_REC_DONE(); /* user callback (if defined) */
}
else {
QS_rxReportError_((int8_t)QS_RX_QUERY_CURR);
}
/*! Parse all bytes present in the QS-RX data buffer
* @static @public @memberof QS_rx
*/
QSCtr tail = QS_rxPriv_.tail;
while (QS_rxPriv_.head != tail) { /* QS-RX buffer NOT empty? */
uint8_t b = QS_rxPriv_.buf[tail];
++tail;
if (tail == QS_rxPriv_.end) {
tail = 0U;
}
QS_rxPriv_.tail = tail; /* update the tail to a *valid* index */
if (l_rx.esc != 0U) { /* escaped byte arrived? */
l_rx.esc = 0U;
b ^= QS_ESC_XOR;
l_rx.chksum += b;
QS_rxParseData_(b);
}
else if (b == QS_ESC) {
l_rx.esc = 1U;
}
else if (b == QS_FRAME) {
/* get ready for the next frame */
b = l_rx.state; /* save the current state in b */
l_rx.esc = 0U;
QS_RX_TRAN_(WAIT4_SEQ);
if (l_rx.chksum == QS_GOOD_CHKSUM) {
l_rx.chksum = 0U;
QS_rxHandleGoodFrame_(b);
}
else { /* bad checksum */
l_rx.chksum = 0U;
QS_rxReportError_(0x41);
QS_rxHandleBadFrame_(b);
}
}
else {
l_rx.chksum += b;
QS_rxParseData_(b);
}
}
/*! internal function to handle incoming (QS-RX) packet
* @static @private @memberof QS_rx
*/
uint8_t i;
uint8_t *ptr;
QS_CRIT_STAT_
switch (state) {
case WAIT4_INFO_FRAME: {
/* no need to report Ack or Done */
QS_CRIT_E_();
QS_target_info_pre_(0U); /* send only Target info */
QS_CRIT_X_();
break;
}
case WAIT4_RESET_FRAME: {
/* no need to report Ack or Done, because Target resets */
QS_onReset(); /* reset the Target */
break;
}
case WAIT4_CMD_PARAM1: /* intentionally fall-through */
case WAIT4_CMD_PARAM2: /* intentionally fall-through */
case WAIT4_CMD_PARAM3: /* intentionally fall-through */
case WAIT4_CMD_FRAME: {
QS_rxReportAck_((int8_t)QS_RX_COMMAND);
QS_onCommand(l_rx.var.cmd.cmdId, l_rx.var.cmd.param1,
l_rx.var.cmd.param2, l_rx.var.cmd.param3);
#ifdef Q_UTEST
#if Q_UTEST != 0
QS_processTestEvts_(); /* process all events produced */
#endif /* Q_UTEST != 0 */
#endif /* Q_UTEST */
QS_rxReportDone_((int8_t)QS_RX_COMMAND);
break;
}
case WAIT4_TICK_FRAME: {
QS_rxReportAck_((int8_t)QS_RX_TICK);
#ifdef Q_UTEST
QTimeEvt_tick1_((uint_fast8_t)l_rx.var.tick.rate, &QS_rxPriv_);
#if Q_UTEST != 0
QS_processTestEvts_(); /* process all events produced */
#endif /* Q_UTEST != 0 */
#else
QTimeEvt_tick_((uint_fast8_t)l_rx.var.tick.rate, &QS_rxPriv_);
#endif /* Q_UTEST */
QS_rxReportDone_((int8_t)QS_RX_TICK);
break;
}
case WAIT4_PEEK_FRAME: {
/* no need to report Ack or Done */
QS_CRIT_E_();
QS_beginRec_((uint_fast8_t)QS_PEEK_DATA);
ptr = (uint8_t *)QS_rxPriv_.currObj[AP_OBJ];
ptr = &ptr[l_rx.var.peek.offs];
QS_TIME_PRE_(); /* timestamp */
QS_U16_PRE_(l_rx.var.peek.offs); /* data offset */
QS_U8_PRE_(l_rx.var.peek.size); /* data size */
QS_U8_PRE_(l_rx.var.peek.num); /* number of data items */
for (i = 0U; i < l_rx.var.peek.num; ++i) {
switch (l_rx.var.peek.size) {
case 1:
QS_U8_PRE_(ptr[i]);
break;
case 2:
QS_U16_PRE_(((uint16_t *)ptr)[i]);
break;
case 4:
QS_U32_PRE_(((uint32_t *)ptr)[i]);
break;
default:
/* intentionally empty */
break;
}
}
QS_endRec_();
QS_CRIT_X_();
QS_REC_DONE(); /* user callback (if defined) */
break;
}
case WAIT4_POKE_DATA: {
/* received less than expected poke data items */
QS_rxReportError_((int8_t)QS_RX_POKE);
break;
}
case WAIT4_POKE_FRAME: {
QS_rxReportAck_((int8_t)QS_RX_POKE);
/* no need to report done */
break;
}
case WAIT4_FILL_FRAME: {
QS_rxReportAck_((int8_t)QS_RX_FILL);
ptr = (uint8_t *)QS_rxPriv_.currObj[AP_OBJ];
ptr = &ptr[l_rx.var.poke.offs];
for (i = 0U; i < l_rx.var.poke.num; ++i) {
switch (l_rx.var.poke.size) {
case 1:
ptr[i] = (uint8_t)l_rx.var.poke.data;
break;
case 2:
((uint16_t *)ptr)[i]
= (uint16_t)l_rx.var.poke.data;
break;
case 4:
((uint32_t *)ptr)[i] = l_rx.var.poke.data;
break;
default:
/* intentionally empty */
break;
}
}
break;
}
case WAIT4_FILTER_FRAME: {
QS_rxReportAck_(l_rx.var.flt.recId);
/* apply the received filters */
if (l_rx.var.flt.recId == (int8_t)QS_RX_GLB_FILTER) {
for (i = 0U; i < Q_DIM(QS_priv_.glbFilter); ++i) {
QS_priv_.glbFilter[i] = l_rx.var.flt.data[i];
}
/* leave the "not maskable" filters enabled,
* see qs.h, Miscellaneous QS records (not maskable)
*/
QS_priv_.glbFilter[0] |= 0x01U;
QS_priv_.glbFilter[7] |= 0xFCU;
QS_priv_.glbFilter[8] |= 0x7FU;
/* never enable the last 3 records (0x7D, 0x7E, 0x7F) */
QS_priv_.glbFilter[15] &= 0x1FU;
}
else if (l_rx.var.flt.recId == (int8_t)QS_RX_LOC_FILTER) {
for (i = 0U; i < Q_DIM(QS_priv_.locFilter); ++i) {
QS_priv_.locFilter[i] = l_rx.var.flt.data[i];
}
/* leave QS_ID == 0 always on */
QS_priv_.locFilter[0] |= 0x01U;
}
else {
QS_rxReportError_(l_rx.var.flt.recId);
}
/* no need to report Done */
break;
}
case WAIT4_OBJ_FRAME: {
i = l_rx.var.obj.kind;
if (i < (uint8_t)MAX_OBJ) {
if (l_rx.var.obj.recId == (int8_t)QS_RX_CURR_OBJ) {
QS_rxPriv_.currObj[i] = (void *)l_rx.var.obj.addr;
QS_rxReportAck_((int8_t)QS_RX_CURR_OBJ);
}
else if (l_rx.var.obj.recId == (int8_t)QS_RX_AO_FILTER) {
if (l_rx.var.obj.addr != 0U) {
int_fast16_t const filter =
(int_fast16_t)((QActive *)l_rx.var.obj.addr)->prio;
QS_locFilter_((i == 0U)
? filter
:-filter);
QS_rxReportAck_((int8_t)QS_RX_AO_FILTER);
}
else {
QS_rxReportError_((int8_t)QS_RX_AO_FILTER);
}
}
else {
QS_rxReportError_(l_rx.var.obj.recId);
}
}
/* both SM and AO */
else if (i == (uint8_t)SM_AO_OBJ) {
if (l_rx.var.obj.recId == (int8_t)QS_RX_CURR_OBJ) {
QS_rxPriv_.currObj[SM_OBJ] = (void *)l_rx.var.obj.addr;
QS_rxPriv_.currObj[AO_OBJ] = (void *)l_rx.var.obj.addr;
}
QS_rxReportAck_(l_rx.var.obj.recId);
}
else {
QS_rxReportError_(l_rx.var.obj.recId);
}
break;
}
case WAIT4_QUERY_FRAME: {
QS_queryCurrObj(l_rx.var.obj.kind);
break;
}
case WAIT4_EVT_FRAME: {
/* NOTE: Ack was already reported in the WAIT4_EVT_LEN state */
#ifdef Q_UTEST
QS_onTestEvt(l_rx.var.evt.e); /* adjust the event, if needed */
#endif /* Q_UTEST */
i = 0U; /* use 'i' as status, 0 == success, no-recycle */
if (l_rx.var.evt.prio == 0U) { /* publish */
QActive_publish_(l_rx.var.evt.e, &QS_rxPriv_, 0U);
}
else if (l_rx.var.evt.prio < QF_MAX_ACTIVE) {
if (!QACTIVE_POST_X(QActive_registry_[l_rx.var.evt.prio],
l_rx.var.evt.e,
0U, /* margin */
&QS_rxPriv_))
{
/* failed QACTIVE_POST() recycles the event */
i = 0x80U; /* failure status, no recycle */
}
}
else if (l_rx.var.evt.prio == 255U) { /* special prio */
/* dispatch to the current SM object */
if (QS_rxPriv_.currObj[SM_OBJ] != (void *)0) {
/* increment the ref-ctr to simulate the situation
* when the event is just retrieved from a queue.
* This is expected for the following QF_gc() call.
*/
++l_rx.var.evt.e->refCtr_;
QHSM_DISPATCH((QHsm *)QS_rxPriv_.currObj[SM_OBJ],
l_rx.var.evt.e, 0U);
i = 0x01U; /* success status, recycle needed */
}
else {
i = 0x81U; /* failure status, recycle needed */
}
}
else if (l_rx.var.evt.prio == 254U) { /* special prio */
/* init the current SM object */
if (QS_rxPriv_.currObj[SM_OBJ] != (void *)0) {
/* increment the ref-ctr to simulate the situation
* when the event is just retrieved from a queue.
* This is expected for the following QF_gc() call.
*/
++l_rx.var.evt.e->refCtr_;
QHSM_INIT((QHsm *)QS_rxPriv_.currObj[SM_OBJ],
l_rx.var.evt.e, 0U);
i = 0x01U; /* success status, recycle needed */
}
else {
i = 0x81U; /* failure status, recycle needed */
}
}
else if (l_rx.var.evt.prio == 253U) { /* special prio */
/* post to the current AO */
if (QS_rxPriv_.currObj[AO_OBJ] != (void *)0) {
if (!QACTIVE_POST_X(
(QActive *)QS_rxPriv_.currObj[AO_OBJ],
l_rx.var.evt.e,
0U, /* margin */
&QS_rxPriv_))
{
/* failed QACTIVE_POST() recycles the event */
i = 0x80U; /* failure status, no recycle */
}
}
else {
i = 0x81U; /* failure status, recycle needed */
}
}
else {
i = 0x81U; /* failure status, recycle needed */
}
#if (QF_MAX_EPOOL > 0U)
if ((i & 0x01U) != 0U) { /* recycle needed? */
QF_gc(l_rx.var.evt.e);
}
#endif
if ((i & 0x80U) != 0U) { /* failure? */
QS_rxReportError_((int8_t)QS_RX_EVENT);
}
else {
#ifdef Q_UTEST
#if Q_UTEST != 0
QS_processTestEvts_(); /* process all events produced */
#endif /* Q_UTEST != 0 */
#endif /* Q_UTEST */
QS_rxReportDone_((int8_t)QS_RX_EVENT);
}
break;
}
#ifdef Q_UTEST
case WAIT4_TEST_SETUP_FRAME: {
QS_rxReportAck_((int8_t)QS_RX_TEST_SETUP);
QS_testData.tpNum = 0U; /* clear the Test-Probes */
QS_testData.testTime = 0U; /* clear the time tick */
/* don't clear current objects */
QS_onTestSetup(); /* application-specific test setup */
/* no need to report Done */
break;
}
case WAIT4_TEST_TEARDOWN_FRAME: {
QS_rxReportAck_((int8_t)QS_RX_TEST_TEARDOWN);
QS_onTestTeardown(); /* application-specific test teardown */
/* no need to report Done */
break;
}
case WAIT4_TEST_CONTINUE_FRAME: {
QS_rxReportAck_((int8_t)QS_RX_TEST_CONTINUE);
QS_rxPriv_.inTestLoop = false; /* exit the QUTest loop */
/* no need to report Done */
break;
}
case WAIT4_TEST_PROBE_FRAME: {
QS_rxReportAck_((int8_t)QS_RX_TEST_PROBE);
Q_ASSERT_ID(815, QS_testData.tpNum
< (sizeof(QS_testData.tpBuf) / sizeof(QS_testData.tpBuf[0])));
QS_testData.tpBuf[QS_testData.tpNum] = l_rx.var.tp;
++QS_testData.tpNum;
/* no need to report Done */
break;
}
#endif /* Q_UTEST */
case ERROR_STATE: {
/* keep ignoring all bytes until new frame */
break;
}
default: {
QS_rxReportError_(0x47);
break;
}
}
/*! callback function to reset the Target (to be implemented in the BSP)
* @static @public @memberof QS_rx
*/
/*! Callback function to execute user commands (to be implemented in BSP)
* @static @public @memberof QS_rx
*/
/*! Put one byte into the QS RX lock-free buffer
* @static @public @memberof QS_rx
*/
QSCtr head = QS_rxPriv_.head + 1U;
if (head == QS_rxPriv_.end) {
head = 0U;
}
if (head != QS_rxPriv_.tail) { /* buffer NOT full? */
QS_rxPriv_.buf[QS_rxPriv_.head] = b;
QS_rxPriv_.head = head; /* update the head to a *valid* index */
return true; /* byte placed in the buffer */
}
else {
return false; /* byte NOT placed in the buffer */
}
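/*! @usage
* A minimal sketch of the intended lock-free producer/consumer use of the
* QS-RX buffer: an ISR produces bytes with QS_rxPut() while the idle level
* consumes them with QS_rxParse(). The UART_rx_ISR(), UART_getByte() and
* BSP_idle() names are hypothetical; QS_rxInitBuf(), QS_rxPut() and
* QS_rxParse() are the QS-RX APIs.
* @code
*     static uint8_t qsRxBuf[128];          // storage for the QS-RX buffer
*     // QS_rxInitBuf(qsRxBuf, sizeof(qsRxBuf)); called during QS init
*
*     void UART_rx_ISR(void) {              // producer (ISR context)
*         uint8_t const b = UART_getByte(); // hypothetical UART read
*         (void)QS_rxPut(b);                // byte is dropped if buffer full
*     }
*
*     void BSP_idle(void) {                 // consumer (idle level)
*         QS_rxParse();                     // parse all bytes received so far
*     }
* @endcode
*/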
/*! @brief Test Probe attributes */
{
QSFun addr;
uint32_t data;
uint8_t idx;
};
/*! @brief QUTest data */
{
struct QS_TProbe tpBuf[16]; /*!< buffer of Test-Probes received so far */
uint8_t tpNum; /*!< current number of Test-Probes */
QSTimeCtr testTime; /*!< test time (tick counter) */
};
/*! QUTest data */
/*! internal function to pause test and enter the test event loop */
QS_beginRec_((uint_fast8_t)QS_TEST_PAUSED);
QS_endRec_();
QS_onTestLoop();
/*! get the test probe data for the given API */
uint32_t data = 0U;
uint_fast8_t i;
for (i = 0U; i < QS_testData.tpNum; ++i) {
uint_fast8_t j;
if (QS_testData.tpBuf[i].addr == (QSFun)api) {
QS_CRIT_STAT_
data = QS_testData.tpBuf[i].data;
QS_CRIT_E_();
QS_beginRec_((uint_fast8_t)QS_TEST_PROBE_GET);
QS_TIME_PRE_(); /* timestamp */
QS_FUN_PRE_(api); /* the calling API */
QS_U32_PRE_(data); /* the Test-Probe data */
QS_endRec_();
QS_CRIT_X_();
QS_REC_DONE(); /* user callback (if defined) */
--QS_testData.tpNum; /* one less Test-Probe */
/* move all remaining entries in the buffer up by one */
for (j = i; j < QS_testData.tpNum; ++j) {
QS_testData.tpBuf[j] = QS_testData.tpBuf[j + 1U];
}
break; /* we are done (Test-Probe retrieved) */
}
}
return data;
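/*! @usage
* A hedged sketch of instrumenting an application function with Test
* Probes; myFun() is hypothetical, while the QS_TEST_PROBE_*() macros are
* the QUTest instrumentation that ultimately calls QS_getTestProbe_():
* @code
*     uint32_t myFun(uint32_t x) {
*         QS_TEST_PROBE_DEF(&myFun)  // fetch the Test-Probe for this API
*         QS_TEST_PROBE(             // executed for ANY probe value != 0
*             x += qs_tp_;           // qs_tp_ holds the probe data
*         )
*         QS_TEST_PROBE_ID(1,        // executed only for probe value == 1
*             return 0U;             // fake a failure for this test
*         )
*         return x * 2U;
*     }
* @endcode
*/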
/*! callback to setup a unit test inside the Target */
/*! callback to teardown after a unit test inside the Target */
/*! callback to "massage" the test event before dispatching/posting it */
/*! callback to examine an event that is about to be posted */
/*! callback to run the test loop */
/*! record-ID for posting events */
124
/*! internal function to process posted events during test */
QS_TEST_PROBE_DEF(&QS_processTestEvts_)
/* return immediately (do nothing) for Test Probe != 0 */
QS_TEST_PROBE(return;)
while (QPSet_notEmpty(&QF_readySet_)) {
uint_fast8_t const p = QPSet_findMax(&QF_readySet_);
QActive * const a = QActive_registry_[p];
/* perform the run-to-completion (RTC) step...
* 1. retrieve the event from the AO's event queue, which by this
* time must be non-empty and the "Vanilla" kernel asserts it.
* 2. dispatch the event to the AO's state machine.
* 3. determine if event is garbage and collect it if so
*/
QEvt const * const e = QActive_get_(a);
QHSM_DISPATCH(&a->super, e, a->prio);
#if (QF_MAX_EPOOL > 0U)
QF_gc(e);
#endif
if (a->eQueue.frontEvt == (QEvt *)0) { /* empty queue? */
QPSet_remove(&QF_readySet_, p);
}
}
/*! QF initialization for QUTest */
/* Clear the internal QF variables, so that the framework can start
* correctly even if the startup code fails to clear the uninitialized
* data (as is required by the C Standard).
*/
QF_maxPool_ = 0U;
QF_intLock_ = 0U;
QF_intNest_ = 0U;
QF_bzero(&QActive_registry_[0], sizeof(QActive_registry_));
QF_bzero(&QF_readySet_, sizeof(QF_readySet_));
/*! stop the QF customization for QUTest */
QS_onReset();
/*! QF_run() customization for QUTest */
/* function dictionaries for the standard API */
QS_FUN_DICTIONARY(&QActive_post_);
QS_FUN_DICTIONARY(&QActive_postLIFO_);
QS_FUN_DICTIONARY(&QS_processTestEvts_);
/* produce the QS_QF_RUN trace record */
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QF_RUN, 0U)
QS_END_PRE_()
QS_processTestEvts_(); /* process all events posted so far */
QS_onTestLoop(); /* run the test loop */
QS_onCleanup(); /* application cleanup */
return 0; /* return no error */
/*! QActive active object class customization for QUTest */
/*! QActive_start_() customization for QUTest
* @public @memberof QActive
*/
Q_UNUSED_PAR(stkSto);
Q_UNUSED_PAR(stkSize);
me->prio = (uint8_t)(prioSpec & 0xFFU); /* QF-priority of the AO */
me->pthre = (uint8_t)(prioSpec >> 8U); /* preemption-threshold */
QActive_register_(me); /* make QF aware of this active object */
QEQueue_init(&me->eQueue, qSto, qLen); /* initialize the built-in queue */
QHSM_INIT(&me->super, par, me->prio); /* the top-most initial tran. */
/*! Stops execution of an active object and unregisters the object
* with the framework customized for QUTest
* @public @memberof QActive
*/
QActive_unsubscribeAll(me); /* unsubscribe from all events */
QActive_unregister_(me); /* un-register this active object */
/*! QTimeEvt class customization for QUTest */
/*! Processes one clock tick for QUTest */
QF_CRIT_STAT_
QF_CRIT_E_();
QTimeEvt *prev = &QTimeEvt_timeEvtHead_[tickRate];
QS_BEGIN_NOCRIT_PRE_(QS_QF_TICK, 0U)
++prev->ctr;
QS_TEC_PRE_(prev->ctr); /* tick ctr */
QS_U8_PRE_(tickRate); /* tick rate */
QS_END_NOCRIT_PRE_()
/* is current Time Event object provided? */
QTimeEvt *t = (QTimeEvt *)QS_rxPriv_.currObj[TE_OBJ];
if (t != (QTimeEvt *)0) {
/* the time event must be armed */
Q_ASSERT_ID(810, t->ctr != 0U);
/* temp. for volatile */
QActive * const act = (QActive * const)(t->act);
/* the recipient AO must be provided */
Q_ASSERT_ID(820, act != (QActive *)0);
/* periodic time evt? */
if (t->interval != 0U) {
t->ctr = t->interval; /* rearm the time event */
}
else { /* one-shot time event: automatically disarm */
t->ctr = 0U; /* auto-disarm */
/* mark time event 't' as NOT linked */
t->super.refCtr_ &= (uint8_t)(~(uint8_t)QTE_IS_LINKED);
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_AUTO_DISARM, act->prio)
QS_OBJ_PRE_(t); /* this time event object */
QS_OBJ_PRE_(act); /* the target AO */
QS_U8_PRE_(tickRate); /* tick rate */
QS_END_NOCRIT_PRE_()
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_TIMEEVT_POST, act->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(t); /* the time event object */
QS_SIG_PRE_(t->super.sig); /* signal of this time event */
QS_OBJ_PRE_(act); /* the target AO */
QS_U8_PRE_(tickRate); /* tick rate */
QS_END_NOCRIT_PRE_()
QF_CRIT_X_(); /* exit critical section before posting */
QACTIVE_POST(act, &t->super, sender); /* asserts if queue overflows */
QF_CRIT_E_();
}
/* update the linked list of time events */
for (;;) {
t = prev->next; /* advance down the time evt. list */
/* end of the list? */
if (t == (QTimeEvt *)0) {
/* any new time events armed since the last QTimeEvt_tick_()? */
if (QTimeEvt_timeEvtHead_[tickRate].act != (void *)0) {
/* sanity check */
Q_ASSERT_CRIT_(830, prev != (QTimeEvt *)0);
prev->next = (QTimeEvt *)QTimeEvt_timeEvtHead_[tickRate].act;
QTimeEvt_timeEvtHead_[tickRate].act = (void *)0;
t = prev->next; /* switch to the new list */
}
else {
break; /* all currently armed time evts. processed */
}
}
/* time event scheduled for removal? */
if (t->ctr == 0U) {
prev->next = t->next;
/* mark time event 't' as NOT linked */
t->super.refCtr_ &= (uint8_t)(~(uint8_t)QTE_IS_LINKED);
/* do NOT advance the prev pointer */
QF_CRIT_X_(); /* exit crit. section to reduce latency */
/* prevent merging critical sections, see NOTE1 below */
QF_CRIT_EXIT_NOP();
}
else {
prev = t; /* advance to this time event */
QF_CRIT_X_(); /* exit crit. section to reduce latency */
/* prevent merging critical sections, see NOTE1 below */
QF_CRIT_EXIT_NOP();
}
QF_CRIT_E_(); /* re-enter crit. section to continue */
}
QF_CRIT_X_();
/*! @brief QHsmDummy class
* @class QHsmDummy
* @extends QHsm
*
* @details
* ::QHsmDummy is a test double for the role of "Orthogonal Components"
* HSM objects in QUTest unit testing.
*/
/*! Constructor of the QHsmDummy HSM class
* @public @memberof QHsmDummy
*/
static struct QHsmVtable const vtable = { /* QHsm virtual table */
&QHsmDummy_init_,
&QHsmDummy_dispatch_
#ifdef Q_SPY
,&QHsm_getStateHandler_
#endif
};
/* superclass' ctor */
QHsm_ctor(&me->super, Q_STATE_CAST(0));
me->super.vptr = &vtable; /* hook the vptr */
/*! override for QHsm_init_()
* @private @memberof QHsmDummy
*/
Q_UNUSED_PAR(par);
#ifdef Q_SPY
if ((QS_priv_.flags & 0x01U) == 0U) {
QS_priv_.flags |= 0x01U;
QS_FUN_DICTIONARY(&QHsm_top);
}
#endif
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_STATE_INIT, qs_id)
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(me->state.fun); /* the source state */
QS_FUN_PRE_(me->temp.fun); /* the target of the initial transition */
QS_END_PRE_()
/*! override for QHsm_dispatch_()
* @private @memberof QHsmDummy
*/
QS_CRIT_STAT_
QS_BEGIN_PRE_(QS_QEP_DISPATCH, qs_id)
QS_TIME_PRE_(); /* time stamp */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this state machine object */
QS_FUN_PRE_(me->state.fun); /* the current state */
QS_END_PRE_()
/*! @brief QActiveDummy Object class
* @class QActiveDummy
* @extends QActive
*
* @details
* QActiveDummy is a test double for the role of collaborating active
* objects in QUTest unit testing.
*/
/*! Constructor of the QActiveDummy Active Object class
* @public @memberof QActiveDummy
*/
static QActiveVtable const vtable = { /* QActive virtual table */
{ &QActiveDummy_init_,
&QActiveDummy_dispatch_
#ifdef Q_SPY
,&QHsm_getStateHandler_
#endif
},
&QActiveDummy_start_,
&QActiveDummy_post_,
&QActiveDummy_postLIFO_
};
/* superclass' ctor */
QActive_ctor(&me->super, Q_STATE_CAST(0));
me->super.super.vptr = &vtable.super; /* hook the vptr */
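/*! @usage
* A minimal sketch of how a QUTest test fixture might instantiate a
* collaborator dummy (the l_dummy name and the priority 1U are hypothetical
* choices of the fixture):
* @code
*     static QActiveDummy l_dummy;  // test double for a collaborator AO
*
*     // in the test fixture setup (e.g., in main() of the fixture)...
*     QActiveDummy_ctor(&l_dummy);
*     QACTIVE_START(&l_dummy.super,
*                   1U,                    // QF-priority for the dummy
*                   (QEvt const **)0, 0U,  // no event queue
*                   (void *)0, 0U,         // no per-AO stack
*                   (void *)0);            // no initialization parameter
* @endcode
*/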
/*! override for QHsm_init_()
* @private @memberof QActiveDummy
*/
Q_UNUSED_PAR(qs_id);
QHsmDummy_init_(me, par, ((QActive const *)me)->prio);
/*! override for QHsm_dispatch_()
* @private @memberof QActiveDummy
*/
Q_UNUSED_PAR(qs_id);
QHsmDummy_dispatch_(me, e, ((QActive const *)me)->prio);
/*! override for QActive_start_()
* @private @memberof QActiveDummy
*/
/* No special preconditions for checking parameters to allow starting
* dummy AOs the exact same way as the real counterparts.
*/
Q_UNUSED_PAR(qSto);
Q_UNUSED_PAR(qLen);
Q_UNUSED_PAR(stkSto);
Q_UNUSED_PAR(stkSize);
me->prio = (uint8_t)(prioSpec & 0xFFU); /* QF-priority of the AO */
me->pthre = (uint8_t)(prioSpec >> 8U); /* preemption-threshold */
QActive_register_(me); /* make QF aware of this active object */
/* the top-most initial tran. (virtual) */
QHSM_INIT(&me->super, par, me->prio);
//QS_FLUSH();
/*! override for QActive_post_()
* @private @memberof QActiveDummy
*/
QS_TEST_PROBE_DEF(&QActive_post_)
/* test-probe#1 for faking queue overflow */
bool status = true;
QS_TEST_PROBE_ID(1,
status = false;
if (margin == QF_NO_MARGIN) {
/* fake assertion Mod=qf_actq,Loc=110 */
Q_onAssert("qf_actq", 110);
}
)
QF_CRIT_STAT_
QF_CRIT_E_();
/* is it a dynamic event? */
if (e->poolId_ != 0U) {
QEvt_refCtr_inc_(e); /* increment the reference counter */
}
uint_fast8_t const rec = (status ? (uint_fast8_t)QS_QF_ACTIVE_POST
: (uint_fast8_t)QS_QF_ACTIVE_POST_ATTEMPT);
QS_BEGIN_NOCRIT_PRE_(rec, me->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(sender); /* the sender object */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this active object */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & refCtr of the evt */
QS_EQC_PRE_(0U); /* number of free entries */
QS_EQC_PRE_(margin); /* margin requested */
QS_END_NOCRIT_PRE_()
/* callback to examine the posted event under the same conditions
* as producing the #QS_QF_ACTIVE_POST trace record, which are:
* the local filter for this AO ('me->prio') is set
*/
if ((QS_priv_.locFilter[me->prio >> 3U]
& (1U << (me->prio & 7U))) != 0U)
{
QS_onTestPost(sender, me, e, status);
}
QF_CRIT_X_();
/* recycle the event immediately, because it was not really posted */
#if (QF_MAX_EPOOL > 0U)
QF_gc(e);
#endif
return status; /* the event is "posted" correctly */
/*! override for QActive_postLIFO_()
* @private @memberof QActiveDummy
*/
QS_TEST_PROBE_DEF(&QActive_postLIFO_)
/* test-probe#1 for faking queue overflow */
QS_TEST_PROBE_ID(1,
/* fake assertion Mod=qf_actq,Loc=210 */
Q_onAssert("qf_actq", 210);
)
QF_CRIT_STAT_
QF_CRIT_E_();
/* is it a dynamic event? */
if (e->poolId_ != 0U) {
QEvt_refCtr_inc_(e); /* increment the reference counter */
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_POST_LIFO, me->prio)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of this event */
QS_OBJ_PRE_(me); /* this active object */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & refCtr of the evt */
QS_EQC_PRE_(0U); /* number of free entries */
QS_EQC_PRE_(0U); /* min number of free entries */
QS_END_NOCRIT_PRE_()
/* callback to examine the posted event under the same conditions
* as producing the #QS_QF_ACTIVE_POST trace record, which are:
* the local filter for this AO ('me->prio') is set
*/
if ((QS_priv_.locFilter[me->prio >> 3U]
& (1U << (me->prio & 7U))) != 0U)
{
QS_onTestPost((QActive *)0, me, e, true);
}
QF_CRIT_X_();
/* recycle the event immediately, because it was not really posted */
#if (QF_MAX_EPOOL > 0U)
QF_gc(e);
#endif
/*! @brief QV cooperative kernel
* @class QV
*/
{
uint8_t dummy; /*!< dummy attribute */
} QV;
/*! QV idle callback (customized in BSPs)
* @static @public @memberof QV
*
* @details
* QV_onIdle() is called by the cooperative QV kernel (from QF_run()) when
* the scheduler detects that no events are available for active objects
* (the idle condition). This callback gives the application an opportunity
* to enter a power-saving CPU mode, or perform some other idle processing
* (such as QS software tracing output).
*
* @note
* QV_onIdle() is invoked with interrupts **DISABLED** because the idle
* condition can be asynchronously changed at any time by an interrupt.
* QV_onIdle() MUST enable the interrupts internally, but not before
* putting the CPU into the low-power mode. (Ideally, enabling interrupts and
* low-power mode should happen atomically). At the very least, the function
* MUST enable interrupts, otherwise interrupts will remain disabled
* permanently.
*/
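/*! @usage
* A minimal sketch of a BSP-level QV_onIdle() implementation. The
* QV_CPU_SLEEP() macro is provided by some QV ports (e.g., ARM Cortex-M);
* if it is not available, the callback must at least re-enable interrupts:
* @code
*     void QV_onIdle(void) {   // called with interrupts DISABLED
*     #ifdef NDEBUG
*         QV_CPU_SLEEP();      // atomically go to sleep and enable interrupts
*     #else
*         QF_INT_ENABLE();     // just enable interrupts
*     #endif
*     }
* @endcode
*/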
/*! QF initialization for QV */
#if (QF_MAX_EPOOL > 0U)
QF_maxPool_ = 0U;
#endif
QF_bzero(&QTimeEvt_timeEvtHead_[0], sizeof(QTimeEvt_timeEvtHead_));
QF_bzero(&QActive_registry_[0], sizeof(QActive_registry_));
QF_bzero(&QF_readySet_, sizeof(QF_readySet_));
#ifdef QV_INIT
QV_INIT(); /* port-specific initialization of the QV kernel */
#endif
/*! stop the QF customization for QV
*
* @sa QF_onCleanup()
*/
QF_onCleanup(); /* application-specific cleanup callback */
/* nothing else to do for the cooperative QV kernel */
/*! QF_run() customization for QV kernel */
#ifdef Q_SPY
/* produce the QS_QF_RUN trace record */
QF_INT_DISABLE();
QS_beginRec_((uint_fast8_t)QS_QF_RUN);
QS_endRec_();
QF_INT_ENABLE();
#endif /* Q_SPY */
QF_onStartup(); /* application-specific startup callback */
QF_INT_DISABLE();
#ifdef QV_START
QV_START(); /* port-specific startup of the QV kernel */
#endif
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
uint8_t pprev = 0U; /* previously used priority */
#endif /* (defined QF_ON_CONTEXT_SW) || (defined Q_SPY) */
for (;;) { /* QV event loop... */
/* find the maximum priority AO ready to run */
if (QPSet_notEmpty(&QF_readySet_)) {
uint8_t const p = (uint8_t)QPSet_findMax(&QF_readySet_);
QActive * const a = QActive_registry_[p];
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_NEXT, p)
QS_TIME_PRE_(); /* timestamp */
QS_2U8_PRE_(p, /* priority of the scheduled AO */
pprev); /* previous priority */
QS_END_NOCRIT_PRE_()
#ifdef QF_ON_CONTEXT_SW
QF_onContextSw(((pprev != 0U)
? QActive_registry_[pprev]
: (QActive *)0), a);
#endif /* QF_ON_CONTEXT_SW */
pprev = p; /* update previous priority */
#endif /* (defined QF_ON_CONTEXT_SW) || (defined Q_SPY) */
QF_INT_ENABLE();
/* perform the run-to-completion (RTC) step...
* 1. retrieve the event from the AO's event queue, which
* by this time must be non-empty (and QV asserts it).
* 2. dispatch the event to the AO's state machine.
* 3. determine if event is garbage and collect it if so
*/
QEvt const * const e = QActive_get_(a);
QHSM_DISPATCH(&a->super, e, a->prio);
#if (QF_MAX_EPOOL > 0U)
QF_gc(e);
#endif
QF_INT_DISABLE();
if (a->eQueue.frontEvt == (QEvt *)0) { /* empty queue? */
QPSet_remove(&QF_readySet_, p);
}
}
else { /* no AO ready to run --> idle */
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
if (pprev != 0U) {
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_IDLE, pprev)
QS_TIME_PRE_(); /* timestamp */
QS_U8_PRE_(pprev); /* previous priority */
QS_END_NOCRIT_PRE_()
#ifdef QF_ON_CONTEXT_SW
QF_onContextSw(QActive_registry_[pprev], (QActive *)0);
#endif /* QF_ON_CONTEXT_SW */
pprev = 0U; /* update previous priority */
}
#endif /* (defined QF_ON_CONTEXT_SW) || (defined Q_SPY) */
/* QV_onIdle() must be called with interrupts DISABLED
* because the determination of the idle condition (all event
* queues empty) can change at any time by an interrupt posting
* events to a queue.
*
* NOTE: QV_onIdle() MUST enable interrupts internally,
* ideally at the same time as putting the CPU into a power-
* saving mode.
*/
QV_onIdle();
QF_INT_DISABLE(); /* disable interrupts before looping back */
}
}
#ifdef __GNUC__ /* GNU compiler? */
return 0;
#endif
/*! QActive active object class customization for QV */
/*! Starts execution of an active object and registers the object
* with the framework customized for QV
*/
Q_UNUSED_PAR(stkSto); /* not needed in QV */
Q_UNUSED_PAR(stkSize); /* not needed in QV */
/*! @pre Stack storage must not be provided because the QV kernel
* does not need per-AO stacks.
*/
Q_REQUIRE_ID(500, stkSto == (void *)0);
me->prio = (uint8_t)(prioSpec & 0xFFU); /* QF-priority of the AO */
me->pthre = (uint8_t)(prioSpec >> 8U); /* preemption-threshold */
QActive_register_(me); /* make QF aware of this active object */
QEQueue_init(&me->eQueue, qSto, qLen); /* init the built-in queue */
QHSM_INIT(&me->super, par, me->prio); /* top-most initial tran. */
QS_FLUSH(); /* flush the trace buffer to the host */
/*! QV scheduler lock status (not needed in QV) */
/*! QV scheduler locking (not needed in QV) */
((void)0)
/*! QV scheduler unlocking (not needed in QV) */
((void)0)
/*! QV native event queue waiting */
\
Q_ASSERT_ID(0, (me_)->eQueue.frontEvt != (QEvt *)0)
/*! QV native event queue signaling */
\
QPSet_insert(&QF_readySet_, (uint_fast8_t)(me_)->prio)
/*! @brief QK preemptive non-blocking kernel
* @class QK
*/
{
uint8_t volatile actPrio; /*!< QF prio of the active AO */
uint8_t volatile nextPrio; /*!< QF prio of the next AO to execute */
uint8_t volatile actThre; /*!< active preemption-threshold */
uint8_t volatile lockCeil; /*!< lock preemption-ceiling (0==no-lock) */
uint8_t volatile lockHolder; /*!< QF prio of the AO holding the lock */
} QK;
/*! attributes of the QK kernel */
/*! QK selective scheduler lock
*
* @details
* This function locks the QK scheduler to the specified ceiling.
*
* @param[in] ceiling preemption ceiling to which the QK scheduler
* needs to be locked
*
* @returns
* The previous QK Scheduler lock status, which is to be used to unlock
* the scheduler by restoring its previous lock status in
* QK_schedUnlock().
*
* @note
* QK_schedLock() must be always followed by the corresponding
* QK_schedUnlock().
*
* @sa QK_schedUnlock()
*
* @usage
* The following example shows how to lock and unlock the QK scheduler:
* @include qk_lock.c
*/
QF_CRIT_STAT_
QF_CRIT_E_();
/*! @pre The QK scheduler lock cannot be called from an ISR */
Q_REQUIRE_ID(600, !QK_ISR_CONTEXT_());
/* first store the previous lock prio */
QSchedStatus stat;
if (ceiling > QK_attr_.lockCeil) { /* raising the lock ceiling? */
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_LOCK, 0U)
QS_TIME_PRE_(); /* timestamp */
/* the previous lock ceiling & new lock ceiling */
QS_2U8_PRE_(QK_attr_.lockCeil, (uint8_t)ceiling);
QS_END_NOCRIT_PRE_()
/* previous status of the lock */
stat = (QSchedStatus)QK_attr_.lockHolder;
stat |= (QSchedStatus)QK_attr_.lockCeil << 8U;
/* new status of the lock */
QK_attr_.lockHolder = QK_attr_.actPrio;
QK_attr_.lockCeil = (uint8_t)ceiling;
}
else {
stat = 0xFFU; /* scheduler not locked */
}
QF_CRIT_X_();
return stat; /* return the status to be saved in a stack variable */
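/*! @usage
* A minimal sketch of pairing QK_schedLock() with QK_schedUnlock() around
* code that must not be preempted by AOs up to a given priority ceiling
* (N_PHILO and shared_resource_update() are hypothetical application names):
* @code
*     QSchedStatus lockStat = QK_schedLock(N_PHILO); // lock up to the ceiling
*     shared_resource_update();                      // access shared resource
*     QK_schedUnlock(lockStat);                      // restore previous status
* @endcode
*/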
/*! QK selective scheduler unlock
*
* @details
* This function unlocks the QK scheduler to the previous status.
*
* @param[in] stat previous QK Scheduler lock status returned from
* QK_schedLock()
* @note
* QK_schedUnlock() must always follow the corresponding
* QK_schedLock().
*
* @sa QK_schedLock()
*
* @usage
* The following example shows how to lock and unlock the QK scheduler:
* @include qk_lock.c
*/
/* has the scheduler been actually locked by the last QK_schedLock()? */
if (stat != 0xFFU) {
uint8_t const lockCeil = QK_attr_.lockCeil;
uint8_t const prevCeil = (uint8_t)(stat >> 8U);
QF_CRIT_STAT_
QF_CRIT_E_();
/*! @pre The scheduler can be unlocked only:
* - from the thread context (NOT from an ISR); and
* - when the current lock ceiling is greater than the previous one
*/
Q_REQUIRE_ID(700, (!QK_ISR_CONTEXT_())
&& (lockCeil > prevCeil));
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_UNLOCK, 0U)
QS_TIME_PRE_(); /* timestamp */
/* current lock ceiling (old), previous lock ceiling (new) */
QS_2U8_PRE_(lockCeil, prevCeil);
QS_END_NOCRIT_PRE_()
/* restore the previous lock ceiling and lock holder */
QK_attr_.lockCeil = prevCeil;
QK_attr_.lockHolder = (uint8_t)(stat & 0xFFU);
/* find if any AOs should be run after unlocking the scheduler */
if (QK_sched_() != 0U) { /* preemption needed? */
QK_activate_(); /* activate any unlocked AOs */
}
QF_CRIT_X_();
}
/*! QK idle callback (customized in BSPs for QK)
* @static @public @memberof QK
*
* @details
* QK_onIdle() is called continuously by the QK idle loop. This callback
* gives the application an opportunity to enter a power-saving CPU mode,
* or perform some other idle processing.
*
* @note
* QK_onIdle() is invoked with interrupts enabled and must also return with
* interrupts enabled.
*/
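/*! @usage
* A minimal sketch of a BSP-level QK_onIdle() implementation; unlike
* QV_onIdle(), this callback runs with interrupts already enabled, so a
* simple wait-for-interrupt suffices (__WFI() is an ARM Cortex-M example
* and is port/compiler specific):
* @code
*     void QK_onIdle(void) {   // called with interrupts ENABLED
*     #ifdef NDEBUG
*         __WFI();             // sleep until the next interrupt
*     #endif
*     }
* @endcode
*/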
/*! QF initialization for QK */
#if (QF_MAX_EPOOL > 0U)
QF_maxPool_ = 0U;
#endif
QF_bzero(&QTimeEvt_timeEvtHead_[0], sizeof(QTimeEvt_timeEvtHead_));
QF_bzero(&QActive_registry_[0], sizeof(QActive_registry_));
QF_bzero(&QF_readySet_, sizeof(QF_readySet_));
QF_bzero(&QK_attr_, sizeof(QK_attr_));
/* setup the QK scheduler as initially locked and not running */
QK_attr_.lockCeil = (QF_MAX_ACTIVE + 1U); /* scheduler locked */
/* QK idle AO object (const in ROM) */
static QActive const idle_ao = { (struct QHsmVtable const *)0 };
/* register the idle AO object (cast 'const' away) */
QActive_registry_[0] = (QActive *)&idle_ao;
#ifdef QK_INIT
QK_INIT(); /* port-specific initialization of the QK kernel */
#endif
/*! stop the QF customization for QK
*
* @sa QF_onCleanup()
*/
QF_onCleanup(); /* application-specific cleanup callback */
/* nothing else to do for the preemptive QK kernel */
/*! QF_run() customization for QK kernel */
#ifdef Q_SPY
/* produce the QS_QF_RUN trace record */
QF_INT_DISABLE();
QS_beginRec_((uint_fast8_t)QS_QF_RUN);
QS_endRec_();
QF_INT_ENABLE();
#endif /* Q_SPY */
QF_onStartup(); /* application-specific startup callback */
QF_INT_DISABLE();
QK_attr_.lockCeil = 0U; /* unlock the QK scheduler */
/* activate AOs to process events posted so far */
if (QK_sched_() != 0U) {
QK_activate_();
}
#ifdef QK_START
QK_START(); /* port-specific startup of the QK kernel */
#endif
QF_INT_ENABLE();
for (;;) { /* QK idle loop... */
QK_onIdle(); /* application-specific QK on-idle callback */
}
#ifdef __GNUC__
return 0;
#endif
/*! QActive active object class customization for QK */
/*! Starts execution of an active object and registers the object
* with the framework customized for QK
*/
Q_UNUSED_PAR(stkSto); /* not needed in QK */
Q_UNUSED_PAR(stkSize); /* not needed in QK */
/*! @pre AO cannot be started from an ISR and the stack storage must
* NOT be provided because the QK kernel does not need per-AO stacks.
*/
Q_REQUIRE_ID(300, (!QK_ISR_CONTEXT_())
&& (stkSto == (void *)0));
me->prio = (uint8_t)(prioSpec & 0xFFU); /* QF-priority of the AO */
me->pthre = (uint8_t)(prioSpec >> 8U); /* preemption-threshold */
QActive_register_(me); /* make QF aware of this active object */
QEQueue_init(&me->eQueue, qSto, qLen); /* init the built-in queue */
QHSM_INIT(&me->super, par, me->prio); /* top-most initial tran. */
QS_FLUSH(); /* flush the trace buffer to the host */
/* See if this AO needs to be scheduled if QK is already running */
QF_CRIT_STAT_
QF_CRIT_E_();
if (QK_sched_() != 0U) { /* activation needed? */
QK_activate_();
}
QF_CRIT_X_();
/*! QK scheduler finds the highest-priority AO ready to run
* @static @private @memberof QK
*
* @details
* The QK scheduler finds out the priority of the highest-priority AO
* that (1) has events to process and (2) has priority that is above the
* current priority.
*
* @returns
* The QF-priority of the next active object to activate, or zero
* if no activation of AO is needed.
*
* @attention
* QK_sched_() must be always called with interrupts **disabled** and
* returns with interrupts **disabled**.
*/
uint_fast8_t p;
if (QPSet_isEmpty(&QF_readySet_)) {
p = 0U; /* no activation needed */
}
else {
/* find the highest-prio AO with non-empty event queue */
p = QPSet_findMax(&QF_readySet_);
/* is the AO's priority below the active preemption-threshold? */
if (p <= QK_attr_.actThre) {
p = 0U; /* no activation needed */
}
/* is the AO's priority below the lock-ceiling? */
else if (p <= QK_attr_.lockCeil) {
p = 0U; /* no activation needed */
}
else {
QK_attr_.nextPrio = (uint8_t)p; /* next AO to run */
}
}
return p;
/*! QK activator activates the next active object. The activated AO preempts
* the currently executing AOs.
* @static @private @memberof QK
*
* @details
* QK_activate_() activates ready-to-run AOs that are above the initial
* preemption-threshold.
*
* @attention
* QK_activate_() must be always called with interrupts **disabled** and
* returns with interrupts **disabled**.
*/
uint8_t const prio_in = QK_attr_.actPrio; /* saved initial priority */
uint8_t p = QK_attr_.nextPrio; /* next prio to run */
QK_attr_.nextPrio = 0U; /* clear for the next time */
/* QK_attr_.actPrio and QK_attr_.nextPrio must be in range */
Q_REQUIRE_ID(500, (prio_in <= QF_MAX_ACTIVE)
&& (0U < p) && (p <= QF_MAX_ACTIVE));
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
uint8_t pprev = prio_in;
#endif /* QF_ON_CONTEXT_SW || Q_SPY */
/* loop until no more ready-to-run AOs of higher pthre than the initial */
QActive *a;
do {
a = QActive_registry_[p]; /* obtain the pointer to the AO */
Q_ASSERT_ID(505, a != (QActive *)0); /* the AO must be registered */
/* set new active priority and preemption-threshold */
QK_attr_.actPrio = p;
QK_attr_.actThre = a->pthre;
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
if (p != pprev) { /* changing threads? */
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_NEXT, p)
QS_TIME_PRE_(); /* timestamp */
QS_2U8_PRE_(p, /* priority of the scheduled AO */
pprev); /* previous priority */
QS_END_NOCRIT_PRE_()
#ifdef QF_ON_CONTEXT_SW
QF_onContextSw(((pprev != 0U)
? QActive_registry_[pprev]
: (QActive *)0), a);
#endif /* QF_ON_CONTEXT_SW */
pprev = p; /* update previous priority */
}
#endif /* QF_ON_CONTEXT_SW || Q_SPY */
QF_INT_ENABLE(); /* unconditionally enable interrupts */
/* perform the run-to-completion (RTC) step...
* 1. retrieve the event from the AO's event queue, which by this
* time must be non-empty and QActive_get_() asserts it.
* 2. dispatch the event to the AO's state machine.
* 3. determine if event is garbage and collect it if so
*/
QEvt const * const e = QActive_get_(a);
QHSM_DISPATCH(&a->super, e, p);
#if (QF_MAX_EPOOL > 0U)
QF_gc(e);
#endif
/* determine the next highest-priority AO ready to run... */
QF_INT_DISABLE(); /* unconditionally disable interrupts */
if (a->eQueue.frontEvt == (QEvt *)0) { /* empty queue? */
QPSet_remove(&QF_readySet_, p);
}
if (QPSet_isEmpty(&QF_readySet_)) {
p = 0U; /* no activation needed */
}
else {
/* find new highest-prio AO ready to run... */
p = (uint8_t)QPSet_findMax(&QF_readySet_);
/* is the new priority below the initial preemption-threshold? */
if (p <= QActive_registry_[prio_in]->pthre) {
p = 0U; /* no activation needed */
}
/* is the AO's priority below the lock preemption-ceiling? */
else if (p <= QK_attr_.lockCeil) {
p = 0U; /* no activation needed */
}
else {
Q_ASSERT_ID(510, p <= QF_MAX_ACTIVE);
}
}
} while (p != 0U);
/* restore the active priority and preemption-threshold */
QK_attr_.actPrio = prio_in;
QK_attr_.actThre = QActive_registry_[prio_in]->pthre;
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
if (prio_in != 0U) { /* resuming an active object? */
a = QActive_registry_[prio_in]; /* pointer to the preempted AO */
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_NEXT, prio_in)
QS_TIME_PRE_(); /* timestamp */
/* priority of the resumed AO, previous priority */
QS_2U8_PRE_(prio_in, pprev);
QS_END_NOCRIT_PRE_()
}
else { /* resuming priority==0 --> idle */
a = (QActive *)0; /* QK idle loop */
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_IDLE, pprev)
QS_TIME_PRE_(); /* timestamp */
QS_U8_PRE_(pprev); /* previous priority */
QS_END_NOCRIT_PRE_()
}
#ifdef QF_ON_CONTEXT_SW
QF_onContextSw(QActive_registry_[pprev], a);
#endif /* QF_ON_CONTEXT_SW */
#endif /* QF_ON_CONTEXT_SW || Q_SPY */
/*! Internal macro that reports the execution context (ISR vs. thread)
*
* @returns true if the code executes in the ISR context and false
* otherwise
*/
(QF_intNest_ != 0U)
/*! QK scheduler lock status */
QSchedStatus lockStat_;
/*! QK selective scheduler locking */
do { \
if (QK_ISR_CONTEXT_()) { \
lockStat_ = 0xFFU; \
} else { \
lockStat_ = QK_schedLock((ceil_)); \
} \
} while (false)
/*! QK selective scheduler unlocking */
do { \
if (lockStat_ != 0xFFU) { \
QK_schedUnlock(lockStat_); \
} \
} while (false)
/*! QK native event queue waiting */
\
(Q_ASSERT_ID(110, (me_)->eQueue.frontEvt != (QEvt *)0))
/*! QK native event queue signaling */
do { \
QPSet_insert(&QF_readySet_, (uint_fast8_t)(me_)->prio); \
if (!QK_ISR_CONTEXT_()) { \
if (QK_sched_() != 0U) { \
QK_activate_(); \
} \
} \
} while (false)
/*! QXK idle callback (customized in BSPs for QXK)
* @static @public @memberof QXK
*
* @details
* QXK_onIdle() is called continuously by the QXK idle thread. This callback
* gives the application an opportunity to enter a power-saving CPU mode,
* or perform some other idle processing.
*
* @note
* QXK_onIdle() is invoked with interrupts enabled and must also return with
* interrupts enabled.
*/
/*! QXK Scheduler lock
* @static @public @memberof QXK
*
* @details
* This function locks the QXK scheduler to the specified ceiling.
*
* @param[in] ceiling preemption ceiling to which the QXK scheduler
* needs to be locked
*
* @returns
* The previous QXK Scheduler lock status, which is to be used to unlock
* the scheduler by restoring its previous lock status in
* QXK_schedUnlock().
*
* @note
* A QXK scheduler can be locked from both basic threads (AOs) and
* extended threads and the scheduler locks can nest.
*
* @note
* QXK_schedLock() must be always followed by the corresponding
* QXK_schedUnlock().
*
* @attention
* QXK will fire an assertion if a thread holding the lock attempts
* to block.
*
* @sa QXK_schedUnlock()
*
* @usage
* The following example shows how to lock and unlock the QXK scheduler:
* @include qxk_lock.c
*/
QF_CRIT_STAT_
QF_CRIT_E_();
/*! @pre The QXK scheduler lock cannot be called from an ISR */
Q_REQUIRE_ID(400, !QXK_ISR_CONTEXT_());
QSchedStatus stat; /* saved lock status to be returned */
/* is the lock ceiling being raised? */
if (ceiling > (uint_fast8_t)QXK_attr_.lockCeil) {
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_LOCK, 0U)
QS_TIME_PRE_(); /* timestamp */
/* the previous lock ceiling & new lock ceiling */
QS_2U8_PRE_(QXK_attr_.lockCeil, (uint8_t)ceiling);
QS_END_NOCRIT_PRE_()
/* previous status of the lock */
stat = (QSchedStatus)QXK_attr_.lockHolder;
stat |= (QSchedStatus)QXK_attr_.lockCeil << 8U;
/* new status of the lock */
QXK_attr_.lockHolder = (QXK_attr_.curr != (QActive *)0)
? QXK_attr_.curr->prio
: 0U;
QXK_attr_.lockCeil = (uint8_t)ceiling;
}
else {
stat = 0xFFU; /* scheduler not locked */
}
QF_CRIT_X_();
return stat; /* return the status to be saved in a stack variable */
/*! QXK Scheduler unlock
* @static @public @memberof QXK
*
* @details
* This function unlocks the QXK scheduler to the previous status.
*
* @param[in] stat previous QXK Scheduler lock status returned from
* QXK_schedLock()
*
* @note
* A QXK scheduler can be locked from both basic threads (AOs) and
* extended threads and the scheduler locks can nest.
*
* @note
* QXK_schedUnlock() must always follow the corresponding QXK_schedLock().
*
* @sa QXK_schedLock()
*
* @usage
* The following example shows how to lock and unlock the QXK scheduler:
* @include qxk_lock.c
*/
/* has the scheduler been actually locked by the last QXK_schedLock()? */
if (stat != 0xFFU) {
uint8_t const lockCeil = QXK_attr_.lockCeil;
uint8_t const prevCeil = (uint8_t)(stat >> 8U);
QF_CRIT_STAT_
QF_CRIT_E_();
/*! @pre The scheduler can be unlocked only:
* - from the thread context (NOT from an ISR); and
* - when the current lock ceiling is greater than the previous one
*/
Q_REQUIRE_ID(500, (!QXK_ISR_CONTEXT_())
&& (lockCeil > prevCeil));
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_UNLOCK, 0U)
QS_TIME_PRE_(); /* timestamp */
/* ceiling before unlocking & prio after unlocking */
QS_2U8_PRE_(lockCeil, prevCeil);
QS_END_NOCRIT_PRE_()
/* restore the previous lock ceiling and lock holder */
QXK_attr_.lockCeil = prevCeil;
QXK_attr_.lockHolder = (uint8_t)(stat & 0xFFU);
/* find if any threads should be run after unlocking the scheduler */
if (QXK_sched_() != 0U) { /* activation needed? */
QXK_activate_(); /* synchronously activate basic-thread(s) */
}
QF_CRIT_X_();
}
/*! timeout signals for extended threads */
{
QXK_DELAY_SIG = 1,
QXK_TIMEOUT_SIG
};
/*! QF initialization for QXK */
#if (QF_MAX_EPOOL > 0U)
QF_maxPool_ = 0U;
#endif
QF_bzero(&QTimeEvt_timeEvtHead_[0], sizeof(QTimeEvt_timeEvtHead_));
QF_bzero(&QActive_registry_[0], sizeof(QActive_registry_));
QF_bzero(&QF_readySet_, sizeof(QF_readySet_));
QF_bzero(&QXK_attr_, sizeof(QXK_attr_));
/* setup the QXK scheduler as initially locked and not running */
QXK_attr_.lockCeil = (QF_MAX_ACTIVE + 1U); /* scheduler locked */
/* QXK idle AO object (const in ROM) */
static QActive const idle_ao = { (struct QHsmVtable const *)0 };
/* register the idle AO object (cast 'const' away) */
QActive_registry_[0] = (QActive *)&idle_ao;
QXK_attr_.prev = QActive_registry_[0];
#ifdef QXK_INIT
QXK_INIT(); /* port-specific initialization of the QXK kernel */
#endif
/*! stop the QF customization for QXK
*
* @sa QF_onCleanup()
*/
QF_onCleanup(); /* application-specific cleanup callback */
/* nothing else to do for the dual-mode QXK kernel */
/*! QF_run() customization for QXK kernel */
#ifdef Q_SPY
QS_SIG_DICTIONARY(QXK_DELAY_SIG, (void *)0);
QS_SIG_DICTIONARY(QXK_TIMEOUT_SIG, (void *)0);
/* produce the QS_QF_RUN trace record */
QF_INT_DISABLE();
QS_beginRec_((uint_fast8_t)QS_QF_RUN);
QS_endRec_();
QF_INT_ENABLE();
#endif /* Q_SPY */
QF_onStartup(); /* application-specific startup callback */
QF_INT_DISABLE();
QXK_attr_.lockCeil = 0U; /* unlock the QXK scheduler */
/* activate AOs to process events posted so far */
if (QXK_sched_() != 0U) {
QXK_activate_();
}
#ifdef QXK_START
QXK_START(); /* port-specific startup of the QXK kernel */
#endif
QF_INT_ENABLE();
for (;;) { /* QXK idle loop... */
QXK_onIdle(); /* application-specific QXK idle callback */
}
#ifdef __GNUC__
return 0;
#endif
/*! @brief eXtended (blocking) thread of the QXK preemptive kernel
* @class QXThread
* @extends QActive
*
* @details
* ::QXThread represents the eXtended (blocking) thread of the QXK
* kernel. Each extended thread in the application must be represented
* by the corresponding ::QXThread instance
*
* @note
* Typically, ::QXThread is instantiated directly in the application code.
* The customization of the thread occurs in the QXThread_ctor(), where you
* provide the thread-handler function as the parameter.
*
* @usage
* The following example illustrates how to instantiate and use an extended
* thread in your application.
* @include qxk_thread.c
*/
/*! time event to handle blocking timeouts */
/*! dummy static to force generation of "struct QXThread" */
/*! constructor of an extended-thread
* @public @memberof QXThread
*
* @details
* Performs the first step of QXThread initialization by assigning the
* thread-handler function and the tick rate at which it will handle
* the timeouts.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] handler the thread-handler function
* @param[in] tickRate the ticking rate for timeouts in this thread
* (see QXThread_delay() and QTIMEEVT_TICK_X())
*
* @note
* Must be called only ONCE before QXTHREAD_START().
*
* @usage
* The following example illustrates how to invoke QXThread_ctor() in the
* main() function
*
* @include qxk_thread_ctor.c
*/
static QXThreadVtable const vtable = { /* QXThread virtual table */
{ &QXThread_init_, /* not used in QXThread */
&QXThread_dispatch_ /* not used in QXThread */
#ifdef Q_SPY
,&QHsm_getStateHandler_ /* not used in QXThread */
#endif
},
&QXThread_start_,
&QXThread_post_,
&QXThread_postLIFO_
};
union QHsmAttr tmp;
tmp.thr = handler;
QActive_ctor(&me->super, tmp.fun); /* superclass' ctor */
me->super.super.vptr = &vtable.super; /* hook to QXThread vtable */
me->super.super.state.act = Q_ACTION_CAST(0); /* mark as extended thread */
/* construct the time event member added in the QXThread class */
QTimeEvt_ctorX(&me->timeEvt, &me->super,
(enum_t)QXK_DELAY_SIG, tickRate);
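/*! @usage
* A minimal sketch of constructing and starting an extended thread; the
* Blinky names, queue length, stack size and priority are hypothetical
* application choices:
* @code
*     static QXThread l_blinky;                     // extended-thread instance
*     static void main_blinky(QXThread * const me); // thread-handler function
*     static QEvt const *blinkyQSto[10];            // event-queue storage
*     static uint64_t blinkyStack[64];              // private stack storage
*
*     int main(void) {
*         QF_init();
*         // ... BSP initialization ...
*         QXThread_ctor(&l_blinky, &main_blinky, 0U); // ctor, tick rate 0
*         QXTHREAD_START(&l_blinky,
*                        3U,                               // QF-priority
*                        blinkyQSto, Q_DIM(blinkyQSto),    // event queue
*                        blinkyStack, sizeof(blinkyStack), // private stack
*                        (void *)0);                       // no init parameter
*         return QF_run();
*     }
* @endcode
*/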
/*! delay (block) the current extended thread for a specified # ticks
* @static @public @memberof QXThread
*
* @details
* Blocking delay for the specified number of clock ticks at the associated
* tick rate.
*
* @param[in] nTicks number of clock ticks (at the associated rate)
* to wait for the event to arrive.
* @note
* For the delay to work, the QTIMEEVT_TICK_X() macro needs to be called
* periodically at the associated clock tick rate.
*
* @sa QXThread_ctor()
* @sa QTIMEEVT_TICK_X()
*/
QF_CRIT_STAT_
QF_CRIT_E_();
QXThread * const thr = QXTHREAD_CAST_(QXK_attr_.curr);
/*! @pre this function must:
* - NOT be called from an ISR;
* - number of ticks cannot be zero
* - be called from an extended thread;
* - the thread must NOT be already blocked on any object.
*/
Q_REQUIRE_ID(800, (!QXK_ISR_CONTEXT_()) /* can't block inside an ISR */
&& (nTicks != 0U) /* number of ticks cannot be zero */
&& (thr != (QXThread *)0) /* current thread must be extended */
&& (thr->super.super.temp.obj == (QMState *)0)); /* !blocked */
/*! @pre also: the thread must NOT be holding a scheduler lock. */
Q_REQUIRE_ID(801, QXK_attr_.lockHolder != thr->super.prio);
/* remember the blocking object */
thr->super.super.temp.obj = QXK_PTR_CAST_(QMState const*, &thr->timeEvt);
QXThread_teArm_(thr, (enum_t)QXK_DELAY_SIG, nTicks);
QXThread_block_(thr);
QF_CRIT_X_();
QF_CRIT_EXIT_NOP(); /* BLOCK here */
QF_CRIT_E_();
/* the blocking object must be the time event */
Q_ENSURE_ID(890, thr->super.super.temp.obj
== QXK_PTR_CAST_(QMState const*, &thr->timeEvt));
thr->super.super.temp.obj = (QMState *)0; /* clear */
QF_CRIT_X_();
/* signal of zero means that the time event was posted without
* being canceled.
*/
return thr->timeEvt.super.sig == 0U;
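/*! @usage
* A minimal sketch of a thread-handler function using the blocking delay
* (BSP_ledOn(), BSP_ledOff() and BSP_TICKS_PER_SEC are hypothetical BSP
* names):
* @code
*     static void main_blinky(QXThread * const me) {
*         (void)me; // unused parameter
*         for (;;) {
*             BSP_ledOn();
*             (void)QXThread_delay(BSP_TICKS_PER_SEC / 4U);      // ~250 ms
*             BSP_ledOff();
*             (void)QXThread_delay(BSP_TICKS_PER_SEC * 3U / 4U); // ~750 ms
*         }
*     }
* @endcode
*/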
/*! cancel the delay
* @public @memberof QXThread
*
* @details
* Cancel the blocking delay and cause return from the QXThread_delay()
* function.
*
* @returns
* "true" if the thread was actually blocked on QXThread_delay() and
* "false" otherwise.
*/
QF_CRIT_STAT_
QF_CRIT_E_();
bool wasArmed;
if (me->super.super.temp.obj == QXK_PTR_CAST_(QMState*, &me->timeEvt)) {
wasArmed = QXThread_teDisarm_(me);
QXThread_unblock_(me);
}
else {
wasArmed = false;
}
QF_CRIT_X_();
return wasArmed;
/*! obtain a message from the private message queue (block if no messages)
* @static @public @memberof QXThread
*
* @details
* The QXThread_queueGet() operation allows the calling extended thread to
* receive QP events directly into its own built-in event queue from an ISR,
* basic thread (AO), or another extended thread.
*
* If QXThread_queueGet() is called when no events are present in the
* thread's private event queue, the operation blocks the current extended
* thread until either an event is received, or a user-specified timeout
* expires.
*
* @param[in] nTicks number of clock ticks (at the associated rate)
* to wait for the event to arrive. The value of
* ::QXTHREAD_NO_TIMEOUT indicates that no timeout will
* occur and the queue will block indefinitely.
* @returns
* A pointer to the event. If the pointer is not NULL, the event was delivered.
* Otherwise a NULL pointer indicates that the wait has timed out.
*/
QF_CRIT_STAT_
QF_CRIT_E_();
QXThread * const thr = QXTHREAD_CAST_(QXK_attr_.curr);
/*! @pre this function must:
* - NOT be called from an ISR;
* - be called from an extended thread;
* - the thread must NOT be already blocked on any object.
*/
Q_REQUIRE_ID(500, (!QXK_ISR_CONTEXT_()) /* can't block inside an ISR */
&& (thr != (QXThread *)0) /* current thread must be extended */
&& (thr->super.super.temp.obj == (QMState *)0)); /* !blocked */
/*! @pre also: the thread must NOT be holding a scheduler lock. */
Q_REQUIRE_ID(501, QXK_attr_.lockHolder != thr->super.prio);
/* is the queue empty? */
if (thr->super.eQueue.frontEvt == (QEvt *)0) {
/* remember the blocking object (the thread's queue) */
thr->super.super.temp.obj
= QXK_PTR_CAST_(QMState const*, &thr->super.eQueue);
QXThread_teArm_(thr, (enum_t)QXK_TIMEOUT_SIG, nTicks);
QPSet_remove(&QF_readySet_, (uint_fast8_t)thr->super.prio);
(void)QXK_sched_(); /* schedule other threads */
QF_CRIT_X_();
QF_CRIT_EXIT_NOP(); /* BLOCK here */
QF_CRIT_E_();
/* the blocking object must be this queue */
Q_ASSERT_ID(510, thr->super.super.temp.obj
== QXK_PTR_CAST_(QMState const*, &thr->super.eQueue));
thr->super.super.temp.obj = (QMState *)0; /* clear */
}
/* is the queue not empty? */
QEvt const *e;
if (thr->super.eQueue.frontEvt != (QEvt *)0) {
e = thr->super.eQueue.frontEvt; /* remove from the front */
QEQueueCtr const nFree = thr->super.eQueue.nFree + 1U;
thr->super.eQueue.nFree = nFree; /* update the number of free */
/* any events in the ring buffer? */
if (nFree <= thr->super.eQueue.end) {
/* remove event from the tail */
thr->super.eQueue.frontEvt =
thr->super.eQueue.ring[thr->super.eQueue.tail];
if (thr->super.eQueue.tail == 0U) { /* need to wrap? */
thr->super.eQueue.tail = thr->super.eQueue.end; /* wrap */
}
--thr->super.eQueue.tail;
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_GET, thr->super.prio)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of this event */
QS_OBJ_PRE_(&thr->super); /* this active object */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_EQC_PRE_(nFree); /* number of free entries */
QS_END_NOCRIT_PRE_()
}
else {
thr->super.eQueue.frontEvt = (QEvt *)0; /* empty queue */
/* all entries in the queue must be free (+1 for frontEvt) */
Q_ASSERT_ID(520, nFree == (thr->super.eQueue.end + 1U));
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_GET_LAST, thr->super.prio)
QS_TIME_PRE_(); /* timestamp */
QS_SIG_PRE_(e->sig); /* the signal of this event */
QS_OBJ_PRE_(&thr->super); /* this active object */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_END_NOCRIT_PRE_()
}
}
else { /* the queue is still empty -- the timeout must have fired */
e = (QEvt *)0;
}
QF_CRIT_X_();
return e;
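/*! @usage
* A minimal sketch of an extended thread receiving events from its own
* built-in queue with a timeout; process_event() and handle_timeout() are
* hypothetical, and the explicit QF_gc() reflects that extended threads
* must garbage-collect the events they consume:
* @code
*     static void main_worker(QXThread * const me) {
*         (void)me;
*         for (;;) {
*             QEvt const * const e = QXThread_queueGet(BSP_TICKS_PER_SEC);
*             if (e != (QEvt *)0) {  // event received before the timeout?
*                 process_event(e);  // hypothetical event processing
*                 QF_gc(e);          // recycle the consumed event
*             }
*             else {                 // the timeout expired
*                 handle_timeout();  // hypothetical timeout handling
*             }
*         }
*     }
* @endcode
*/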
/*! Overrides QHsm_init_() */
Q_UNUSED_PAR(me);
Q_UNUSED_PAR(par);
Q_UNUSED_PAR(qs_id);
Q_ERROR_ID(110);
/*! Overrides QHsm_dispatch_() */
Q_UNUSED_PAR(me);
Q_UNUSED_PAR(e);
Q_UNUSED_PAR(qs_id);
Q_ERROR_ID(120);
/*! start QXThread private implementation
* @private @memberof QXThread
*
* @details
* Starts execution of an extended thread and registers it with the framework.
* The extended thread becomes ready-to-run immediately and is scheduled
* if the QXK is already running.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] prio QF-priority of the thread, but no preemption-
* threshold. See also ::QPrioSpec.
* @param[in] qSto pointer to the storage for the ring buffer of the
* event queue. This could be NULL if this extended
* thread does not use the built-in event queue.
* @param[in] qLen length of the event queue [in events],
* or zero if queue not used
* @param[in] stkSto pointer to the stack storage (must be provided)
* @param[in] stkSize stack size [in bytes] (must not be zero)
* @param[in] par pointer to an extra parameter (might be NULL).
*
* @note
* Currently, extended threads in QXK do NOT support preemption-threshold.
* The `prio` must NOT provide preemption-threshold and this function
* will assert it in the precondition.
*
* @usage
* QXThread_start_() should NOT be called directly, only via the macro
* QXTHREAD_START(). The following example shows starting an extended
* thread:
* @include qxk_start.c
*/
Q_UNUSED_PAR(par);
/*! @pre this function must:
* - NOT be called from an ISR;
* - the stack storage must be provided;
* - the thread must be instantiated (see QXThread_ctor())
* - preemption-threshold is NOT provided (because QXK kernel
* does not support preemption-threshold scheduling)
*/
Q_REQUIRE_ID(200, (!QXK_ISR_CONTEXT_()) /* don't call from an ISR! */
&& (stkSto != (void *)0) /* stack must be provided */
&& (stkSize != 0U)
&& (me->super.state.act == (QActionHandler)0)
&& ((prioSpec & 0xFF00U) == 0U));
/* is storage for the queue buffer provided? */
if (qSto != (QEvt const **)0) {
QEQueue_init(&me->eQueue, qSto, qLen);
}
/* extended thread constructor puts the thread handler in place of
* the top-most initial transition 'me->super.temp.act'
*/
QXK_stackInit_(me, me->super.temp.thr, stkSto, stkSize);
/* the new thread is not blocked on any object */
me->super.temp.obj = (QMState *)0;
me->prio = (uint8_t)(prioSpec & 0xFFU); /* QF-priority of the AO */
me->pthre = 0U; /* preemption-threshold NOT used */
QActive_register_(me); /* make QF aware of this active object */
QF_CRIT_STAT_
QF_CRIT_E_();
/* extended-thread becomes ready immediately */
QPSet_insert(&QF_readySet_, (uint_fast8_t)me->prio);
/* see if this thread needs to be scheduled in case QXK is running */
if (QXK_attr_.lockCeil <= QF_MAX_ACTIVE) {
(void)QXK_sched_(); /* schedule other threads */
}
QF_CRIT_X_();
/*! post to the QXThread event queue private implementation
* @private @memberof QXThread
*
* @details
* Direct event posting is the simplest asynchronous communication method
* available in QF. The following example illustrates how the Philo active
* object posts directly the HUNGRY event to the Table active object.
* <br>
* The parameter `margin` specifies the minimum number of free slots in
* the queue that must be available for posting to succeed. The function
* returns 'true' (success) if the posting succeeded (with the provided
* margin) and 'false' (failure) when the posting fails.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] e pointer to the event to be posted
* @param[in] margin number of required free slots in the queue after
* posting the event. The special value #QF_NO_MARGIN
* means that this function will assert if posting fails.
* @param[in] sender pointer to a sender object (used only for QS tracing).
*
* @returns
* 'true' (success) if the posting succeeded (with the provided margin) and
* 'false' (failure) when the posting fails.
*
* @note
* Should be called only via the macro QXTHREAD_POST_X().
*
* @note
* The #QF_NO_MARGIN value of the `margin` parameter is special and
* denotes situation when the post() operation is assumed to succeed
* (event delivery guarantee). An assertion fires, when the event cannot
* be delivered in this case.
*
* @note
* For compatibility with the V-table from the superclass ::QActive, the
* me-pointer is typed as pointing to QActive. However, the `me` pointer
* here actually points to the QXThread subclass. Therefore the downcast
* (QXThread *)me is always correct.
*/
#ifndef Q_SPY
Q_UNUSED_PAR(sender);
#endif
QF_CRIT_STAT_
QS_TEST_PROBE_DEF(&QXThread_post_)
/* is it the private time event? */
bool status;
if (e == &QXTHREAD_CAST_(me)->timeEvt.super) {
QF_CRIT_E_();
/* the private time event is disarmed and not in any queue,
* so it is safe to change its signal. The signal of 0 means
* that the time event has expired.
*/
QXTHREAD_CAST_(me)->timeEvt.super.sig = 0U;
QXThread_unblock_(QXTHREAD_CAST_(me));
QF_CRIT_X_();
status = true;
}
/* is the event queue provided? */
else if (me->eQueue.end != 0U) {
QEQueueCtr nFree; /* temporary to avoid UB for volatile access */
/*! @pre event pointer must be valid */
Q_REQUIRE_ID(300, e != (QEvt *)0);
QF_CRIT_E_();
nFree = me->eQueue.nFree; /* get volatile into the temporary */
/* test-probe#1 for faking queue overflow */
QS_TEST_PROBE_ID(1,
nFree = 0U;
)
if (margin == QF_NO_MARGIN) {
if (nFree > 0U) {
status = true; /* can post */
}
else {
status = false; /* cannot post */
Q_ERROR_CRIT_(310); /* must be able to post the event */
}
}
else if (nFree > (QEQueueCtr)margin) {
status = true; /* can post */
}
else {
status = false; /* cannot post, but don't assert */
}
/* is it a dynamic event? */
if (e->poolId_ != 0U) {
QEvt_refCtr_inc_(e); /* increment the reference counter */
}
if (status) { /* can post the event? */
--nFree; /* one free entry just used up */
me->eQueue.nFree = nFree; /* update the volatile */
if (me->eQueue.nMin > nFree) {
me->eQueue.nMin = nFree; /* update minimum so far */
}
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_POST, me->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(sender); /* the sender object */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this active object (recipient) */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_EQC_PRE_(nFree); /* number of free entries */
QS_EQC_PRE_(me->eQueue.nMin); /* min number of free entries */
QS_END_NOCRIT_PRE_()
/* queue empty? */
if (me->eQueue.frontEvt == (QEvt *)0) {
me->eQueue.frontEvt = e; /* deliver event directly */
/* is this thread blocked on the queue? */
if (me->super.temp.obj
== QXK_PTR_CAST_(QMState*, &me->eQueue))
{
(void)QXThread_teDisarm_(QXTHREAD_CAST_(me));
QPSet_insert(&QF_readySet_, (uint_fast8_t)me->prio);
if (!QXK_ISR_CONTEXT_()) {
(void)QXK_sched_(); /* schedule other threads */
}
}
}
/* queue is not empty, insert event into the ring-buffer */
else {
/* insert event into the ring buffer (FIFO) */
me->eQueue.ring[me->eQueue.head] = e;
/* need to wrap the head counter? */
if (me->eQueue.head == 0U) {
me->eQueue.head = me->eQueue.end; /* wrap around */
}
--me->eQueue.head; /* advance the head (counter clockwise) */
}
QF_CRIT_X_();
}
else { /* cannot post the event */
QS_BEGIN_NOCRIT_PRE_(QS_QF_ACTIVE_POST_ATTEMPT, me->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(sender); /* the sender object */
QS_SIG_PRE_(e->sig); /* the signal of the event */
QS_OBJ_PRE_(me); /* this active object (recipient) */
QS_2U8_PRE_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
QS_EQC_PRE_(nFree); /* number of free entries */
QS_EQC_PRE_(margin); /* margin requested */
QS_END_NOCRIT_PRE_()
QF_CRIT_X_();
#if (QF_MAX_EPOOL > 0U)
QF_gc(e); /* recycle the event to avoid a leak */
#endif
}
}
else { /* the queue is not available */
#if (QF_MAX_EPOOL > 0U)
QF_gc(e); /* make sure the event is not leaked */
#endif
status = false;
Q_ERROR_ID(320); /* this extended thread cannot accept events */
}
return status;
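/* Illustrative usage sketch (hypothetical application code, NOT part of
 * the QP/C framework): posting an event to an extended thread with the
 * QXTHREAD_POST_X() macro documented above. The thread object `XT_worker`,
 * the signal `DATA_SIG`, and the margin value are hypothetical; an event
 * pool is assumed to have been initialized so that Q_NEW() can allocate.
 */
#include "qpc.h"    /* QP/C API (QXK kernel assumed) */

enum { DATA_SIG = Q_USER_SIG }; /* hypothetical signal */
extern QXThread XT_worker;      /* hypothetical extended thread */

void app_post_to_worker(void) {
    QEvt const *e = Q_NEW(QEvt, DATA_SIG); /* dynamic event */
    /* post with a margin of 2 free slots; returns 'false' instead of
     * asserting when the margin cannot be satisfied
     */
    if (!QXTHREAD_POST_X(&XT_worker, e, 2U, (void *)0)) {
        /* posting failed: the event has been recycled automatically */
    }
}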
/*! post to the QXThread event queue (LIFO) private implementation
* @private @memberof QXThread
*
* @details
* Last-In-First-Out (LIFO) policy is not supported for extended threads.
*
* @param[in] me current instance pointer (see @ref oop)
* @param[in] e pointer to the event to post to the queue
*
* @sa
* QActive_postLIFO_()
*/
Q_UNUSED_PAR(me);
Q_UNUSED_PAR(e);
Q_ERROR_ID(410);
const
/*! block QXThread private implementation
* @private @memberof QXThread
*
* @details
* Internal implementation of blocking the given extended thread.
*
* @note
* Must be called from within a critical section
*/
/*! @pre the thread holding the lock cannot block! */
Q_REQUIRE_ID(600, (QXK_attr_.lockHolder != me->super.prio));
QPSet_remove(&QF_readySet_, (uint_fast8_t)me->super.prio);
(void)QXK_sched_(); /* schedule other threads */
const
/*! unblock QXThread private implementation
* @private @memberof QXThread
*
* @details
* Internal implementation of un-blocking the given extended thread.
*
* @note
* must be called from within a critical section
*/
QPSet_insert(&QF_readySet_, (uint_fast8_t)me->super.prio);
if ((!QXK_ISR_CONTEXT_()) /* not inside ISR? */
&& (QActive_registry_[0] != (QActive *)0)) /* kernel started? */
{
(void)QXK_sched_(); /* schedule other threads */
}
/*! arm internal time event private implementation
* @private @memberof QXThread
*
* @details
* Internal implementation of arming the private time event for a given
* timeout at a given system tick rate.
*
* @note
* Must be called from within a critical section
*/
/*! @pre the time event must be unused */
Q_REQUIRE_ID(700, me->timeEvt.ctr == 0U);
me->timeEvt.super.sig = (QSignal)sig;
if (nTicks != QXTHREAD_NO_TIMEOUT) {
me->timeEvt.ctr = (QTimeEvtCtr)nTicks;
me->timeEvt.interval = 0U;
/* is the time event unlinked?
* NOTE: For the duration of a single clock tick of the specified tick
* rate a time event can be disarmed and yet still linked in the list,
* because un-linking is performed exclusively in QTimeEvt_tick_().
*/
if ((me->timeEvt.super.refCtr_ & QTE_IS_LINKED) == 0U) {
uint_fast8_t const tickRate
= ((uint_fast8_t)me->timeEvt.super.refCtr_ & QTE_TICK_RATE);
Q_ASSERT_ID(710, tickRate < QF_MAX_TICK_RATE);
me->timeEvt.super.refCtr_ |= QTE_IS_LINKED;
/* The time event is initially inserted into the separate
* "freshly armed" list based on QTimeEvt_timeEvtHead_[tickRate].act.
* Only later, inside the QTimeEvt_tick_() function, the "freshly
* armed" list is appended to the main list of armed time events
* based on QTimeEvt_timeEvtHead_[tickRate].next. Again, this is
* to keep any changes to the main list exclusively inside
* QTimeEvt_tick_().
*/
me->timeEvt.next
= QXK_PTR_CAST_(QTimeEvt*, QTimeEvt_timeEvtHead_[tickRate].act);
QTimeEvt_timeEvtHead_[tickRate].act = &me->timeEvt;
}
}
/*! disarm internal time event private implementation
* @private @memberof QXThread
*
* @details
* Internal implementation of disarming the private time event.
*
* @note
* Must be called from within a critical section
*/
bool wasArmed;
/* is the time evt running? */
if (me->timeEvt.ctr != 0U) {
wasArmed = true;
me->timeEvt.ctr = 0U; /* schedule removal from list */
}
/* the time event was already automatically disarmed */
else {
wasArmed = false;
}
return wasArmed;
/*! @brief Virtual Table for the ::QXThread class
* (inherited from ::QActiveVtable)
*
* @note
* ::QXThread inherits ::QActive without adding any new virtual
* functions and therefore, ::QXThreadVtable is typedef'ed as ::QActiveVtable.
*/
/*! QActive active object class customization for QK */
/*! Starts execution of an active object and registers the object
* with the framework customized for QXK
*/
Q_UNUSED_PAR(stkSto); /* not needed in QXK */
Q_UNUSED_PAR(stkSize); /* not needed in QXK */
/*! @pre AO cannot be started:
* - from an ISR;
* - the stack storage must NOT be provided (because the QXK kernel
* does not need per-AO stacks)
* - preemption-threshold is NOT provided (because QXK kernel
* does not support preemption-threshold scheduling)
*/
Q_REQUIRE_ID(200, (!QXK_ISR_CONTEXT_())
&& (stkSto == (void *)0)
&& ((prioSpec & 0xFF00U) == 0U));
me->prio = (uint8_t)(prioSpec & 0xFFU); /* QF-priority of the AO */
me->pthre = 0U; /* preemption-threshold NOT used */
QActive_register_(me); /* make QF aware of this active object */
QEQueue_init(&me->eQueue, qSto, qLen); /* init the built-in queue */
me->osObject = (void *)0; /* no private stack for the AO */
QHSM_INIT(&me->super, par, me->prio); /* top-most initial tran. */
QS_FLUSH(); /* flush the trace buffer to the host */
/* see if this AO needs to be scheduled if QXK is already running */
QF_CRIT_STAT_
QF_CRIT_E_();
if (QXK_attr_.lockCeil <= QF_MAX_ACTIVE) { /* scheduler running? */
if (QXK_sched_() != 0U) { /* activation needed? */
QXK_activate_(); /* synchronously activate basic-thread(s) */
}
}
QF_CRIT_X_();
/*! @brief Counting Semaphore of the QXK preemptive kernel
* @class QXSemaphore
*
* @details
* ::QXSemaphore is a blocking mechanism intended primarily for signaling
* @ref ::QXThread "extended threads". The semaphore is initialized with
* the maximum count (see QXSemaphore_init()), which allows you to create
* a binary semaphore (when the maximum count is 1) or a
* counting semaphore (when the maximum count is greater than 1).
*
* @usage
* The following example illustrates how to instantiate and use the semaphore
* in your application.
* @include qxk_sema.c
*/
/*! set of extended threads waiting on this semaphore */
/*! semaphore up-down counter */
/*! maximum value of the semaphore counter */
/*! initialize the counting semaphore
* @public @memberof QXSemaphore
*
* @details
* Initializes a semaphore with the specified count and maximum count.
* If the semaphore is used for resource sharing, both the initial count
* and maximum count should be set to the number of identical resources
* guarded by the semaphore. If the semaphore is used as a signaling
* mechanism, the initial count should be set to 0 and the maximum count to 1
* (binary semaphore).
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] count initial value of the semaphore counter
* @param[in] max_count maximum value of the semaphore counter.
* The purpose of the max_count is to limit the counter
* so that the semaphore cannot unblock more times than
* the maximum.
*
* @note
* QXSemaphore_init() must be called **before** the semaphore can be used
* (signaled or waited on).
*/
/*! @pre max_count must be greater than zero */
Q_REQUIRE_ID(100, max_count > 0U);
me->count = (uint8_t)count;
me->max_count = (uint8_t)max_count;
QPSet_setEmpty(&me->waitSet);
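/* Illustrative usage sketch (hypothetical application code, NOT part of
 * the QP/C framework): initializing a binary signaling semaphore with an
 * initial count of 0 and a maximum count of 1, as described above.
 */
#include "qpc.h"

static QXSemaphore l_rxSema; /* hypothetical semaphore instance */

void app_sema_setup(void) {
    QXSemaphore_init(&l_rxSema, /* the semaphore to initialize */
                     0U,        /* initial count: nothing signaled yet */
                     1U);       /* maximum count: binary semaphore */
}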
/*! wait (block) on the semaphore
* @public @memberof QXSemaphore
*
* @details
* When an extended thread calls QXSemaphore_wait() and the value of the
* semaphore counter is greater than 0, QXSemaphore_wait() decrements the
* semaphore counter and returns (true) to its caller. However, if the value
* of the semaphore counter is 0, the function places the calling thread in
* the waiting list for the semaphore. The thread waits until the semaphore
* is signaled by calling QXSemaphore_signal(), or the specified timeout
* expires. If the semaphore is signaled before the timeout expires, QXK
* resumes the highest-priority extended thread waiting for the semaphore.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] nTicks number of clock ticks (at the associated rate)
* to wait for the semaphore. The value of
* ::QXTHREAD_NO_TIMEOUT indicates that no timeout will
* occur and the semaphore will wait indefinitely.
*
* @returns
* 'true' if the semaphore has been signaled and 'false' if a timeout
* occurred.
*
* @note
* Multiple extended threads can wait for a given semaphore.
*/
QF_CRIT_STAT_
QF_CRIT_E_();
/* volatile into temp. */
QXThread * const curr = QXK_PTR_CAST_(QXThread*, QXK_attr_.curr);
/*! @pre this function must:
* - NOT be called from an ISR;
* - the semaphore must be initialized
* - be called from an extended thread;
* - the thread must NOT be already blocked on any object.
*/
Q_REQUIRE_ID(200, (!QXK_ISR_CONTEXT_()) /* can't wait inside an ISR */
&& (me->max_count > 0U) /* sema must be initialized */
&& (curr != (QXThread *)0) /* curr must be extended */
&& (curr->super.super.temp.obj == (QMState *)0)); /* NOT blocked */
/*! @pre also: the thread must NOT be holding a scheduler lock. */
Q_REQUIRE_ID(201, QXK_attr_.lockHolder != curr->super.prio);
bool signaled = true; /* assume that the semaphore will be signaled */
if (me->count > 0U) {
--me->count; /* semaphore taken: decrement the count */
QS_BEGIN_NOCRIT_PRE_(QS_SEM_TAKE, curr->super.prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this semaphore */
QS_2U8_PRE_(curr->super.prio,
me->count);
QS_END_NOCRIT_PRE_()
}
else {
uint_fast8_t const p = (uint_fast8_t)curr->super.prio;
/* remove the curr prio from the ready set (will block)
* and insert to the waiting set on this semaphore
*/
QPSet_remove(&QF_readySet_, p);
QPSet_insert(&me->waitSet, p);
/* remember the blocking object (this semaphore) */
curr->super.super.temp.obj = QXK_PTR_CAST_(QMState*, me);
QXThread_teArm_(curr, (enum_t)QXK_TIMEOUT_SIG, nTicks);
QS_BEGIN_NOCRIT_PRE_(QS_SEM_BLOCK, curr->super.prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this semaphore */
QS_2U8_PRE_(curr->super.prio,
me->count);
QS_END_NOCRIT_PRE_()
/* schedule the next thread if multitasking started */
(void)QXK_sched_(); /* schedule other threads */
QF_CRIT_X_();
QF_CRIT_EXIT_NOP(); /* BLOCK here !!! */
QF_CRIT_E_(); /* AFTER unblocking... */
/* the blocking object must be this semaphore */
Q_ASSERT_ID(240, curr->super.super.temp.obj
== QXK_PTR_CAST_(QMState*, me));
/* did the blocking time-out? (signal of zero means that it did) */
if (curr->timeEvt.super.sig == 0U) {
if (QPSet_hasElement(&me->waitSet, p)) { /* still waiting? */
QPSet_remove(&me->waitSet, p); /* remove unblocked thread */
signaled = false; /* the semaphore was NOT signaled */
/* semaphore NOT taken: do NOT decrement the count */
}
else { /* semaphore was both signaled and timed out */
--me->count; /* semaphore taken: decrement the count */
}
}
else { /* blocking did NOT time out */
/* the thread must NOT be waiting on this semaphore */
Q_ASSERT_ID(250,!QPSet_hasElement(&me->waitSet, p));
--me->count; /* semaphore taken: decrement the count */
}
curr->super.super.temp.obj = (QMState *)0; /* clear blocking obj. */
}
QF_CRIT_X_();
return signaled;
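/* Illustrative usage sketch (hypothetical application code, NOT part of
 * the QP/C framework): an extended-thread handler blocking on the
 * semaphore with a timeout. The names `l_rxSema` and `rx_thread` are
 * hypothetical.
 */
#include "qpc.h"

extern QXSemaphore l_rxSema; /* hypothetical, initialized elsewhere */

static void rx_thread(QXThread * const me) { /* QXThreadHandler signature */
    (void)me;
    for (;;) {
        /* block for up to 100 clock ticks at the associated tick rate */
        if (QXSemaphore_wait(&l_rxSema, 100U)) {
            /* the semaphore was signaled: process the received data... */
        }
        else {
            /* the timeout expired before the semaphore was signaled */
        }
    }
}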
/*! try wait on the semaphore (non-blocking)
* @public @memberof QXSemaphore
*
* @details
* This function checks if the semaphore counter is greater than 0,
* in which case the counter is decremented.
*
* @param[in,out] me current instance pointer (see @ref oop)
*
* @returns
* 'true' if the semaphore count was available and 'false' if it was NOT available.
*
* @note
* This function can be called from any context, including ISRs and basic
* threads (active objects).
*/
QF_CRIT_STAT_
QF_CRIT_E_();
/*! @pre the semaphore must be initialized */
Q_REQUIRE_ID(300, me->max_count > 0U);
#ifdef Q_SPY
/* volatile into temp. */
QActive const * const curr = QXK_PTR_CAST_(QActive*, QXK_attr_.curr);
#endif /* Q_SPY */
bool isAvailable;
/* is the semaphore available? */
if (me->count > 0U) {
--me->count;
isAvailable = true;
QS_BEGIN_NOCRIT_PRE_(QS_SEM_TAKE, curr->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this semaphore */
QS_2U8_PRE_(curr->prio,
me->count);
QS_END_NOCRIT_PRE_()
}
else { /* the semaphore is NOT available (would block) */
isAvailable = false;
QS_BEGIN_NOCRIT_PRE_(QS_SEM_BLOCK_ATTEMPT, curr->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this semaphore */
QS_2U8_PRE_(curr->prio,
me->count);
QS_END_NOCRIT_PRE_()
}
QF_CRIT_X_();
return isAvailable;
/*! signal (unblock) the semaphore
* @public @memberof QXSemaphore
*
* @details
* If the semaphore counter is below its maximum value, it is incremented,
* and this function returns to its caller. If any extended threads are waiting
* for the semaphore to be signaled, QXSemaphore_signal() removes the highest-
* priority thread waiting for the semaphore from the waiting list and makes
* this thread ready-to-run. The QXK scheduler is then called to determine if
* the awakened thread is now the highest-priority thread that is ready-to-run.
*
* @param[in,out] me current instance pointer (see @ref oop)
*
* @returns
* 'true' when the semaphore was signaled and 'false' when the semaphore
* count had already reached its maximum.
*
* @note
* A semaphore can be signaled from many places, including from ISRs, basic
* threads (AOs), and extended threads.
*/
/*! @pre the semaphore must be initialized */
Q_REQUIRE_ID(400, me->max_count > 0U);
QF_CRIT_STAT_
QF_CRIT_E_();
bool signaled = true; /* assume that the semaphore will be signaled */
if (me->count < me->max_count) {
++me->count; /* increment the semaphore count */
#ifdef Q_SPY
/* volatile into temp. */
QActive const * const curr = QXK_PTR_CAST_(QActive*, QXK_attr_.curr);
#endif /* Q_SPY */
QS_BEGIN_NOCRIT_PRE_(QS_SEM_SIGNAL, curr->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this semaphore */
QS_2U8_PRE_(curr->prio,
me->count);
QS_END_NOCRIT_PRE_()
if (QPSet_notEmpty(&me->waitSet)) {
/* find the highest-priority thread waiting on this semaphore */
uint_fast8_t const p = QPSet_findMax(&me->waitSet);
QXThread * const thr =
QXK_PTR_CAST_(QXThread*, QActive_registry_[p]);
/* assert that the thread:
* - must be registered in QF;
* - must be extended; and
* - must be blocked on this semaphore;
*/
Q_ASSERT_ID(410, (thr != (QXThread *)0)
&& (thr->super.osObject != (struct QActive *)0)
&& (thr->super.super.temp.obj
== QXK_PTR_CAST_(QMState*, me)));
/* disarm the internal time event */
(void)QXThread_teDisarm_(thr);
/* make the thread ready to run and remove from the wait-list */
QPSet_insert(&QF_readySet_, p);
QPSet_remove(&me->waitSet, p);
QS_BEGIN_NOCRIT_PRE_(QS_SEM_TAKE, thr->super.prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this semaphore */
QS_2U8_PRE_(thr->super.prio,
me->count);
QS_END_NOCRIT_PRE_()
if (!QXK_ISR_CONTEXT_()) { /* not inside ISR? */
(void)QXK_sched_(); /* schedule other threads */
}
}
}
else {
signaled = false; /* semaphore NOT signaled */
}
QF_CRIT_X_();
return signaled;
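/* Illustrative usage sketch (hypothetical application code, NOT part of
 * the QP/C framework): signaling the semaphore from an ISR, which is one
 * of the allowed contexts. The ISR name and the port-specific interrupt
 * entry/exit are hypothetical and omitted.
 */
#include "qpc.h"

extern QXSemaphore l_rxSema; /* hypothetical, initialized elsewhere */

void UART_rx_isr(void) {
    /* make the highest-priority waiting thread ready-to-run;
     * returns 'false' if the count has already reached its maximum
     */
    (void)QXSemaphore_signal(&l_rxSema);
}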
/*! @brief Blocking Mutex the QXK preemptive kernel
* @class QXMutex
*
* @details
* ::QXMutex is a blocking mutual exclusion mechanism that can also apply
* the **priority-ceiling protocol** to avoid unbounded priority inversion
* (if initialized with a non-zero ceiling priority, see QXMutex_init()).
* In that case, ::QXMutex requires its own unique QP priority level, which
* cannot be used by any thread or any other ::QXMutex.
* If initialized with preemption-ceiling of zero, ::QXMutex does **not**
* use the priority-ceiling protocol and does not require a unique QP
* priority (see QXMutex_init()).
* ::QXMutex is **recursive** (re-entrant), which means that it can be locked
* multiple times (up to 255 levels) by the *same* thread without causing
* deadlock.
* ::QXMutex is primarily intended for the @ref ::QXThread
* "extened (blocking) threads", but can also be used by the @ref ::QActive
* "basic threads" through the non-blocking QXMutex_tryLock() API.
*
* @note
* ::QXMutex should be used in situations when at least one of the extended
* threads contending for the mutex blocks while holding the mutex (between
* the QXMutex_lock() and QXMutex_unlock() operations). If no blocking is
* needed while holding the mutex, the more efficient non-blocking mechanism
* of @ref srs_qxk_schedLock() "selective QXK scheduler locking" should be used
* instead. @ref srs_qxk_schedLock() "Selective scheduler locking" is available
* for both @ref ::QActive "basic threads" and @ref ::QXThread "extended
* threads", so it is applicable to situations where resources are shared
* among all these threads.
*
* @usage
* The following example illustrates how to instantiate and use the mutex
* in your application.
* @include qxk_mutex.c
*/
/*! active object used as a placeholder AO for this mutex
* in QActive_registry_[]
*/
/*! set of extended-threads waiting on this mutex */
/*! initialize the QXK priority-ceiling mutex ::QXMutex
* @public @memberof QXMutex
*
* @details
* Initialize the QXK priority ceiling mutex.
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] prioSpec the priority specification for the mutex
* (See also ::QPrioSpec). This value might
* also be zero.
* @note
* `prioSpec == 0` means that the priority-ceiling protocol shall **not**
* be used by this mutex. Such a mutex will **not** change (boost) the
* priority of the holding threads.<br>
*
* Conversely, `prioSpec != 0` means that the priority-ceiling protocol
* shall be used by this mutex. Such a mutex **will** temporarily boost
* the priority and priority-threshold of the holding thread to the
* priority specification in `prioSpec` (see ::QPrioSpec).
*
* @usage
* @include qxk_mutex.c
*/
/*! @pre preemption-threshold must not be used */
Q_REQUIRE_ID(100, (prioSpec & 0xFF00U) == 0U);
me->ao.prio = (uint8_t)(prioSpec & 0xFFU); /* QF-priority */
me->ao.pthre = 0U; /* preemption-threshold (not used) */
if (prioSpec != 0U) { /* priority-ceiling protocol used? */
QActive_register_(&me->ao); /* register this mutex as AO */
}
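/* Illustrative usage sketch (hypothetical application code, NOT part of
 * the QP/C framework): initializing one mutex with the priority-ceiling
 * protocol and one without. The ceiling priority 6 is hypothetical and
 * must be a QP priority level not used by any thread or other mutex.
 */
#include "qpc.h"

static QXMutex l_i2cMutex; /* hypothetical mutex instances */
static QXMutex l_logMutex;

void app_mutex_setup(void) {
    QXMutex_init(&l_i2cMutex, 6U); /* priority-ceiling protocol, ceiling 6 */
    QXMutex_init(&l_logMutex, 0U); /* no ceiling: holder prio not boosted */
}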
/*! lock the QXK priority-ceiling mutex ::QXMutex
* @public @memberof QXMutex
*
* @param[in,out] me current instance pointer (see @ref oop)
* @param[in] nTicks number of clock ticks (at the associated rate)
* to wait for the mutex. The value of
* ::QXTHREAD_NO_TIMEOUT indicates that no timeout will
* occur and the mutex could block indefinitely.
* @returns
* 'true' if the mutex has been acquired and 'false' if a timeout occurred.
*
* @note
* The mutex locks are allowed to nest, meaning that the same extended thread
* can lock the same mutex multiple times (<= 255). However, each call to
* QXMutex_lock() must be balanced by the matching call to QXMutex_unlock().
*
* @usage
* @include qxk_mutex.c
*/
QF_CRIT_STAT_
QF_CRIT_E_();
QXThread * const curr = QXK_PTR_CAST_(QXThread*, QXK_attr_.curr);
/*! @pre this function must:
* - NOT be called from an ISR;
* - be called from an extended thread;
* - the mutex-priority must be in range
* - the thread must NOT be already blocked on any object.
*/
Q_REQUIRE_ID(200, (!QXK_ISR_CONTEXT_()) /* don't call from an ISR! */
&& (curr != (QXThread *)0) /* current thread must be extended */
&& (me->ao.prio <= QF_MAX_ACTIVE)
&& (curr->super.super.temp.obj == (QMState *)0)); /* not blocked */
/*! @pre also: the thread must NOT be holding a scheduler lock. */
Q_REQUIRE_ID(201, QXK_attr_.lockHolder != curr->super.prio);
/* is the mutex available? */
bool locked = true; /* assume that the mutex will be locked */
if (me->ao.eQueue.nFree == 0U) {
me->ao.eQueue.nFree = 1U; /* mutex lock nesting */
/*! @pre also: the newly locked mutex must have no holder yet */
Q_REQUIRE_ID(202, me->ao.thread == (void *)0);
/* set the new mutex holder to the curr thread and
* save the thread's prio in the mutex
* NOTE: reuse the otherwise unused eQueue data member.
*/
me->ao.thread = curr;
me->ao.eQueue.head = (QEQueueCtr)curr->super.prio;
QS_BEGIN_NOCRIT_PRE_(QS_MTX_LOCK, curr->super.prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this mutex */
QS_U8_PRE_((uint8_t)me->ao.eQueue.head); /* holder prio */
QS_U8_PRE_((uint8_t)me->ao.eQueue.nFree); /* nesting */
QS_END_NOCRIT_PRE_()
if (me->ao.prio != 0U) { /* priority-ceiling protocol used? */
/* the holder priority must be lower than that of the mutex
* and the priority slot must be occupied by this mutex
*/
Q_ASSERT_ID(210, (curr->super.prio < me->ao.prio)
&& (QActive_registry_[me->ao.prio] == &me->ao));
/* remove the thread's original prio from the ready set
* and insert the mutex's prio into the ready set
*/
QPSet_remove(&QF_readySet_, (uint_fast8_t)me->ao.eQueue.head);
QPSet_insert(&QF_readySet_, (uint_fast8_t)me->ao.prio);
/* put the thread into the AO registry in place of the mutex */
QActive_registry_[me->ao.prio] = &curr->super;
/* set thread's prio to that of the mutex */
curr->super.prio = me->ao.prio;
}
}
/* is the mutex locked by this thread already (nested locking)? */
else if (me->ao.thread == &curr->super) {
/* a nesting level beyond this arbitrary but high limit
 * most likely means cyclic or recursive locking of a mutex.
*/
Q_ASSERT_ID(220, me->ao.eQueue.nFree < 0xFFU);
++me->ao.eQueue.nFree; /* lock one more level */
QS_BEGIN_NOCRIT_PRE_(QS_MTX_LOCK, curr->super.prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this mutex */
QS_U8_PRE_((uint8_t)me->ao.eQueue.head); /* holder prio */
QS_U8_PRE_((uint8_t)me->ao.eQueue.nFree); /* nesting */
QS_END_NOCRIT_PRE_()
}
else { /* the mutex is already locked by a different thread */
/* the mutex holder must be valid */
Q_ASSERT_ID(230, me->ao.thread != (void *)0);
if (me->ao.prio != 0U) { /* priority-ceiling protocol used? */
/* the prio slot must be occupied by the thr. holding the mutex */
Q_ASSERT_ID(240, QActive_registry_[me->ao.prio]
== QXK_PTR_CAST_(QActive *, me->ao.thread));
}
/* remove the curr thread's prio from the ready set (will block)
* and insert it to the waiting set on this mutex
*/
uint_fast8_t const p = (uint_fast8_t)curr->super.prio;
QPSet_remove(&QF_readySet_, p);
QPSet_insert(&me->waitSet, p);
/* set the blocking object (this mutex) */
curr->super.super.temp.obj = QXK_PTR_CAST_(QMState*, me);
QXThread_teArm_(curr, (enum_t)QXK_TIMEOUT_SIG, nTicks);
QS_BEGIN_NOCRIT_PRE_(QS_MTX_BLOCK, curr->super.prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this mutex */
QS_2U8_PRE_((uint8_t)me->ao.eQueue.head, /* holder prio */
curr->super.prio); /* blocked thread prio */
QS_END_NOCRIT_PRE_()
/* schedule the next thread if multitasking started */
(void)QXK_sched_(); /* schedule other threads */
QF_CRIT_X_();
QF_CRIT_EXIT_NOP(); /* BLOCK here !!! */
/* AFTER unblocking... */
QF_CRIT_E_();
/* the blocking object must be this mutex */
Q_ASSERT_ID(240, curr->super.super.temp.obj
== QXK_PTR_CAST_(QMState*, me));
/* did the blocking time-out? (signal of zero means that it did) */
if (curr->timeEvt.super.sig == 0U) {
if (QPSet_hasElement(&me->waitSet, p)) { /* still waiting? */
QPSet_remove(&me->waitSet, p); /* remove unblocked thread */
locked = false; /* the mutex was NOT locked */
}
}
else { /* blocking did NOT time out */
/* the thread must NOT be waiting on this mutex */
Q_ASSERT_ID(250, !QPSet_hasElement(&me->waitSet, p));
}
curr->super.super.temp.obj = (QMState *)0; /* clear blocking obj. */
}
QF_CRIT_X_();
return locked;
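/* Illustrative usage sketch (hypothetical application code, NOT part of
 * the QP/C framework): guarding a shared resource from an extended thread
 * with a timed QXMutex_lock()/QXMutex_unlock() pair. The function and
 * mutex names are hypothetical; the caller is assumed to be an extended
 * thread, as required by the preconditions above.
 */
#include "qpc.h"

extern QXMutex l_i2cMutex; /* hypothetical, initialized elsewhere */

void app_i2c_transfer(void) { /* called from an extended thread */
    /* block for up to 10 clock ticks waiting for the mutex */
    if (QXMutex_lock(&l_i2cMutex, 10U)) {
        /* ... exclusive access to the shared I2C peripheral ... */
        QXMutex_unlock(&l_i2cMutex); /* balance every successful lock */
    }
    else {
        /* timeout: the mutex was NOT acquired */
    }
}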
/*! try to lock the QXK priority-ceiling mutex ::QXMutex
* @public @memberof QXMutex
*
* @param[in,out] me current instance pointer (see @ref oop)
*
* @returns
* 'true' if the mutex was successfully locked and 'false' if the mutex was
* unavailable and was NOT locked.
*
* @note
* This function **can** be called from both basic threads (active objects)
* and extended threads.
*
* @note
* The mutex locks are allowed to nest, meaning that the same extended thread
* can lock the same mutex multiple times (<= 255). However, each successful
* call to QXMutex_tryLock() must be balanced by the matching call to
* QXMutex_unlock().
*/
QF_CRIT_STAT_
QF_CRIT_E_();
QActive *curr = QXK_attr_.curr;
if (curr == (QActive *)0) { /* called from a basic thread? */
curr = QActive_registry_[QXK_attr_.actPrio];
}
/*! @pre this function must:
* - NOT be called from an ISR;
* - the calling thread must be valid;
* - the mutex-priority must be in range
*/
Q_REQUIRE_ID(300, (!QXK_ISR_CONTEXT_()) /* don't call from an ISR! */
&& (curr != (QActive *)0) /* current thread must be valid */
&& (me->ao.prio <= QF_MAX_ACTIVE));
/*! @pre also: the thread must NOT be holding a scheduler lock. */
Q_REQUIRE_ID(301, QXK_attr_.lockHolder != curr->prio);
/* is the mutex available? */
if (me->ao.eQueue.nFree == 0U) {
me->ao.eQueue.nFree = 1U; /* mutex lock nesting */
/*! @pre also: the newly locked mutex must have no holder yet */
Q_REQUIRE_ID(302, me->ao.thread == (void *)0);
/* set the new mutex holder to the curr thread and
* save the thread's prio in the mutex
* NOTE: reuse the otherwise unused eQueue data member.
*/
me->ao.thread = curr;
me->ao.eQueue.head = (QEQueueCtr)curr->prio;
QS_BEGIN_NOCRIT_PRE_(QS_MTX_LOCK, curr->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this mutex */
QS_U8_PRE_((uint8_t)me->ao.eQueue.head); /* holder prio */
QS_U8_PRE_((uint8_t)me->ao.eQueue.nFree); /* nesting */
QS_END_NOCRIT_PRE_()
if (me->ao.prio != 0U) { /* priority-ceiling protocol used? */
/* the holder priority must be lower than that of the mutex
* and the priority slot must be occupied by this mutex
*/
Q_ASSERT_ID(210, (curr->prio < me->ao.prio)
&& (QActive_registry_[me->ao.prio] == &me->ao));
/* remove the thread's original prio from the ready set
* and insert the mutex's prio into the ready set
*/
QPSet_remove(&QF_readySet_, (uint_fast8_t)me->ao.eQueue.head);
QPSet_insert(&QF_readySet_, (uint_fast8_t)me->ao.prio);
/* put the thread into the AO registry in place of the mutex */
QActive_registry_[me->ao.prio] = curr;
/* set thread's prio to that of the mutex */
curr->prio = me->ao.prio;
}
}
/* is the mutex locked by this thread already (nested locking)? */
else if (me->ao.thread == curr) {
/* the nesting level must not exceed the specified limit */
Q_ASSERT_ID(320, me->ao.eQueue.nFree < 0xFFU);
++me->ao.eQueue.nFree; /* lock one more level */
QS_BEGIN_NOCRIT_PRE_(QS_MTX_LOCK, curr->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this mutex */
QS_U8_PRE_((uint8_t)me->ao.eQueue.head); /* holder prio */
QS_U8_PRE_((uint8_t)me->ao.eQueue.nFree); /* nesting */
QS_END_NOCRIT_PRE_()
}
else { /* the mutex is already locked by a different thread */
if (me->ao.prio != 0U) { /* priority-ceiling protocol used? */
/* the prio slot must be occupied by the thr. holding the mutex */
Q_ASSERT_ID(340, QActive_registry_[me->ao.prio]
== QXK_PTR_CAST_(QActive *, me->ao.thread));
}
QS_BEGIN_NOCRIT_PRE_(QS_MTX_BLOCK_ATTEMPT, curr->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this mutex */
QS_2U8_PRE_((uint8_t)me->ao.eQueue.head, /* holder prio */
curr->prio); /* trying thread prio */
QS_END_NOCRIT_PRE_()
curr = (QActive *)0; /* means that mutex is NOT available */
}
QF_CRIT_X_();
return curr != (QActive *)0;
/*! unlock the QXK priority-ceiling mutex ::QXMutex
* @public @memberof QXMutex
*
* @param[in,out] me current instance pointer (see @ref oop)
*
* @note
* This function **can** be called from both basic threads (active objects)
* and extended threads.
*
* @note
* The mutex locks are allowed to nest, meaning that the same extended thread
* can lock the same mutex multiple times (<= 255). However, each call to
* QXMutex_lock() or a *successful* call to QXMutex_tryLock() must be
* balanced by the matching call to QXMutex_unlock().
*
* @usage
* @include qxk_mutex.c
*/
QF_CRIT_STAT_
QF_CRIT_E_();
QActive *curr = QXK_attr_.curr;
if (curr == (QActive *)0) { /* called from a basic thread? */
curr = QActive_registry_[QXK_attr_.actPrio];
}
/*! @pre this function must:
* - NOT be called from an ISR;
* - the calling thread must be valid;
*/
Q_REQUIRE_ID(400, (!QXK_ISR_CONTEXT_()) /* don't call from an ISR! */
&& (curr != (QActive *)0)); /* current thread must be valid */
/*! @pre also: the mutex must be already locked at least once. */
Q_REQUIRE_ID(401, me->ao.eQueue.nFree > 0U);
/*! @pre also: the mutex must be held by this thread. */
Q_REQUIRE_ID(402, me->ao.thread == curr);
/* is this the last nesting level? */
if (me->ao.eQueue.nFree == 1U) {
if (me->ao.prio != 0U) { /* priority-ceiling protocol used? */
/* restore the holding thread's prio from the mutex */
curr->prio = (uint8_t)me->ao.eQueue.head;
/* put the mutex back into the AO registry */
QActive_registry_[me->ao.prio] = &me->ao;
/* remove the mutex' prio from the ready set
* and insert the original thread's priority
*/
QPSet_remove(&QF_readySet_, (uint_fast8_t)me->ao.prio);
QPSet_insert(&QF_readySet_, (uint_fast8_t)me->ao.eQueue.head);
}
QS_BEGIN_NOCRIT_PRE_(QS_MTX_UNLOCK, curr->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this mutex */
QS_2U8_PRE_((uint8_t)me->ao.eQueue.head, /* holder prio */
0U); /* nesting */
QS_END_NOCRIT_PRE_()
/* are any other threads waiting on this mutex? */
if (QPSet_notEmpty(&me->waitSet)) {
/* find the highest-priority thread waiting on this mutex */
uint_fast8_t const p = QPSet_findMax(&me->waitSet);
/* remove this thread from waiting on the mutex
* and insert it into the ready set.
*/
QPSet_remove(&me->waitSet, p);
QPSet_insert(&QF_readySet_, p);
QXThread * const thr =
QXK_PTR_CAST_(QXThread*, QActive_registry_[p]);
/* the waiting thread must:
* - be registered in QF
* - have the priority corresponding to the registration
* - be an extended thread
* - be blocked on this mutex
*/
Q_ASSERT_ID(410, (thr != (QXThread *)0)
&& (thr->super.prio == (uint8_t)p)
&& (thr->super.super.state.act == Q_ACTION_CAST(0))
&& (thr->super.super.temp.obj
== QXK_PTR_CAST_(QMState*, me)));
/* disarm the internal time event */
(void)QXThread_teDisarm_(thr);
/* set the new mutex holder to the curr thread and
* save the thread's prio in the mutex
* NOTE: reuse the otherwise unused eQueue data member.
*/
me->ao.thread = thr;
me->ao.eQueue.head = (QEQueueCtr)thr->super.prio;
QS_BEGIN_NOCRIT_PRE_(QS_MTX_LOCK, thr->super.prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this mutex */
QS_U8_PRE_((uint8_t)me->ao.eQueue.head); /* holder prio */
QS_U8_PRE_((uint8_t)me->ao.eQueue.nFree); /* nesting */
QS_END_NOCRIT_PRE_()
if (me->ao.prio != 0U) { /* priority-ceiling protocol used? */
/* the holder priority must be lower than that of the mutex */
Q_ASSERT_ID(410, thr->super.prio < me->ao.prio);
/* put the thread into AO registry in place of the mutex */
QActive_registry_[me->ao.prio] = &thr->super;
}
}
else { /* no threads are waiting for this mutex */
me->ao.eQueue.nFree = 0U; /* free up the nesting count */
/* the mutex no longer held by any thread */
me->ao.thread = (void *)0;
me->ao.eQueue.head = 0U;
if (me->ao.prio != 0U) { /* priority-ceiling protocol used? */
/* put the mutex back at the original mutex slot */
QActive_registry_[me->ao.prio] =
QXK_PTR_CAST_(QActive*, me);
}
}
/* schedule the next thread if multitasking started */
if (QXK_sched_() != 0U) { /* activation needed? */
QXK_activate_(); /* synchronously activate basic-thread(s) */
}
}
else { /* releasing one level of nested mutex lock */
Q_ASSERT_ID(420, me->ao.eQueue.nFree > 0U);
--me->ao.eQueue.nFree; /* unlock one level */
QS_BEGIN_NOCRIT_PRE_(QS_MTX_UNLOCK_ATTEMPT, curr->prio)
QS_TIME_PRE_(); /* timestamp */
QS_OBJ_PRE_(me); /* this mutex */
QS_U8_PRE_((uint8_t)me->ao.eQueue.head); /* holder prio */
QS_U8_PRE_((uint8_t)me->ao.eQueue.nFree); /* nesting */
QS_END_NOCRIT_PRE_()
}
QF_CRIT_X_();
/*! @brief The QXK kernel class
* @class QXK
*
* @note
* The order and alignment of the data members in this struct might
* be important in QXK ports, where the members might be accessed
* in assembly.
*/
{
struct QActive * volatile curr; /*!< current thread (NULL=basic) */
struct QActive * volatile next; /*!< next thread to run */
struct QActive * volatile prev; /*!< previous thread */
uint8_t volatile actPrio; /*!< QF-prio of the active AO */
uint8_t volatile lockCeil; /*!< lock-ceiling (0==no-lock) */
uint8_t volatile lockHolder; /*!< prio of the lock holder */
} QXK;
/*! attributes of the QXK kernel */
/*! QXK scheduler finds the highest-priority thread ready to run
* @static @private @memberof QXK
*
* @details
* The QXK scheduler finds the priority of the highest-priority thread
* that is ready to run.
*
* @returns
* the 1-based priority of the thread (basic or extended) to run next,
* or zero if no eligible thread is found.
*
* @attention
* QXK_sched_() must always be called with interrupts **disabled** and
* returns with interrupts **disabled**.
*/
uint_fast8_t p;
if (QPSet_isEmpty(&QF_readySet_)) {
p = 0U; /* no activation needed */
}
else {
/* find the highest-prio thread ready to run */
p = QPSet_findMax(&QF_readySet_);
if (p <= QXK_attr_.lockCeil) {
/* priority of the thread holding the lock */
p = (uint_fast8_t)QActive_registry_[QXK_attr_.lockHolder]->prio;
if (p != 0U) {
Q_ASSERT_ID(610, QPSet_hasElement(&QF_readySet_, p));
}
}
}
QActive const * const curr = QXK_attr_.curr;
QActive * const next = QActive_registry_[p];
/* the next thread found must be registered in QF */
Q_ASSERT_ID(620, next != (QActive *)0);
/* is the current thread a basic-thread? */
if (curr == (QActive *)0) {
/* is the new priority above the active priority? */
if (p > QXK_attr_.actPrio) {
QXK_attr_.next = next; /* set the next AO to activate */
if (next->osObject != (void *)0) { /* is next extended? */
QXK_CONTEXT_SWITCH_();
p = 0U; /* no activation needed */
}
}
else { /* below the pre-thre */
QXK_attr_.next = (QActive *)0;
p = 0U; /* no activation needed */
}
}
else { /* currently executing an extended-thread */
/* is the current thread different from the next? */
if (curr != next) {
QXK_attr_.next = next;
QXK_CONTEXT_SWITCH_();
}
else { /* current is the same as next */
QXK_attr_.next = (QActive *)0; /* no need to context-switch */
}
p = 0U; /* no activation needed */
}
return p;
/*! QXK activator activates the next active object. The activated AO
* preempts the currently executing AOs.
* @static @private @memberof QXK
*
* @details
* QXK_activate_() activates ready-to-run AOs that are above the initial
* active priority (QXK_attr_.actPrio).
*
* @attention
* QXK_activate_() must always be called with interrupts **disabled** and
* returns with interrupts **disabled**.
*/
uint8_t const prio_in = QXK_attr_.actPrio;
QActive *next = QXK_attr_.next; /* the next AO (basic-thread) to run */
/*! @pre QXK_attr_.next must be valid and the prio must be in range */
Q_REQUIRE_ID(700, (next != (QActive *)0) && (prio_in <= QF_MAX_ACTIVE));
/* QXK Context switch callback defined or QS tracing enabled? */
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
QXK_contextSw(next);
#endif /* QF_ON_CONTEXT_SW || Q_SPY */
QXK_attr_.next = (QActive *)0; /* clear the next AO */
QXK_attr_.curr = (QActive *)0; /* current is basic-thread */
/* priority of the next thread */
uint8_t p = next->prio;
/* loop until no more ready-to-run AOs of higher prio than the initial */
do {
QXK_attr_.actPrio = p; /* next active prio */
QF_INT_ENABLE(); /* unconditionally enable interrupts */
/* perform the run-to-completion (RTC) step...
* 1. retrieve the event from the AO's event queue, which by this
* time must be non-empty and QActive_get_() asserts it.
* 2. dispatch the event to the AO's state machine.
* 3. determine if event is garbage and collect it if so
*/
QEvt const * const e = QActive_get_(next);
QHSM_DISPATCH(&next->super, e, next->prio);
#if (QF_MAX_EPOOL > 0U)
QF_gc(e);
#endif
QF_INT_DISABLE(); /* unconditionally disable interrupts */
if (next->eQueue.frontEvt == (QEvt *)0) { /* empty queue? */
QPSet_remove(&QF_readySet_, p);
}
if (QPSet_isEmpty(&QF_readySet_)) {
QXK_attr_.next = (QActive *)0;
next = QActive_registry_[0];
p = 0U; /* no activation needed */
}
else {
/* find next highest-prio below the lock ceiling */
p = (uint8_t)QPSet_findMax(&QF_readySet_);
if (p <= QXK_attr_.lockCeil) {
p = QXK_attr_.lockHolder; /* thread holding lock */
if (p != 0U) {
Q_ASSERT_ID(710, QPSet_hasElement(&QF_readySet_, p));
}
}
/* set the next thread and ensure that it is registered */
next = QActive_registry_[p];
Q_ASSERT_ID(720, next != (QActive *)0);
/* is next a basic thread? */
if (next->osObject == (void *)0) {
/* is the next priority above the initial priority? */
if (p > QActive_registry_[prio_in]->prio) {
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
if (p != QXK_attr_.actPrio) { /* changing threads? */
QXK_contextSw(next);
}
#endif /* QF_ON_CONTEXT_SW || Q_SPY */
QXK_attr_.next = next;
}
else {
QXK_attr_.next = (QActive *)0;
p = 0U; /* no activation needed */
}
}
else { /* next is the extended-thread */
QXK_attr_.next = next;
QXK_CONTEXT_SWITCH_();
p = 0U; /* no activation needed */
}
}
} while (p != 0U); /* while activation needed */
/* restore the active priority */
QXK_attr_.actPrio = prio_in;
#if (defined QF_ON_CONTEXT_SW) || (defined Q_SPY)
if (next->osObject == (void *)0) {
QXK_contextSw((prio_in == 0U)
? (QActive *)0
: QActive_registry_[prio_in]);
}
#endif /* QF_ON_CONTEXT_SW || Q_SPY */
/*! obtain the currently executing active-object/thread
* @static @public @memberof QXK
*
* @returns
* pointer to the currently executing active-object/thread
*/
/*! @pre the QXK kernel must be running */
Q_REQUIRE_ID(800, QXK_attr_.lockCeil <= QF_MAX_ACTIVE);
QF_CRIT_STAT_
QF_CRIT_E_();
struct QActive *curr = QXK_attr_.curr;
if (curr == (QActive *)0) { /* basic thread? */
curr = QActive_registry_[QXK_attr_.actPrio];
}
QF_CRIT_X_();
/*! @post the current thread must be valid */
Q_ENSURE_ID(890, curr != (QActive *)0);
return curr;
/*! initialize the private stack of a given AO (defined in QXK port) */
/*! QXK context switch management
* @static @public @memberof QXK
*
* @details
* This function performs software tracing (if #Q_SPY is defined)
* and calls QF_onContextSw() (if #QF_ON_CONTEXT_SW is defined)
*
* @param[in] next pointer to the next thread (NULL when switching to the idle loop)
*
* @attention
* QXK_contextSw() is invoked with interrupts **disabled** and must also
* return with interrupts **disabled**.
*/
#ifdef Q_SPY
uint8_t const prev_prio = (QXK_attr_.prev != (QActive *)0)
? QXK_attr_.prev->prio
: 0U;
#endif /* Q_SPY */
if (next != (QActive *)0) { /* next is NOT idle? */
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_NEXT, next->prio)
QS_TIME_PRE_(); /* timestamp */
QS_2U8_PRE_(next->prio, prev_prio);
QS_END_NOCRIT_PRE_()
}
else { /* going to idle */
QS_BEGIN_NOCRIT_PRE_(QS_SCHED_IDLE, prev_prio)
QS_TIME_PRE_(); /* timestamp */
QS_U8_PRE_(prev_prio);
QS_END_NOCRIT_PRE_()
}
#ifdef QF_ON_CONTEXT_SW
QF_onContextSw(QXK_attr_.prev, next);
#endif /* QF_ON_CONTEXT_SW */
QXK_attr_.prev = next; /* update the previous thread */
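/* Illustrative sketch (hypothetical application code, NOT part of the
 * QP/C framework): a minimal QF_onContextSw() callback, which QXK invokes
 * from QXK_contextSw() when QF_ON_CONTEXT_SW is defined. It runs with
 * interrupts disabled, so it should be very short. The debug-pin helpers
 * are hypothetical placeholders.
 */
#ifdef QF_ON_CONTEXT_SW
void QF_onContextSw(QActive *prev, QActive *next) {
    (void)prev;
    if (next != (QActive *)0) { /* switching to a thread? */
        /* BSP_debugPinOn();   hypothetical BSP helper */
    }
    else {                      /* switching to the idle loop */
        /* BSP_debugPinOff();  hypothetical BSP helper */
    }
}
#endif /* QF_ON_CONTEXT_SW */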
/*! Virtual call to start an extended thread
*
* @details
* Starts execution of the thread and registers the AO with the framework.
*
* @param[in,out] me_ current instance pointer (see @ref oop)
* @param[in] prioSpec_ priority specification at which to start the
* extended thread (see ::QPrioSpec)
* @param[in] qSto_ pointer to the storage for the ring buffer of the
* event queue (used only with the built-in ::QEQueue)
* @param[in] qLen_ length of the event queue (in events)
* @param[in] stkSto_ pointer to the stack storage (used only when
* per-AO stack is needed)
* @param[in] stkSize_ stack size (in bytes)
* @param[in] par_ pointer to the additional port-specific parameter(s)
* (might be NULL).
* @usage
* @include qxk_start.c
*/
\
do { \
Q_ASSERT((me_)->super.super.vptr); \
((*((QActiveVtable const *)((me_)->super.super.vptr))->start)( \
&(me_)->super, (prioSpec_), (QEvt const **)(qSto_), (qLen_), \
(stkSto_), (stkSize_), (par_))); \
} while (false)
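/* Illustrative usage sketch (hypothetical application code, NOT part of
 * the QP/C framework): constructing and starting an extended thread with
 * QXTHREAD_START(). All names, sizes, and the priority value are
 * hypothetical.
 */
#include "qpc.h"

static QXThread XT_worker;          /* hypothetical extended thread */
static QEvt const *workerQSto[8];   /* storage for the event queue */
static uint64_t workerStkSto[64];   /* storage for the private stack */

static void worker_thread(QXThread * const me) { /* thread handler */
    (void)me;
    for (;;) {
        (void)QXThread_delay(100U); /* hypothetical periodic work */
    }
}

void app_start_worker(void) {
    QXThread_ctor(&XT_worker, &worker_thread, 0U); /* tick rate 0 */
    QXTHREAD_START(&XT_worker,
                   3U,              /* QP priority (no preemption-threshold) */
                   workerQSto, Q_DIM(workerQSto),
                   workerStkSto, sizeof(workerStkSto),
                   (void *)0);      /* no extra initialization parameter */
}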
/*! No-timeout when blocking on semaphores, mutexes, and queues */
((uint_fast16_t)0)
/*! Asynchronous posting events to the event queue of an eXtended thread
* @details
* This macro does not assert if the queue overflows and cannot accept
* the event with the specified margin of free slots remaining.
*
* @param[in,out] me_ current instance pointer (see @ref oop)
* @param[in] e_ pointer to the event to post
* @param[in] margin_ the minimum free slots in the queue, which
* must still be available after posting the event.
* The special value #QF_NO_MARGIN causes an assertion
* failure in case the event cannot be posted.
* @param[in] sender_ pointer to the sender object (used in QS tracing)
*
* @returns
* 'true' if the posting succeeded, and 'false' if the posting failed due
* to insufficient margin of free slots available in the queue.
*
* @note
* The `sender_` parameter is actually only used when QS tracing is enabled
* (macro #Q_SPY is defined). When QS software tracing is disabled, the
* QXTHREAD_POST_X() macro does not pass the `sender_` parameter, so the
* overhead of passing this extra argument is entirely avoided.
*
* @note
* The pointer to the sender object is not necessarily a pointer to an
* active object. In fact, if QXTHREAD_POST_X() is called from an interrupt
* or other context, you can create a unique object just to unambiguously
* identify the sender of the event.
*
* @usage
* @include qf_postx.c
*/
\
QACTIVE_POST_X(&(me_)->super, (e_), (margin_), (sender_))
/*! Internal macro that reports the execution context (ISR vs. thread)
*
* @returns true if the code executes in the ISR context and false
* otherwise
*/
(QF_intNest_ != 0U)
/*! QXK scheduler lock status */
QSchedStatus lockStat_;
/*! QXK selective scheduler locking */
do { \
if (QXK_ISR_CONTEXT_()) { \
lockStat_ = 0xFFU; \
} else { \
lockStat_ = QXK_schedLock((ceil_)); \
} \
} while (false)
/*! QXK selective scheduler unlocking */
do { \
if (lockStat_ != 0xFFU) { \
QXK_schedUnlock(lockStat_); \
} \
} while (false)
/*! QXK native event queue waiting */
\
(Q_ASSERT_ID(110, (me_)->eQueue.frontEvt != (QEvt *)0))
/*! QXK native event queue signaling */
do { \
QPSet_insert(&QF_readySet_, (uint_fast8_t)(me_)->prio); \
if (!QXK_ISR_CONTEXT_()) { \
if (QXK_sched_() != 0U) { \
QXK_activate_(); \
} \
} \
} while (false)
<type_>
/*! internal macro to encapsulate casting of pointers for MISRA deviations
*
* @details
* This macro is specifically and exclusively used for casting pointers
* that are never de-referenced, but only used for internal bookkeeping and
* checking (via assertions) the correct operation of the QXK kernel.
* Such pointer casting is not compliant with MISRA-2012-Rule 11.3(req)
* as well as other messages (e.g., PC-Lint-Plus warning 826).
* Defining this specific macro for this purpose makes it possible to
* selectively disable the warnings for this particular case.
*/
((type_)(ptr_))
/*! internal macro to encapsulate casting of pointers for MISRA deviations
*
* @details
* This macro is specifically and exclusively used for downcasting pointers
* to QActive to pointers to QXThread in situations when it is known
* that such downcasting is correct. However, such pointer casting is not
* compliant with MISRA-2012-Rule 11.3(req) as well as other messages (e.g.,
* PC-Lint-Plus warning 826). Defining this specific macro for this purpose
* makes it possible to selectively disable the warnings for this particular case.
*/
((QXThread *)(ptr_))
/*! called when QXThread exits
* @private @memberof QXThread
*
* @details
* Called when the extended-thread handler function returns.
*
* @note
* Most thread handler functions are structured as endless loops that never
* return. But it is also possible to structure threads as one-shot functions
* that perform their job and return. In that case this function performs
* cleanup after the thread.
*/
QF_CRIT_STAT_
QF_CRIT_E_();
QXThread const * const thr = QXTHREAD_CAST_(QXK_attr_.curr);
/*! @pre this function must:
* - NOT be called from an ISR;
* - be called from an extended thread;
*/
Q_REQUIRE_ID(900, (!QXK_ISR_CONTEXT_()) /* can't be in the ISR context */
&& (thr != (QXThread *)0)); /* current thread must be extended */
/*! @pre also: the thread must NOT be holding a scheduler lock. */
Q_REQUIRE_ID(901, QXK_attr_.lockHolder != thr->super.prio);
uint_fast8_t const p = (uint_fast8_t)thr->super.prio;
/* remove this thread from the QF */
QActive_registry_[p] = (QActive *)0;
QPSet_remove(&QF_readySet_, p);
(void)QXK_sched_(); /* schedule other threads */
QF_CRIT_X_();
/*! @file
* @brief Customizable and memory-efficient Design by Contract (DbC)
* for embedded systems
*
* @note
* This header file can be used in C, C++, and mixed C/C++ programs.
*
* @note
* The preprocessor switch #Q_NASSERT disables checking assertions.
* However, it is generally __not__ advisable to disable assertions,
* __especially__ in the production code. Instead, the assertion handler
* Q_onAssert() should be very carefully designed and tested.
*/
#ifndef QP_INC_QASSERT_H_
#define QP_INC_QASSERT_H_
#ifdef __cplusplus
extern "C" {
#endif
$declare ${DbC}
#ifdef __cplusplus
}
#endif
#endif /* QP_INC_QASSERT_H_ */
/*! @file
* @brief QEP/C platform-independent public interface.
*
* @tr{RQP001} @tr{RQP101}
*/
#ifndef QP_INC_QEP_H_
#define QP_INC_QEP_H_
/*==========================================================================*/
/*! The current QP version as an unsigned number
*
* @details
* ::QP_VERSION is a decimal constant of the form XXYZ, where XX is a 1-digit or 2-digit
* major version number, Y is a 1-digit minor version number, and Z is
* a 1-digit release number.
*/
#define QP_VERSION 721U
/*! The current QP version as a zero terminated string literal.
*
* @details
* ::QP_VERSION_STR is of the form "XX.Y.Z", where XX is a 1-or 2-digit
* major version number, Y is a 1-digit minor version number, and Z is
* a 1-digit release number.
*/
#define QP_VERSION_STR "7.2.1"
/*! Encrypted current QP release (7.2.1) and date (2023-01-15) */
#define QP_RELEASE 0x76D739FEU
/*==========================================================================*/
$declare ${glob-types}
$declare ${QEP-config}
/*==========================================================================*/
$declare ${QEP}
/*==========================================================================*/
$declare ${QEP-macros}
#endif /* QP_INC_QEP_H_ */
/*! @file
* @brief QF/C platform-independent public interface.
*/
#ifndef QP_INC_QF_H_
#define QP_INC_QF_H_
/*==========================================================================*/
$declare ${QF-config}
/*==========================================================================*/
$declare ${QF-types}
$declare ${QF::QActive}
$declare ${QF::QActiveVtable}
$declare ${QF::QMActive}
$declare ${QF::QMActiveVtable}
$declare ${QF::QTimeEvt}
$declare ${QF::QTicker}
$declare ${QF::QF-base}
$declare ${QF::QF-dyn}
$declare ${QF::QF-extern-C}
/*==========================================================================*/
$declare ${QF-macros}
#endif /* QP_INC_QF_H_ */
/*! @file
* @brief Internal (package scope) QF/C interface.
*/
#ifndef QP_INC_QF_PKG_H_
#define QP_INC_QF_PKG_H_
/*==========================================================================*/
$declare ${QF::QF-pkg}
/*==========================================================================*/
/* QF-specific critical section */
#ifndef QF_CRIT_STAT_TYPE
/*! This is an internal macro for defining the critical section
* status type. */
/**
* @details
* The purpose of this macro is to enable writing the same code for the
* case when critical section status type is defined and when it is not.
* If the macro #QF_CRIT_STAT_TYPE is defined, this internal macro
* provides the definition of the critical section status variable.
* Otherwise this macro is empty.
* @sa #QF_CRIT_STAT_TYPE
*/
#define QF_CRIT_STAT_
/*! This is an internal macro for entering a critical section. */
/**
* @details
* The purpose of this macro is to enable writing the same code for the
* case when critical section status type is defined and when it is not.
* If the macro #QF_CRIT_STAT_TYPE is defined, this internal macro
* invokes QF_CRIT_ENTRY() passing the key variable as the parameter.
* Otherwise QF_CRIT_ENTRY() is invoked with a dummy parameter.
* @sa QF_CRIT_ENTRY()
*/
#define QF_CRIT_E_() QF_CRIT_ENTRY(dummy)
/*! This is an internal macro for exiting a critical section. */
/**
* @details
* The purpose of this macro is to enable writing the same code for the
* case when critical section status type is defined and when it is not.
* If the macro #QF_CRIT_STAT_TYPE is defined, this internal macro
* invokes #QF_CRIT_EXIT passing the key variable as the parameter.
* Otherwise #QF_CRIT_EXIT is invoked with a dummy parameter.
* @sa #QF_CRIT_EXIT
*/
#define QF_CRIT_X_() QF_CRIT_EXIT(dummy)
#elif (!defined QF_CRIT_STAT_)
#define QF_CRIT_STAT_ QF_CRIT_STAT_TYPE critStat_;
#define QF_CRIT_E_() QF_CRIT_ENTRY(critStat_)
#define QF_CRIT_X_() QF_CRIT_EXIT(critStat_)
#endif
/*==========================================================================*/
/* Assertions inside the critical section */
#ifdef Q_NASSERT /* Q_NASSERT defined--assertion checking disabled */
#define Q_ASSERT_CRIT_(id_, test_) ((void)0)
#define Q_REQUIRE_CRIT_(id_, test_) ((void)0)
#define Q_ERROR_CRIT_(id_) ((void)0)
#else /* Q_NASSERT not defined--assertion checking enabled */
#define Q_ASSERT_CRIT_(id_, test_) do { \
if ((test_)) {} else { \
QF_CRIT_X_(); \
Q_onAssert(&Q_this_module_[0], (int_t)(id_)); \
} \
} while (false)
#define Q_REQUIRE_CRIT_(id_, test_) Q_ASSERT_CRIT_((id_), (test_))
#define Q_ERROR_CRIT_(id_) do { \
QF_CRIT_X_(); \
Q_onAssert(&Q_this_module_[0], (int_t)(id_)); \
} while (false)
#endif /* Q_NASSERT */
/*==========================================================================*/
/* The following bitmasks are for the fields of the @c refCtr_ attribute
* of the QTimeEvt struct (inherited from QEvt). This attribute is NOT used
* for reference counting in time events, because the @c poolId_ attribute
* is zero ("immutable events").
*/
#define QTE_IS_LINKED (1U << 7U)
#define QTE_WAS_DISARMED (1U << 6U)
#define QTE_TICK_RATE 0x0FU
/*! @brief structure representing a free block in the Native QF Memory Pool */
typedef struct QFreeBlock {
struct QFreeBlock * volatile next;
} QFreeBlock;
/* internal helper macros ==================================================*/
/*! increment the refCtr of a const event (requires casting `const` away)
* @private @memberof QEvt
*
* @tr{PQP11_8}
*/
static inline void QEvt_refCtr_inc_(QEvt const *me) {
++((QEvt *)me)->refCtr_;
}
/*! decrement the refCtr of a const event (requires casting `const` away)
* @private @memberof QEvt
*
* @tr{PQP11_8}
*/
static inline void QEvt_refCtr_dec_(QEvt const *me) {
--((QEvt *)me)->refCtr_;
}
#endif /* QP_INC_QF_PKG_H_ */
/*! @file
* @brief QP native, platform-independent, thread-safe event queue interface
* @details
* This header file must be included in all QF ports that use native QF
* event queue for active objects. Also, this file needs to be included
* in the QP/C library when the application uses QActive_defer()/
* QActive_recall(). Finally, this file is also needed when the "raw"
* thread-safe queues are used for communication between active objects
* and non-framework entities, such as ISRs, device drivers, or legacy
* code.
*/
#ifndef QP_INC_QEQUEUE_H_
#define QP_INC_QEQUEUE_H_
#ifndef QF_EQUEUE_CTR_SIZE
/*! The size [bytes] of the ring-buffer counters used in the
* native QF event queue implementation. Valid values: 1U, 2U, or 4U;
* default 1U.
* @details
* This macro can be defined in the QF port file (qf_port.h) to
* configure the ::QEQueueCtr type. Here the macro is not defined so the
* default of 1 byte is chosen.
*/
#define QF_EQUEUE_CTR_SIZE 1U
#endif
#if (QF_EQUEUE_CTR_SIZE == 1U)
/*! The data type to store the ring-buffer counters based on
* the macro #QF_EQUEUE_CTR_SIZE.
* @details
* The dynamic range of this data type determines the maximum length
* of the ring buffer managed by the native QF event queue.
*/
typedef uint8_t QEQueueCtr;
#elif (QF_EQUEUE_CTR_SIZE == 2U)
typedef uint16_t QEQueueCtr;
#elif (QF_EQUEUE_CTR_SIZE == 4U)
typedef uint32_t QEQueueCtr;
#else
#error "QF_EQUEUE_CTR_SIZE defined incorrectly, expected 1U, 2U, or 4U"
#endif
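/* Illustrative port-level configuration sketch (hypothetical content of a
 * qf_port.h file, NOT part of this header): selecting 2-byte ring-buffer
 * counters to allow event queues longer than 255 entries:
 *
 *     #define QF_EQUEUE_CTR_SIZE 2U
 */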
/*==========================================================================*/
$declare ${QF::QEQueue}
#endif /* QP_INC_QEQUEUE_H_ */
/*! @file
* @brief QP native, platform-independent memory pool ::QMPool interface.
*/
#ifndef QP_INC_QMPOOL_H_
#define QP_INC_QMPOOL_H_
/*==========================================================================*/
#ifndef QF_MPOOL_SIZ_SIZE
/*! macro to override the default ::QMPoolSize size [bytes].
* Valid values 1U, 2U, or 4U; default 2U
*/
#define QF_MPOOL_SIZ_SIZE 2U
#endif
#if (QF_MPOOL_SIZ_SIZE == 1U)
/*! The data type to store the block-size based on the macro
* #QF_MPOOL_SIZ_SIZE.
* @details
* The dynamic range of this data type determines the maximum size
* of blocks that can be managed by the native QF event pool.
*/
typedef uint8_t QMPoolSize;
#elif (QF_MPOOL_SIZ_SIZE == 2U)
typedef uint16_t QMPoolSize;
#elif (QF_MPOOL_SIZ_SIZE == 4U)
typedef uint32_t QMPoolSize;
#else
#error "QF_MPOOL_SIZ_SIZE defined incorrectly, expected 1U, 2U, or 4U"
#endif
/*==========================================================================*/
#ifndef QF_MPOOL_CTR_SIZE
/*! macro to override the default ::QMPoolCtr size [bytes].
* Valid values 1U, 2U, or 4U; default 2U
*/
#define QF_MPOOL_CTR_SIZE 2U
#endif
#if (QF_MPOOL_CTR_SIZE == 1U)
/*! The data type to store the block-counter based on the macro
* #QF_MPOOL_CTR_SIZE.
* @details
* The dynamic range of this data type determines the maximum number
* of blocks that can be stored in the pool.
*/
typedef uint8_t QMPoolCtr;
#elif (QF_MPOOL_CTR_SIZE == 2U)
typedef uint16_t QMPoolCtr;
#elif (QF_MPOOL_CTR_SIZE == 4U)
typedef uint32_t QMPoolCtr;
#else
#error "QF_MPOOL_CTR_SIZE defined incorrectly, expected 1U, 2U, or 4U"
#endif
/*! Memory pool element to allocate correctly aligned storage
* for QMPool class.
* @param[in] evType_ event type (name of the subclass of QEvt)
*/
#define QF_MPOOL_EL(evType_) \
struct { void *sto_[((sizeof(evType_) - 1U)/sizeof(void*)) + 1U]; }
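/* Illustrative usage sketch (hypothetical application code, NOT part of
 * the QP/C framework): using QF_MPOOL_EL() to reserve correctly aligned
 * storage for an event pool and handing it to QF_poolInit(). The event
 * type `SensorEvt` and the pool length are hypothetical.
 */
#include "qpc.h"

typedef struct {
    QEvt super;     /* inherit QEvt */
    uint16_t value; /* hypothetical payload */
} SensorEvt;

static QF_MPOOL_EL(SensorEvt) l_smlPoolSto[10]; /* 10 aligned blocks */

void app_pool_setup(void) {
    QF_poolInit(l_smlPoolSto,
                sizeof(l_smlPoolSto),     /* total pool size in bytes */
                sizeof(l_smlPoolSto[0])); /* block size in bytes */
}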
/*==========================================================================*/
$declare ${QF::QMPool}
#endif /* QP_INC_QMPOOL_H_ */
/*! @file
* @brief QV/C (cooperative "Vanilla" kernel) platform-independent
* public interface
*/
#ifndef QP_INC_QV_H_
#define QP_INC_QV_H_
/*==========================================================================*/
/* QF customization for QV -- data members of the QActive class... */
/* QV event-queue used for AOs */
#define QF_EQUEUE_TYPE QEQueue
/*==========================================================================*/
#include "qequeue.h" /* QV kernel uses the native QP event queue */
#include "qmpool.h" /* QV kernel uses the native QP memory pool */
#include "qf.h" /* QF framework integrates directly with QV */
/*==========================================================================*/
$declare ${QV::QV-base}
/*==========================================================================*/
/* interface used only inside QF, but not in applications */
#ifdef QP_IMPL
/* QV-specific scheduler locking and event queue... */
$declare ${QV-impl}
/* Native QF event pool operations... */
$declare ${QF-QMPool-impl}
#endif /* QP_IMPL */
#endif /* QP_INC_QV_H_ */
/*! @file
* @brief QK/C (preemptive non-blocking kernel) platform-independent
* public interface.
*/
#ifndef QP_INC_QK_H_
#define QP_INC_QK_H_
/*==========================================================================*/
/* QF configuration for QK -- data members of the QActive class... */
/* QK event-queue used for AOs */
#define QF_EQUEUE_TYPE QEQueue
/* QK thread type used for AOs
* QK uses this member to store the private Thread-Local Storage pointer.
*/
#define QF_THREAD_TYPE void*
#include "qequeue.h" /* QK kernel uses the native QP event queue */
#include "qmpool.h" /* QK kernel uses the native QP memory pool */
#include "qf.h" /* QF framework integrates directly with QK */
/*==========================================================================*/
$declare ${QK::QK-base}
$declare ${QK::QK-extern-C}
/*==========================================================================*/
/* interface used only inside QF, but not in applications */
#ifdef QP_IMPL
/* QK-specific scheduler locking and event queue... */
$declare ${QK-impl}
/* Native QF event pool operations... */
$declare ${QF-QMPool-impl}
#endif /* QP_IMPL */
#endif /* QP_INC_QK_H_ */
/*! @file
* @brief QXK/C (preemptive dual-mode kernel) platform-independent
* public interface.
*/
#ifndef QP_INC_QXK_H_
#define QP_INC_QXK_H_
/*==========================================================================*/
/* QF configuration for QXK -- data members of the QActive class... */
/* QXK event-queue used for AOs */
#define QF_EQUEUE_TYPE QEQueue
/* QXK OS-object used to store the private stack pointer for extended threads.
* (The private stack pointer is NULL for basic-threads).
*/
#define QF_OS_OBJECT_TYPE void*
/* QXK thread type used to store the private Thread-Local Storage pointer */
#define QF_THREAD_TYPE void*
/*! Access Thread-Local Storage (TLS) and cast it on the given `type_` */
#define QXK_TLS(type_) ((type_)QXK_current()->thread)
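/* A minimal usage sketch (the type lib_tls_t, the object l_tlsB, and the
* thread function Thread_B() are hypothetical, NOT part of QP/C): an
* extended thread can publish the address of its private TLS block in its
* inherited .thread member and later retrieve it with QXK_TLS():
*
*     typedef struct { int lastError; } lib_tls_t;
*     static lib_tls_t l_tlsB;
*
*     static void Thread_B(QXThread * const me) {
*         me->super.thread = &l_tlsB;              // publish the private TLS
*         for (;;) {
*             QXK_TLS(lib_tls_t *)->lastError = 0; // access the TLS
*             (void)QXThread_delay(10U);
*         }
*     }
*/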
/*==========================================================================*/
#include "qequeue.h" /* QXK kernel uses the native QP event queue */
#include "qmpool.h" /* QXK kernel uses the native QP memory pool */
#include "qf.h" /* QF framework integrates directly with QXK */
/*==========================================================================*/
$declare ${QXK::QXK-base}
$declare ${QXK::QXK-extern-C}
$declare ${QXK::QXThread}
$declare ${QXK::QXThreadVtable}
$declare ${QXK::QXSemaphore}
$declare ${QXK::QXMutex}
$declare ${QXK-macros}
/*==========================================================================*/
/* interface used only inside QP implementation, but not in applications */
#ifdef QP_IMPL
/* QXK implementation... */
$declare ${QXK-impl}
/* Native QF event pool operations... */
$declare ${QF-QMPool-impl}
#endif /* QP_IMPL */
#endif /* QP_INC_QXK_H_ */
/*! @file
* @brief QS/C platform-independent public interface.
*/
#ifndef QP_INC_QS_H_
#define QP_INC_QS_H_
#ifndef Q_SPY
#error "Q_SPY must be defined to include qs.h"
#endif
/*==========================================================================*/
$declare ${QS-config}
/*==========================================================================*/
$declare ${QS}
/*==========================================================================*/
$declare ${QS-macros}
/*==========================================================================*/
/* Facilities for QS critical section */
/* QS-specific critical section */
#ifdef QS_CRIT_ENTRY /* separate QS critical section defined? */
#ifndef QS_CRIT_STAT_TYPE
#define QS_CRIT_STAT_
#define QS_CRIT_E_() QS_CRIT_ENTRY(dummy)
#define QS_CRIT_X_() QS_CRIT_EXIT(dummy); QS_REC_DONE()
#else
#define QS_CRIT_STAT_ QS_CRIT_STAT_TYPE critStat_;
#define QS_CRIT_E_() QS_CRIT_ENTRY(critStat_)
#define QS_CRIT_X_() QS_CRIT_EXIT(critStat_); QS_REC_DONE()
#endif /* QS_CRIT_STAT_TYPE */
#else /* separate QS critical section not defined--use the QF definition */
#ifndef QF_CRIT_STAT_TYPE
/*! This is an internal macro for defining the critical section
* status type.
* @details
* The purpose of this macro is to enable writing the same code for the
* case when the critical section status type is defined and when it is not.
* If the macro #QF_CRIT_STAT_TYPE is defined, this internal macro
* provides the definition of the critical section status variable.
* Otherwise this macro is empty.
* @sa #QF_CRIT_STAT_TYPE
*/
#define QS_CRIT_STAT_
/*! This is an internal macro for entering a critical section.
* @details
* The purpose of this macro is to enable writing the same code for the
* case when the critical section status type is defined and when it is not.
* If the macro #QF_CRIT_STAT_TYPE is defined, this internal macro
* invokes QF_CRIT_ENTRY() passing the key variable as the parameter.
* Otherwise QF_CRIT_ENTRY() is invoked with a dummy parameter.
* @sa QF_CRIT_ENTRY()
*/
#define QS_CRIT_E_() QF_CRIT_ENTRY(dummy)
/*! This is an internal macro for exiting a critical section.
* @details
* The purpose of this macro is to enable writing the same code for the
* case when the critical section status type is defined and when it is not.
* If the macro #QF_CRIT_STAT_TYPE is defined, this internal macro
* invokes QF_CRIT_EXIT() passing the key variable as the parameter.
* Otherwise QF_CRIT_EXIT() is invoked with a dummy parameter.
* @sa QF_CRIT_EXIT()
*/
#define QS_CRIT_X_() QF_CRIT_EXIT(dummy); QS_REC_DONE()
#elif (!defined QS_CRIT_STAT_)
#define QS_CRIT_STAT_ QF_CRIT_STAT_TYPE critStat_;
#define QS_CRIT_E_() QF_CRIT_ENTRY(critStat_)
#define QS_CRIT_X_() QF_CRIT_EXIT(critStat_); QS_REC_DONE()
#endif /* simple unconditional interrupt disabling used */
#endif /* separate QS critical section not defined */
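/* A minimal port-level sketch (the functions my_int_lock()/my_int_unlock()
* are hypothetical, NOT part of QP/C): a port that needs a separate QS
* critical section could provide the following definitions (e.g., in
* qs_port.h), which the conditional compilation above then picks up for
* QS_CRIT_STAT_, QS_CRIT_E_() and QS_CRIT_X_():
*
*     #define QS_CRIT_STAT_TYPE    unsigned int
*     #define QS_CRIT_ENTRY(stat_) ((stat_) = my_int_lock())
*     #define QS_CRIT_EXIT(stat_)  my_int_unlock(stat_)
*/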
/*==========================================================================*/
/* Macros for use in QUTest only */
#ifdef Q_UTEST
$declare ${QUTest}
/*--------------------------------------------------------------------------*/
/* QP-stub for QUTest
* NOTE: The QP-stub is needed for unit testing QP applications,
* but might NOT be needed for testing QP itself.
*/
#if Q_UTEST != 0
$declare ${QUTest-stub::QS}
$declare ${QUTest-stub::QHsmDummy}
$declare ${QUTest-stub::QActiveDummy}
#endif /* Q_UTEST != 0 */
/*! QS macro to define the Test-Probe for a given `fun_` */
#define QS_TEST_PROBE_DEF(fun_) \
uint32_t const qs_tp_ = QS_getTestProbe_((void (*)(void))(fun_));
/*! QS macro to apply a Test-Probe */
#define QS_TEST_PROBE(code_) \
if (qs_tp_ != 0U) { code_ }
/*! QS macro to apply a Test-Probe */
#define QS_TEST_PROBE_ID(id_, code_) \
if (qs_tp_ == (uint32_t)(id_)) { code_ }
/*! QS macro to pause test execution and enter the test event-loop */
#define QS_TEST_PAUSE() (QS_test_pause_())
#else /* Q_UTEST not defined */
/* dummy definitions when not building for QUTEST */
#define QS_TEST_PROBE_DEF(fun_)
#define QS_TEST_PROBE(code_)
#define QS_TEST_PROBE_ID(id_, code_)
#define QS_TEST_PAUSE() ((void)0)
#endif /* Q_UTEST */
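/* A minimal QUTest usage sketch (the function myFun() is hypothetical, NOT
* part of QP/C): inside production code under test, the Test-Probe macros
* above allow a test script to alter the behavior of a specific function:
*
*     void myFun(void) {
*         QS_TEST_PROBE_DEF(&myFun)  // probe value set for myFun()
*         QS_TEST_PROBE(             // runs for any non-zero probe value
*             return;
*         )
*         QS_TEST_PROBE_ID(1,        // runs only when the probe value == 1
*             QS_TEST_PAUSE();
*         )
*     }
*/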
#endif /* def QP_INC_QS_H_ */
/*! @file
* @brief Dummy definitions of the QS macros that avoid code generation from
* the QS instrumentation.
*/
#ifndef QP_INC_QS_DUMMY_H_
#define QP_INC_QS_DUMMY_H_
#ifdef Q_SPY
#error "Q_SPY must NOT be defined to include qs_dummy.h"
#endif
#define QS_INIT(arg_) ((uint8_t)1U)
#define QS_EXIT() ((void)0)
#define QS_DUMP() ((void)0)
#define QS_GLB_FILTER(rec_) ((void)0)
#define QS_LOC_FILTER(qs_id_) ((void)0)
#define QS_GET_BYTE(pByte_) ((uint16_t)0xFFFFU)
#define QS_GET_BLOCK(pSize_) ((uint8_t *)0)
#define QS_BEGIN_ID(rec_, qs_id_) if (false) {
#define QS_END() }
#define QS_BEGIN_NOCRIT(rec_, qs_id_) if (false) {
#define QS_END_NOCRIT() }
#define QS_I8(width_, data_) ((void)0)
#define QS_U8(width_, data_) ((void)0)
#define QS_I16(width_, data_) ((void)0)
#define QS_U16(width_, data_) ((void)0)
#define QS_I32(width_, data_) ((void)0)
#define QS_U32(width_, data_) ((void)0)
#define QS_F32(width_, data_) ((void)0)
#define QS_F64(width_, data_) ((void)0)
#define QS_I64(width_, data_) ((void)0)
#define QS_U64(width_, data_) ((void)0)
#define QS_ENUM(group_, value_) ((void)0)
#define QS_STR(str_) ((void)0)
#define QS_MEM(mem_, size_) ((void)0)
#define QS_SIG(sig_, obj_) ((void)0)
#define QS_OBJ(obj_) ((void)0)
#define QS_FUN(fun_) ((void)0)
#define QS_SIG_DICTIONARY(sig_, obj_) ((void)0)
#define QS_OBJ_DICTIONARY(obj_) ((void)0)
#define QS_OBJ_ARR_DICTIONARY(obj_, idx_) ((void)0)
#define QS_FUN_DICTIONARY(fun_) ((void)0)
#define QS_USR_DICTIONARY(rec_) ((void)0)
#define QS_ENUM_DICTIONARY(value_, group_) ((void)0)
#define QS_ASSERTION(module_, loc_, delay_) ((void)0)
#define QS_FLUSH() ((void)0)
#define QS_TEST_PROBE_DEF(fun_)
#define QS_TEST_PROBE(code_)
#define QS_TEST_PROBE_ID(id_, code_)
#define QS_TEST_PAUSE() ((void)0)
#define QS_OUTPUT() ((void)0)
#define QS_RX_INPUT() ((void)0)
/*==========================================================================*/
/* internal QS macros used only in the QP components */
#ifdef QP_IMPL
/* predefined QS trace records */
#define QS_BEGIN_PRE_(rec_, qs_id_) if (false) {
#define QS_END_PRE_() }
#define QS_BEGIN_NOCRIT_PRE_(rec_, qs_id_) if (false) {
#define QS_END_NOCRIT_PRE_() }
#define QS_U8_PRE_(data_) ((void)0)
#define QS_2U8_PRE_(data1_, data2_) ((void)0)
#define QS_U16_PRE_(data_) ((void)0)
#define QS_U32_PRE_(data_) ((void)0)
#define QS_TIME_PRE_() ((void)0)
#define QS_SIG_PRE_(sig_) ((void)0)
#define QS_EVS_PRE_(size_) ((void)0)
#define QS_OBJ_PRE_(obj_) ((void)0)
#define QS_FUN_PRE_(fun_) ((void)0)
#define QS_EQC_PRE_(ctr_) ((void)0)
#define QS_MPC_PRE_(ctr_) ((void)0)
#define QS_MPS_PRE_(size_) ((void)0)
#define QS_TEC_PRE_(ctr_) ((void)0)
#define QS_CRIT_STAT_
#define QF_QS_CRIT_ENTRY() ((void)0)
#define QF_QS_CRIT_EXIT() ((void)0)
#define QF_QS_ISR_ENTRY(isrnest_, prio_) ((void)0)
#define QF_QS_ISR_EXIT(isrnest_, prio_) ((void)0)
#define QF_QS_ACTION(act_) ((void)0)
#endif /* QP_IMPL */
#endif /* QP_INC_QS_DUMMY_H_ */
/*! @file
* @brief Internal (package scope) QS/C interface.
*/
#ifndef QP_INC_QS_PKG_H_
#define QP_INC_QS_PKG_H_
/*==========================================================================*/
/*! QS received record types (RX channel)
* @details
* This enumeration specifies the record types for the QS receive channel
*/
enum QSpyRxRecords {
QS_RX_INFO, /*!< query Target info (ver, config, tstamp) */
QS_RX_COMMAND, /*!< execute a user-defined command in the Target */
QS_RX_RESET, /*!< reset the Target */
QS_RX_TICK, /*!< call QTIMEEVT_TICK_X() in the Target */
QS_RX_PEEK, /*!< peek Target memory */
QS_RX_POKE, /*!< poke Target memory */
QS_RX_FILL, /*!< fill Target memory */
QS_RX_TEST_SETUP, /*!< test setup */
QS_RX_TEST_TEARDOWN, /*!< test teardown */
QS_RX_TEST_PROBE, /*!< set a Test-Probe in the Target */
QS_RX_GLB_FILTER, /*!< set global filters in the Target */
QS_RX_LOC_FILTER, /*!< set local filters in the Target */
QS_RX_AO_FILTER, /*!< set local AO filter in the Target */
QS_RX_CURR_OBJ, /*!< set the "current-object" in the Target */
QS_RX_TEST_CONTINUE, /*!< continue a test after QS_TEST_PAUSE() */
QS_RX_QUERY_CURR, /*!< query the "current object" in the Target */
QS_RX_EVENT /*!< inject an event to the Target */
};
/*==========================================================================*/
/*! Frame character of the QS output protocol */
#define QS_FRAME (0x7EU)
/*! Escape character of the QS output protocol */
#define QS_ESC (0x7DU)
/*! The expected checksum value over a correct QS record */
#define QS_GOOD_CHKSUM (0xFFU)
/*! Escape modifier of the QS output protocol */
/**
* @details
* The escaped byte is XOR-ed with the escape modifier before it is inserted
* into the QS buffer.
*/
#define QS_ESC_XOR (0x20U)
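/* Worked example of the transparency (byte-stuffing) rules defined by the
* constants above: a payload byte equal to QS_FRAME (0x7E) is transmitted
* as the two bytes 0x7D 0x5E (QS_ESC followed by 0x7E ^ QS_ESC_XOR), and a
* payload byte equal to QS_ESC (0x7D) is transmitted as 0x7D 0x5D. The
* checksum is accumulated over the un-escaped bytes (see
* QS_INSERT_ESC_BYTE_() below), so the 8-bit sum over a correctly received
* record, including its checksum byte, equals QS_GOOD_CHKSUM (0xFF).
*/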
/*==========================================================================*/
/*! Internal QS macro to begin a predefined QS record with
* entering critical section.
*
* @note This macro is intended for use only inside QP components and NOT
* at the application level.
* @sa QS_BEGIN_ID()
*/
#define QS_BEGIN_PRE_(rec_, qs_id_) \
if (QS_GLB_CHECK_(rec_) && QS_LOC_CHECK_(qs_id_)) { \
QS_CRIT_E_(); \
QS_beginRec_((uint_fast8_t)(rec_));
/*! Internal QS macro to end a predefined QS record with
* exiting critical section.
*
* @note This macro is intended for use only inside QP components and NOT
* at the application level.
* @sa QS_END()
*/
#define QS_END_PRE_() \
QS_endRec_(); \
QS_CRIT_X_(); \
}
/*! Internal macro to begin a predefined QS record without
* entering critical section.
*
* @note This macro is intended for use only inside QP components and NOT
* at the application level.
* @sa QS_BEGIN_NOCRIT()
*/
#define QS_BEGIN_NOCRIT_PRE_(rec_, qs_id_) \
if (QS_GLB_CHECK_(rec_) && QS_LOC_CHECK_(qs_id_)) { \
QS_beginRec_((uint_fast8_t)(rec_));
/*! Internal QS macro to end a predefined QS record without
* exiting critical section
*
* @note This macro is intended for use only inside QP components and NOT
* at the application level. @sa #QS_END_NOCRIT
*/
#define QS_END_NOCRIT_PRE_() QS_endRec_(); }
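/* A minimal sketch (assumptions: code compiled as part of QP with QP_IMPL
* defined, a QS_CRIT_STAT_ declaration in scope, and hypothetical local
* variables ctr and tickRate) of how the macros above bracket the payload
* of one predefined trace record:
*
*     QS_CRIT_STAT_
*     QS_BEGIN_PRE_(QS_QF_TICK, 0U)  // begin record, enter critical section
*         QS_TEC_PRE_(ctr);          // tick counter
*         QS_U8_PRE_(tickRate);      // tick rate
*     QS_END_PRE_()                  // end record, exit critical section
*/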
/*! Internal QS macro to output a predefined uint8_t data element */
#define QS_U8_PRE_(data_) (QS_u8_raw_((uint8_t)(data_)))
/*! Internal QS macro to output 2 predefined uint8_t data elements */
#define QS_2U8_PRE_(data1_, data2_) \
(QS_2u8_raw_((uint8_t)(data1_), (uint8_t)(data2_)))
/*! Internal QS macro to output a predefined uint16_t data element */
#define QS_U16_PRE_(data_) (QS_u16_raw_((uint16_t)(data_)))
/*! Internal QS macro to output a predefined uint32_t data element */
#define QS_U32_PRE_(data_) (QS_u32_raw_((uint32_t)(data_)))
/*! Internal QS macro to output a predefined zero-terminated string element */
#define QS_STR_PRE_(msg_) (QS_str_raw_((msg_)))
#if (!defined Q_SIGNAL_SIZE || (Q_SIGNAL_SIZE == 1U))
/*! Internal macro to output an unformatted event signal data element */
/**
* @note the size of the signal depends on the macro #Q_SIGNAL_SIZE.
*/
#define QS_SIG_PRE_(sig_) (QS_u8_raw_((uint8_t)sig_))
#elif (Q_SIGNAL_SIZE == 2U)
#define QS_SIG_PRE_(sig_) (QS_u16_raw_((uint16_t)sig_))
#elif (Q_SIGNAL_SIZE == 4U)
#define QS_SIG_PRE_(sig_) (QS_u32_raw_((uint32_t)sig_))
#endif
#define QS_OBJ_PRE_(obj_) (QS_obj_raw_(obj_))
#if (!defined QS_FUN_PTR_SIZE || (QS_FUN_PTR_SIZE == 1U))
#define QS_FUN_PRE_(fun_) (QS_u8_raw_((uint8_t)(fun_)))
#elif (QS_FUN_PTR_SIZE == 2U)
#define QS_FUN_PRE_(fun_) (QS_u16_raw_((uint16_t)(fun_)))
#elif (QS_FUN_PTR_SIZE == 4U)
#define QS_FUN_PRE_(fun_) (QS_u32_raw_((uint32_t)(fun_)))
#elif (QS_FUN_PTR_SIZE == 8U)
#define QS_FUN_PRE_(fun_) (QS_u64_raw_((uint64_t)(fun_)))
#else
/*! Internal macro to output an unformatted function pointer */
/** @note the size of the pointer depends on the macro #QS_FUN_PTR_SIZE.
* If this macro is not defined, the pointer size is assumed to be 4 bytes.
*/
#define QS_FUN_PRE_(fun_) (QS_u32_raw_((uint32_t)(fun_)))
#endif
/*==========================================================================*/
#if (!defined QF_EQUEUE_CTR_SIZE || (QF_EQUEUE_CTR_SIZE == 1U))
/*! Internal QS macro to output an unformatted event queue counter
* data element. */
/**
* @note the counter size depends on the macro #QF_EQUEUE_CTR_SIZE.
*/
#define QS_EQC_PRE_(ctr_) QS_u8_raw_((uint8_t)(ctr_))
#elif (QF_EQUEUE_CTR_SIZE == 2U)
#define QS_EQC_PRE_(ctr_) QS_u16_raw_((uint16_t)(ctr_))
#elif (QF_EQUEUE_CTR_SIZE == 4U)
#define QS_EQC_PRE_(ctr_) QS_u32_raw_((uint32_t)(ctr_))
#endif
#if (!defined QF_EVENT_SIZ_SIZE || (QF_EVENT_SIZ_SIZE == 1U))
/*! Internal QS macro to output an unformatted event size
* data element. */
/**
* @note the event size depends on the macro #QF_EVENT_SIZ_SIZE.
*/
#define QS_EVS_PRE_(size_) QS_u8_raw_((uint8_t)(size_))
#elif (QF_EVENT_SIZ_SIZE == 2U)
#define QS_EVS_PRE_(size_) QS_u16_raw_((uint16_t)(size_))
#elif (QF_EVENT_SIZ_SIZE == 4U)
#define QS_EVS_PRE_(size_) QS_u32_raw_((uint32_t)(size_))
#endif
#if (!defined QF_MPOOL_SIZ_SIZE || (QF_MPOOL_SIZ_SIZE == 1U))
/*! Internal QS macro to output an unformatted memory pool
* block-size data element */
/**
* @note the block-size depends on the macro #QF_MPOOL_SIZ_SIZE.
*/
#define QS_MPS_PRE_(size_) QS_u8_raw_((uint8_t)(size_))
#elif (QF_MPOOL_SIZ_SIZE == 2U)
#define QS_MPS_PRE_(size_) QS_u16_raw_((uint16_t)(size_))
#elif (QF_MPOOL_SIZ_SIZE == 4U)
#define QS_MPS_PRE_(size_) QS_u32_raw_((uint32_t)(size_))
#endif
#if (!defined QF_MPOOL_CTR_SIZE || (QF_MPOOL_CTR_SIZE == 1U))
/*! Internal QS macro to output an unformatted memory pool
* block-counter data element. */
/**
* @note the counter size depends on the macro #QF_MPOOL_CTR_SIZE.
*/
#define QS_MPC_PRE_(ctr_) QS_u8_raw_((uint8_t)(ctr_))
#elif (QF_MPOOL_CTR_SIZE == 2U)
#define QS_MPC_PRE_(ctr_) QS_u16_raw_((uint16_t)(ctr_))
#elif (QF_MPOOL_CTR_SIZE == 4U)
#define QS_MPC_PRE_(ctr_) QS_u32_raw_((uint32_t)(ctr_))
#endif
#if (!defined QF_TIMEEVT_CTR_SIZE || (QF_TIMEEVT_CTR_SIZE == 1U))
/*! Internal QS macro to output an unformatted time event
* tick-counter data element */
/**
* @note the counter size depends on the macro #QF_TIMEEVT_CTR_SIZE.
*/
#define QS_TEC_PRE_(ctr_) QS_u8_raw_((uint8_t)(ctr_))
#elif (QF_TIMEEVT_CTR_SIZE == 2U)
#define QS_TEC_PRE_(ctr_) QS_u16_raw_((uint16_t)(ctr_))
#elif (QF_TIMEEVT_CTR_SIZE == 4U)
#define QS_TEC_PRE_(ctr_) QS_u32_raw_((uint32_t)(ctr_))
#endif
/*==========================================================================*/
/*! Internal QS macro to insert an un-escaped byte into the QS buffer */
#define QS_INSERT_BYTE_(b_) \
buf[head] = (b_); \
++head; \
if (head == end) { \
head = 0U; \
}
/*! Internal QS macro to insert an escaped byte into the QS buffer */
#define QS_INSERT_ESC_BYTE_(b_) \
chksum = (uint8_t)(chksum + (b_)); \
if (((b_) != QS_FRAME) && ((b_) != QS_ESC)) { \
QS_INSERT_BYTE_(b_) \
} \
else { \
QS_INSERT_BYTE_(QS_ESC) \
QS_INSERT_BYTE_((uint8_t)((b_) ^ QS_ESC_XOR))\
++QS_priv_.used; \
}
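/* NOTE: the two insertion macros above are not self-contained; they rely
* on the local variables buf, head, end, and chksum provided by the
* enclosing QS buffer functions, and QS_INSERT_ESC_BYTE_() additionally
* increments QS_priv_.used when an extra escape byte is inserted.
*/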
#endif /* QP_INC_QS_PKG_H_ */
/*! @file
* @brief QP/C public interface including backwards-compatibility layer
* @details
* This header file must be included directly or indirectly
* in all application modules (*.c files) that use QP/C.
*/
#ifndef QP_INC_QPC_H_
#define QP_INC_QPC_H_
#ifdef __cplusplus
extern "C" {
#endif
/*==========================================================================*/
#include "qf_port.h" /* QF/C port from the port directory */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* software tracing enabled? */
#include "qs_port.h" /* QS/C port from the port directory */
#else
#include "qs_dummy.h" /* QS/C dummy (inactive) interface */
#endif
/*==========================================================================*/
#ifndef QP_API_VERSION
/*! Specifies the backwards compatibility with the QP/C API version.
* @details
* For example, QP_API_VERSION==691 causes the compatibility layer to be
* generated for QP/C version 6.9.1 and newer, but not for versions older
* than 6.9.1. QP_API_VERSION==0 causes generation of the maximum currently
* supported backwards compatibility. This is the default.<br>
* <br>
* Conversely, QP_API_VERSION==9999 means that no compatibility layer should
* be generated. This setting is useful for checking if an application
* complies with the latest QP/C API.
*/
#define QP_API_VERSION 0
#endif /* #ifndef QP_API_VERSION */
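/* Example (hypothetical project setting): to generate the compatibility
* layer for QP/C 6.9.1 and newer only, the application can define the macro
* before this header is processed, e.g. on the compiler command line:
*
*     -DQP_API_VERSION=691
*/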
/*==========================================================================*/
/* QP API compatibility layer... */
#if (QP_API_VERSION < 700)
/*! @deprecated plain 'char' is no longer forbidden in MISRA-C 2012 */
typedef char char_t;
/*==========================================================================*/
#if (QP_API_VERSION < 691)
/*! @deprecated enable the QS global filter */
#define QS_FILTER_ON(rec_) QS_GLB_FILTER((rec_))
/*! @deprecated disable the QS global filter */
#define QS_FILTER_OFF(rec_) QS_GLB_FILTER(-(rec_))
/*! @deprecated enable the QS local filter for SM (state machine) object */
#define QS_FILTER_SM_OBJ(obj_) ((void)0)
/*! @deprecated enable the QS local filter for AO (active objects) */
#define QS_FILTER_AO_OBJ(obj_) ((void)0)
/*! @deprecated enable the QS local filter for MP (memory pool) object */
#define QS_FILTER_MP_OBJ(obj_) ((void)0)
/*! @deprecated enable the QS local filter for EQ (event queue) object */
#define QS_FILTER_EQ_OBJ(obj_) ((void)0)
/*! @deprecated enable the QS local filter for TE (time event) object */
#define QS_FILTER_TE_OBJ(obj_) ((void)0)
#ifdef Q_SPY
/*! @deprecated local Filter for a generic application object `obj_`. */
#define QS_FILTER_AP_OBJ(obj_) (QS_priv_.locFilter_AP = (obj_))
/*! @deprecated begin of a user QS record, instead use QS_BEGIN_ID() */
#define QS_BEGIN(rec_, obj_) \
if (((QS_priv_.glbFilter[(uint_fast8_t)(rec_) >> 3U] \
& (1U << ((uint_fast8_t)(rec_) & 7U))) != 0U) \
&& ((QS_priv_.locFilter_AP == (void *)0) \
|| (QS_priv_.locFilter_AP == (obj_)))) \
{ \
QS_CRIT_STAT_ \
QS_CRIT_E_(); \
QS_beginRec_((uint_fast8_t)(rec_)); \
QS_TIME_PRE_(); {
/*! @deprecated Output formatted uint32_t to the QS record */
#define QS_U32_HEX(width_, data_) \
(QS_u32_fmt_((uint8_t)(((width_) << 4)) | QS_HEX_FMT, (data_)))
#else
#define QS_FILTER_AP_OBJ(obj_) ((void)0)
#define QS_BEGIN(rec_, obj_) if (false) {
#define QS_U32_HEX(width_, data_) ((void)0)
#endif
/*==========================================================================*/
#if (QP_API_VERSION < 660)
/*! @deprecated casting to QXThreadHandler
* instead use: the new signature of QXThreadHandler and don't cast
*/
#define Q_XTHREAD_CAST(handler_) ((QXThreadHandler)(handler_))
/*==========================================================================*/
#if (QP_API_VERSION < 580)
/*! @deprecated call to the QMSM_INIT() operation; instead use: QHSM_INIT() */
#define QMSM_INIT(me_, e_) QHSM_INIT((me_), (e_))
/*! @deprecated call to the QMSM_DISPATCH() operation;
* instead use: QHSM_DISPATCH() */
#define QMSM_DISPATCH(me_, e_) QHSM_DISPATCH((me_), (e_), 0U)
#endif /* QP_API_VERSION < 580 */
#endif /* QP_API_VERSION < 660 */
#endif /* QP_API_VERSION < 691 */
#endif /* QP_API_VERSION < 700 */
#ifdef __cplusplus
}
#endif
#endif /* QP_INC_QPC_H_ */
/*! @file
* @brief Application build time-stamp interface
*/
#ifndef QP_INC_QSTAMP_H_
#define QP_INC_QSTAMP_H_
extern char const Q_BUILD_DATE[12];
extern char const Q_BUILD_TIME[9];
#endif /* QP_INC_QSTAMP_H_ */
/*! @file
* @brief ::QHsm implementation
*
* @tr{RQP103} @tr{RQP104} @tr{RQP120} @tr{RQP130}
*/
#define QP_IMPL /* this is QP implementation */
#include "qep_port.h" /* QEP port */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
Q_DEFINE_THIS_MODULE("qep_hsm")
/*==========================================================================*/
$define ${QEP::QP_versionStr[8]}
/*! Immutable events corresponding to the reserved signals.
*
* @details
* Static, immutable reserved events that the QEP event processor sends
* to state handler functions of a QHsm-style state machine to execute entry
* actions, exit actions, and initial transitions.
*/
static QEvt const l_reservedEvt_[] = {
{ (QSignal)Q_EMPTY_SIG, 0U, 0U },
{ (QSignal)Q_ENTRY_SIG, 0U, 0U },
{ (QSignal)Q_EXIT_SIG, 0U, 0U },
{ (QSignal)Q_INIT_SIG, 0U, 0U }
};
/*! helper function to trigger a reserved event in a QHsm
* @private @memberof QHsm
*
* @param[in] state state handler function
* @param[in] sig reserved signal to trigger
*/
static inline QState QHsm_reservedEvt_(
QHsm * const me,
QStateHandler const state,
enum QReservedSig const sig)
{
return (*state)(me, &l_reservedEvt_[sig]);
}
$define ${QEP::QHsm}
/*! @file
* @brief ::QMsm implementation
*/
#define QP_IMPL /* this is QP implementation */
#include "qep_port.h" /* QEP port */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
Q_DEFINE_THIS_MODULE("qep_msm")
/*==========================================================================*/
/*! internal QEP constants */
/*! maximum depth of entry levels in an MSM for transition to history. */
#define QMSM_MAX_ENTRY_DEPTH_ 4
static struct QMState const l_msm_top_s = {
(struct QMState *)0,
Q_STATE_CAST(0),
Q_ACTION_CAST(0),
Q_ACTION_CAST(0),
Q_ACTION_CAST(0)
};
/*==========================================================================*/
$define ${QEP::QMsm}
/*! @file
* @deprecated
* Empty file kept only for backwards compatibility.
* @sa qf_qact.c
*/
extern char const dummy; /* declaration */
char const dummy = '\0'; /* definition */
/*! @file
* @brief ::QActive native queue operations (based on ::QEQueue)
*
* @note
* this source file is only included in the application build when the native
* QF active object queue is used (instead of a message queue of an RTOS).
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
Q_DEFINE_THIS_MODULE("qf_actq")
/*==========================================================================*/
$define ${QF::QActive::post_}
$define ${QF::QActive::postLIFO_}
$define ${QF::QActive::get_}
$define ${QF::QF-base::getQueueMin}
/*==========================================================================*/
/*! Perform downcast to QTicker pointer.
*
* @details
* This macro encapsulates the downcast to (QTicker *), which is used in
* QTicker_init_() and QTicker_dispatch_(). Such casts violate MISRA-C 2012
* Rule 11.3(req) "cast from pointer to object type to pointer to different
* object type".
*/
#define QTICKER_CAST_(me_) ((QActive *)(me_))
$define ${QF::QTicker}
/*! @file
* @brief QActive_defer() and QActive_recall() implementation.
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
Q_DEFINE_THIS_MODULE("qf_defer")
$define ${QF::QActive::defer}
$define ${QF::QActive::recall}
$define ${QF::QActive::flushDeferred}
/*! @file
* @brief QF/C dynamic event management
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
#if (QF_MAX_EPOOL > 0U) /* dynamic events configured? */
Q_DEFINE_THIS_MODULE("qf_dyn")
//============================================================================
$define ${QF::QF-pkg::maxPool_}
$define ${QF::QF-pkg::ePool_[QF_MAX_EPOOL]}
//============================================================================
$define ${QEP::QEvt}
//============================================================================
$define ${QF::QF-dyn}
#endif /* (QF_MAX_EPOOL > 0U) dynamic events configured */
/*! @file
* @brief ::QMPool implementation (Memory Pool)
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
Q_DEFINE_THIS_MODULE("qf_mem")
$define ${QF::QMPool}
/*! @file
* @brief QActive_ctor() definition
*
* @details
* This file must remain separate from the rest to avoid pulling in the
* "virtual" functions QHsm_init_() and QHsm_dispatch_() in case they
* are not used by the application.
*
* @sa qf_qmact.c
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
Q_DEFINE_THIS_MODULE("qf_qact")
//============================================================================
$define ${QF::QActive::registry_[QF_MAX_ACTIVE + 1U]}
$define ${QF::QF-base::intLock_}
$define ${QF::QF-base::intNest_}
$define ${QF::QF-pkg::readySet_}
$define ${QF::QF-pkg::bzero}
//============================================================================
$define ${QF::QActive::ctor}
$define ${QF::QActive::register_}
$define ${QF::QActive::unregister_}
//============================================================================
$define ${QF-types::QF_LOG2}
/*! @file
* @brief QMActive_ctor() definition
*
* @details
* This file must remain separate from the rest to avoid pulling in the
* "virtual" functions QHsm_init_() and QHsm_dispatch_() in case they
* are not used by the application.
*
* @sa qf_qact.c
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope interface */
/*Q_DEFINE_THIS_MODULE("qf_qmact")*/
/*
* This internal macro encapsulates the violation of MISRA-C 2012
* Rule 11.3(req) "A cast shall not be performed between a pointer to
* object type and a pointer to a different object type".
*/
#define QMSM_CAST_(ptr_) ((QMsm *)(ptr_))
$define ${QF::QMActive}
/*! @file
* @brief ::QEQueue implementation (QP native thread-safe queue)
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
Q_DEFINE_THIS_MODULE("qf_qeq")
$define ${QF::QEQueue}
/*! @file
* @brief Publish-Subscribe services
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
Q_DEFINE_THIS_MODULE("qf_ps")
/*==========================================================================*/
$define ${QF::QActive::subscrList_}
$define ${QF::QActive::maxPubSignal_}
$define ${QF::QActive::psInit}
$define ${QF::QActive::publish_}
$define ${QF::QActive::subscribe}
$define ${QF::QActive::unsubscribe}
$define ${QF::QActive::unsubscribeAll}
/*! @file
* @brief QF/C time events and time management services
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
Q_DEFINE_THIS_MODULE("qf_time")
#ifdef Q_SPY
/*! internal macro to encapsulate a MISRA deviation
* @details
* This internal macro encapsulates the violation of MISRA-C 2012
* Rule 11.5(A) "A conversion should not be performed from pointer to void
* into pointer to object".
*/
#define QACTIVE_CAST_(ptr_) ((QActive *)(ptr_))
#endif
$define ${QF::QTimeEvt}
/*! @file
* @brief Cooperative QV kernel, implementation of kernel-specific functions.
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope internal interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
/* protection against including this source file in a wrong project */
#ifndef QP_INC_QV_H_
#error "Source file included in a project NOT based on the QV kernel"
#endif /* QP_INC_QV_H_ */
Q_DEFINE_THIS_MODULE("qv")
/*==========================================================================*/
$define ${QV::QV-base}
$define ${QV::QF-cust}
$define ${QV::QActive}
/*! @file
* @brief QK preemptive kernel implementation
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope internal interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
/* protection against including this source file in a wrong project */
#ifndef QP_INC_QK_H_
#error "Source file included in a project NOT based on the QK kernel"
#endif /* QP_INC_QK_H_ */
Q_DEFINE_THIS_MODULE("qk")
/*==========================================================================*/
$define ${QK::QK-base}
$define ${QK::QF-cust}
$define ${QK::QActive}
$define ${QK::QK-extern-C}
/*! @file
* @brief QXK preemptive dual-mode kernel core functions
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope internal interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
/* protection against including this source file in a wrong project */
#ifndef QP_INC_QXK_H_
#error "Source file included in a project NOT based on the QXK kernel"
#endif /* QP_INC_QXK_H_ */
Q_DEFINE_THIS_MODULE("qxk")
/*==========================================================================*/
$define ${QXK::QXK-base}
$define ${QXK::QF-cust}
$define ${QXK::QActive}
$define ${QXK::QXK-extern-C}
/*==========================================================================*/
$define ${QXK-impl}
/*! @file
* @brief QXMutex_init(), QXMutex_lock(), QXMutex_tryLock() and
* QXMutex_unlock() definitions.
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
/* protection against including this source file in a wrong project */
#ifndef QP_INC_QXK_H_
#error "Source file included in a project NOT based on the QXK kernel"
#endif /* QP_INC_QXK_H_ */
Q_DEFINE_THIS_MODULE("qxk_mutex")
/*==========================================================================*/
$define ${QXK::QXMutex}
/*! @file
* @brief QXK preemptive kernel semaphore functions
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope internal interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
/* protection against including this source file in a wrong project */
#ifndef QP_INC_QXK_H_
#error "Source file included in a project NOT based on the QXK kernel"
#endif /* QP_INC_QXK_H_ */
Q_DEFINE_THIS_MODULE("qxk_sema")
/*==========================================================================*/
$define ${QXK::QXSemaphore}
/*! @file
* @brief QXK preemptive kernel extended (blocking) thread functions
*/
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope internal interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#ifdef Q_SPY /* QS software tracing enabled? */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
#else
#include "qs_dummy.h" /* disable the QS software tracing */
#endif /* Q_SPY */
/* protection against including this source file in a wrong project */
#ifndef QP_INC_QXK_H_
#error "Source file included in a project NOT based on the QXK kernel"
#endif /* QP_INC_QXK_H_ */
Q_DEFINE_THIS_MODULE("qxk_xthr")
/*==========================================================================*/
$define ${QXK::QXThread}
/*! @file
* @brief QS software tracing services
*/
#define QP_IMPL /* this is QP implementation */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS package-scope interface */
#include "qstamp.h" /* QP time-stamp */
#include "qassert.h" /* QP embedded systems-friendly assertions */
Q_DEFINE_THIS_MODULE("qs")
/* ensure that the predefined records don't overlap the
* user records (application-specific).
*/
Q_ASSERT_STATIC((enum_t)QS_PRE_MAX <= (enum_t)QS_USER);
/*==========================================================================*/
$define ${QS::QS-tx}
/*! @file
* @brief QS long-long (64-bit) output
*/
#define QP_IMPL /* this is QP implementation */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS package-scope internal interface */
$define ${QS::QS-tx-64bit}
/*! @file
* @brief QS floating point output implementation
*/
#define QP_IMPL /* this is QP implementation */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS package-scope internal interface */
$define ${QS::QS-tx-fp}
/*! @file
* @brief QS/C receive channel services
*/
#define QP_IMPL /* this is QP implementation */
#include "qs_port.h" /* QS port */
#include "qs_pkg.h" /* QS package-scope interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
Q_DEFINE_THIS_MODULE("qs_rx")
/*==========================================================================*/
#if (QS_OBJ_PTR_SIZE == 1U)
typedef uint8_t QSObj;
#elif (QS_OBJ_PTR_SIZE == 2U)
typedef uint16_t QSObj;
#elif (QS_OBJ_PTR_SIZE == 4U)
typedef uint32_t QSObj;
#elif (QS_OBJ_PTR_SIZE == 8U)
typedef uint64_t QSObj;
#endif
/*! @cond
* Exclude the following internals from the Doxygen documentation
* Extended-state variables used for parsing various QS-RX Records
*/
typedef struct {
uint32_t param1;
uint32_t param2;
uint32_t param3;
uint8_t idx;
uint8_t cmdId;
} CmdVar;
typedef struct {
uint_fast8_t rate;
} TickVar;
typedef struct {
uint16_t offs;
uint8_t size;
uint8_t num;
uint8_t idx;
} PeekVar;
typedef struct {
uint32_t data;
uint16_t offs;
uint8_t size;
uint8_t num;
uint8_t idx;
uint8_t fill;
} PokeVar;
typedef struct {
uint8_t data[16];
uint8_t idx;
int8_t recId; /* global/local */
} FltVar;
typedef struct {
QSObj addr;
uint8_t idx;
uint8_t kind; /* see qs.h, enum QSpyObjKind */
int8_t recId;
} ObjVar;
typedef struct {
QEvt *e;
uint8_t *p;
QSignal sig;
uint16_t len;
uint8_t prio;
uint8_t idx;
} EvtVar;
/* extended-state variables for the current state */
static struct {
union Variant {
CmdVar cmd;
TickVar tick;
PeekVar peek;
PokeVar poke;
FltVar flt;
ObjVar obj;
EvtVar evt;
#ifdef Q_UTEST
struct QS_TProbe tp;
#endif /* Q_UTEST */
} var;
uint8_t state;
uint8_t esc;
uint8_t seq;
uint8_t chksum;
} l_rx;
enum {
ERROR_STATE,
WAIT4_SEQ,
WAIT4_REC,
WAIT4_INFO_FRAME,
WAIT4_CMD_ID,
WAIT4_CMD_PARAM1,
WAIT4_CMD_PARAM2,
WAIT4_CMD_PARAM3,
WAIT4_CMD_FRAME,
WAIT4_RESET_FRAME,
WAIT4_TICK_RATE,
WAIT4_TICK_FRAME,
WAIT4_PEEK_OFFS,
WAIT4_PEEK_SIZE,
WAIT4_PEEK_NUM,
WAIT4_PEEK_FRAME,
WAIT4_POKE_OFFS,
WAIT4_POKE_SIZE,
WAIT4_POKE_NUM,
WAIT4_POKE_DATA,
WAIT4_POKE_FRAME,
WAIT4_FILL_DATA,
WAIT4_FILL_FRAME,
WAIT4_FILTER_LEN,
WAIT4_FILTER_DATA,
WAIT4_FILTER_FRAME,
WAIT4_OBJ_KIND,
WAIT4_OBJ_ADDR,
WAIT4_OBJ_FRAME,
WAIT4_QUERY_KIND,
WAIT4_QUERY_FRAME,
WAIT4_EVT_PRIO,
WAIT4_EVT_SIG,
WAIT4_EVT_LEN,
WAIT4_EVT_PAR,
WAIT4_EVT_FRAME
#ifdef Q_UTEST
,
WAIT4_TEST_SETUP_FRAME,
WAIT4_TEST_TEARDOWN_FRAME,
WAIT4_TEST_PROBE_DATA,
WAIT4_TEST_PROBE_ADDR,
WAIT4_TEST_PROBE_FRAME,
WAIT4_TEST_CONTINUE_FRAME
#endif /* Q_UTEST */
};
/* static helper functions... */
static void QS_rxParseData_(uint8_t const b);
//static void QS_rxHandleGoodFrame_(uint8_t const state);
static void QS_rxHandleBadFrame_(uint8_t const state);
static void QS_rxReportAck_(int8_t const recId);
static void QS_rxReportError_(int8_t const code);
static void QS_rxReportDone_(int8_t const recId);
static void QS_rxPoke_(void);
/*! Internal QS-RX macro to encapsulate transition in the QS-RX FSM */
#define QS_RX_TRAN_(target_) (l_rx.state = (uint8_t)(target_))
/*! @endcond */
/*==========================================================================*/
$define ${QS::QS-rx}
/*==========================================================================*/
static void QS_rxParseData_(uint8_t const b) {
switch (l_rx.state) {
case WAIT4_SEQ: {
++l_rx.seq;
if (l_rx.seq != b) {
QS_rxReportError_(0x42);
l_rx.seq = b; /* update the sequence */
}
QS_RX_TRAN_(WAIT4_REC);
break;
}
case WAIT4_REC: {
switch (b) {
case QS_RX_INFO:
QS_RX_TRAN_(WAIT4_INFO_FRAME);
break;
case QS_RX_COMMAND:
QS_RX_TRAN_(WAIT4_CMD_ID);
break;
case QS_RX_RESET:
QS_RX_TRAN_(WAIT4_RESET_FRAME);
break;
case QS_RX_TICK:
QS_RX_TRAN_(WAIT4_TICK_RATE);
break;
case QS_RX_PEEK:
if (QS_rxPriv_.currObj[AP_OBJ] != (void *)0) {
l_rx.var.peek.offs = 0U;
l_rx.var.peek.idx = 0U;
QS_RX_TRAN_(WAIT4_PEEK_OFFS);
}
else {
QS_rxReportError_((int8_t)QS_RX_PEEK);
QS_RX_TRAN_(ERROR_STATE);
}
break;
case QS_RX_POKE: /* intentionally fall-through */
case QS_RX_FILL:
l_rx.var.poke.fill =
((b == (uint8_t)QS_RX_FILL) ? 1U : 0U);
if (QS_rxPriv_.currObj[AP_OBJ] != (void *)0) {
l_rx.var.poke.offs = 0U;
l_rx.var.poke.idx = 0U;
QS_RX_TRAN_(WAIT4_POKE_OFFS);
}
else {
QS_rxReportError_((l_rx.var.poke.fill != 0U)
? (int8_t)QS_RX_FILL
: (int8_t)QS_RX_POKE);
QS_RX_TRAN_(ERROR_STATE);
}
break;
case QS_RX_GLB_FILTER: /* intentionally fall-through */
case QS_RX_LOC_FILTER:
l_rx.var.flt.recId = (int8_t)b;
QS_RX_TRAN_(WAIT4_FILTER_LEN);
break;
case QS_RX_AO_FILTER: /* intentionally fall-through */
case QS_RX_CURR_OBJ:
l_rx.var.obj.recId = (int8_t)b;
QS_RX_TRAN_(WAIT4_OBJ_KIND);
break;
case QS_RX_QUERY_CURR:
l_rx.var.obj.recId = (int8_t)QS_RX_QUERY_CURR;
QS_RX_TRAN_(WAIT4_QUERY_KIND);
break;
case QS_RX_EVENT:
QS_RX_TRAN_(WAIT4_EVT_PRIO);
break;
#ifdef Q_UTEST
case QS_RX_TEST_SETUP:
QS_RX_TRAN_(WAIT4_TEST_SETUP_FRAME);
break;
case QS_RX_TEST_TEARDOWN:
QS_RX_TRAN_(WAIT4_TEST_TEARDOWN_FRAME);
break;
case QS_RX_TEST_CONTINUE:
QS_RX_TRAN_(WAIT4_TEST_CONTINUE_FRAME);
break;
case QS_RX_TEST_PROBE:
if (QS_testData.tpNum
< (uint8_t)(sizeof(QS_testData.tpBuf)
/ sizeof(QS_testData.tpBuf[0])))
{
l_rx.var.tp.data = 0U;
l_rx.var.tp.idx = 0U;
QS_RX_TRAN_(WAIT4_TEST_PROBE_DATA);
}
else { /* the number of Test-Probes exceeded */
QS_rxReportError_((int8_t)QS_RX_TEST_PROBE);
QS_RX_TRAN_(ERROR_STATE);
}
break;
#endif /* Q_UTEST */
default:
QS_rxReportError_(0x43);
QS_RX_TRAN_(ERROR_STATE);
break;
}
break;
}
case WAIT4_INFO_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
case WAIT4_CMD_ID: {
l_rx.var.cmd.cmdId = b;
l_rx.var.cmd.idx = 0U;
l_rx.var.cmd.param1 = 0U;
l_rx.var.cmd.param2 = 0U;
l_rx.var.cmd.param3 = 0U;
QS_RX_TRAN_(WAIT4_CMD_PARAM1);
break;
}
case WAIT4_CMD_PARAM1: {
l_rx.var.cmd.param1 |= ((uint32_t)b << l_rx.var.cmd.idx);
l_rx.var.cmd.idx += 8U;
if (l_rx.var.cmd.idx == (8U * 4U)) {
l_rx.var.cmd.idx = 0U;
QS_RX_TRAN_(WAIT4_CMD_PARAM2);
}
break;
}
case WAIT4_CMD_PARAM2: {
l_rx.var.cmd.param2 |= ((uint32_t)b << l_rx.var.cmd.idx);
l_rx.var.cmd.idx += 8U;
if (l_rx.var.cmd.idx == (8U * 4U)) {
l_rx.var.cmd.idx = 0U;
QS_RX_TRAN_(WAIT4_CMD_PARAM3);
}
break;
}
case WAIT4_CMD_PARAM3: {
l_rx.var.cmd.param3 |= ((uint32_t)b << l_rx.var.cmd.idx);
l_rx.var.cmd.idx += 8U;
if (l_rx.var.cmd.idx == (8U * 4U)) {
l_rx.var.cmd.idx = 0U;
QS_RX_TRAN_(WAIT4_CMD_FRAME);
}
break;
}
case WAIT4_CMD_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
case WAIT4_RESET_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
case WAIT4_TICK_RATE: {
l_rx.var.tick.rate = (uint_fast8_t)b;
QS_RX_TRAN_(WAIT4_TICK_FRAME);
break;
}
case WAIT4_TICK_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
case WAIT4_PEEK_OFFS: {
if (l_rx.var.peek.idx == 0U) {
l_rx.var.peek.offs = (uint16_t)b;
l_rx.var.peek.idx += 8U;
}
else {
l_rx.var.peek.offs |= (uint16_t)((uint16_t)b << 8U);
QS_RX_TRAN_(WAIT4_PEEK_SIZE);
}
break;
}
case WAIT4_PEEK_SIZE: {
if ((b == 1U) || (b == 2U) || (b == 4U)) {
l_rx.var.peek.size = b;
QS_RX_TRAN_(WAIT4_PEEK_NUM);
}
else {
QS_rxReportError_((int8_t)QS_RX_PEEK);
QS_RX_TRAN_(ERROR_STATE);
}
break;
}
case WAIT4_PEEK_NUM: {
l_rx.var.peek.num = b;
QS_RX_TRAN_(WAIT4_PEEK_FRAME);
break;
}
case WAIT4_PEEK_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
case WAIT4_POKE_OFFS: {
if (l_rx.var.poke.idx == 0U) {
l_rx.var.poke.offs = (uint16_t)b;
l_rx.var.poke.idx = 1U;
}
else {
l_rx.var.poke.offs |= (uint16_t)((uint16_t)b << 8U);
QS_RX_TRAN_(WAIT4_POKE_SIZE);
}
break;
}
case WAIT4_POKE_SIZE: {
if ((b == 1U) || (b == 2U) || (b == 4U)) {
l_rx.var.poke.size = b;
QS_RX_TRAN_(WAIT4_POKE_NUM);
}
else {
QS_rxReportError_((l_rx.var.poke.fill != 0U)
? (int8_t)QS_RX_FILL
: (int8_t)QS_RX_POKE);
QS_RX_TRAN_(ERROR_STATE);
}
break;
}
case WAIT4_POKE_NUM: {
if (b > 0U) {
l_rx.var.poke.num = b;
l_rx.var.poke.data = 0U;
l_rx.var.poke.idx = 0U;
QS_RX_TRAN_((l_rx.var.poke.fill != 0U)
? WAIT4_FILL_DATA
: WAIT4_POKE_DATA);
}
else {
QS_rxReportError_((l_rx.var.poke.fill != 0U)
? (int8_t)QS_RX_FILL
: (int8_t)QS_RX_POKE);
QS_RX_TRAN_(ERROR_STATE);
}
break;
}
case WAIT4_FILL_DATA: {
l_rx.var.poke.data |= ((uint32_t)b << l_rx.var.poke.idx);
l_rx.var.poke.idx += 8U;
if ((uint8_t)(l_rx.var.poke.idx >> 3U) == l_rx.var.poke.size) {
QS_RX_TRAN_(WAIT4_FILL_FRAME);
}
break;
}
case WAIT4_POKE_DATA: {
l_rx.var.poke.data |= ((uint32_t)b << l_rx.var.poke.idx);
l_rx.var.poke.idx += 8U;
if ((uint8_t)(l_rx.var.poke.idx >> 3U) == l_rx.var.poke.size) {
QS_rxPoke_();
--l_rx.var.poke.num;
if (l_rx.var.poke.num == 0U) {
QS_RX_TRAN_(WAIT4_POKE_FRAME);
}
}
break;
}
case WAIT4_FILL_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
case WAIT4_POKE_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
case WAIT4_FILTER_LEN: {
if (b == sizeof(l_rx.var.flt.data)) {
l_rx.var.flt.idx = 0U;
QS_RX_TRAN_(WAIT4_FILTER_DATA);
}
else {
QS_rxReportError_(l_rx.var.flt.recId);
QS_RX_TRAN_(ERROR_STATE);
}
break;
}
case WAIT4_FILTER_DATA: {
l_rx.var.flt.data[l_rx.var.flt.idx] = b;
++l_rx.var.flt.idx;
if (l_rx.var.flt.idx == sizeof(l_rx.var.flt.data)) {
QS_RX_TRAN_(WAIT4_FILTER_FRAME);
}
break;
}
case WAIT4_FILTER_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
case WAIT4_OBJ_KIND: {
if (b <= (uint8_t)SM_AO_OBJ) {
l_rx.var.obj.kind = b;
l_rx.var.obj.addr = 0U;
l_rx.var.obj.idx = 0U;
QS_RX_TRAN_(WAIT4_OBJ_ADDR);
}
else {
QS_rxReportError_(l_rx.var.obj.recId);
QS_RX_TRAN_(ERROR_STATE);
}
break;
}
case WAIT4_OBJ_ADDR: {
l_rx.var.obj.addr |= ((QSObj)b << l_rx.var.obj.idx);
l_rx.var.obj.idx += 8U;
if (l_rx.var.obj.idx == (uint8_t)(8U * QS_OBJ_PTR_SIZE)) {
QS_RX_TRAN_(WAIT4_OBJ_FRAME);
}
break;
}
case WAIT4_OBJ_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
case WAIT4_QUERY_KIND: {
if (b < (uint8_t)MAX_OBJ) {
l_rx.var.obj.kind = b;
QS_RX_TRAN_(WAIT4_QUERY_FRAME);
}
else {
QS_rxReportError_(l_rx.var.obj.recId);
QS_RX_TRAN_(ERROR_STATE);
}
break;
}
case WAIT4_QUERY_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
case WAIT4_EVT_PRIO: {
l_rx.var.evt.prio = b;
l_rx.var.evt.sig = 0U;
l_rx.var.evt.idx = 0U;
QS_RX_TRAN_(WAIT4_EVT_SIG);
break;
}
case WAIT4_EVT_SIG: {
l_rx.var.evt.sig |= (QSignal)((uint32_t)b << l_rx.var.evt.idx);
l_rx.var.evt.idx += 8U;
if (l_rx.var.evt.idx == (uint8_t)(8U * Q_SIGNAL_SIZE)) {
l_rx.var.evt.len = 0U;
l_rx.var.evt.idx = 0U;
QS_RX_TRAN_(WAIT4_EVT_LEN);
}
break;
}
case WAIT4_EVT_LEN: {
l_rx.var.evt.len |= (uint16_t)((uint32_t)b << l_rx.var.evt.idx);
l_rx.var.evt.idx += 8U;
if (l_rx.var.evt.idx == (8U * 2U)) {
if ((l_rx.var.evt.len + sizeof(QEvt)) <=
QF_poolGetMaxBlockSize())
{
/* report Ack before generating any other QS records */
QS_rxReportAck_((int8_t)QS_RX_EVENT);
l_rx.var.evt.e = QF_newX_(
((uint_fast16_t)l_rx.var.evt.len + sizeof(QEvt)),
0U, /* margin */
(enum_t)l_rx.var.evt.sig);
if (l_rx.var.evt.e != (QEvt *)0) { /* evt allocated? */
l_rx.var.evt.p = (uint8_t *)l_rx.var.evt.e;
l_rx.var.evt.p = &l_rx.var.evt.p[sizeof(QEvt)];
if (l_rx.var.evt.len > 0U) {
QS_RX_TRAN_(WAIT4_EVT_PAR);
}
else {
QS_RX_TRAN_(WAIT4_EVT_FRAME);
}
}
else {
QS_rxReportError_((int8_t)QS_RX_EVENT);
QS_RX_TRAN_(ERROR_STATE);
}
}
else {
QS_rxReportError_((int8_t)QS_RX_EVENT);
QS_RX_TRAN_(ERROR_STATE);
}
}
break;
}
case WAIT4_EVT_PAR: { /* event parameters */
*l_rx.var.evt.p = b;
++l_rx.var.evt.p;
--l_rx.var.evt.len;
if (l_rx.var.evt.len == 0U) {
QS_RX_TRAN_(WAIT4_EVT_FRAME);
}
break;
}
case WAIT4_EVT_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
#ifdef Q_UTEST
case WAIT4_TEST_SETUP_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
case WAIT4_TEST_TEARDOWN_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
case WAIT4_TEST_CONTINUE_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
case WAIT4_TEST_PROBE_DATA: {
l_rx.var.tp.data |= ((uint32_t)b << l_rx.var.tp.idx);
l_rx.var.tp.idx += 8U;
if (l_rx.var.tp.idx == (uint8_t)(8U * sizeof(uint32_t))) {
l_rx.var.tp.addr = 0U;
l_rx.var.tp.idx = 0U;
QS_RX_TRAN_(WAIT4_TEST_PROBE_ADDR);
}
break;
}
case WAIT4_TEST_PROBE_ADDR: {
l_rx.var.tp.addr |= ((QSFun)b << l_rx.var.tp.idx);
l_rx.var.tp.idx += 8U;
if (l_rx.var.tp.idx == (uint8_t)(8U * QS_FUN_PTR_SIZE)) {
QS_RX_TRAN_(WAIT4_TEST_PROBE_FRAME);
}
break;
}
case WAIT4_TEST_PROBE_FRAME: {
/* keep ignoring the data until a frame is collected */
break;
}
#endif /* Q_UTEST */
case ERROR_STATE: {
/* keep ignoring the data until a good frame is collected */
break;
}
default: { /* unexpected or unimplemented state */
QS_rxReportError_(0x45);
QS_RX_TRAN_(ERROR_STATE);
break;
}
}
}
/*..........................................................................*/
static void QS_rxHandleBadFrame_(uint8_t const state) {
QS_rxReportError_(0x50); /* report error for all bad frames */
switch (state) {
case WAIT4_EVT_FRAME: {
Q_ASSERT_ID(910, l_rx.var.evt.e != (QEvt *)0);
#if (QF_MAX_EPOOL > 0U)
QF_gc(l_rx.var.evt.e); /* don't leak an allocated event */
#endif
break;
}
default: {
/* intentionally empty */
break;
}
}
}
/*..........................................................................*/
static void QS_rxReportAck_(int8_t const recId) {
QS_CRIT_STAT_
QS_CRIT_E_();
QS_beginRec_((uint_fast8_t)QS_RX_STATUS);
QS_U8_PRE_(recId); /* record ID */
QS_endRec_();
QS_CRIT_X_();
QS_REC_DONE(); /* user callback (if defined) */
}
/*..........................................................................*/
static void QS_rxReportError_(int8_t const code) {
QS_CRIT_STAT_
QS_CRIT_E_();
QS_beginRec_((uint_fast8_t)QS_RX_STATUS);
QS_U8_PRE_(0x80U | (uint8_t)code); /* error code */
QS_endRec_();
QS_CRIT_X_();
QS_REC_DONE(); /* user callback (if defined) */
}
/*..........................................................................*/
static void QS_rxReportDone_(int8_t const recId) {
QS_CRIT_STAT_
QS_CRIT_E_();
QS_beginRec_((uint_fast8_t)QS_TARGET_DONE);
QS_TIME_PRE_(); /* timestamp */
QS_U8_PRE_(recId); /* record ID */
QS_endRec_();
QS_CRIT_X_();
QS_REC_DONE(); /* user callback (if defined) */
}
/*..........................................................................*/
static void QS_rxPoke_(void) {
uint8_t *ptr = (uint8_t *)QS_rxPriv_.currObj[AP_OBJ];
ptr = &ptr[l_rx.var.poke.offs];
switch (l_rx.var.poke.size) {
case 1:
*ptr = (uint8_t)l_rx.var.poke.data;
break;
case 2:
*(uint16_t *)ptr = (uint16_t)l_rx.var.poke.data;
break;
case 4:
*(uint32_t *)ptr = l_rx.var.poke.data;
break;
default:
Q_ERROR_ID(900);
break;
}
l_rx.var.poke.data = 0U;
l_rx.var.poke.idx = 0U;
l_rx.var.poke.offs += (uint16_t)l_rx.var.poke.size;
}
/*! @file
* @brief QUTest unit testing harness
*/
/* Include this content in the build only when Q_UTEST is defined */
#ifdef Q_UTEST
#define QP_IMPL /* this is QP implementation */
#include "qf_port.h" /* QF port */
#include "qf_pkg.h" /* QF package-scope interface */
#include "qassert.h" /* QP embedded systems-friendly assertions */
#include "qs_port.h" /* include QS port */
#include "qs_pkg.h" /* QS facilities for pre-defined trace records */
/*==========================================================================*/
/* QUTest unit testing harness */
$define ${QUTest}
/*..........................................................................*/
Q_NORETURN Q_onAssert(
char const * module,
int_t location)
{
QS_BEGIN_NOCRIT_PRE_(QS_ASSERT_FAIL, 0U)
QS_TIME_PRE_();
QS_U16_PRE_(location);
QS_STR_PRE_((module != (char *)0) ? module : "?");
QS_END_NOCRIT_PRE_()
QS_onFlush(); /* flush the assertion record to the host */
QS_onCleanup(); /* cleanup after the failure */
QS_onReset(); /* reset the target to prevent the code from continuing */
for (;;) { /* QS_onReset() should not return, but this loop ensures no return */
}
}
/*..........................................................................*/
QSTimeCtr QS_onGetTime(void) {
return (++QS_testData.testTime);
}
/*==========================================================================*/
/* QP-stub for QUTest
* NOTE: The QP-stub is needed for unit testing QP applications, but might
* NOT be needed for testing QP itself. In that case, the build process
* can define Q_UTEST=0 to exclude the QP-stub from the build.
*/
#if Q_UTEST != 0
Q_DEFINE_THIS_MODULE("qutest")
$define ${QUTest-stub}
#endif /* Q_UTEST != 0 */
#endif /* Q_UTEST */
/*! @file
* @brief Application build time-stamp
* @details
* This module needs to be re-compiled in every new software build. To achieve
* this, it is recommended to delete the object file (qstamp.o, or qstamp.obj)
* in the build directory before each build. (Most development tools allow
* you to specify a pre-build command, which is the ideal place to delete
* the qstamp object file.)
*/
#include "qstamp.h"
/*! the calendar date of the last translation of the form: "Mmm dd yyyy" */
char const Q_BUILD_DATE[12] = __DATE__;
/*! the time of the last translation of the form: "hh:mm:ss" */
char const Q_BUILD_TIME[9] = __TIME__;
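/* A minimal usage sketch (the function App_reportBuild() is hypothetical,
* NOT part of QP/C): an application might report the build time-stamp at
* startup, for example:
*
*     #include "qstamp.h"
*     #include <stdio.h>
*
*     void App_reportBuild(void) {
*         printf("built on %s at %s\n", Q_BUILD_DATE, Q_BUILD_TIME);
*     }
*/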