mirror of https://github.com/azure-rtos/threadx synced 2025-01-16 07:42:57 +08:00

Add Cortex M55 and M85 to ports

Scott Larson 2022-07-07 11:42:14 -07:00
parent 83b57acde9
commit 54cda6ee9e
126 changed files with 27318 additions and 0 deletions


@@ -0,0 +1,21 @@
target_sources(${PROJECT_NAME} PRIVATE
${CMAKE_CURRENT_LIST_DIR}/src/txe_thread_secure_stack_allocate.c
${CMAKE_CURRENT_LIST_DIR}/src/txe_thread_secure_stack_free.c
${CMAKE_CURRENT_LIST_DIR}/src/tx_initialize_low_level.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_context_restore.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_context_save.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_interrupt_control.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_interrupt_disable.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_interrupt_restore.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_schedule.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_secure_stack.c
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_secure_stack_allocate.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_secure_stack_free.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_stack_build.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_system_return.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_timer_interrupt.S
)
target_include_directories(${PROJECT_NAME} PUBLIC
inc
)


@@ -0,0 +1,649 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Port Specific */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M55/AC6 */
/* 6.1.11 */
/* */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This file contains data type definitions that make the ThreadX */
/* real-time kernel function identically on a variety of different */
/* processor architectures. For example, the size or number of bits */
/* in an "int" data type vary between microprocessor architectures and */
/* even C compilers for the same microprocessor. ThreadX does not */
/* directly use native C data types. Instead, ThreadX creates its */
/* own special types that can be mapped to actual data types by this */
/* file to guarantee consistency in the interface and functionality. */
/* */
/* This file replaces the previous Cortex-M55 files. It unifies */
/* the Cortex-M55 compilers into one common file. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 03-02-2021 Scott Larson Modified comment(s), added */
/* ULONG64_DEFINED, */
/* resulting in version 6.1.5 */
/* 06-02-2021 Scott Larson Modified comment(s), removed */
/* unneeded header file, funcs */
/* set_control and get_control */
/* changed to inline, */
/* added symbol to enable */
/* stack error handler, */
/* resulting in version 6.1.7 */
/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* 01-31-2022 Scott Larson Modified comment(s), unified */
/* this file across compilers, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
/* 04-25-2022 Scott Larson Modified comments and added */
/* volatile to registers, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
#ifndef TX_PORT_H
#define TX_PORT_H
/* Determine if the optional ThreadX user define file should be used. */
#ifdef TX_INCLUDE_USER_DEFINE_FILE
/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
#endif /* TX_INCLUDE_USER_DEFINE_FILE */
/* Define compiler library include files. */
#include <stdlib.h>
#include <string.h>
#ifdef __ICCARM__
#include <intrinsics.h> /* IAR Intrinsics */
#define __asm__ __asm /* Define to make all inline asm from each compiler look similar */
#define _tx_control_get __get_CONTROL
#define _tx_control_set __set_CONTROL
#define _tx_ipsr_get __get_IPSR
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#endif /* __ICCARM__ */
#ifdef __ARMCOMPILER_VERSION
#include <arm_compat.h>
#endif
/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
typedef unsigned char UCHAR;
typedef int INT;
typedef unsigned int UINT;
typedef long LONG;
typedef unsigned long ULONG;
typedef unsigned long long ULONG64;
typedef short SHORT;
typedef unsigned short USHORT;
#define ULONG64_DEFINED
/* Function prototypes for this port. */
struct TX_THREAD_STRUCT;
UINT _txe_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *thread_ptr, ULONG stack_size);
UINT _txe_thread_secure_stack_free(struct TX_THREAD_STRUCT *thread_ptr);
UINT _tx_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *tx_thread, ULONG stack_size);
UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
/* Define the system API mappings based on the error checking
selected by the user. Note: this section is only applicable to
application source code, hence the conditional that turns off this
stuff when the include file is processed by the ThreadX source. */
#ifndef TX_SOURCE_CODE
/* Determine if error checking is desired. If so, map API functions
to the appropriate error checking front-ends. Otherwise, map API
functions to the core functions that actually perform the work.
Note: error checking is enabled by default. */
#ifdef TX_DISABLE_ERROR_CHECKING
/* Services without error checking. */
#define tx_thread_secure_stack_allocate _tx_thread_secure_stack_allocate
#define tx_thread_secure_stack_free _tx_thread_secure_stack_free
#else
/* Services with error checking. */
#define tx_thread_secure_stack_allocate _txe_thread_secure_stack_allocate
#define tx_thread_secure_stack_free _txe_thread_secure_stack_free
#endif /* TX_DISABLE_ERROR_CHECKING */
#endif /* TX_SOURCE_CODE */
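/* Example (illustrative sketch; the thread object and stack size below are hypothetical):
   with ThreadX in the non-secure zone, a thread that will call secure functions first
   allocates a secure stack through the API mapped above, e.g.:

        UINT status;

        status = tx_thread_secure_stack_allocate(&my_thread, 256);
        if (status != TX_SUCCESS)
        {
            // Handle the secure stack allocation failure.
        }
*/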
/* This port has a usage fault handler in _tx_initialize_low_level for stack exceptions. */
#define TX_PORT_THREAD_STACK_ERROR_HANDLING
/* Define the priority levels for ThreadX. Legal values range
from 32 to 1024 and MUST be evenly divisible by 32. */
#ifndef TX_MAX_PRIORITIES
#define TX_MAX_PRIORITIES 32
#endif
/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
thread creation is less than this value, the thread create call will return an error. */
#ifndef TX_MINIMUM_STACK
#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
#endif
/* Define the system timer thread's default stack size and priority. These are only applicable
if TX_TIMER_PROCESS_IN_ISR is not defined. */
#ifndef TX_TIMER_THREAD_STACK_SIZE
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
#ifndef TX_TIMER_THREAD_PRIORITY
#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
/* Define various constants for the ThreadX Cortex-M port. */
#define TX_INT_DISABLE 1 /* Disable interrupts */
#define TX_INT_ENABLE 0 /* Enable interrupts */
/* Define the clock source for trace event entry time stamp. The following two items are port specific.
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
#endif
/* Define the port specific options for the _tx_build_options variable. This variable indicates
how the ThreadX library was built. */
#define TX_PORT_SPECIFIC_BUILD_OPTIONS (0)
/* Define the in-line initialization constant so that modules with in-line
initialization capabilities can prevent their initialization from being
a function call. */
#ifdef TX_MISRA_ENABLE
#define TX_DISABLE_INLINE
#else
#define TX_INLINE_INITIALIZATION
#endif
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
logic. */
#ifndef TX_MISRA_ENABLE
#ifdef TX_ENABLE_STACK_CHECKING
#undef TX_DISABLE_STACK_FILLING
#endif
#endif
/* Define the TX_THREAD control block extensions for this port. The main reason
for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
#define TX_THREAD_EXTENSION_0
#define TX_THREAD_EXTENSION_1
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
/* IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context; \
VOID *tx_thread_iar_tls_pointer;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_iar_tls_pointer;
#endif
#else
/* No IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
#define TX_BLOCK_POOL_EXTENSION
#define TX_BYTE_POOL_EXTENSION
#define TX_EVENT_FLAGS_GROUP_EXTENSION
#define TX_MUTEX_EXTENSION
#define TX_QUEUE_EXTENSION
#define TX_SEMAPHORE_EXTENSION
#define TX_TIMER_EXTENSION
/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
#define TX_THREAD_USER_EXTENSION
#endif
/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
tx_thread_shell_entry, and tx_thread_terminate. */
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
void *_tx_iar_create_per_thread_tls_area(void);
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
void __iar_Initlocks(void);
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = _tx_iar_create_per_thread_tls_area();
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0); \
if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0);
#endif
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION do {__iar_Initlocks();} while(0);
#else /* No IAR library support. */
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* Define the size of the secure stack for the timer thread and use the extension to allocate the secure stack. */
#define TX_TIMER_THREAD_SECURE_STACK_SIZE 256
#define TX_TIMER_INITIALIZE_EXTENSION(status) _tx_thread_secure_stack_allocate(&_tx_timer_thread, TX_TIMER_THREAD_SECURE_STACK_SIZE);
#endif
#if defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__)
#ifdef TX_MISRA_ENABLE
ULONG _tx_misra_control_get(void);
void _tx_misra_control_set(ULONG value);
ULONG _tx_misra_fpccr_get(void);
void _tx_misra_vfp_touch(void);
#else /* TX_MISRA_ENABLE not defined */
#ifdef __GNUC__ /* GCC and ARM Compiler 6 */
__attribute__( ( always_inline ) ) static inline ULONG _tx_control_get(void)
{
ULONG control_value;
__asm__ volatile (" MRS %0,CONTROL ": "=r" (control_value) );
return(control_value);
}
__attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG control_value)
{
__asm__ volatile (" MSR CONTROL,%0": : "r" (control_value): "memory" );
}
#endif /* __GNUC__ */
/* Touch VFP register in order to flush. Works for AC6/GCC/IAR compilers. */
#define TX_VFP_TOUCH() __asm__ volatile ("VMOV.F32 s0, s0");
#endif /* TX_MISRA_ENABLE */
/* A completed thread falls into _thread_shell_entry and we can simply deactivate the FPU via CONTROL.FPCA
in order to ensure no lazy stacking will occur. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
}
#else
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
}
#endif
/* A thread can be terminated by another thread, so we first check if it's self-terminating and not in an ISR.
If so, deactivate the FPU via CONTROL.FPCA. Otherwise we are in an interrupt or another thread is terminating
this one, so if the FPCCR.LSPACT bit is set, we need to save the CONTROL.FPCA state, touch the FPU to flush
the lazy FPU save, then restore the CONTROL.FPCA state. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
} \
} \
}
#else
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = _tx_misra_fpccr_get(); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
_tx_misra_vfp_touch(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
} \
} \
}
#endif
#else /* No VFP in use */
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
#endif /* defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__) */
/* Define the ThreadX object creation extensions for the remaining objects. */
#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
/* Define the ThreadX object deletion extensions for the remaining objects. */
#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
/* Define the get system state macro. */
#ifndef TX_THREAD_GET_SYSTEM_STATE
#ifndef TX_MISRA_ENABLE
#if defined(__GNUC__) /* GCC and AC6 */
__attribute__( ( always_inline ) ) static inline UINT _tx_ipsr_get(void)
{
UINT ipsr_value;
__asm__ volatile (" MRS %0,IPSR ": "=r" (ipsr_value) );
return(ipsr_value);
}
#endif /* GCC and AC6 IPSR_get function. */
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_ipsr_get())
#else /* TX_MISRA_ENABLE is defined, use MISRA function. */
ULONG _tx_misra_ipsr_get(VOID);
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_misra_ipsr_get())
#endif /* TX_MISRA_ENABLE */
#endif /* TX_THREAD_GET_SYSTEM_STATE */
/* Define the check for whether or not to call the _tx_thread_system_return function. A non-zero value
indicates that _tx_thread_system_return should not be called. This overrides the definition in tx_thread.h
for Cortex-M so that we don't waste time checking the _tx_thread_system_state variable that is always
zero after initialization for Cortex-M ports. */
#ifndef TX_THREAD_SYSTEM_RETURN_CHECK
#define TX_THREAD_SYSTEM_RETURN_CHECK(c) (c) = ((ULONG) _tx_thread_preempt_disable);
#endif
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* Initialize secure stacks for threads calling secure functions. */
extern void _tx_thread_secure_stack_initialize(void);
#define TX_INITIALIZE_KERNEL_ENTER_EXTENSION _tx_thread_secure_stack_initialize();
#endif
/* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
prevent early scheduling on Cortex-M parts. */
#define TX_PORT_SPECIFIC_POST_INITIALIZATION _tx_thread_preempt_disable++;
#ifndef TX_DISABLE_INLINE
/* Define the TX_LOWEST_SET_BIT_CALCULATE macro for each compiler. */
#ifdef __ICCARM__ /* IAR Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = (UINT) __CLZ(__RBIT((m)));
#elif defined(__GNUC__) /* GCC and AC6 Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) __asm__ volatile (" RBIT %0,%1 ": "=r" (m) : "r" (m) ); \
__asm__ volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) );
#else
#error "Compiler not supported."
#endif
/* Define the interrupt disable/restore macros. */
__attribute__( ( always_inline ) ) static inline UINT __get_interrupt_posture(void)
{
UINT posture;
#ifdef TX_PORT_USE_BASEPRI
__asm__ volatile ("MRS %0, BASEPRI ": "=r" (posture));
#else
__asm__ volatile ("MRS %0, PRIMASK ": "=r" (posture));
#endif
return(posture);
}
#ifdef TX_PORT_USE_BASEPRI
__attribute__( ( always_inline ) ) static inline void __set_basepri_value(UINT basepri_value)
{
__asm__ volatile ("MSR BASEPRI,%0 ": : "r" (basepri_value));
}
#else
__attribute__( ( always_inline ) ) static inline void __enable_interrupts(void)
{
__asm__ volatile ("CPSIE i": : : "memory");
}
#endif
__attribute__( ( always_inline ) ) static inline void __restore_interrupt(UINT int_posture)
{
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(int_posture);
#else
__asm__ volatile ("MSR PRIMASK,%0": : "r" (int_posture): "memory");
#endif
}
__attribute__( ( always_inline ) ) static inline UINT __disable_interrupts(void)
{
UINT int_posture;
int_posture = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(TX_PORT_BASEPRI);
#else
__asm__ volatile ("CPSID i" : : : "memory");
#endif
return(int_posture);
}
__attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_inline(void)
{
UINT interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
*((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_tx_ipsr_get() == 0)
{
interrupt_save = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(0);
#else
__enable_interrupts();
#endif
__restore_interrupt(interrupt_save);
}
}
#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
#define TX_DISABLE interrupt_save = __disable_interrupts();
#define TX_RESTORE __restore_interrupt(interrupt_save);
/* Redefine _tx_thread_system_return for improved performance. */
#define _tx_thread_system_return _tx_thread_system_return_inline
#else /* TX_DISABLE_INLINE is defined */
UINT _tx_thread_interrupt_disable(VOID);
VOID _tx_thread_interrupt_restore(UINT previous_posture);
#define TX_INTERRUPT_SAVE_AREA register UINT interrupt_save;
#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
#endif /* TX_DISABLE_INLINE */
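/* Example (illustrative sketch; the function and counter below are hypothetical): code
   brackets a short critical section with the interrupt macros defined above, e.g.:

        static ULONG shared_counter;

        VOID increment_shared_counter(VOID)
        {
        TX_INTERRUPT_SAVE_AREA

            TX_DISABLE
            shared_counter++;
            TX_RESTORE
        }
*/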
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
"Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M55/AC6 Version 6.1.10 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
#else
extern CHAR _tx_version_id[];
#endif
#endif
#endif


@@ -0,0 +1,61 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* COMPONENT DEFINITION RELEASE */
/* */
/* tx_secure_interface.h PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This file defines the ThreadX secure thread stack components, */
/* including data types and external references. */
/* It is assumed that tx_api.h and tx_port.h have already been */
/* included. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
#ifndef TX_SECURE_INTERFACE_H
#define TX_SECURE_INTERFACE_H
/* Define internal secure thread stack function prototypes. */
extern UINT _tx_thread_secure_mode_stack_initialize(void);
extern UINT _tx_thread_secure_mode_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size);
extern UINT _tx_thread_secure_mode_stack_free(TX_THREAD *thread_ptr);
extern void _tx_thread_secure_stack_initialize(void);
extern void _tx_thread_secure_stack_context_save(TX_THREAD *thread_ptr);
extern void _tx_thread_secure_stack_context_restore(TX_THREAD *thread_ptr);
#endif


@@ -0,0 +1,228 @@
Microsoft's Azure RTOS ThreadX for Cortex-M55
Using the AC6 Tools in Keil uVision
1. Import the ThreadX Projects
In order to build the ThreadX library and the ThreadX demonstration, first open
the AzureRTOS.uvmpw workspace (located in the "example_build" directory)
in Keil.
2. Building the ThreadX run-time Library
Building the ThreadX library is easy; simply set the ThreadX_Library project
as active, then build the library. You should now observe the compilation
and assembly of the ThreadX library. This project build produces the ThreadX
library file ThreadX_Library.lib.
Files tx_thread_stack_error_handler.c and tx_thread_stack_error_notify.c
replace the common files of the same name.
3. Demonstration System
The ThreadX demonstration is designed to execute under the Keil debugger on the
FVP_MPS2_Cortex-M55_MDK simulator.
Building the demonstration is easy; simply select the "Batch Build" button.
You should now observe the compilation and assembly of both the demo_secure_zone
and demo_threadx_non-secure_zone projects.
Then click the Start/Stop Debug Session button to start the simulator and begin debugging.
You are now ready to execute the ThreadX demonstration.
4. System Initialization
The entry point in ThreadX for the Cortex-M55 using AC6 tools uses the standard AC6
Cortex-M55 reset sequence. From the reset vector the C runtime will be initialized.
The ThreadX tx_initialize_low_level.s file is responsible for setting up
various system data structures, the vector area, and a periodic timer interrupt
source.
In addition, _tx_initialize_low_level determines the first available
address for use by the application, which is supplied as the sole input
parameter to your application definition function, tx_application_define.
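A minimal sketch of such a function is shown below (the byte pool object, name, and size
are hypothetical; tx_byte_pool_create is the standard ThreadX service):

    TX_BYTE_POOL    my_pool;

    void    tx_application_define(void *first_unused_memory)
    {
        /* Use the memory supplied by _tx_initialize_low_level as the start of a byte pool,
           then create application threads and other objects from it as needed. */
        tx_byte_pool_create(&my_pool, "application pool", first_unused_memory, 4096);
    }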
5. Register Usage and Stack Frames
The following defines the saved context stack frames for context switches
that occur as a result of interrupt handling or from thread-level API calls.
All suspended threads have the same stack frame in the Cortex-M55 version of
ThreadX. The top of the suspended thread's stack is pointed to by
tx_thread_stack_ptr in the associated thread control block TX_THREAD.
Non-FPU Stack Frame:
Stack Offset Stack Contents
0x00 LR Interrupted LR (LR at time of PENDSV)
0x04 r4 Software stacked GP registers
0x08 r5
0x0C r6
0x10 r7
0x14 r8
0x18 r9
0x1C r10
0x20 r11
0x24 r0 Hardware stacked registers
0x28 r1
0x2C r2
0x30 r3
0x34 r12
0x38 lr
0x3C pc
0x40 xPSR
FPU Stack Frame (only interrupted thread with FPU enabled):
Stack Offset Stack Contents
0x00 LR Interrupted LR (LR at time of PENDSV)
0x04 s16 Software stacked FPU registers
0x08 s17
0x0C s18
0x10 s19
0x14 s20
0x18 s21
0x1C s22
0x20 s23
0x24 s24
0x28 s25
0x2C s26
0x30 s27
0x34 s28
0x38 s29
0x3C s30
0x40 s31
0x44 r4 Software stacked registers
0x48 r5
0x4C r6
0x50 r7
0x54 r8
0x58 r9
0x5C r10
0x60 r11
0x64 r0 Hardware stacked registers
0x68 r1
0x6C r2
0x70 r3
0x74 r12
0x78 lr
0x7C pc
0x80 xPSR
0x84 s0 Hardware stacked FPU registers
0x88 s1
0x8C s2
0x90 s3
0x94 s4
0x98 s5
0x9C s6
0xA0 s7
0xA4 s8
0xA8 s9
0xAC s10
0xB0 s11
0xB4 s12
0xB8 s13
0xBC s14
0xC0 s15
0xC4 fpscr
6. Improving Performance
To make ThreadX and the application(s) run faster, you can enable
all compiler optimizations.
In addition, you can eliminate the ThreadX basic API error checking by
compiling your application code with the symbol TX_DISABLE_ERROR_CHECKING
defined.
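For example, the symbol can be defined before tx_api.h is included when compiling your
application sources; a minimal sketch (the file name and build mechanism are assumptions,
e.g. a -D option or the project settings):

    /* your_app.c -- build with TX_DISABLE_ERROR_CHECKING defined. */
    #define TX_DISABLE_ERROR_CHECKING
    #include "tx_api.h"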
7. Interrupt Handling
ThreadX provides complete and high-performance interrupt handling for Cortex-M55
targets. The requirements are defined in the following sub-sections:
7.1 Vector Area
The Cortex-M55 vectors start at the label __Vectors or similar. The application may modify
the vector area according to its needs. There is code in tx_initialize_low_level() that will
configure the vector base register.
7.2 Managed Interrupts
ISRs can be written completely in C (or assembly language) without any calls to
_tx_thread_context_save or _tx_thread_context_restore. These ISRs are allowed access to the
ThreadX API that is available to ISRs.
ISRs written in C will take the form (where "your_C_isr" is an entry in the vector table):
void your_C_isr(void)
{
/* ISR processing goes here, including any needed function calls. */
}
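For instance (an illustrative sketch; the semaphore object is hypothetical), such an ISR may
wake a processing thread with an ISR-safe ThreadX service:

    void your_C_isr(void)
    {
        /* Clear the peripheral interrupt source here, then notify the worker thread. */
        tx_semaphore_put(&my_isr_semaphore);
    }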
ISRs written in assembly language will take the form:
.global your_assembly_isr
.thumb_func
your_assembly_isr:
; VOID your_assembly_isr(VOID)
; {
PUSH {r0, lr}
;
; /* Do interrupt handler work here */
; /* BL <your interrupt routine in C> */
POP {r0, lr}
BX lr
; }
Note: the Cortex-M55 requires exception handlers to be thumb labels; this implies bit 0 is set.
To accomplish this, the declaration of the label has to be preceded by the assembler directive
.thumb_func to instruct the linker to create thumb labels. The label __tx_IntHandler needs to
be inserted in the correct location in the interrupt vector table. This table is typically
located in either your runtime startup file or in the tx_initialize_low_level.s file.
8. FPU Support
ThreadX for Cortex-M55 provides automatic ("lazy") VFP support, which means that application threads
can simply use the VFP and ThreadX automatically maintains the VFP registers as part of the thread
context.
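As an illustration (an illustrative sketch; the thread entry name is hypothetical), a thread
may use floating-point arithmetic directly and rely on ThreadX to preserve the VFP state
across preemption:

    void    my_fpu_thread_entry(ULONG input)
    {
        float   accumulator = 0.0f;

        while(1)
        {
            accumulator = (accumulator * 1.01f) + 0.5f;  /* VFP registers are handled by ThreadX. */
            tx_thread_sleep(10);
        }
    }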
9. Revision History
For generic code revision information, please refer to the readme_threadx_generic.txt
file, which is included in your distribution. The following details the revision
information associated with this specific port of ThreadX:
06-02-2021 Release 6.1.7 changes:
tx_port.h Remove unneeded include file
tx_thread_secure_stack_initialize.S New file
tx_thread_schedule.S Added secure stack initialize to SVC handler
tx_thread_secure_stack.c Fixed stack pointer save, initialize in handler mode
04-02-2021 Release 6.1.6 changes:
tx_port.h Updated macro definition
tx_thread_schedule.s Added low power support
03-02-2021 The following files were changed/added for version 6.1.5:
tx_port.h Added ULONG64_DEFINED
09-30-2020 Initial ThreadX 6.1 version for Cortex-M55 using AC6 tools.
Copyright(c) 1996-2020 Microsoft Corporation
https://azure.com/rtos


@@ -0,0 +1,278 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Initialize */
/** */
/**************************************************************************/
/**************************************************************************/
SYSTEM_CLOCK = 6000000
SYSTICK_CYCLES = ((SYSTEM_CLOCK / 100) -1)
/* Setup the stack and heap areas. */
STACK_SIZE = 0x00000400
HEAP_SIZE = 0x00000000
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_initialize_low_level Cortex-M55/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for any low-level processor */
/* initialization, including setting up interrupt vectors, setting */
/* up a periodic timer interrupt source, saving the system stack */
/* pointer for use in ISR processing later, and finding the first */
/* available RAM memory address for tx_application_define. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_initialize_low_level(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_initialize_low_level
.thumb_func
.type _tx_initialize_low_level, function
_tx_initialize_low_level:
/* Disable interrupts during ThreadX initialization. */
CPSID i
/* Set base of available memory to end of non-initialised RAM area. */
LDR r0, =_tx_initialize_unused_memory // Build address of unused memory pointer
LDR r1, =Image$$ARM_LIB_STACK$$ZI$$Limit // Build first free address
ADD r1, r1, #4 //
STR r1, [r0] // Setup first unused memory pointer
/* Setup Vector Table Offset Register. */
MOV r0, #0xE000E000 // Build address of NVIC registers
LDR r1, =__Vectors // Pickup address of vector table
STR r1, [r0, #0xD08] // Set vector table address
/* Enable the cycle count register. */
LDR r0, =0xE0001000 // Build address of DWT register
LDR r1, [r0] // Pickup the current value
ORR r1, r1, #1 // Set the CYCCNTENA bit
STR r1, [r0] // Enable the cycle count register
/* Set system stack pointer from vector value. */
LDR r0, =_tx_thread_system_stack_ptr // Build address of system stack pointer
LDR r1, =__Vectors // Pickup address of vector table
LDR r1, [r1] // Pickup reset stack pointer
STR r1, [r0] // Save system stack pointer
/* Configure SysTick. */
MOV r0, #0xE000E000 // Build address of NVIC registers
LDR r1, =SYSTICK_CYCLES
STR r1, [r0, #0x14] // Setup SysTick Reload Value
MOV r1, #0x7 // Build SysTick Control Enable Value
STR r1, [r0, #0x10] // Setup SysTick Control
/* Configure handler priorities. */
LDR r1, =0x00000000 // Rsrv, UsgF, BusF, MemM
STR r1, [r0, #0xD18] // Setup System Handlers 4-7 Priority Registers
LDR r1, =0xFF000000 // SVCl, Rsrv, Rsrv, Rsrv
STR r1, [r0, #0xD1C] // Setup System Handlers 8-11 Priority Registers
// Note: SVC must be lowest priority, which is 0xFF
LDR r1, =0x40FF0000 // SysT, PnSV, Rsrv, DbgM
STR r1, [r0, #0xD20] // Setup System Handlers 12-15 Priority Registers
// Note: PnSV must be lowest priority, which is 0xFF
/* Return to caller. */
BX lr
// }
/* Define shells for each of the unused vectors. */
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_BadHandler
.thumb_func
.type __tx_BadHandler, function
__tx_BadHandler:
B __tx_BadHandler
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_IntHandler
.thumb_func
.type __tx_IntHandler, function
__tx_IntHandler:
// VOID InterruptHandler (VOID)
// {
PUSH {r0,lr} // Save LR (and dummy r0 to maintain stack alignment)
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_enter // Call the ISR enter function
#endif
/* Do interrupt handler work here */
/* .... */
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_exit // Call the ISR exit function
#endif
POP {r0,lr}
BX LR
// }
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global SysTick_Handler
.thumb_func
.type SysTick_Handler, function
SysTick_Handler:
// VOID TimerInterruptHandler (VOID)
// {
PUSH {r0,lr} // Save LR (and dummy r0 to maintain stack alignment)
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_enter // Call the ISR enter function
#endif
BL _tx_timer_interrupt
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_exit // Call the ISR exit function
#endif
POP {r0,lr}
BX LR
// }
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global HardFault_Handler
.thumb_func
.type HardFault_Handler, function
HardFault_Handler:
B HardFault_Handler
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global UsageFault_Handler
.thumb_func
.type UsageFault_Handler, function
UsageFault_Handler:
CPSID i // Disable interrupts
// Check for stack limit fault
LDR r0, =0xE000ED28 // CFSR address
LDR r1,[r0] // Pick up CFSR
TST r1, #0x00100000 // Check for Stack Overflow
_unhandled_usage_loop:
BEQ _unhandled_usage_loop // If not stack overflow then loop
// Handle stack overflow
STR r1, [r0] // Clear CFSR flag(s)
#ifdef __ARM_PCS_VFP
LDR r0, =0xE000EF34 // Cleanup FPU context: Load FPCCR address
LDR r1, [r0] // Load FPCCR
BIC r1, r1, #1 // Clear the lazy preservation active bit
STR r1, [r0] // Store the value
#endif
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r0,[r0] // Pick up current thread pointer
PUSH {r0,lr} // Save LR (and r0 to maintain stack alignment)
BL _tx_thread_stack_error_handler // Call ThreadX/user handler
POP {r0,lr} // Restore LR and dummy reg
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
// Call the thread exit function to indicate the thread is no longer executing.
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
#endif
MOV r1, #0 // Build NULL value
LDR r0, =_tx_thread_current_ptr // Pickup address of current thread pointer
STR r1, [r0] // Clear current thread pointer
// Return from UsageFault_Handler exception
LDR r0, =0xE000ED04 // Load ICSR
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
CPSIE i // Enable interrupts
BX lr // Return from exception
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_NMIHandler
.thumb_func
.type __tx_NMIHandler, function
__tx_NMIHandler:
B __tx_NMIHandler
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_DBGHandler
.thumb_func
.type __tx_DBGHandler, function
__tx_DBGHandler:
B __tx_DBGHandler
.end


@@ -0,0 +1,719 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** ThreadX MISRA Compliance */
/** */
/**************************************************************************/
/**************************************************************************/
#define SHT_PROGBITS 0x1
.global __aeabi_memset
.global _tx_thread_current_ptr
.global _tx_thread_interrupt_disable
.global _tx_thread_interrupt_restore
.global _tx_thread_stack_analyze
.global _tx_thread_stack_error_handler
.global _tx_thread_system_state
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_trace_buffer_current_ptr
.global _tx_trace_buffer_end_ptr
.global _tx_trace_buffer_start_ptr
.global _tx_trace_event_enable_bits
.global _tx_trace_full_notify_function
.global _tx_trace_header_ptr
#endif
.global _tx_misra_always_true
.global _tx_misra_block_pool_to_uchar_pointer_convert
.global _tx_misra_byte_pool_to_uchar_pointer_convert
.global _tx_misra_char_to_uchar_pointer_convert
.global _tx_misra_const_char_to_char_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_entry_to_uchar_pointer_convert
#endif
.global _tx_misra_indirect_void_to_uchar_pointer_convert
.global _tx_misra_memset
.global _tx_misra_message_copy
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_object_to_uchar_pointer_convert
#endif
.global _tx_misra_pointer_to_ulong_convert
.global _tx_misra_status_get
.global _tx_misra_thread_stack_check
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_time_stamp_get
#endif
.global _tx_misra_timer_indirect_to_void_pointer_convert
.global _tx_misra_timer_pointer_add
.global _tx_misra_timer_pointer_dif
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_trace_event_insert
#endif
.global _tx_misra_uchar_pointer_add
.global _tx_misra_uchar_pointer_dif
.global _tx_misra_uchar_pointer_sub
.global _tx_misra_uchar_to_align_type_pointer_convert
.global _tx_misra_uchar_to_block_pool_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_uchar_to_entry_pointer_convert
.global _tx_misra_uchar_to_header_pointer_convert
#endif
.global _tx_misra_uchar_to_indirect_byte_pool_pointer_convert
.global _tx_misra_uchar_to_indirect_uchar_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_uchar_to_object_pointer_convert
#endif
.global _tx_misra_uchar_to_void_pointer_convert
.global _tx_misra_ulong_pointer_add
.global _tx_misra_ulong_pointer_dif
.global _tx_misra_ulong_pointer_sub
.global _tx_misra_ulong_to_pointer_convert
.global _tx_misra_ulong_to_thread_pointer_convert
.global _tx_misra_user_timer_pointer_get
.global _tx_misra_void_to_block_pool_pointer_convert
.global _tx_misra_void_to_byte_pool_pointer_convert
.global _tx_misra_void_to_event_flags_pointer_convert
.global _tx_misra_void_to_indirect_uchar_pointer_convert
.global _tx_misra_void_to_mutex_pointer_convert
.global _tx_misra_void_to_queue_pointer_convert
.global _tx_misra_void_to_semaphore_pointer_convert
.global _tx_misra_void_to_thread_pointer_convert
.global _tx_misra_void_to_uchar_pointer_convert
.global _tx_misra_void_to_ulong_pointer_convert
.global _tx_misra_ipsr_get
.global _tx_misra_control_get
.global _tx_misra_control_set
#ifdef __ARM_FP
.global _tx_misra_fpccr_get
.global _tx_misra_vfp_touch
#endif
.global _tx_misra_event_flags_group_not_used
.global _tx_misra_event_flags_set_notify_not_used
.global _tx_misra_queue_not_used
.global _tx_misra_queue_send_notify_not_used
.global _tx_misra_semaphore_not_used
.global _tx_misra_semaphore_put_notify_not_used
.global _tx_misra_thread_entry_exit_notify_not_used
.global _tx_misra_thread_not_used
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_memset(VOID *ptr, UINT value, UINT size); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.align 4
.syntax unified
.thumb_func
_tx_misra_memset:
PUSH {R4,LR}
MOVS R4,R0
MOVS R0,R2
MOVS R2,R1
MOVS R1,R0
MOVS R0,R4
BL __aeabi_memset
POP {R4,PC} // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UCHAR *_tx_misra_uchar_pointer_add(UCHAR *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_add:
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UCHAR *_tx_misra_uchar_pointer_sub(UCHAR *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_sub:
RSBS R1,R1,#+0
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_dif:
SUBS R0,R0,R1
BX LR // return
/************************************************************************************************************************************/
/************************************************************************************************************************************/
/** */
/** This single function serves all of the below prototypes. */
/** */
/** ULONG _tx_misra_pointer_to_ulong_convert(VOID *ptr); */
/** VOID *_tx_misra_ulong_to_pointer_convert(ULONG input); */
/** UCHAR **_tx_misra_indirect_void_to_uchar_pointer_convert(VOID **return_ptr); */
/** UCHAR **_tx_misra_uchar_to_indirect_uchar_pointer_convert(UCHAR *pointer); */
/** UCHAR *_tx_misra_block_pool_to_uchar_pointer_convert(TX_BLOCK_POOL *pool); */
/** TX_BLOCK_POOL *_tx_misra_void_to_block_pool_pointer_convert(VOID *pointer); */
/** UCHAR *_tx_misra_void_to_uchar_pointer_convert(VOID *pointer); */
/** TX_BLOCK_POOL *_tx_misra_uchar_to_block_pool_pointer_convert(UCHAR *pointer); */
/** UCHAR **_tx_misra_void_to_indirect_uchar_pointer_convert(VOID *pointer); */
/** TX_BYTE_POOL *_tx_misra_void_to_byte_pool_pointer_convert(VOID *pointer); */
/** UCHAR *_tx_misra_byte_pool_to_uchar_pointer_convert(TX_BYTE_POOL *pool); */
/** ALIGN_TYPE *_tx_misra_uchar_to_align_type_pointer_convert(UCHAR *pointer); */
/** TX_BYTE_POOL **_tx_misra_uchar_to_indirect_byte_pool_pointer_convert(UCHAR *pointer); */
/** TX_EVENT_FLAGS_GROUP *_tx_misra_void_to_event_flags_pointer_convert(VOID *pointer); */
/** ULONG *_tx_misra_void_to_ulong_pointer_convert(VOID *pointer); */
/** TX_MUTEX *_tx_misra_void_to_mutex_pointer_convert(VOID *pointer); */
/** TX_QUEUE *_tx_misra_void_to_queue_pointer_convert(VOID *pointer); */
/** TX_SEMAPHORE *_tx_misra_void_to_semaphore_pointer_convert(VOID *pointer); */
/** VOID *_tx_misra_uchar_to_void_pointer_convert(UCHAR *pointer); */
/** TX_THREAD *_tx_misra_ulong_to_thread_pointer_convert(ULONG value); */
/** VOID *_tx_misra_timer_indirect_to_void_pointer_convert(TX_TIMER_INTERNAL **pointer); */
/** CHAR *_tx_misra_const_char_to_char_pointer_convert(const char *pointer); */
/** TX_THREAD *_tx_misra_void_to_thread_pointer_convert(void *pointer); */
/** UCHAR *_tx_misra_object_to_uchar_pointer_convert(TX_TRACE_OBJECT_ENTRY *pointer); */
/** TX_TRACE_OBJECT_ENTRY *_tx_misra_uchar_to_object_pointer_convert(UCHAR *pointer); */
/** TX_TRACE_HEADER *_tx_misra_uchar_to_header_pointer_convert(UCHAR *pointer); */
/** TX_TRACE_BUFFER_ENTRY *_tx_misra_uchar_to_entry_pointer_convert(UCHAR *pointer); */
/** UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer); */
/** UCHAR *_tx_misra_char_to_uchar_pointer_convert(CHAR *pointer); */
/** VOID _tx_misra_event_flags_group_not_used(TX_EVENT_FLAGS_GROUP *group_ptr); */
/** VOID _tx_misra_event_flags_set_notify_not_used(VOID (*events_set_notify)(TX_EVENT_FLAGS_GROUP *notify_group_ptr)); */
/** VOID _tx_misra_queue_not_used(TX_QUEUE *queue_ptr); */
/** VOID _tx_misra_queue_send_notify_not_used(VOID (*queue_send_notify)(TX_QUEUE *notify_queue_ptr)); */
/** VOID _tx_misra_semaphore_not_used(TX_SEMAPHORE *semaphore_ptr); */
/** VOID _tx_misra_semaphore_put_notify_not_used(VOID (*semaphore_put_notify)(TX_SEMAPHORE *notify_semaphore_ptr)); */
/** VOID _tx_misra_thread_not_used(TX_THREAD *thread_ptr); */
/** VOID _tx_misra_thread_entry_exit_notify_not_used(VOID (*thread_entry_exit_notify)(TX_THREAD *notify_thread_ptr, UINT id)); */
/** */
/************************************************************************************************************************************/
/************************************************************************************************************************************/
.text
.thumb_func
_tx_misra_pointer_to_ulong_convert:
_tx_misra_ulong_to_pointer_convert:
_tx_misra_indirect_void_to_uchar_pointer_convert:
_tx_misra_uchar_to_indirect_uchar_pointer_convert:
_tx_misra_block_pool_to_uchar_pointer_convert:
_tx_misra_void_to_block_pool_pointer_convert:
_tx_misra_void_to_uchar_pointer_convert:
_tx_misra_uchar_to_block_pool_pointer_convert:
_tx_misra_void_to_indirect_uchar_pointer_convert:
_tx_misra_void_to_byte_pool_pointer_convert:
_tx_misra_byte_pool_to_uchar_pointer_convert:
_tx_misra_uchar_to_align_type_pointer_convert:
_tx_misra_uchar_to_indirect_byte_pool_pointer_convert:
_tx_misra_void_to_event_flags_pointer_convert:
_tx_misra_void_to_ulong_pointer_convert:
_tx_misra_void_to_mutex_pointer_convert:
_tx_misra_void_to_queue_pointer_convert:
_tx_misra_void_to_semaphore_pointer_convert:
_tx_misra_uchar_to_void_pointer_convert:
_tx_misra_ulong_to_thread_pointer_convert:
_tx_misra_timer_indirect_to_void_pointer_convert:
_tx_misra_const_char_to_char_pointer_convert:
_tx_misra_void_to_thread_pointer_convert:
#ifdef TX_ENABLE_EVENT_TRACE
_tx_misra_object_to_uchar_pointer_convert:
_tx_misra_uchar_to_object_pointer_convert:
_tx_misra_uchar_to_header_pointer_convert:
_tx_misra_uchar_to_entry_pointer_convert:
_tx_misra_entry_to_uchar_pointer_convert:
#endif
_tx_misra_char_to_uchar_pointer_convert:
_tx_misra_event_flags_group_not_used:
_tx_misra_event_flags_set_notify_not_used:
_tx_misra_queue_not_used:
_tx_misra_queue_send_notify_not_used:
_tx_misra_semaphore_not_used:
_tx_misra_semaphore_put_notify_not_used:
_tx_misra_thread_entry_exit_notify_not_used:
_tx_misra_thread_not_used:
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG *_tx_misra_ulong_pointer_add(ULONG *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_add:
ADD R0,R0,R1, LSL #+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_sub:
MVNS R2,#+3
MULS R1,R2,R1
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_dif:
SUBS R0,R0,R1
ASRS R0,R0,#+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_message_copy(ULONG **source, ULONG **destination, */
/** UINT size); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_message_copy:
PUSH {R4,R5}
LDR R3,[R0, #+0]
LDR R4,[R1, #+0]
LDR R5,[R3, #+0]
STR R5,[R4, #+0]
ADDS R4,R4,#+4
ADDS R3,R3,#+4
CMP R2,#+2
BCC.N _tx_misra_message_copy_0
SUBS R2,R2,#+1
B.N _tx_misra_message_copy_1
_tx_misra_message_copy_2:
LDR R5,[R3, #+0]
STR R5,[R4, #+0]
ADDS R4,R4,#+4
ADDS R3,R3,#+4
SUBS R2,R2,#+1
_tx_misra_message_copy_1:
CMP R2,#+0
BNE.N _tx_misra_message_copy_2
_tx_misra_message_copy_0:
STR R3,[R0, #+0]
STR R4,[R1, #+0]
POP {R4,R5}
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_timer_pointer_dif(TX_TIMER_INTERNAL **ptr1, */
/** TX_TIMER_INTERNAL **ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_timer_pointer_dif:
SUBS R0,R0,R1
ASRS R0,R0,#+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** TX_TIMER_INTERNAL **_tx_misra_timer_pointer_add(TX_TIMER_INTERNAL */
/** **ptr1, ULONG size); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_timer_pointer_add:
ADD R0,R0,R1, LSL #+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_user_timer_pointer_get(TX_TIMER_INTERNAL */
/** *internal_timer, TX_TIMER **user_timer); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_user_timer_pointer_get:
SUBS R0,#8
STR R0,[R1, #+0]
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_thread_stack_check(TX_THREAD *thread_ptr, */
/** VOID **highest_stack); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_thread_stack_check:
PUSH {R3-R5,LR}
MOVS R4,R0
MOVS R5,R1
BL _tx_thread_interrupt_disable
CMP R4,#+0
BEQ.N _tx_misra_thread_stack_check_0
LDR R1,[R4, #+0]
LDR R2,=0x54485244
CMP R1,R2
BNE.N _tx_misra_thread_stack_check_0
LDR R1,[R4, #+8]
LDR R2,[R5, #+0]
CMP R1,R2
BCS.N _tx_misra_thread_stack_check_1
LDR R1,[R4, #+8]
STR R1,[R5, #+0]
_tx_misra_thread_stack_check_1:
LDR R1,[R4, #+12]
LDR R1,[R1, #+0]
CMP R1,#-269488145
BNE.N _tx_misra_thread_stack_check_2
LDR R1,[R4, #+16]
LDR R1,[R1, #+1]
CMP R1,#-269488145
BNE.N _tx_misra_thread_stack_check_2
LDR R1,[R5, #+0]
LDR R2,[R4, #+12]
CMP R1,R2
BCS.N _tx_misra_thread_stack_check_3
_tx_misra_thread_stack_check_2:
BL _tx_thread_interrupt_restore
MOVS R0,R4
BL _tx_thread_stack_error_handler
BL _tx_thread_interrupt_disable
_tx_misra_thread_stack_check_3:
LDR R1,[R5, #+0]
LDR R1,[R1, #-4]
CMP R1,#-269488145
BEQ.N _tx_misra_thread_stack_check_0
BL _tx_thread_interrupt_restore
MOVS R0,R4
BL _tx_thread_stack_analyze
BL _tx_thread_interrupt_disable
_tx_misra_thread_stack_check_0:
BL _tx_thread_interrupt_restore
POP {R0,R4,R5,PC} // return
#ifdef TX_ENABLE_EVENT_TRACE
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_trace_event_insert(ULONG event_id, */
/** VOID *info_field_1, ULONG info_field_2, ULONG info_field_3, */
/** ULONG info_field_4, ULONG filter, ULONG time_stamp); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_trace_event_insert:
PUSH {R3-R7,LR}
LDR.N R4,DataTable2_1
LDR R4,[R4, #+0]
CMP R4,#+0
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R5,DataTable2_2
LDR R5,[R5, #+0]
LDR R6,[SP, #+28]
TST R5,R6
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R5,DataTable2_3
LDR R5,[R5, #+0]
LDR.N R6,DataTable2_4
LDR R6,[R6, #+0]
CMP R5,#+0
BNE.N _tx_misra_trace_event_insert_1
LDR R5,[R6, #+44]
LDR R7,[R6, #+60]
LSLS R7,R7,#+16
ORRS R7,R7,#0x80000000
ORRS R5,R7,R5
B.N _tx_misra_trace_event_insert_2
_tx_misra_trace_event_insert_1:
CMP R5,#-252645136
BCS.N _tx_misra_trace_event_insert_3
MOVS R5,R6
MOVS R6,#-1
B.N _tx_misra_trace_event_insert_2
_tx_misra_trace_event_insert_3:
MOVS R6,#-252645136
MOVS R5,#+0
_tx_misra_trace_event_insert_2:
STR R6,[R4, #+0]
STR R5,[R4, #+4]
STR R0,[R4, #+8]
LDR R0,[SP, #+32]
STR R0,[R4, #+12]
STR R1,[R4, #+16]
STR R2,[R4, #+20]
STR R3,[R4, #+24]
LDR R0,[SP, #+24]
STR R0,[R4, #+28]
ADDS R4,R4,#+32
LDR.N R0,DataTable2_5
LDR R0,[R0, #+0]
CMP R4,R0
BCC.N _tx_misra_trace_event_insert_4
LDR.N R0,DataTable2_6
LDR R4,[R0, #+0]
LDR.N R0,DataTable2_1
STR R4,[R0, #+0]
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
STR R4,[R0, #+32]
LDR.N R0,DataTable2_8
LDR R0,[R0, #+0]
CMP R0,#+0
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
LDR.N R1,DataTable2_8
LDR R1,[R1, #+0]
BLX R1
B.N _tx_misra_trace_event_insert_0
_tx_misra_trace_event_insert_4:
LDR.N R0,DataTable2_1
STR R4,[R0, #+0]
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
STR R4,[R0, #+32]
_tx_misra_trace_event_insert_0:
POP {R0,R4-R7,PC} // return
.data
DataTable2_1:
.word _tx_trace_buffer_current_ptr
.data
DataTable2_2:
.word _tx_trace_event_enable_bits
.data
DataTable2_5:
.word _tx_trace_buffer_end_ptr
.data
DataTable2_6:
.word _tx_trace_buffer_start_ptr
.data
DataTable2_7:
.word _tx_trace_header_ptr
.data
DataTable2_8:
.word _tx_trace_full_notify_function
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_time_stamp_get(VOID); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_time_stamp_get:
MOVS R0,#+0
BX LR // return
#endif
.data
DataTable2_3:
.word _tx_thread_system_state
.data
DataTable2_4:
.word _tx_thread_current_ptr
/**************************************************************************/
/**************************************************************************/
/** */
/** UINT _tx_misra_always_true(void); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_always_true:
MOVS R0,#+1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UINT _tx_misra_status_get(UINT status); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_status_get:
MOVS R0,#+0
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_ipsr_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_ipsr_get:
MRS R0, IPSR
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_control_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_control_get:
MRS R0, CONTROL
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** void _tx_misra_control_set(ULONG value); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_control_set:
MSR CONTROL, R0
BX LR // return
#ifdef __ARM_FP
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_fpccr_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_fpccr_get:
LDR r0, =0xE000EF34 // Build FPCCR address
LDR r0, [r0] // Load FPCCR value
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** void _tx_misra_vfp_touch(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_vfp_touch:
vmov.f32 s0, s0
BX LR // return
#endif
.data
.word 0
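The trace insert shim above only records events after the application has handed ThreadX a trace buffer. A minimal sketch of doing so is shown below; the buffer size and registry entry count are arbitrary values chosen for illustration, and TX_ENABLE_EVENT_TRACE must be defined for the build.

#include "tx_api.h"

static UCHAR trace_buffer[8192];

void example_start_event_trace(void)
{
    /* Hand the statically allocated buffer to ThreadX with room for 16
       object registry entries; subsequent events are written by the
       trace insert routine. */
    UINT status = tx_trace_enable(trace_buffer, sizeof(trace_buffer), 16);

    if (status != TX_SUCCESS)
    {
        /* Tracing could not be started (for example, already enabled). */
    }
}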
View File
@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
.global _tx_execution_isr_exit
#endif
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore Cortex-M55/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is not needed for Cortex-M. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* [_tx_execution_isr_exit] Execution profiling ISR exit */
/* */
/* CALLED BY */
/* */
/* ISRs Interrupt Service Routines */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_context_restore
.thumb_func
.type _tx_thread_context_restore, function
_tx_thread_context_restore:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
PUSH {r0, lr} // Save return address
BL _tx_execution_isr_exit // Call the ISR exit function
POP {r0, lr} // Recover return address
#endif
BX lr
// }
.end
View File
@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
.global _tx_execution_isr_enter
#endif
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_save Cortex-M55/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is not needed for Cortex-M. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* [_tx_execution_isr_enter] Execution profiling ISR enter */
/* */
/* CALLED BY */
/* */
/* ISRs */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_save(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_context_save
.thumb_func
.type _tx_thread_context_save, function
_tx_thread_context_save:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR enter function to indicate an ISR is starting. */
PUSH {r0, lr} // Save return address
BL _tx_execution_isr_enter // Call the ISR enter function
POP {r0, lr} // Recover return address
#endif
BX lr
// }
.end
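Because the context save/restore entry points in this port do little more than call the optional execution-profiling hooks, a C interrupt handler can call ThreadX services directly. A sketch is below; the UART0_IRQHandler name and the semaphore object are assumptions for illustration.

#include "tx_api.h"

extern TX_SEMAPHORE data_ready_semaphore;   /* assumed to be created in tx_application_define */

void UART0_IRQHandler(void)
{
    /* No explicit _tx_thread_context_save/_tx_thread_context_restore calls
       are needed on Cortex-M; waking a thread is a plain service call. */
    tx_semaphore_put(&data_ready_semaphore);
}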
View File
@ -0,0 +1,82 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_control Cortex-M55/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for changing the interrupt lockout */
/* posture of the system. */
/* */
/* INPUT */
/* */
/* new_posture New interrupt lockout posture */
/* */
/* OUTPUT */
/* */
/* old_posture Old interrupt lockout posture */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_interrupt_control(UINT new_posture)
// {
.section .text
.balign 4
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_interrupt_control
.thumb_func
.type _tx_thread_interrupt_control, function
_tx_thread_interrupt_control:
#ifdef TX_PORT_USE_BASEPRI
MRS r1, BASEPRI // Pickup current interrupt posture
MSR BASEPRI, r0 // Apply the new interrupt posture
MOV r0, r1 // Transfer old to return register
#else
MRS r1, PRIMASK // Pickup current interrupt lockout
MSR PRIMASK, r0 // Apply the new interrupt lockout
MOV r0, r1 // Transfer old to return register
#endif
BX lr // Return to caller
// }
.end
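Application code normally reaches this routine through the tx_interrupt_control service. A usage sketch follows; the shared counter being sampled is an invented example.

#include "tx_api.h"

volatile ULONG isr_event_count;     /* updated from an ISR elsewhere */

void example_sample_counter(ULONG *value)
{
    UINT old_posture;

    old_posture = tx_interrupt_control(TX_INT_DISABLE);    /* lock out interrupts          */
    *value = isr_event_count;                               /* sample data shared with ISRs */
    tx_interrupt_control(old_posture);                      /* restore previous posture     */
}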
View File
@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_disable Cortex-M55/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for disabling interrupts and returning */
/* the previous interrupt lockout posture. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* old_posture Old interrupt lockout posture */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_interrupt_disable(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_interrupt_disable
.thumb_func
.type _tx_thread_interrupt_disable, function
_tx_thread_interrupt_disable:
/* Return current interrupt lockout posture. */
#ifdef TX_PORT_USE_BASEPRI
MRS r0, BASEPRI
LDR r1, =TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
MRS r0, PRIMASK
CPSID i
#endif
BX lr
// }
.end
View File
@ -0,0 +1,80 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_restore Cortex-M55/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for restoring the previous */
/* interrupt lockout posture. */
/* */
/* INPUT */
/* */
/* previous_posture Previous interrupt posture */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_interrupt_restore(UINT previous_posture)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_interrupt_restore
.thumb_func
.type _tx_thread_interrupt_restore, function
_tx_thread_interrupt_restore:
/* Restore previous interrupt lockout posture. */
#ifdef TX_PORT_USE_BASEPRI
MSR BASEPRI, r0
#else
MSR PRIMASK, r0
#endif
BX lr
// }
.end
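The disable/restore pair above performs the same operation that the kernel sources express through the TX_DISABLE and TX_RESTORE port macros. A minimal sketch of that pattern is shown below; the counter is an invented example.

#include "tx_api.h"

static ULONG shared_counter;

void example_guarded_increment(void)
{
TX_INTERRUPT_SAVE_AREA

    TX_DISABLE                  /* save the current posture, then mask interrupts */
    shared_counter++;           /* critical section */
    TX_RESTORE                  /* re-apply the saved posture */
}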
View File
@ -0,0 +1,391 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
.global _tx_execution_thread_enter
.global _tx_execution_thread_exit
#endif
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M55/AC6 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function waits for a thread control block pointer to appear in */
/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
/* in the variable, the corresponding thread is resumed. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 04-02-2021 Scott Larson Modified comment(s), added */
/* low power code, */
/* resulting in version 6.1.6 */
/* 06-02-2021 Scott Larson Added secure stack initialize */
/* in SVC handler, */
/* resulting in version 6.1.7 */
/* 04-25-2022 Scott Larson Added BASEPRI support, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_schedule
.thumb_func
.type _tx_thread_schedule, function
_tx_thread_schedule:
/* This function should only ever be called on Cortex-M
from the first schedule request. Subsequent scheduling occurs
from the PendSV handling routine below. */
/* Clear the preempt-disable flag to enable rescheduling after initialization on Cortex-M targets. */
MOV r0, #0 // Build value for TX_FALSE
LDR r2, =_tx_thread_preempt_disable // Build address of preempt disable flag
STR r0, [r2, #0] // Clear preempt disable flag
#ifdef __ARM_PCS_VFP
/* Clear CONTROL.FPCA bit so VFP registers aren't unnecessarily stacked. */
MRS r0, CONTROL // Pickup current CONTROL register
BIC r0, r0, #4 // Clear the FPCA bit
MSR CONTROL, r0 // Setup new CONTROL register
#endif
/* Enable interrupts */
CPSIE i
/* Enter the scheduler for the first time. */
MOV r0, #0x10000000 // Load PENDSVSET bit
MOV r1, #0xE000E000 // Load NVIC base
STR r0, [r1, #0xD04] // Set PENDSVBIT in ICSR
DSB // Complete all memory accesses
ISB // Flush pipeline
/* Wait here for the PendSV to take place. */
__tx_wait_here:
B __tx_wait_here // Wait for the PendSV to happen
// }
/* Generic context switching PendSV handler. */
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global PendSV_Handler
.thumb_func
.type PendSV_Handler, function
/* Get current thread value and new thread pointer. */
PendSV_Handler:
__tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
#ifdef TX_PORT_USE_BASEPRI
MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r0
#else
CPSIE i // Enable interrupts
#endif /* TX_PORT_USE_BASEPRI */
#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
MOV r3, #0 // Build NULL value
LDR r1, [r0] // Pickup current thread pointer
/* Determine if there is a current thread to finish preserving. */
CBZ r1, __tx_ts_new // If NULL, skip preservation
/* Recover PSP and preserve current thread context. */
STR r3, [r0] // Set _tx_thread_current_ptr to NULL
MRS r12, PSP // Pickup PSP pointer (thread's stack pointer)
STMDB r12!, {r4-r11} // Save its remaining registers
#ifdef __ARM_PCS_VFP
TST LR, #0x10 // Determine if the VFP extended frame is present
BNE _skip_vfp_save
VSTMDB r12!,{s16-s31} // Yes, save additional VFP registers
_skip_vfp_save:
#endif
LDR r4, =_tx_timer_time_slice // Build address of time-slice variable
STMDB r12!, {LR} // Save LR on the stack
STR r12, [r1, #8] // Save the thread stack pointer
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// Save secure context
LDR r5, [r1,#0x90] // Load secure stack index
CBZ r5, _skip_secure_save // Skip save if there is no secure context
PUSH {r0,r1,r2,r3} // Save scratch registers
MOV r0, r1 // Move thread ptr to r0
BL _tx_thread_secure_stack_context_save // Save secure stack
POP {r0,r1,r2,r3} // Restore secure registers
_skip_secure_save:
#endif
/* Determine if time-slice is active. If it isn't, skip time handling processing. */
LDR r5, [r4] // Pickup current time-slice
CBZ r5, __tx_ts_new // If not active, skip processing
/* Time-slice is active, save the current thread's time-slice and clear the global time-slice variable. */
STR r5, [r1, #24] // Save current time-slice
/* Clear the global time-slice. */
STR r3, [r4] // Clear time-slice
/* Executing thread is now completely preserved!!! */
__tx_ts_new:
/* Now we are looking for a new thread to execute! */
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for execution; make it the current thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
/* Increment the thread run count. */
__tx_ts_restore:
LDR r7, [r1, #4] // Pickup the current thread run count
LDR r4, =_tx_timer_time_slice // Build address of time-slice variable
LDR r5, [r1, #24] // Pickup thread's current time-slice
ADD r7, r7, #1 // Increment the thread run count
STR r7, [r1, #4] // Store the new run count
/* Setup global time-slice with thread's current time-slice. */
STR r5, [r4] // Setup global time-slice
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread entry function to indicate the thread is executing. */
PUSH {r0, r1} // Save r0 and r1
BL _tx_execution_thread_enter // Call the thread execution enter function
POP {r0, r1} // Recover r0 and r1
#endif
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// Restore secure context
LDR r0, [r1,#0x90] // Load secure stack index
CBZ r0, _skip_secure_restore // Skip restore if there is no secure context
PUSH {r0,r1} // Save r1 (and dummy r0)
MOV r0, r1 // Move thread ptr to r0
BL _tx_thread_secure_stack_context_restore // Restore secure stack
POP {r0,r1} // Restore r1 (and dummy r0)
_skip_secure_restore:
#endif
/* Restore the thread context and PSP. */
LDR r12, [r1, #12] // Get stack start
MSR PSPLIM, r12 // Set stack limit
LDR r12, [r1, #8] // Pickup thread's stack pointer
LDMIA r12!, {LR} // Pickup LR
#ifdef __ARM_PCS_VFP
TST LR, #0x10 // Determine if the VFP extended frame is present
BNE _skip_vfp_restore // If not, skip VFP restore
VLDMIA r12!, {s16-s31} // Yes, restore additional VFP registers
_skip_vfp_restore:
#endif
LDMIA r12!, {r4-r11} // Recover thread's registers
MSR PSP, r12 // Setup the thread's stack pointer
BX lr // Return to thread!
/* The following is the idle wait processing... in this case, no threads are ready for execution and the
system will simply be idle until an interrupt occurs that makes a thread ready. Note that interrupts
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_LOW_POWER
PUSH {r0-r3}
BL tx_low_power_enter // Possibly enter low power mode
POP {r0-r3}
#endif
#ifdef TX_ENABLE_WFI
DSB // Ensure no outstanding memory transactions
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
#ifdef TX_LOW_POWER
PUSH {r0-r3}
BL tx_low_power_exit // Exit low power mode
POP {r0-r3}
#endif
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
already in the handler! */
__tx_ts_ready:
MOV r7, #0x08000000 // Build clear PendSV value
MOV r8, #0xE000E000 // Build base NVIC address
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
B __tx_ts_restore // Restore the thread
// }
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// SVC_Handler is not needed when ThreadX is running in single mode.
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global SVC_Handler
.thumb_func
.type SVC_Handler, function
SVC_Handler:
TST lr, #0x04 // Determine return stack from EXC_RETURN bit 2
ITE EQ
MRSEQ r0, MSP // Get MSP if return stack is MSP
MRSNE r0, PSP // Get PSP if return stack is PSP
LDR r1, [r0,#24] // Load saved PC from stack
LDRB r1, [r1,#-2] // Load SVC number
CMP r1, #1 // Is it a secure stack allocate request?
BEQ _tx_svc_secure_alloc // Yes, go there
CMP r1, #2 // Is it a secure stack free request?
BEQ _tx_svc_secure_free // Yes, go there
CMP r1, #3 // Is it a secure stack init request?
BEQ _tx_svc_secure_init // Yes, go there
// Unknown SVC argument - just return
BX lr
_tx_svc_secure_alloc:
PUSH {r0,lr} // Save SP and EXC_RETURN
LDM r0, {r0-r3} // Load function parameters from stack
BL _tx_thread_secure_mode_stack_allocate
POP {r12,lr} // Restore SP and EXC_RETURN
STR r0,[r12] // Store function return value
BX lr
_tx_svc_secure_free:
PUSH {r0,lr} // Save SP and EXC_RETURN
LDM r0, {r0-r3} // Load function parameters from stack
BL _tx_thread_secure_mode_stack_free
POP {r12,lr} // Restore SP and EXC_RETURN
STR r0,[r12] // Store function return value
BX lr
_tx_svc_secure_init:
PUSH {r0,lr} // Save SP and EXC_RETURN
BL _tx_thread_secure_mode_stack_initialize
POP {r12,lr} // Restore SP and EXC_RETURN
BX lr
#endif // End of ifndef TX_SINGLE_MODE_SECURE, TX_SINGLE_MODE_NON_SECURE
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_vfp_access
.thumb_func
.type _tx_vfp_access, function
_tx_vfp_access:
VMOV.F32 s0, s0 // Simply access the VFP
BX lr // Return to caller
.end
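The TX_PORT_USE_BASEPRI paths above are only taken when the build defines that macro together with a masking level. A possible configuration is sketched below; the 0x40 value is an assumption, not a ThreadX default, and the definitions are usually passed on the compiler and assembler command line so that these assembly sources also see them.

/* Example build flags:  -DTX_PORT_USE_BASEPRI  -DTX_PORT_BASEPRI=0x40
   With 0x40, interrupts at that priority and numerically higher (less urgent)
   levels are masked by the kernel; more urgent priorities remain zero-latency
   and must not call ThreadX services. */
#define TX_PORT_USE_BASEPRI
#define TX_PORT_BASEPRI     0x40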
View File
@ -0,0 +1,592 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#include "tx_api.h"
/* If TX_SINGLE_MODE_SECURE or TX_SINGLE_MODE_NON_SECURE is defined,
no secure stack functionality is needed. */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_SOURCE_CODE
#include "cmsis_compiler.h" /* For intrinsic functions. */
#include "tx_secure_interface.h" /* Interface for NS code. */
/* Minimum size of secure stack. */
#ifndef TX_THREAD_SECURE_STACK_MINIMUM
#define TX_THREAD_SECURE_STACK_MINIMUM 256
#endif
/* Maximum size of secure stack. */
#ifndef TX_THREAD_SECURE_STACK_MAXIMUM
#define TX_THREAD_SECURE_STACK_MAXIMUM 1024
#endif
/* 8 bytes added to stack size to "seal" stack. */
#define TX_THREAD_STACK_SEAL_SIZE 8
#define TX_THREAD_STACK_SEAL_VALUE 0xFEF5EDA5
/* Maximum number of secure contexts. */
#ifndef TX_MAX_SECURE_CONTEXTS
#define TX_MAX_SECURE_CONTEXTS 32
#endif
#define TX_INVALID_SECURE_CONTEXT_IDX (-1)
/* Secure stack info struct to hold stack start, stack limit,
current stack pointer, and pointer to owning thread.
This will be allocated for each thread with a secure stack. */
typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
{
VOID *tx_thread_secure_stack_ptr; /* Thread's secure stack current pointer */
VOID *tx_thread_secure_stack_start; /* Thread's secure stack start address */
VOID *tx_thread_secure_stack_limit; /* Thread's secure stack limit */
TX_THREAD *tx_thread_ptr; /* Keep track of thread for error handling */
INT tx_next_free_index; /* Next free index of free secure context */
} TX_THREAD_SECURE_STACK_INFO;
/* Static secure contexts */
static TX_THREAD_SECURE_STACK_INFO tx_thread_secure_context[TX_MAX_SECURE_CONTEXTS];
/* Head of the free secure context list. */
static INT tx_head_free_index = 0U;
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_initialize Cortex-M55/AC6 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function initializes secure mode to use PSP stack. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* status */
/* */
/* CALLS */
/* */
/* __get_CONTROL Intrinsic to get CONTROL */
/* __set_CONTROL Intrinsic to set CONTROL */
/* __set_PSPLIM Intrinsic to set PSP limit */
/* __set_PSP Intrinsic to set PSP */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 06-02-2021 Scott Larson Modified comment(s), and */
/* changed name, execute in */
/* handler mode, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_initialize(void)
{
UINT status;
INT index;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
{
status = TX_CALLER_ERROR;
}
else
{
/* Set secure mode to use PSP. */
__set_CONTROL(__get_CONTROL() | 2);
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
__set_PSPLIM(0);
__set_PSP(0);
for (index = 0; index < TX_MAX_SECURE_CONTEXTS; index++)
{
/* Check last index and mark next free to invalid index */
if(index == (TX_MAX_SECURE_CONTEXTS - 1))
{
tx_thread_secure_context[index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
tx_thread_secure_context[index].tx_next_free_index = index + 1;
}
}
status = TX_SUCCESS;
}
return status;
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_allocate Cortex-M55/AC6 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function allocates a thread's secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* stack_size Size of stack to allocate */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_SIZE_ERROR Invalid stack size */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* __get_IPSR Intrinsic to get IPSR */
/* malloc Compiler's malloc function */
/* __set_PSPLIM Intrinsic to set PSP limit */
/* __set_PSP Intrinsic to set PSP */
/* __TZ_get_PSPLIM_NS Intrinsic to get NS PSP limit */
/* */
/* CALLED BY */
/* */
/* SVC Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* added stack sealing, */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
{
TX_INTERRUPT_SAVE_AREA
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
UCHAR *stack_mem;
INT secure_context_index;
status = TX_SUCCESS;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
{
status = TX_CALLER_ERROR;
}
else if (stack_size < TX_THREAD_SECURE_STACK_MINIMUM || stack_size > TX_THREAD_SECURE_STACK_MAXIMUM)
{
status = TX_SIZE_ERROR;
}
/* Check if thread already has secure stack allocated. */
else if (thread_ptr -> tx_thread_secure_stack_context != 0)
{
status = TX_THREAD_ERROR;
}
else
{
TX_DISABLE
/* Allocate free index for secure stack info. */
if(tx_head_free_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
secure_context_index = tx_head_free_index;
tx_head_free_index = tx_thread_secure_context[tx_head_free_index].tx_next_free_index;
tx_thread_secure_context[secure_context_index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
secure_context_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
TX_RESTORE
if(secure_context_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
info_ptr = &tx_thread_secure_context[secure_context_index];
/* If stack info allocated, allocate a stack & seal. */
stack_mem = malloc(stack_size + TX_THREAD_STACK_SEAL_SIZE);
if(stack_mem != TX_NULL)
{
/* Secure stack has been allocated, save in the stack info struct. */
info_ptr -> tx_thread_secure_stack_limit = stack_mem;
info_ptr -> tx_thread_secure_stack_start = stack_mem + stack_size;
info_ptr -> tx_thread_secure_stack_ptr = info_ptr -> tx_thread_secure_stack_start;
info_ptr -> tx_thread_ptr = thread_ptr;
/* Seal bottom of stack. */
*(ULONG*)info_ptr -> tx_thread_secure_stack_start = TX_THREAD_STACK_SEAL_VALUE;
/* Save secure context id (i.e., the one-based index) in thread. */
thread_ptr -> tx_thread_secure_stack_context = (VOID *)(secure_context_index + 1);
/* Check if this thread is running by looking at its stack start and PSPLIM_NS */
if(((ULONG) thread_ptr -> tx_thread_stack_start & 0xFFFFFFF8) == __TZ_get_PSPLIM_NS())
{
/* If this thread is running, set Secure PSP and PSPLIM. */
__set_PSPLIM((ULONG)(info_ptr -> tx_thread_secure_stack_limit));
__set_PSP((ULONG)(info_ptr -> tx_thread_secure_stack_ptr));
}
}
else
{
TX_DISABLE
/* Stack not allocated, free the info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
status = TX_NO_MEMORY;
}
}
else
{
status = TX_NO_MEMORY;
}
}
return(status);
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_free Cortex-M55/AC6 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function frees a thread's secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* __get_IPSR Intrinsic to get IPSR */
/* free Compiler's free() function */
/* */
/* CALLED BY */
/* */
/* SVC Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_free(TX_THREAD *thread_ptr)
{
TX_INTERRUPT_SAVE_AREA
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
INT secure_context_index;
status = TX_SUCCESS;
/* Pickup stack info id from thread. */
secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
{
status = TX_CALLER_ERROR;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
status = TX_THREAD_ERROR;
}
else
{
/* Pickup stack info from static array of secure contexts. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
status = TX_THREAD_ERROR;
}
else
{
/* Free secure stack. */
free(info_ptr -> tx_thread_secure_stack_limit);
TX_DISABLE
/* Free info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
/* Clear secure context from thread. */
thread_ptr -> tx_thread_secure_stack_context = 0;
}
}
return(status);
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_save Cortex-M55/AC6 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function saves context of the secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* __get_IPSR Intrinsic to get IPSR */
/* __get_PSP Intrinsic to get PSP */
/* __set_PSPLIM Intrinsic to set PSP limit */
/* __set_PSP Intrinsic to set PSP */
/* */
/* CALLED BY */
/* */
/* PendSV Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 06-02-2021 Scott Larson Fix stack pointer save, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
void _tx_thread_secure_stack_context_save(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
ULONG sp;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
if (__get_IPSR() == 0)
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Check that stack pointer is in range */
sp = __get_PSP();
if ((sp < (ULONG)info_ptr -> tx_thread_secure_stack_limit) ||
(sp > (ULONG)info_ptr -> tx_thread_secure_stack_start))
{
return;
}
/* Save stack pointer. */
info_ptr -> tx_thread_secure_stack_ptr = (VOID *) sp;
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
__set_PSPLIM(0);
__set_PSP(0);
return;
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_restore Cortex-M55/AC6 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function restores context of the secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* __get_IPSR Intrinsic to get IPSR */
/* __set_PSPLIM Intrinsic to set PSP limit */
/* __set_PSP Intrinsic to set PSP */
/* */
/* CALLED BY */
/* */
/* PendSV Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
void _tx_thread_secure_stack_context_restore(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
if (__get_IPSR() == 0)
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Set stack pointer and limit. */
__set_PSPLIM((ULONG)info_ptr -> tx_thread_secure_stack_limit);
__set_PSP ((ULONG)info_ptr -> tx_thread_secure_stack_ptr);
return;
}
#endif
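The sizing limits and context count above are compile-time defaults guarded by #ifndef, so they can be overridden for the secure build of this file. A possible override is sketched below; the values are illustrative assumptions, and because the stacks come from the secure-side malloc, the secure heap must be sized for the worst-case number of allocations.

#define TX_THREAD_SECURE_STACK_MINIMUM      256
#define TX_THREAD_SECURE_STACK_MAXIMUM      4096
#define TX_MAX_SECURE_CONTEXTS              64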
View File
@ -0,0 +1,85 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_allocate Cortex-M55/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to allocate a secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* stack_size Size of secure stack to */
/* allocate */
/* */
/* OUTPUT */
/* */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* SVC 1 */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_secure_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_secure_stack_allocate
.thumb_func
.type _tx_thread_secure_stack_allocate, function
_tx_thread_secure_stack_allocate:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
MRS r3, PRIMASK // Save interrupt mask
CPSIE i // Enable interrupts for SVC call
SVC 1
CMP r3, #0 // If interrupts enabled, just return
BEQ _alloc_return_interrupt_enabled
CPSID i // Otherwise, disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
_alloc_return_interrupt_enabled:
BX lr
.end
View File
@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_free Cortex-M55/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to free a secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* SVC 2 */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_secure_stack_free(TX_THREAD *thread_ptr)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_secure_stack_free
.thumb_func
.type _tx_thread_secure_stack_free, function
_tx_thread_secure_stack_free:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
MRS r3, PRIMASK // Save interrupt mask
CPSIE i // Enable interrupts for SVC call
SVC 2
CMP r3, #0 // If interrupts enabled, just return
BEQ _free_return_interrupt_enabled
CPSID i // Otherwise, disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
_free_return_interrupt_enabled:
BX lr
.end
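From the non-secure application these wrappers are normally reached through the tx_thread_secure_stack_allocate and tx_thread_secure_stack_free services mapped in tx_api.h. A usage sketch follows; the thread object and the 512-byte size are assumptions for illustration.

#include "tx_api.h"

extern TX_THREAD crypto_thread;     /* assumed to be created elsewhere */

void example_secure_stack_usage(void)
{
    /* SVC 1: allocate a 512-byte secure-side stack for the thread. */
    UINT status = tx_thread_secure_stack_allocate(&crypto_thread, 512);

    if (status == TX_SUCCESS)
    {
        /* ... the thread may now call secure (CMSE entry) functions ... */

        /* SVC 2: release the secure stack when it is no longer needed. */
        tx_thread_secure_stack_free(&crypto_thread);
    }
}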
View File
@ -0,0 +1,79 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_initialize Cortex-M55/AC6 */
/* 6.1.7 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to initialize a secure stack. */
/* */
/* INPUT */
/* */
/* none */
/* */
/* OUTPUT */
/* */
/* none */
/* */
/* CALLS */
/* */
/* SVC 3 */
/* */
/* CALLED BY */
/* */
/* TX_INITIALIZE_KERNEL_ENTER_EXTENSION */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* */
/**************************************************************************/
// VOID _tx_thread_secure_stack_initialize(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_secure_stack_initialize
.thumb_func
.type _tx_thread_secure_stack_initialize, function
_tx_thread_secure_stack_initialize:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
CPSIE i // Enable interrupts for SVC call
SVC 3
CPSID i // Disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
BX lr
.end
View File
@ -0,0 +1,140 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_stack_build Cortex-M55/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function builds a stack frame on the supplied thread's stack. */
/* The stack frame results in a fake interrupt return to the supplied */
/* function pointer. */
/* */
/* INPUT */
/* */
/* thread_ptr Pointer to thread control blk */
/* function_ptr Pointer to return function */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_thread_create Create thread service */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_stack_build
.thumb_func
.type _tx_thread_stack_build, function
_tx_thread_stack_build:
/* Build a fake interrupt frame. The form of the fake interrupt stack
on the Cortex-M should look like the following after it is built:
Stack Top:
LR Interrupted LR (LR at time of PENDSV)
r4 Initial value for r4
r5 Initial value for r5
r6 Initial value for r6
r7 Initial value for r7
r8 Initial value for r8
r9 Initial value for r9
r10 Initial value for r10
r11 Initial value for r11
r0 Initial value for r0 (Hardware stack starts here!!)
r1 Initial value for r1
r2 Initial value for r2
r3 Initial value for r3
r12 Initial value for r12
lr Initial value for lr
pc Initial value for pc
xPSR Initial value for xPSR
Stack Bottom: (higher memory address) */
LDR r2, [r0, #16] // Pickup end of stack area
BIC r2, r2, #0x7 // Align frame for 8-byte alignment
SUB r2, r2, #68 // Subtract frame size
#ifdef TX_SINGLE_MODE_SECURE
LDR r3, =0xFFFFFFFD // Build initial LR value for secure mode
#else
LDR r3, =0xFFFFFFBC // Build initial LR value to return to non-secure PSP
#endif
STR r3, [r2, #0] // Save on the stack
/* Actually build the stack frame. */
MOV r3, #0 // Build initial register value
STR r3, [r2, #4] // Store initial r4
STR r3, [r2, #8] // Store initial r5
STR r3, [r2, #12] // Store initial r6
STR r3, [r2, #16] // Store initial r7
STR r3, [r2, #20] // Store initial r8
STR r3, [r2, #24] // Store initial r9
STR r3, [r2, #28] // Store initial r10
STR r3, [r2, #32] // Store initial r11
/* Hardware stack follows. */
STR r3, [r2, #36] // Store initial r0
STR r3, [r2, #40] // Store initial r1
STR r3, [r2, #44] // Store initial r2
STR r3, [r2, #48] // Store initial r3
STR r3, [r2, #52] // Store initial r12
MOV r3, #0xFFFFFFFF // Poison EXC_RETURN value
STR r3, [r2, #56] // Store initial lr
STR r1, [r2, #60] // Store initial pc
MOV r3, #0x01000000 // Only T-bit need be set
STR r3, [r2, #64] // Store initial xPSR
/* Setup stack pointer. */
// thread_ptr -> tx_thread_stack_ptr = r2;
STR r2, [r0, #8] // Save stack pointer in thread's
// control block
BX lr // Return to caller
// }
.end
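For reference, the 68-byte fake frame built above can be pictured as the following C overlay. This is a sketch only: it assumes the no-FPU layout and mirrors the offsets used by the STR instructions above.

#include "tx_api.h"

typedef struct EXAMPLE_INITIAL_FRAME_STRUCT
{
    ULONG exc_return;       /* LR value consumed on the first PendSV switch     */
    ULONG r4_to_r11[8];     /* Software-stacked registers, initialized to zero  */
    ULONG r0_to_r3[4];      /* Hardware-stacked registers                       */
    ULONG r12;
    ULONG lr;               /* Poisoned return address (0xFFFFFFFF)             */
    ULONG pc;               /* Thread entry function                            */
    ULONG xpsr;             /* 0x01000000, Thumb state bit set                  */
} EXAMPLE_INITIAL_FRAME;    /* 17 words = 68 bytes, matching SUB r2, r2, #68    */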
View File
@ -0,0 +1,96 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_system_return Cortex-M55/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is target processor specific. It is used to transfer */
/* control from a thread back to the ThreadX system. Only a */
/* minimal context is saved since the compiler assumes temp registers */
/* are going to be clobbered by a function call anyway. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_thread_schedule Thread scheduling loop */
/* */
/* CALLED BY */
/* */
/* ThreadX components */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_system_return(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_system_return
.thumb_func
.type _tx_thread_system_return, function
_tx_thread_system_return:
/* Return to real scheduler via PendSV. Note that this routine is often
replaced with in-line assembly in tx_port.h to improve performance. */
MOV r0, #0x10000000 // Load PENDSVSET bit
MOV r1, #0xE000E000 // Load NVIC base
STR r0, [r1, #0xD04] // Set PENDSVBIT in ICSR
MRS r0, IPSR // Pickup IPSR
CMP r0, #0 // Is it a thread returning?
BNE _isr_context // If ISR, skip interrupt enable
#ifdef TX_PORT_USE_BASEPRI
MRS r1, BASEPRI // Thread context returning, pickup BASEPRI
MOV r0, #0
MSR BASEPRI, r0 // Enable interrupts
MSR BASEPRI, r1 // Restore original interrupt posture
#else
MRS r1, PRIMASK // Thread context returning, pickup PRIMASK
CPSIE i // Enable interrupts
MSR PRIMASK, r1 // Restore original interrupt posture
#endif
_isr_context:
BX lr // Return to caller
// }
.end

View File

@ -0,0 +1,243 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Timer */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_timer_interrupt Cortex-M55/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function processes the hardware timer interrupt. This */
/* processing includes incrementing the system clock and checking for */
/* time slice and/or timer expiration. If either is found, the */
/* expiration functions are called. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_timer_expiration_process Timer expiration processing */
/* _tx_thread_time_slice Time slice interrupted thread */
/* */
/* CALLED BY */
/* */
/* interrupt vector */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_timer_interrupt(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_timer_interrupt
.thumb_func
.type _tx_timer_interrupt, function
_tx_timer_interrupt:
/* Upon entry to this routine, it is assumed that the compiler scratch registers are available
for use. */
/* Increment the system clock. */
// _tx_timer_system_clock++;
LDR r1, =_tx_timer_system_clock // Pickup address of system clock
LDR r0, [r1, #0] // Pickup system clock
ADD r0, r0, #1 // Increment system clock
STR r0, [r1, #0] // Store new system clock
/* Test for time-slice expiration. */
// if (_tx_timer_time_slice)
// {
LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
LDR r2, [r3, #0] // Pickup time-slice
CBZ r2, __tx_timer_no_time_slice // Is it non-active?
// Yes, skip time-slice processing
/* Decrement the time_slice. */
// _tx_timer_time_slice--;
SUB r2, r2, #1 // Decrement the time-slice
STR r2, [r3, #0] // Store new time-slice value
/* Check for expiration. */
// if (_tx_timer_time_slice == 0)
CBNZ r2, __tx_timer_no_time_slice // Has it expired?
// No, skip expiration processing
/* Set the time-slice expired flag. */
// _tx_timer_expired_time_slice = TX_TRUE;
LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
MOV r0, #1 // Build expired value
STR r0, [r3, #0] // Set time-slice expiration flag
// }
__tx_timer_no_time_slice:
/* Test for timer expiration. */
// if (*_tx_timer_current_ptr)
// {
LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
LDR r0, [r1, #0] // Pickup current timer
LDR r2, [r0, #0] // Pickup timer list entry
CBZ r2, __tx_timer_no_timer // Is there anything in the list?
// No, just increment the timer
/* Set expiration flag. */
// _tx_timer_expired = TX_TRUE;
LDR r3, =_tx_timer_expired // Pickup expiration flag address
MOV r2, #1 // Build expired value
STR r2, [r3, #0] // Set expired flag
B __tx_timer_done // Finished timer processing
// }
// else
// {
__tx_timer_no_timer:
/* No timer expired, increment the timer pointer. */
// _tx_timer_current_ptr++;
ADD r0, r0, #4 // Move to next timer
/* Check for wrap-around. */
// if (_tx_timer_current_ptr == _tx_timer_list_end)
LDR r3, =_tx_timer_list_end // Pickup addr of timer list end
LDR r2, [r3, #0] // Pickup list end
CMP r0, r2 // Are we at list end?
BNE __tx_timer_skip_wrap // No, skip wrap-around logic
/* Wrap to beginning of list. */
// _tx_timer_current_ptr = _tx_timer_list_start;
LDR r3, =_tx_timer_list_start // Pickup addr of timer list start
LDR r0, [r3, #0] // Set current pointer to list start
__tx_timer_skip_wrap:
STR r0, [r1, #0] // Store new current timer pointer
// }
__tx_timer_done:
/* See if anything has expired. */
// if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
// {
LDR r3, =_tx_timer_expired_time_slice // Pickup addr of expired flag
LDR r2, [r3, #0] // Pickup time-slice expired flag
CBNZ r2, __tx_something_expired // Did a time-slice expire?
// If non-zero, time-slice expired
LDR r1, =_tx_timer_expired // Pickup addr of other expired flag
LDR r0, [r1, #0] // Pickup timer expired flag
CBZ r0, __tx_timer_nothing_expired // Did a timer expire?
// No, nothing expired
__tx_something_expired:
PUSH {r0, lr} // Save the lr register on the stack
// and save r0 just to keep 8-byte alignment
/* Did a timer expire? */
// if (_tx_timer_expired)
// {
LDR r1, =_tx_timer_expired // Pickup addr of expired flag
LDR r0, [r1, #0] // Pickup timer expired flag
CBZ r0, __tx_timer_dont_activate // Check for timer expiration
// If not set, skip timer activation
/* Process timer expiration. */
// _tx_timer_expiration_process();
BL _tx_timer_expiration_process // Call the timer expiration handling routine
// }
__tx_timer_dont_activate:
/* Did time slice expire? */
// if (_tx_timer_expired_time_slice)
// {
LDR r3, =_tx_timer_expired_time_slice // Pickup addr of time-slice expired
LDR r2, [r3, #0] // Pickup the actual flag
CBZ r2, __tx_timer_not_ts_expiration // See if the flag is set
// No, skip time-slice processing
/* Time slice interrupted thread. */
// _tx_thread_time_slice();
BL _tx_thread_time_slice // Call time-slice processing
LDR r0, =_tx_thread_preempt_disable // Build address of preempt disable flag
LDR r1, [r0] // Is the preempt disable flag set?
CBNZ r1, __tx_timer_skip_time_slice // Yes, skip the PendSV logic
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r1, [r0] // Pickup the current thread pointer
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
LDR r3, [r2] // Pickup the execute thread pointer
LDR r0, =0xE000ED04 // Build address of control register
LDR r2, =0x10000000 // Build value for PendSV bit
CMP r1, r3 // Are they the same?
BEQ __tx_timer_skip_time_slice // If the same, there was no time-slice performed
STR r2, [r0] // Not the same, issue the PendSV for preemption
__tx_timer_skip_time_slice:
// }
__tx_timer_not_ts_expiration:
POP {r0, lr} // Recover lr register (r0 is just there for
// the 8-byte stack alignment)
// }
__tx_timer_nothing_expired:
DSB // Complete all memory access
BX lr // Return to caller
// }
.end

View File

@ -0,0 +1,119 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#define TX_SOURCE_CODE
/* Include necessary system files. */
#include "tx_api.h"
#include "tx_initialize.h"
#include "tx_thread.h"
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _txe_thread_secure_stack_allocate Cortex-M55 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function checks for errors in the secure stack allocate */
/* function call. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* stack_size Size of secure stack to */
/* allocate */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* _tx_thread_secure_stack_allocate Actual stack alloc function */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txe_thread_secure_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
{
#if defined(TX_SINGLE_MODE_SECURE) || defined(TX_SINGLE_MODE_NON_SECURE)
return(TX_FEATURE_NOT_ENABLED);
#else
UINT status;
/* Default status to success. */
status = TX_SUCCESS;
/* Check for an invalid thread pointer. */
if (thread_ptr == TX_NULL)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Now check for invalid thread ID. */
else if (thread_ptr -> tx_thread_id != TX_THREAD_ID)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Check for interrupt call. */
if (TX_THREAD_GET_SYSTEM_STATE() != ((ULONG) 0))
{
/* Is call from an interrupt and not initialization? */
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
{
/* Invalid caller of this function, return appropriate error code. */
status = TX_CALLER_ERROR;
}
}
/* Determine if everything is okay. */
if (status == TX_SUCCESS)
{
/* Call actual secure stack allocate function. */
status = _tx_thread_secure_stack_allocate(thread_ptr, stack_size);
}
/* Return completion status. */
return(status);
#endif
}
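For reference, a minimal usage sketch of the secure stack services guarded by this wrapper (the thread object and the 256-byte size below are illustrative, not taken from the port sources):

#include "tx_api.h"

/* Illustrative only: my_thread is assumed to have been created elsewhere
   with tx_thread_create in the non-secure application. */
extern TX_THREAD my_thread;

void demo_secure_stack_usage(void)
{
UINT status;

    /* Give the thread a secure-side stack before its first call into the
       secure world. */
    status = tx_thread_secure_stack_allocate(&my_thread, 256);

    if (status == TX_SUCCESS)
    {
        /* ... the thread may now call veneers into the secure image ... */

        /* Release the secure stack when it is no longer needed. */
        tx_thread_secure_stack_free(&my_thread);
    }
}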

View File

@ -0,0 +1,120 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#define TX_SOURCE_CODE
/* Include necessary system files. */
#include "tx_api.h"
#include "tx_initialize.h"
#include "tx_thread.h"
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _txe_thread_secure_stack_free Cortex-M55 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function checks for errors in the secure stack free */
/* function call. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* _tx_thread_secure_stack_free Actual stack free function */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txe_thread_secure_stack_free(TX_THREAD *thread_ptr)
{
#if defined(TX_SINGLE_MODE_SECURE) || defined(TX_SINGLE_MODE_NON_SECURE)
return(TX_FEATURE_NOT_ENABLED);
#else
UINT status;
/* Default status to success. */
status = TX_SUCCESS;
/* Check for an invalid thread pointer. */
if (thread_ptr == TX_NULL)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Now check for invalid thread ID. */
else if (thread_ptr -> tx_thread_id != TX_THREAD_ID)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Check for interrupt call. */
if (TX_THREAD_GET_SYSTEM_STATE() != ((ULONG) 0))
{
/* Is call from an interrupt and not initialization? */
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
{
/* Invalid caller of this function, return appropriate error code. */
status = TX_CALLER_ERROR;
}
}
/* Determine if everything is okay. */
if (status == TX_SUCCESS)
{
/* Call actual secure stack free function. */
status = _tx_thread_secure_stack_free(thread_ptr);
}
/* Return completion status. */
return(status);
#endif
}

View File

@ -0,0 +1,21 @@
target_sources(${PROJECT_NAME} PRIVATE
${CMAKE_CURRENT_LIST_DIR}/src/txe_thread_secure_stack_allocate.c
${CMAKE_CURRENT_LIST_DIR}/src/txe_thread_secure_stack_free.c
${CMAKE_CURRENT_LIST_DIR}/src/tx_initialize_low_level.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_context_restore.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_context_save.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_interrupt_control.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_interrupt_disable.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_interrupt_restore.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_schedule.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_secure_stack.c
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_secure_stack_allocate.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_secure_stack_free.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_stack_build.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_system_return.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_timer_interrupt.S
)
target_include_directories(${PROJECT_NAME} PUBLIC
inc
)

View File

@ -0,0 +1,649 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Port Specific */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M55/GNU */
/* 6.1.11 */
/* */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This file contains data type definitions that make the ThreadX */
/* real-time kernel function identically on a variety of different */
/* processor architectures. For example, the size or number of bits */
/* in an "int" data type vary between microprocessor architectures and */
/* even C compilers for the same microprocessor. ThreadX does not */
/* directly use native C data types. Instead, ThreadX creates its */
/* own special types that can be mapped to actual data types by this */
/* file to guarantee consistency in the interface and functionality. */
/* */
/* This file replaces the previous Cortex-M55 files. It unifies */
/* the Cortex-M55 compilers into one common file. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 03-02-2021 Scott Larson Modified comment(s), added */
/* ULONG64_DEFINED, */
/* resulting in version 6.1.5 */
/* 06-02-2021 Scott Larson Modified comment(s), removed */
/* unneeded header file, funcs */
/* set_control and get_control */
/* changed to inline, */
/* added symbol to enable */
/* stack error handler, */
/* resulting in version 6.1.7 */
/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* 01-31-2022 Scott Larson Modified comment(s), unified */
/* this file across compilers, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
/* 04-25-2022 Scott Larson Modified comments and added */
/* volatile to registers, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
#ifndef TX_PORT_H
#define TX_PORT_H
/* Determine if the optional ThreadX user define file should be used. */
#ifdef TX_INCLUDE_USER_DEFINE_FILE
/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
#endif /* TX_INCLUDE_USER_DEFINE_FILE */
/* Define compiler library include files. */
#include <stdlib.h>
#include <string.h>
#ifdef __ICCARM__
#include <intrinsics.h> /* IAR Intrinsics */
#define __asm__ __asm /* Define to make all inline asm from each compiler look similar */
#define _tx_control_get __get_CONTROL
#define _tx_control_set __set_CONTROL
#define _tx_ipsr_get __get_IPSR
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#endif /* __ICCARM__ */
#ifdef __ARMCOMPILER_VERSION
#include <arm_compat.h>
#endif
/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
typedef unsigned char UCHAR;
typedef int INT;
typedef unsigned int UINT;
typedef long LONG;
typedef unsigned long ULONG;
typedef unsigned long long ULONG64;
typedef short SHORT;
typedef unsigned short USHORT;
#define ULONG64_DEFINED
/* Function prototypes for this port. */
struct TX_THREAD_STRUCT;
UINT _txe_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *thread_ptr, ULONG stack_size);
UINT _txe_thread_secure_stack_free(struct TX_THREAD_STRUCT *thread_ptr);
UINT _tx_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *tx_thread, ULONG stack_size);
UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
/* Define the system API mappings based on the error checking
selected by the user. Note: this section is only applicable to
application source code, hence the conditional that turns off this
stuff when the include file is processed by the ThreadX source. */
#ifndef TX_SOURCE_CODE
/* Determine if error checking is desired. If so, map API functions
to the appropriate error checking front-ends. Otherwise, map API
functions to the core functions that actually perform the work.
Note: error checking is enabled by default. */
#ifdef TX_DISABLE_ERROR_CHECKING
/* Services without error checking. */
#define tx_thread_secure_stack_allocate _tx_thread_secure_stack_allocate
#define tx_thread_secure_stack_free _tx_thread_secure_stack_free
#else
/* Services with error checking. */
#define tx_thread_secure_stack_allocate _txe_thread_secure_stack_allocate
#define tx_thread_secure_stack_free _txe_thread_secure_stack_free
#endif /* TX_DISABLE_ERROR_CHECKING */
#endif /* TX_SOURCE_CODE */
/* This port has a usage fault handler in _tx_initialize_low_level for stack exceptions. */
#define TX_PORT_THREAD_STACK_ERROR_HANDLING
/* Define the priority levels for ThreadX. Legal values range
from 32 to 1024 and MUST be evenly divisible by 32. */
#ifndef TX_MAX_PRIORITIES
#define TX_MAX_PRIORITIES 32
#endif
/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
thread creation is less than this value, the thread create call will return an error. */
#ifndef TX_MINIMUM_STACK
#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
#endif
/* Define the system timer thread's default stack size and priority. These are only applicable
if TX_TIMER_PROCESS_IN_ISR is not defined. */
#ifndef TX_TIMER_THREAD_STACK_SIZE
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
#ifndef TX_TIMER_THREAD_PRIORITY
#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
/* Define various constants for the ThreadX Cortex-M port. */
#define TX_INT_DISABLE 1 /* Disable interrupts */
#define TX_INT_ENABLE 0 /* Enable interrupts */
/* Define the clock source for trace event entry time stamp. The following two items are port specific.
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
#endif
/* Define the port specific options for the _tx_build_options variable. This variable indicates
how the ThreadX library was built. */
#define TX_PORT_SPECIFIC_BUILD_OPTIONS (0)
/* Define the in-line initialization constant so that modules with in-line
initialization capabilities can prevent their initialization from being
a function call. */
#ifdef TX_MISRA_ENABLE
#define TX_DISABLE_INLINE
#else
#define TX_INLINE_INITIALIZATION
#endif
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
logic. */
#ifndef TX_MISRA_ENABLE
#ifdef TX_ENABLE_STACK_CHECKING
#undef TX_DISABLE_STACK_FILLING
#endif
#endif
/* Define the TX_THREAD control block extensions for this port. The main reason
for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
#define TX_THREAD_EXTENSION_0
#define TX_THREAD_EXTENSION_1
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
/* IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context; \
VOID *tx_thread_iar_tls_pointer;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_iar_tls_pointer;
#endif
#else
/* No IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
#define TX_BLOCK_POOL_EXTENSION
#define TX_BYTE_POOL_EXTENSION
#define TX_EVENT_FLAGS_GROUP_EXTENSION
#define TX_MUTEX_EXTENSION
#define TX_QUEUE_EXTENSION
#define TX_SEMAPHORE_EXTENSION
#define TX_TIMER_EXTENSION
/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
#define TX_THREAD_USER_EXTENSION
#endif
/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
tx_thread_shell_entry, and tx_thread_terminate. */
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
void *_tx_iar_create_per_thread_tls_area(void);
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
void __iar_Initlocks(void);
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = _tx_iar_create_per_thread_tls_area();
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0); \
if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0);
#endif
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION do {__iar_Initlocks();} while(0);
#else /* No IAR library support. */
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* Define the size of the secure stack for the timer thread and use the extension to allocate the secure stack. */
#define TX_TIMER_THREAD_SECURE_STACK_SIZE 256
#define TX_TIMER_INITIALIZE_EXTENSION(status) _tx_thread_secure_stack_allocate(&_tx_timer_thread, TX_TIMER_THREAD_SECURE_STACK_SIZE);
#endif
#if defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__)
#ifdef TX_MISRA_ENABLE
ULONG _tx_misra_control_get(void);
void _tx_misra_control_set(ULONG value);
ULONG _tx_misra_fpccr_get(void);
void _tx_misra_vfp_touch(void);
#else /* TX_MISRA_ENABLE not defined */
#ifdef __GNUC__ /* GCC and ARM Compiler 6 */
__attribute__( ( always_inline ) ) static inline ULONG _tx_control_get(void)
{
ULONG control_value;
__asm__ volatile (" MRS %0,CONTROL ": "=r" (control_value) );
return(control_value);
}
__attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG control_value)
{
__asm__ volatile (" MSR CONTROL,%0": : "r" (control_value): "memory" );
}
#endif /* __GNUC__ */
/* Touch VFP register in order to flush. Works for AC6/GCC/IAR compilers. */
#define TX_VFP_TOUCH() __asm__ volatile ("VMOV.F32 s0, s0");
#endif /* TX_MISRA_ENABLE */
/* A completed thread falls into _thread_shell_entry and we can simply deactivate the FPU via CONTROL.FPCA
in order to ensure no lazy stacking will occur. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
}
#else
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
}
#endif
/* A thread can be terminated by another thread, so we first check if it's self-terminating and not in an ISR.
If so, deactivate the FPU via CONTROL.FPCA. Otherwise we are in an interrupt or another thread is terminating
this one, so if the FPCCR.LSPACT bit is set, we need to save the CONTROL.FPCA state, touch the FPU to flush
the lazy FPU save, then restore the CONTROL.FPCA state. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
} \
} \
}
#else
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = _tx_misra_fpccr_get(); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
_tx_misra_vfp_touch(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
} \
} \
}
#endif
#else /* No VFP in use */
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
#endif /* defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__) */
/* Define the ThreadX object creation extensions for the remaining objects. */
#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
/* Define the ThreadX object deletion extensions for the remaining objects. */
#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
/* Define the get system state macro. */
#ifndef TX_THREAD_GET_SYSTEM_STATE
#ifndef TX_MISRA_ENABLE
#if defined(__GNUC__) /* GCC and AC6 */
__attribute__( ( always_inline ) ) static inline UINT _tx_ipsr_get(void)
{
UINT ipsr_value;
__asm__ volatile (" MRS %0,IPSR ": "=r" (ipsr_value) );
return(ipsr_value);
}
#endif /* GCC and AC6 IPSR_get function. */
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_ipsr_get())
#else /* TX_MISRA_ENABLE is defined, use MISRA function. */
ULONG _tx_misra_ipsr_get(VOID);
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_misra_ipsr_get())
#endif /* TX_MISRA_ENABLE */
#endif /* TX_THREAD_GET_SYSTEM_STATE */
/* Define the check for whether or not to call the _tx_thread_system_return function. A non-zero value
indicates that _tx_thread_system_return should not be called. This overrides the definition in tx_thread.h
for Cortex-M, so we don't waste time checking the _tx_thread_system_state variable that is always
zero after initialization for Cortex-M ports. */
#ifndef TX_THREAD_SYSTEM_RETURN_CHECK
#define TX_THREAD_SYSTEM_RETURN_CHECK(c) (c) = ((ULONG) _tx_thread_preempt_disable);
#endif
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* Initialize secure stacks for threads calling secure functions. */
extern void _tx_thread_secure_stack_initialize(void);
#define TX_INITIALIZE_KERNEL_ENTER_EXTENSION _tx_thread_secure_stack_initialize();
#endif
/* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
prevent early scheduling on Cortex-M parts. */
#define TX_PORT_SPECIFIC_POST_INITIALIZATION _tx_thread_preempt_disable++;
#ifndef TX_DISABLE_INLINE
/* Define the TX_LOWEST_SET_BIT_CALCULATE macro for each compiler. */
#ifdef __ICCARM__ /* IAR Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = (UINT) __CLZ(__RBIT((m)));
#elif defined(__GNUC__) /* GCC and AC6 Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) __asm__ volatile (" RBIT %0,%1 ": "=r" (m) : "r" (m) ); \
__asm__ volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) );
#else
#error "Compiler not supported."
#endif
/* Define the interrupt disable/restore macros. */
__attribute__( ( always_inline ) ) static inline UINT __get_interrupt_posture(void)
{
UINT posture;
#ifdef TX_PORT_USE_BASEPRI
__asm__ volatile ("MRS %0, BASEPRI ": "=r" (posture));
#else
__asm__ volatile ("MRS %0, PRIMASK ": "=r" (posture));
#endif
return(posture);
}
#ifdef TX_PORT_USE_BASEPRI
__attribute__( ( always_inline ) ) static inline void __set_basepri_value(UINT basepri_value)
{
__asm__ volatile ("MSR BASEPRI,%0 ": : "r" (basepri_value));
}
#else
__attribute__( ( always_inline ) ) static inline void __enable_interrupts(void)
{
__asm__ volatile ("CPSIE i": : : "memory");
}
#endif
__attribute__( ( always_inline ) ) static inline void __restore_interrupt(UINT int_posture)
{
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(int_posture);
#else
__asm__ volatile ("MSR PRIMASK,%0": : "r" (int_posture): "memory");
#endif
}
__attribute__( ( always_inline ) ) static inline UINT __disable_interrupts(void)
{
UINT int_posture;
int_posture = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(TX_PORT_BASEPRI);
#else
__asm__ volatile ("CPSID i" : : : "memory");
#endif
return(int_posture);
}
__attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_inline(void)
{
UINT interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
*((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_tx_ipsr_get() == 0)
{
interrupt_save = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(0);
#else
__enable_interrupts();
#endif
__restore_interrupt(interrupt_save);
}
}
#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
#define TX_DISABLE interrupt_save = __disable_interrupts();
#define TX_RESTORE __restore_interrupt(interrupt_save);
/* Redefine _tx_thread_system_return for improved performance. */
#define _tx_thread_system_return _tx_thread_system_return_inline
#else /* TX_DISABLE_INLINE is defined */
UINT _tx_thread_interrupt_disable(VOID);
VOID _tx_thread_interrupt_restore(UINT previous_posture);
#define TX_INTERRUPT_SAVE_AREA register UINT interrupt_save;
#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
#endif /* TX_DISABLE_INLINE */
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
"Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M55/GNU Version 6.1.10 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
#else
extern CHAR _tx_version_id[];
#endif
#endif
#endif

View File

@ -0,0 +1,61 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* COMPONENT DEFINITION RELEASE */
/* */
/* tx_secure_interface.h PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This file defines the ThreadX secure thread stack components, */
/* including data types and external references. */
/* It is assumed that tx_api.h and tx_port.h have already been */
/* included. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
#ifndef TX_SECURE_INTERFACE_H
#define TX_SECURE_INTERFACE_H
/* Define internal secure thread stack function prototypes. */
extern UINT _tx_thread_secure_mode_stack_initialize(void);
extern UINT _tx_thread_secure_mode_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size);
extern UINT _tx_thread_secure_mode_stack_free(TX_THREAD *thread_ptr);
extern void _tx_thread_secure_stack_initialize(void);
extern void _tx_thread_secure_stack_context_save(TX_THREAD *thread_ptr);
extern void _tx_thread_secure_stack_context_restore(TX_THREAD *thread_ptr);
#endif

View File

@ -0,0 +1,210 @@
Microsoft's Azure RTOS ThreadX for Cortex-M55
Using the GNU Tools
1. Building the ThreadX run-time Library
Import all ThreadX common and port-specific source files into a GNU project.
Configure the project to build a library rather than an executable. This
results in the ThreadX run-time library file tx.a, which is needed by
the application.
Files tx_thread_stack_error_handler.c and tx_thread_stack_error_notify.c
replace the common files of the same name.
2. Demonstration System
No demonstration project is provided.
3. System Initialization
The entry point in ThreadX for the Cortex-M55 using GNU tools uses the standard GNU
Cortex-M55 reset sequence. From the reset vector the C runtime will be initialized.
The ThreadX tx_initialize_low_level.S file is responsible for setting up
various system data structures, the vector area, and a periodic timer interrupt
source.
In addition, _tx_initialize_low_level determines the first available
address for use by the application, which is supplied as the sole input
parameter to your application definition function, tx_application_define.
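For illustration only (not part of this port's sources), a minimal tx_application_define might carve its objects out of the first-unused-memory address described above; the object names and sizes below are arbitrary:

#include "tx_api.h"

#define DEMO_STACK_SIZE 1024

static TX_THREAD demo_thread; /* illustrative objects */
static TX_BYTE_POOL demo_pool;

static void demo_thread_entry(ULONG input)
{
    while (1)
    {
        tx_thread_sleep(100); /* arbitrary periodic work */
    }
}

void tx_application_define(void *first_unused_memory)
{
VOID *stack_ptr = TX_NULL;

    /* Create a byte pool at the first available RAM address reported by
       _tx_initialize_low_level. */
    tx_byte_pool_create(&demo_pool, "demo pool", first_unused_memory, 8192);

    /* Allocate a thread stack from the pool and create one thread. */
    tx_byte_allocate(&demo_pool, &stack_ptr, DEMO_STACK_SIZE, TX_NO_WAIT);
    tx_thread_create(&demo_thread, "demo thread", demo_thread_entry, 0,
                     stack_ptr, DEMO_STACK_SIZE,
                     16, 16, TX_NO_TIME_SLICE, TX_AUTO_START);
}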
4. Register Usage and Stack Frames
The following defines the saved context stack frames for context switches
that occur as a result of interrupt handling or from thread-level API calls.
All suspended threads have the same stack frame in the Cortex-M55 version of
ThreadX. The top of the suspended thread's stack is pointed to by
tx_thread_stack_ptr in the associated thread control block TX_THREAD.
Non-FPU Stack Frame:
Stack Offset Stack Contents
0x00 LR Interrupted LR (LR at time of PENDSV)
0x04 r4 Software stacked GP registers
0x08 r5
0x0C r6
0x10 r7
0x14 r8
0x18 r9
0x1C r10
0x20 r11
0x24 r0 Hardware stacked registers
0x28 r1
0x2C r2
0x30 r3
0x34 r12
0x38 lr
0x3C pc
0x40 xPSR
FPU Stack Frame (only for threads interrupted with the FPU enabled):
Stack Offset Stack Contents
0x00 LR Interrupted LR (LR at time of PENDSV)
0x04 s16 Software stacked FPU registers
0x08 s17
0x0C s18
0x10 s19
0x14 s20
0x18 s21
0x1C s22
0x20 s23
0x24 s24
0x28 s25
0x2C s26
0x30 s27
0x34 s28
0x38 s29
0x3C s30
0x40 s31
0x44 r4 Software stacked registers
0x48 r5
0x4C r6
0x50 r7
0x54 r8
0x58 r9
0x5C r10
0x60 r11
0x64 r0 Hardware stacked registers
0x68 r1
0x6C r2
0x70 r3
0x74 r12
0x78 lr
0x7C pc
0x80 xPSR
0x84 s0 Hardware stacked FPU registers
0x88 s1
0x8C s2
0x90 s3
0x94 s4
0x98 s5
0x9C s6
0xA0 s7
0xA4 s8
0xA8 s9
0xAC s10
0xB0 s11
0xB4 s12
0xB8 s13
0xBC s14
0xC0 s15
0xC4 fpscr
5. Improving Performance
To make ThreadX and the application(s) run faster, you can enable
all compiler optimizations.
In addition, you can eliminate the ThreadX basic API error checking by
compiling your application code with the symbol TX_DISABLE_ERROR_CHECKING
defined.
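For example, when the library and application are built with TX_INCLUDE_USER_DEFINE_FILE, the symbol can be placed in tx_user.h; the fragment below is a sketch, and defining TX_DISABLE_ERROR_CHECKING on the compiler command line has the same effect:

/* tx_user.h -- illustrative fragment */
#ifndef TX_USER_H
#define TX_USER_H

/* Remove the basic API error-checking layer from the build. */
#define TX_DISABLE_ERROR_CHECKING

#endif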
6. Interrupt Handling
ThreadX provides complete and high-performance interrupt handling for Cortex-M55
targets. The requirements are defined in the
following sub-sections:
6.1 Vector Area
The Cortex-M55 vectors start at the label __tx_vectors or similar. The application may modify
the vector area according to its needs. There is code in tx_initialize_low_level() that will
configure the vector base register.
6.2 Managed Interrupts
ISRs can be written completely in C (or assembly language) without any calls to
_tx_thread_context_save or _tx_thread_context_restore. These ISRs are allowed access to the
ThreadX API that is available to ISRs.
ISRs written in C will take the form (where "your_C_isr" is an entry in the vector table):
void your_C_isr(void)
{
/* ISR processing goes here, including any needed function calls. */
}
ISRs written in assembly language will take the form:
.global your_assembly_isr
.thumb_func
your_assembly_isr:
PUSH {r0, lr}
;
; /* Do interrupt handler work here */
; /* BL <your interrupt routine in C> */
POP {r0, lr}
BX lr
Note: the Cortex-M55 requires exception handlers to be thumb labels; this implies bit 0 set.
To accomplish this, the declaration of the label has to be preceded by the assembler directive
.thumb_func to instruct the linker to create thumb labels. The label __tx_IntHandler needs to
be inserted in the correct location in the interrupt vector table. This table is typically
located in either your runtime startup file or in the tx_initialize_low_level.S file.
7. FPU Support
ThreadX for Cortex-M55 provides automatic ("lazy") VFP support, which means that application threads
can simply use the VFP and ThreadX automatically maintains the VFP registers as part of the thread
context.
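As a sketch of what this means in practice (the thread and the arithmetic are arbitrary), no explicit save or restore calls are needed around floating-point use in a thread entry function:

#include "tx_api.h"

/* Illustrative thread entry: the VFP registers used here are preserved
   across preemption by the lazy stacking support described above. */
static void filter_thread_entry(ULONG input)
{
float accumulator = 0.0f;

    while (1)
    {
        accumulator = (accumulator * 0.95f) + 1.0f; /* arbitrary FP work */
        tx_thread_sleep(10);
    }
}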
8. Revision History
For generic code revision information, please refer to the readme_threadx_generic.txt
file, which is included in your distribution. The following details the revision
information associated with this specific port of ThreadX:
06-02-2021 Release 6.1.7 changes:
tx_thread_secure_stack_initialize.S New file
tx_thread_schedule.S Added secure stack initialize to SVC handler
tx_thread_secure_stack.c Fixed stack pointer save, initialize in handler mode
04-02-2021 Release 6.1.6 changes:
tx_port.h Updated macro definition
tx_thread_schedule.S Added low power support
03-02-2021 The following files were changed/added for version 6.1.5:
tx_port.h Added ULONG64_DEFINED
09-30-2020 Initial ThreadX 6.1 version for Cortex-M55 using GNU tools.
Copyright (c) 1996-2020 Microsoft Corporation
https://azure.com/rtos

View File

@ -0,0 +1,278 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Initialize */
/** */
/**************************************************************************/
/**************************************************************************/
SYSTEM_CLOCK = 6000000
SYSTICK_CYCLES = ((SYSTEM_CLOCK / 100) -1)
/* Setup the stack and heap areas. */
STACK_SIZE = 0x00000400
HEAP_SIZE = 0x00000000
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_initialize_low_level Cortex-M55/GNU */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for any low-level processor */
/* initialization, including setting up interrupt vectors, setting */
/* up a periodic timer interrupt source, saving the system stack */
/* pointer for use in ISR processing later, and finding the first */
/* available RAM memory address for tx_application_define. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
// VOID _tx_initialize_low_level(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_initialize_low_level
.thumb_func
.type _tx_initialize_low_level, function
_tx_initialize_low_level:
/* Disable interrupts during ThreadX initialization. */
CPSID i
/* Set base of available memory to end of non-initialised RAM area. */
LDR r0, =_tx_initialize_unused_memory // Build address of unused memory pointer
LDR r1, =__RAM_segment_used_end__ // Build first free address
ADD r1, r1, #4 //
STR r1, [r0] // Setup first unused memory pointer
/* Setup Vector Table Offset Register. */
MOV r0, #0xE000E000 // Build address of NVIC registers
LDR r1, =_vectors // Pickup address of vector table
STR r1, [r0, #0xD08] // Set vector table address
/* Enable the cycle count register. */
LDR r0, =0xE0001000 // Build address of DWT register
LDR r1, [r0] // Pickup the current value
ORR r1, r1, #1 // Set the CYCCNTENA bit
STR r1, [r0] // Enable the cycle count register
/* Set system stack pointer from vector value. */
LDR r0, =_tx_thread_system_stack_ptr // Build address of system stack pointer
LDR r1, =_vectors // Pickup address of vector table
LDR r1, [r1] // Pickup reset stack pointer
STR r1, [r0] // Save system stack pointer
/* Configure SysTick. */
MOV r0, #0xE000E000 // Build address of NVIC registers
LDR r1, =SYSTICK_CYCLES
STR r1, [r0, #0x14] // Setup SysTick Reload Value
MOV r1, #0x7 // Build SysTick Control Enable Value
STR r1, [r0, #0x10] // Setup SysTick Control
/* Configure handler priorities. */
LDR r1, =0x00000000 // Rsrv, UsgF, BusF, MemM
STR r1, [r0, #0xD18] // Setup System Handlers 4-7 Priority Registers
LDR r1, =0xFF000000 // SVCl, Rsrv, Rsrv, Rsrv
STR r1, [r0, #0xD1C] // Setup System Handlers 8-11 Priority Registers
// Note: SVC must be lowest priority, which is 0xFF
LDR r1, =0x40FF0000 // SysT, PnSV, Rsrv, DbgM
STR r1, [r0, #0xD20] // Setup System Handlers 12-15 Priority Registers
// Note: PnSV must be lowest priority, which is 0xFF
/* Return to caller. */
BX lr
// }
/* Define shells for each of the unused vectors. */
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_BadHandler
.thumb_func
.type __tx_BadHandler, function
__tx_BadHandler:
B __tx_BadHandler
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_IntHandler
.thumb_func
.type __tx_IntHandler, function
__tx_IntHandler:
// VOID InterruptHandler (VOID)
// {
PUSH {r0,lr} // Save LR (and dummy r0 to maintain stack alignment)
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_enter // Call the ISR enter function
#endif
/* Do interrupt handler work here */
/* .... */
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_exit // Call the ISR exit function
#endif
POP {r0,lr}
BX lr
// }
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global SysTick_Handler
.thumb_func
.type SysTick_Handler, function
SysTick_Handler:
// VOID TimerInterruptHandler (VOID)
// {
PUSH {r0,lr} // Save LR (and dummy r0 to maintain stack alignment)
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_enter // Call the ISR enter function
#endif
BL _tx_timer_interrupt
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_exit // Call the ISR exit function
#endif
POP {r0,lr}
BX lr
// }
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global HardFault_Handler
.thumb_func
.type HardFault_Handler, function
HardFault_Handler:
B HardFault_Handler
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global UsageFault_Handler
.thumb_func
.type UsageFault_Handler, function
UsageFault_Handler:
CPSID i // Disable interrupts
// Check for stack limit fault
LDR r0, =0xE000ED28 // CFSR address
LDR r1,[r0] // Pick up CFSR
TST r1, #0x00100000 // Check for Stack Overflow
_unhandled_usage_loop:
BEQ _unhandled_usage_loop // If not stack overflow then loop
// Handle stack overflow
STR r1, [r0] // Clear CFSR flag(s)
#ifdef __ARM_FP
LDR r0, =0xE000EF34 // Cleanup FPU context: Load FPCCR address
LDR r1, [r0] // Load FPCCR
BIC r1, r1, #1 // Clear the lazy preservation active bit
STR r1, [r0] // Store the value
#endif
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r0,[r0] // Pick up current thread pointer
PUSH {r0,lr} // Save LR (and r0 to maintain stack alignment)
BL _tx_thread_stack_error_handler // Call ThreadX/user handler
POP {r0,lr} // Restore LR and dummy reg
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
// Call the thread exit function to indicate the thread is no longer executing.
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
#endif
MOV r1, #0 // Build NULL value
LDR r0, =_tx_thread_current_ptr // Pickup address of current thread pointer
STR r1, [r0] // Clear current thread pointer
// Return from UsageFault_Handler exception
LDR r0, =0xE000ED04 // Load ICSR
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
CPSIE i // Enable interrupts
BX lr // Return from exception
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_NMIHandler
.thumb_func
.type __tx_NMIHandler, function
__tx_NMIHandler:
B __tx_NMIHandler
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_DBGHandler
.thumb_func
.type __tx_DBGHandler, function
__tx_DBGHandler:
B __tx_DBGHandler
.end

View File

@ -0,0 +1,719 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** ThreadX MISRA Compliance */
/** */
/**************************************************************************/
/**************************************************************************/
#define SHT_PROGBITS 0x1
.global __aeabi_memset
.global _tx_thread_current_ptr
.global _tx_thread_interrupt_disable
.global _tx_thread_interrupt_restore
.global _tx_thread_stack_analyze
.global _tx_thread_stack_error_handler
.global _tx_thread_system_state
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_trace_buffer_current_ptr
.global _tx_trace_buffer_end_ptr
.global _tx_trace_buffer_start_ptr
.global _tx_trace_event_enable_bits
.global _tx_trace_full_notify_function
.global _tx_trace_header_ptr
#endif
.global _tx_misra_always_true
.global _tx_misra_block_pool_to_uchar_pointer_convert
.global _tx_misra_byte_pool_to_uchar_pointer_convert
.global _tx_misra_char_to_uchar_pointer_convert
.global _tx_misra_const_char_to_char_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_entry_to_uchar_pointer_convert
#endif
.global _tx_misra_indirect_void_to_uchar_pointer_convert
.global _tx_misra_memset
.global _tx_misra_message_copy
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_object_to_uchar_pointer_convert
#endif
.global _tx_misra_pointer_to_ulong_convert
.global _tx_misra_status_get
.global _tx_misra_thread_stack_check
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_time_stamp_get
#endif
.global _tx_misra_timer_indirect_to_void_pointer_convert
.global _tx_misra_timer_pointer_add
.global _tx_misra_timer_pointer_dif
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_trace_event_insert
#endif
.global _tx_misra_uchar_pointer_add
.global _tx_misra_uchar_pointer_dif
.global _tx_misra_uchar_pointer_sub
.global _tx_misra_uchar_to_align_type_pointer_convert
.global _tx_misra_uchar_to_block_pool_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_uchar_to_entry_pointer_convert
.global _tx_misra_uchar_to_header_pointer_convert
#endif
.global _tx_misra_uchar_to_indirect_byte_pool_pointer_convert
.global _tx_misra_uchar_to_indirect_uchar_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_uchar_to_object_pointer_convert
#endif
.global _tx_misra_uchar_to_void_pointer_convert
.global _tx_misra_ulong_pointer_add
.global _tx_misra_ulong_pointer_dif
.global _tx_misra_ulong_pointer_sub
.global _tx_misra_ulong_to_pointer_convert
.global _tx_misra_ulong_to_thread_pointer_convert
.global _tx_misra_user_timer_pointer_get
.global _tx_misra_void_to_block_pool_pointer_convert
.global _tx_misra_void_to_byte_pool_pointer_convert
.global _tx_misra_void_to_event_flags_pointer_convert
.global _tx_misra_void_to_indirect_uchar_pointer_convert
.global _tx_misra_void_to_mutex_pointer_convert
.global _tx_misra_void_to_queue_pointer_convert
.global _tx_misra_void_to_semaphore_pointer_convert
.global _tx_misra_void_to_thread_pointer_convert
.global _tx_misra_void_to_uchar_pointer_convert
.global _tx_misra_void_to_ulong_pointer_convert
.global _tx_misra_ipsr_get
.global _tx_misra_control_get
.global _tx_misra_control_set
#ifdef __ARM_FP
.global _tx_misra_fpccr_get
.global _tx_misra_vfp_touch
#endif
.global _tx_misra_event_flags_group_not_used
.global _tx_misra_event_flags_set_notify_not_used
.global _tx_misra_queue_not_used
.global _tx_misra_queue_send_notify_not_used
.global _tx_misra_semaphore_not_used
.global _tx_misra_semaphore_put_notify_not_used
.global _tx_misra_thread_entry_exit_notify_not_used
.global _tx_misra_thread_not_used
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_memset(VOID *ptr, UINT value, UINT size); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.align 4
.syntax unified
.thumb_func
_tx_misra_memset:
PUSH {R4,LR}
MOVS R4,R0
MOVS R0,R2
MOVS R2,R1
MOVS R1,R0
MOVS R0,R4
BL __aeabi_memset
POP {R4,PC} // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UCHAR *_tx_misra_uchar_pointer_add(UCHAR *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_add:
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UCHAR *_tx_misra_uchar_pointer_sub(UCHAR *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_sub:
RSBS R1,R1,#+0
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_dif:
SUBS R0,R0,R1
BX LR // return
/************************************************************************************************************************************/
/************************************************************************************************************************************/
/** */
/** This single function serves all of the below prototypes. */
/** */
/** ULONG _tx_misra_pointer_to_ulong_convert(VOID *ptr); */
/** VOID *_tx_misra_ulong_to_pointer_convert(ULONG input); */
/** UCHAR **_tx_misra_indirect_void_to_uchar_pointer_convert(VOID **return_ptr); */
/** UCHAR **_tx_misra_uchar_to_indirect_uchar_pointer_convert(UCHAR *pointer); */
/** UCHAR *_tx_misra_block_pool_to_uchar_pointer_convert(TX_BLOCK_POOL *pool); */
/** TX_BLOCK_POOL *_tx_misra_void_to_block_pool_pointer_convert(VOID *pointer); */
/** UCHAR *_tx_misra_void_to_uchar_pointer_convert(VOID *pointer); */
/** TX_BLOCK_POOL *_tx_misra_uchar_to_block_pool_pointer_convert(UCHAR *pointer); */
/** UCHAR **_tx_misra_void_to_indirect_uchar_pointer_convert(VOID *pointer); */
/** TX_BYTE_POOL *_tx_misra_void_to_byte_pool_pointer_convert(VOID *pointer); */
/** UCHAR *_tx_misra_byte_pool_to_uchar_pointer_convert(TX_BYTE_POOL *pool); */
/** ALIGN_TYPE *_tx_misra_uchar_to_align_type_pointer_convert(UCHAR *pointer); */
/** TX_BYTE_POOL **_tx_misra_uchar_to_indirect_byte_pool_pointer_convert(UCHAR *pointer); */
/** TX_EVENT_FLAGS_GROUP *_tx_misra_void_to_event_flags_pointer_convert(VOID *pointer); */
/** ULONG *_tx_misra_void_to_ulong_pointer_convert(VOID *pointer); */
/** TX_MUTEX *_tx_misra_void_to_mutex_pointer_convert(VOID *pointer); */
/** TX_QUEUE *_tx_misra_void_to_queue_pointer_convert(VOID *pointer); */
/** TX_SEMAPHORE *_tx_misra_void_to_semaphore_pointer_convert(VOID *pointer); */
/** VOID *_tx_misra_uchar_to_void_pointer_convert(UCHAR *pointer); */
/** TX_THREAD *_tx_misra_ulong_to_thread_pointer_convert(ULONG value); */
/** VOID *_tx_misra_timer_indirect_to_void_pointer_convert(TX_TIMER_INTERNAL **pointer); */
/** CHAR *_tx_misra_const_char_to_char_pointer_convert(const char *pointer); */
/** TX_THREAD *_tx_misra_void_to_thread_pointer_convert(void *pointer); */
/** UCHAR *_tx_misra_object_to_uchar_pointer_convert(TX_TRACE_OBJECT_ENTRY *pointer); */
/** TX_TRACE_OBJECT_ENTRY *_tx_misra_uchar_to_object_pointer_convert(UCHAR *pointer); */
/** TX_TRACE_HEADER *_tx_misra_uchar_to_header_pointer_convert(UCHAR *pointer); */
/** TX_TRACE_BUFFER_ENTRY *_tx_misra_uchar_to_entry_pointer_convert(UCHAR *pointer); */
/** UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer); */
/** UCHAR *_tx_misra_char_to_uchar_pointer_convert(CHAR *pointer); */
/** VOID _tx_misra_event_flags_group_not_used(TX_EVENT_FLAGS_GROUP *group_ptr); */
/** VOID _tx_misra_event_flags_set_notify_not_used(VOID (*events_set_notify)(TX_EVENT_FLAGS_GROUP *notify_group_ptr)); */
/** VOID _tx_misra_queue_not_used(TX_QUEUE *queue_ptr); */
/** VOID _tx_misra_queue_send_notify_not_used(VOID (*queue_send_notify)(TX_QUEUE *notify_queue_ptr)); */
/** VOID _tx_misra_semaphore_not_used(TX_SEMAPHORE *semaphore_ptr); */
/** VOID _tx_misra_semaphore_put_notify_not_used(VOID (*semaphore_put_notify)(TX_SEMAPHORE *notify_semaphore_ptr)); */
/** VOID _tx_misra_thread_not_used(TX_THREAD *thread_ptr); */
/** VOID _tx_misra_thread_entry_exit_notify_not_used(VOID (*thread_entry_exit_notify)(TX_THREAD *notify_thread_ptr, UINT id)); */
/** */
/************************************************************************************************************************************/
/************************************************************************************************************************************/
.text
.thumb_func
_tx_misra_pointer_to_ulong_convert:
_tx_misra_ulong_to_pointer_convert:
_tx_misra_indirect_void_to_uchar_pointer_convert:
_tx_misra_uchar_to_indirect_uchar_pointer_convert:
_tx_misra_block_pool_to_uchar_pointer_convert:
_tx_misra_void_to_block_pool_pointer_convert:
_tx_misra_void_to_uchar_pointer_convert:
_tx_misra_uchar_to_block_pool_pointer_convert:
_tx_misra_void_to_indirect_uchar_pointer_convert:
_tx_misra_void_to_byte_pool_pointer_convert:
_tx_misra_byte_pool_to_uchar_pointer_convert:
_tx_misra_uchar_to_align_type_pointer_convert:
_tx_misra_uchar_to_indirect_byte_pool_pointer_convert:
_tx_misra_void_to_event_flags_pointer_convert:
_tx_misra_void_to_ulong_pointer_convert:
_tx_misra_void_to_mutex_pointer_convert:
_tx_misra_void_to_queue_pointer_convert:
_tx_misra_void_to_semaphore_pointer_convert:
_tx_misra_uchar_to_void_pointer_convert:
_tx_misra_ulong_to_thread_pointer_convert:
_tx_misra_timer_indirect_to_void_pointer_convert:
_tx_misra_const_char_to_char_pointer_convert:
_tx_misra_void_to_thread_pointer_convert:
#ifdef TX_ENABLE_EVENT_TRACE
_tx_misra_object_to_uchar_pointer_convert:
_tx_misra_uchar_to_object_pointer_convert:
_tx_misra_uchar_to_header_pointer_convert:
_tx_misra_uchar_to_entry_pointer_convert:
_tx_misra_entry_to_uchar_pointer_convert:
#endif
_tx_misra_char_to_uchar_pointer_convert:
_tx_misra_event_flags_group_not_used:
_tx_misra_event_flags_set_notify_not_used:
_tx_misra_queue_not_used:
_tx_misra_queue_send_notify_not_used:
_tx_misra_semaphore_not_used:
_tx_misra_semaphore_put_notify_not_used:
_tx_misra_thread_entry_exit_notify_not_used:
_tx_misra_thread_not_used:
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG *_tx_misra_ulong_pointer_add(ULONG *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_add:
ADD R0,R0,R1, LSL #+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_sub:
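// MVNS yields ~3 == -4, so the multiply turns the ULONG count into a negative byte offset.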
MVNS R2,#+3
MULS R1,R2,R1
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_dif:
SUBS R0,R0,R1
ASRS R0,R0,#+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_message_copy(ULONG **source, ULONG **destination, */
/** UINT size); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_message_copy:
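// Copies "size" ULONG words from **source to **destination, then writes the
// advanced pointers back through both indirections.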
PUSH {R4,R5}
LDR R3,[R0, #+0]
LDR R4,[R1, #+0]
LDR R5,[R3, #+0]
STR R5,[R4, #+0]
ADDS R4,R4,#+4
ADDS R3,R3,#+4
CMP R2,#+2
BCC.N _tx_misra_message_copy_0
SUBS R2,R2,#+1
B.N _tx_misra_message_copy_1
_tx_misra_message_copy_2:
LDR R5,[R3, #+0]
STR R5,[R4, #+0]
ADDS R4,R4,#+4
ADDS R3,R3,#+4
SUBS R2,R2,#+1
_tx_misra_message_copy_1:
CMP R2,#+0
BNE.N _tx_misra_message_copy_2
_tx_misra_message_copy_0:
STR R3,[R0, #+0]
STR R4,[R1, #+0]
POP {R4,R5}
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_timer_pointer_dif(TX_TIMER_INTERNAL **ptr1, */
/** TX_TIMER_INTERNAL **ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_timer_pointer_dif:
SUBS R0,R0,R1
ASRS R0,R0,#+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** TX_TIMER_INTERNAL **_tx_misra_timer_pointer_add(TX_TIMER_INTERNAL */
/** **ptr1, ULONG size); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_timer_pointer_add:
ADD R0,R0,R1, LSL #+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_user_timer_pointer_get(TX_TIMER_INTERNAL */
/** *internal_timer, TX_TIMER **user_timer); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_user_timer_pointer_get:
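// The TX_TIMER_INTERNAL member sits at offset 8 inside TX_TIMER in this port,
// so stepping back 8 bytes recovers the application's TX_TIMER pointer.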
SUBS R0,#8
STR R0,[R1, #+0]
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_thread_stack_check(TX_THREAD *thread_ptr, */
/** VOID **highest_stack); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_thread_stack_check:
PUSH {R3-R5,LR}
MOVS R4,R0
MOVS R5,R1
BL _tx_thread_interrupt_disable
CMP R4,#+0
BEQ.N _tx_misra_thread_stack_check_0
LDR R1,[R4, #+0]
LDR R2,=0x54485244
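// 0x54485244 ("THRD") is TX_THREAD_ID, used to validate the thread control block.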
CMP R1,R2
BNE.N _tx_misra_thread_stack_check_0
LDR R1,[R4, #+8]
LDR R2,[R5, #+0]
CMP R1,R2
BCS.N _tx_misra_thread_stack_check_1
LDR R1,[R4, #+8]
STR R1,[R5, #+0]
_tx_misra_thread_stack_check_1:
LDR R1,[R4, #+12]
LDR R1,[R1, #+0]
CMP R1,#-269488145
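// -269488145 is 0xEFEFEFEF, the ThreadX stack-fill pattern checked at both stack ends.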
BNE.N _tx_misra_thread_stack_check_2
LDR R1,[R4, #+16]
LDR R1,[R1, #+1]
CMP R1,#-269488145
BNE.N _tx_misra_thread_stack_check_2
LDR R1,[R5, #+0]
LDR R2,[R4, #+12]
CMP R1,R2
BCS.N _tx_misra_thread_stack_check_3
_tx_misra_thread_stack_check_2:
BL _tx_thread_interrupt_restore
MOVS R0,R4
BL _tx_thread_stack_error_handler
BL _tx_thread_interrupt_disable
_tx_misra_thread_stack_check_3:
LDR R1,[R5, #+0]
LDR R1,[R1, #-4]
CMP R1,#-269488145
BEQ.N _tx_misra_thread_stack_check_0
BL _tx_thread_interrupt_restore
MOVS R0,R4
BL _tx_thread_stack_analyze
BL _tx_thread_interrupt_disable
_tx_misra_thread_stack_check_0:
BL _tx_thread_interrupt_restore
POP {R0,R4,R5,PC} // return
#ifdef TX_ENABLE_EVENT_TRACE
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_trace_event_insert(ULONG event_id, */
/** VOID *info_field_1, ULONG info_field_2, ULONG info_field_3, */
/** ULONG info_field_4, ULONG filter, ULONG time_stamp); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_trace_event_insert:
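// Builds one 32-byte trace entry at _tx_trace_buffer_current_ptr; when the end of
// the buffer is reached, wraps to the start and calls the full-buffer notify
// callback if one is registered.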
PUSH {R3-R7,LR}
LDR.N R4,DataTable2_1
LDR R4,[R4, #+0]
CMP R4,#+0
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R5,DataTable2_2
LDR R5,[R5, #+0]
LDR R6,[SP, #+28]
TST R5,R6
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R5,DataTable2_3
LDR R5,[R5, #+0]
LDR.N R6,DataTable2_4
LDR R6,[R6, #+0]
CMP R5,#+0
BNE.N _tx_misra_trace_event_insert_1
LDR R5,[R6, #+44]
LDR R7,[R6, #+60]
LSLS R7,R7,#+16
ORRS R7,R7,#0x80000000
ORRS R5,R7,R5
B.N _tx_misra_trace_event_insert_2
_tx_misra_trace_event_insert_1:
CMP R5,#-252645136
BCS.N _tx_misra_trace_event_insert_3
MOVS R5,R6
MOVS R6,#-1
B.N _tx_misra_trace_event_insert_2
_tx_misra_trace_event_insert_3:
MOVS R6,#-252645136
MOVS R5,#+0
_tx_misra_trace_event_insert_2:
STR R6,[R4, #+0]
STR R5,[R4, #+4]
STR R0,[R4, #+8]
LDR R0,[SP, #+32]
STR R0,[R4, #+12]
STR R1,[R4, #+16]
STR R2,[R4, #+20]
STR R3,[R4, #+24]
LDR R0,[SP, #+24]
STR R0,[R4, #+28]
ADDS R4,R4,#+32
LDR.N R0,DataTable2_5
LDR R0,[R0, #+0]
CMP R4,R0
BCC.N _tx_misra_trace_event_insert_4
LDR.N R0,DataTable2_6
LDR R4,[R0, #+0]
LDR.N R0,DataTable2_1
STR R4,[R0, #+0]
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
STR R4,[R0, #+32]
LDR.N R0,DataTable2_8
LDR R0,[R0, #+0]
CMP R0,#+0
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
LDR.N R1,DataTable2_8
LDR R1,[R1, #+0]
BLX R1
B.N _tx_misra_trace_event_insert_0
_tx_misra_trace_event_insert_4:
LDR.N R0,DataTable2_1
STR R4,[R0, #+0]
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
STR R4,[R0, #+32]
_tx_misra_trace_event_insert_0:
POP {R0,R4-R7,PC} // return
.data
DataTable2_1:
.word _tx_trace_buffer_current_ptr
.data
DataTable2_2:
.word _tx_trace_event_enable_bits
.data
DataTable2_5:
.word _tx_trace_buffer_end_ptr
.data
DataTable2_6:
.word _tx_trace_buffer_start_ptr
.data
DataTable2_7:
.word _tx_trace_header_ptr
.data
DataTable2_8:
.word _tx_trace_full_notify_function
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_time_stamp_get(VOID); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_time_stamp_get:
MOVS R0,#+0
BX LR // return
#endif
.data
DataTable2_3:
.word _tx_thread_system_state
.data
DataTable2_4:
.word _tx_thread_current_ptr
/**************************************************************************/
/**************************************************************************/
/** */
/** UINT _tx_misra_always_true(void); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_always_true:
MOVS R0,#+1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UINT _tx_misra_status_get(UINT status); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_status_get:
MOVS R0,#+0
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_ipsr_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_ipsr_get:
MRS R0, IPSR
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_control_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_control_get:
MRS R0, CONTROL
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** void _tx_misra_control_set(ULONG value); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_control_set:
MSR CONTROL, R0
BX LR // return
#ifdef __ARM_FP
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_fpccr_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_fpccr_get:
LDR r0, =0xE000EF34 // Build FPCCR address
LDR r0, [r0] // Load FPCCR value
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** void _tx_misra_vfp_touch(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_vfp_touch:
vmov.f32 s0, s0
BX LR // return
#endif
.data
.word 0


@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
.global _tx_execution_isr_exit
#endif
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore Cortex-M55/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is not needed for Cortex-M. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* [_tx_execution_isr_exit] Execution profiling ISR exit */
/* */
/* CALLED BY */
/* */
/* ISRs Interrupt Service Routines */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_context_restore
.thumb_func
.type _tx_thread_context_restore, function
_tx_thread_context_restore:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
PUSH {r0, lr} // Save return address
BL _tx_execution_isr_exit // Call the ISR exit function
POP {r0, lr} // Recover return address
#endif
BX lr
// }
.end


@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
.global _tx_execution_isr_enter
#endif
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_save Cortex-M55/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is not needed for Cortex-M. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* [_tx_execution_isr_enter] Execution profiling ISR enter */
/* */
/* CALLED BY */
/* */
/* ISRs */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_save(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_context_save
.thumb_func
.type _tx_thread_context_save, function
_tx_thread_context_save:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR enter function to indicate an ISR is starting. */
PUSH {r0, lr} // Save return address
BL _tx_execution_isr_enter // Call the ISR enter function
POP {r0, lr} // Recover return address
#endif
BX lr
// }
.end


@ -0,0 +1,82 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_control Cortex-M55/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for changing the interrupt lockout */
/* posture of the system. */
/* */
/* INPUT */
/* */
/* new_posture New interrupt lockout posture */
/* */
/* OUTPUT */
/* */
/* old_posture Old interrupt lockout posture */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_interrupt_control(UINT new_posture)
// {
.section .text
.balign 4
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_interrupt_control
.thumb_func
.type _tx_thread_interrupt_control, function
_tx_thread_interrupt_control:
#ifdef TX_PORT_USE_BASEPRI
MRS r1, BASEPRI // Pickup current interrupt posture
MSR BASEPRI, r0 // Apply the new interrupt posture
MOV r0, r1 // Transfer old to return register
#else
MRS r1, PRIMASK // Pickup current interrupt lockout
MSR PRIMASK, r0 // Apply the new interrupt lockout
MOV r0, r1 // Transfer old to return register
#endif
BX lr // Return to caller
// }
.end


@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_disable Cortex-M55/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for disabling interrupts and returning */
/* the previous interrupt lockout posture. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* old_posture Old interrupt lockout posture */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_interrupt_disable(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_interrupt_disable
.thumb_func
.type _tx_thread_interrupt_disable, function
_tx_thread_interrupt_disable:
/* Return current interrupt lockout posture. */
#ifdef TX_PORT_USE_BASEPRI
MRS r0, BASEPRI
LDR r1, =TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
MRS r0, PRIMASK
CPSID i
#endif
BX lr
// }
.end


@ -0,0 +1,80 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_restore Cortex-M55/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for restoring the previous */
/* interrupt lockout posture. */
/* */
/* INPUT */
/* */
/* previous_posture Previous interrupt posture */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_interrupt_restore(UINT previous_posture)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_interrupt_restore
.thumb_func
.type _tx_thread_interrupt_restore, function
_tx_thread_interrupt_restore:
/* Restore previous interrupt lockout posture. */
#ifdef TX_PORT_USE_BASEPRI
MSR BASEPRI, r0
#else
MSR PRIMASK, r0
#endif
BX lr
// }
.end


@ -0,0 +1,391 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M55/GNU */
/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function waits for a thread control block pointer to appear in */
/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
/* in the variable, the corresponding thread is resumed. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 04-02-2021 Scott Larson Modified comment(s), added */
/* low power code, */
/* resulting in version 6.1.6 */
/* 06-02-2021 Scott Larson Added secure stack initialize */
/* in SVC handler, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
/* 04-25-2022 Scott Larson Added BASEPRI support, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_schedule
.thumb_func
.type _tx_thread_schedule, function
_tx_thread_schedule:
/* This function should only ever be called on Cortex-M
for the first schedule request. Subsequent scheduling occurs
from the PendSV handler below. */
/* Clear the preempt-disable flag to enable rescheduling after initialization on Cortex-M targets. */
MOV r0, #0 // Build value for TX_FALSE
LDR r2, =_tx_thread_preempt_disable // Build address of preempt disable flag
STR r0, [r2, #0] // Clear preempt disable flag
#ifdef __ARM_FP
/* Clear CONTROL.FPCA bit so VFP registers aren't unnecessarily stacked. */
MRS r0, CONTROL // Pickup current CONTROL register
BIC r0, r0, #4 // Clear the FPCA bit
MSR CONTROL, r0 // Setup new CONTROL register
#endif
/* Enable interrupts */
CPSIE i
/* Enter the scheduler for the first time. */
MOV r0, #0x10000000 // Load PENDSVSET bit
MOV r1, #0xE000E000 // Load NVIC base
STR r0, [r1, #0xD04] // Set PENDSVBIT in ICSR
DSB // Complete all memory accesses
ISB // Flush pipeline
/* Wait here for the PendSV to take place. */
__tx_wait_here:
B __tx_wait_here // Wait for the PendSV to happen
// }
/* Generic context switching PendSV handler. */
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global PendSV_Handler
.thumb_func
.type PendSV_Handler, function
/* Get current thread value and new thread pointer. */
PendSV_Handler:
__tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
#ifdef TX_PORT_USE_BASEPRI
MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r0
#else
CPSIE i // Enable interrupts
#endif /* TX_PORT_USE_BASEPRI */
#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
MOV r3, #0 // Build NULL value
LDR r1, [r0] // Pickup current thread pointer
/* Determine if there is a current thread to finish preserving. */
CBZ r1, __tx_ts_new // If NULL, skip preservation
/* Recover PSP and preserve current thread context. */
STR r3, [r0] // Set _tx_thread_current_ptr to NULL
MRS r12, PSP // Pickup PSP pointer (thread's stack pointer)
STMDB r12!, {r4-r11} // Save its remaining registers
#ifdef __ARM_FP
TST LR, #0x10 // Determine if the VFP extended frame is present
BNE _skip_vfp_save
VSTMDB r12!,{s16-s31} // Yes, save additional VFP registers
_skip_vfp_save:
#endif
LDR r4, =_tx_timer_time_slice // Build address of time-slice variable
STMDB r12!, {LR} // Save LR on the stack
STR r12, [r1, #8] // Save the thread stack pointer
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// Save secure context
LDR r5, [r1,#0x90] // Load secure stack index
CBZ r5, _skip_secure_save // Skip save if there is no secure context
PUSH {r0,r1,r2,r3} // Save scratch registers
MOV r0, r1 // Move thread ptr to r0
BL _tx_thread_secure_stack_context_save // Save secure stack
POP {r0,r1,r2,r3} // Restore scratch registers
_skip_secure_save:
#endif
/* Determine if time-slice is active. If it isn't, skip time handling processing. */
LDR r5, [r4] // Pickup current time-slice
CBZ r5, __tx_ts_new // If not active, skip processing
/* Time-slice is active, save the current thread's time-slice and clear the global time-slice variable. */
STR r5, [r1, #24] // Save current time-slice
/* Clear the global time-slice. */
STR r3, [r4] // Clear time-slice
/* Executing thread is now completely preserved!!! */
__tx_ts_new:
/* Now we are looking for a new thread to execute! */
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready to execute; make it the current thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
/* Increment the thread run count. */
__tx_ts_restore:
LDR r7, [r1, #4] // Pickup the current thread run count
LDR r4, =_tx_timer_time_slice // Build address of time-slice variable
LDR r5, [r1, #24] // Pickup thread's current time-slice
ADD r7, r7, #1 // Increment the thread run count
STR r7, [r1, #4] // Store the new run count
/* Setup global time-slice with thread's current time-slice. */
STR r5, [r4] // Setup global time-slice
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread entry function to indicate the thread is executing. */
PUSH {r0, r1} // Save r0 and r1
BL _tx_execution_thread_enter // Call the thread execution enter function
POP {r0, r1} // Recover r0 and r1
#endif
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// Restore secure context
LDR r0, [r1,#0x90] // Load secure stack index
CBZ r0, _skip_secure_restore // Skip restore if there is no secure context
PUSH {r0,r1} // Save r1 (and dummy r0)
MOV r0, r1 // Move thread ptr to r0
BL _tx_thread_secure_stack_context_restore // Restore secure stack
POP {r0,r1} // Restore r1 (and dummy r0)
_skip_secure_restore:
#endif
/* Restore the thread context and PSP. */
LDR r12, [r1, #12] // Get stack start
MSR PSPLIM, r12 // Set stack limit
LDR r12, [r1, #8] // Pickup thread's stack pointer
LDMIA r12!, {LR} // Pickup LR
#ifdef __ARM_FP
TST LR, #0x10 // Determine if the VFP extended frame is present
BNE _skip_vfp_restore // If not, skip VFP restore
VLDMIA r12!, {s16-s31} // Yes, restore additional VFP registers
_skip_vfp_restore:
#endif
LDMIA r12!, {r4-r11} // Recover thread's registers
MSR PSP, r12 // Setup the thread's stack pointer
BX lr // Return to thread!
/* The following is the idle wait processing... in this case, no threads are ready for execution and the
system will simply be idle until an interrupt occurs that makes a thread ready. Note that interrupts
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_LOW_POWER
PUSH {r0-r3}
BL tx_low_power_enter // Possibly enter low power mode
POP {r0-r3}
#endif
#ifdef TX_ENABLE_WFI
DSB // Ensure no outstanding memory transactions
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
#ifdef TX_LOW_POWER
PUSH {r0-r3}
BL tx_low_power_exit // Exit low power mode
POP {r0-r3}
#endif
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
already in the handler! */
__tx_ts_ready:
MOV r7, #0x08000000 // Build clear PendSV value
MOV r8, #0xE000E000 // Build base NVIC address
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
B __tx_ts_restore // Restore the thread
// }
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// SVC_Handler is not needed when ThreadX is running in single mode.
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global SVC_Handler
.thumb_func
.type SVC_Handler, function
SVC_Handler:
TST lr, #0x04 // Determine return stack from EXC_RETURN bit 2
ITE EQ
MRSEQ r0, MSP // Get MSP if return stack is MSP
MRSNE r0, PSP // Get PSP if return stack is PSP
LDR r1, [r0,#24] // Load saved PC from stack
LDRB r1, [r1,#-2] // Load SVC number
CMP r1, #1 // Is it a secure stack allocate request?
BEQ _tx_svc_secure_alloc // Yes, go there
CMP r1, #2 // Is it a secure stack free request?
BEQ _tx_svc_secure_free // Yes, go there
CMP r1, #3 // Is it a secure stack init request?
BEQ _tx_svc_secure_init // Yes, go there
// Unknown SVC argument - just return
BX lr
_tx_svc_secure_alloc:
PUSH {r0,lr} // Save SP and EXC_RETURN
LDM r0, {r0-r3} // Load function parameters from stack
BL _tx_thread_secure_mode_stack_allocate
POP {r12,lr} // Restore SP and EXC_RETURN
STR r0,[r12] // Store function return value
BX lr
_tx_svc_secure_free:
PUSH {r0,lr} // Save SP and EXC_RETURN
LDM r0, {r0-r3} // Load function parameters from stack
BL _tx_thread_secure_mode_stack_free
POP {r12,lr} // Restore SP and EXC_RETURN
STR r0,[r12] // Store function return value
BX lr
_tx_svc_secure_init:
PUSH {r0,lr} // Save SP and EXC_RETURN
BL _tx_thread_secure_mode_stack_initialize
POP {r12,lr} // Restore SP and EXC_RETURN
BX lr
#endif // End of ifndef TX_SINGLE_MODE_SECURE, TX_SINGLE_MODE_NON_SECURE
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_vfp_access
.thumb_func
.type _tx_vfp_access, function
_tx_vfp_access:
#if TX_ENABLE_FPU_SUPPORT
VMOV.F32 s0, s0 // Simply access the VFP
#endif
BX lr // Return to caller
.end


@ -0,0 +1,596 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#include "tx_api.h"
/* If TX_SINGLE_MODE_SECURE or TX_SINGLE_MODE_NON_SECURE is defined,
no secure stack functionality is needed. */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_SOURCE_CODE
#include "tx_secure_interface.h" /* Interface for NS code. */
/* Minimum size of secure stack. */
#ifndef TX_THREAD_SECURE_STACK_MINIMUM
#define TX_THREAD_SECURE_STACK_MINIMUM 256
#endif
/* Maximum size of secure stack. */
#ifndef TX_THREAD_SECURE_STACK_MAXIMUM
#define TX_THREAD_SECURE_STACK_MAXIMUM 1024
#endif
/* 8 bytes added to stack size to "seal" stack. */
#define TX_THREAD_STACK_SEAL_SIZE 8
#define TX_THREAD_STACK_SEAL_VALUE 0xFEF5EDA5
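/* Note: 0xFEF5EDA5 is the stack seal value recommended by Arm for Armv8-M secure stacks. */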
/* Maximum number of secure contexts. */
#ifndef TX_MAX_SECURE_CONTEXTS
#define TX_MAX_SECURE_CONTEXTS 32
#endif
#define TX_INVALID_SECURE_CONTEXT_IDX (-1)
/* Secure stack info struct to hold stack start, stack limit,
current stack pointer, and pointer to owning thread.
This will be allocated for each thread with a secure stack. */
typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
{
VOID *tx_thread_secure_stack_ptr; /* Thread's secure stack current pointer */
VOID *tx_thread_secure_stack_start; /* Thread's secure stack start address */
VOID *tx_thread_secure_stack_limit; /* Thread's secure stack limit */
TX_THREAD *tx_thread_ptr; /* Keep track of thread for error handling */
INT tx_next_free_index; /* Next free index of free secure context */
} TX_THREAD_SECURE_STACK_INFO;
/* Static secure contexts */
static TX_THREAD_SECURE_STACK_INFO tx_thread_secure_context[TX_MAX_SECURE_CONTEXTS];
/* Head of free secure context */
static INT tx_head_free_index = 0U;
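/* Free entries are chained through tx_next_free_index; tx_head_free_index names the
   first available slot, allocation pops the head and freeing pushes the slot back. */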
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_initialize Cortex-M55/GNU */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function initializes secure mode to use PSP stack. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* status */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 06-02-2021 Scott Larson Change name, execute in */
/* handler mode, */
/* disable optimizations, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry, optimize(0)))
UINT _tx_thread_secure_mode_stack_initialize(void)
{
UINT status;
ULONG control;
ULONG ipsr;
INT index;
/* Make sure function is called from interrupt (threads should not call). */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
if (ipsr == 0)
{
status = TX_CALLER_ERROR;
}
else
{
/* Set secure mode to use PSP. */
asm volatile("MRS %0, CONTROL" : "=r" (control)); /* Get CONTROL register. */
control |= 2; /* Use PSP. */
asm volatile("MSR CONTROL, %0" :: "r" (control)); /* Set CONTROL register. */
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
asm volatile("MSR PSPLIM, %0" :: "r" (0));
asm volatile("MSR PSP, %0" :: "r" (0));
for (index = 0; index < TX_MAX_SECURE_CONTEXTS; index++)
{
/* Check last index and mark next free to invalid index */
if(index == (TX_MAX_SECURE_CONTEXTS - 1))
{
tx_thread_secure_context[index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
tx_thread_secure_context[index].tx_next_free_index = index + 1;
}
}
status = TX_SUCCESS;
}
return status;
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_allocate Cortex-M55/GNU */
/* 6.1.11a */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function allocates a thread's secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* stack_size Size of stack to allocate */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_SIZE_ERROR Invalid stack size */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* malloc Compiler's malloc function */
/* */
/* CALLED BY */
/* */
/* SVC Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* added stack sealing, */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* 05-02-2022 Scott Larson Modified comment(s), added */
/* TX_INTERRUPT_SAVE_AREA, */
/* resulting in version 6.1.11a*/
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
{
TX_INTERRUPT_SAVE_AREA
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
UCHAR *stack_mem;
ULONG ipsr;
ULONG psplim_ns;
INT secure_context_index;
status = TX_SUCCESS;
/* Make sure function is called from interrupt (threads should not call). */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
if (ipsr == 0)
{
status = TX_CALLER_ERROR;
}
else if (stack_size < TX_THREAD_SECURE_STACK_MINIMUM || stack_size > TX_THREAD_SECURE_STACK_MAXIMUM)
{
status = TX_SIZE_ERROR;
}
/* Check if thread already has secure stack allocated. */
else if (thread_ptr -> tx_thread_secure_stack_context != 0)
{
status = TX_THREAD_ERROR;
}
else
{
TX_DISABLE
/* Allocate free index for secure stack info. */
if(tx_head_free_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
secure_context_index = tx_head_free_index;
tx_head_free_index = tx_thread_secure_context[tx_head_free_index].tx_next_free_index;
tx_thread_secure_context[secure_context_index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
secure_context_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
TX_RESTORE
if(secure_context_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
info_ptr = &tx_thread_secure_context[secure_context_index];
/* If stack info allocated, allocate a stack & seal. */
stack_mem = malloc(stack_size + TX_THREAD_STACK_SEAL_SIZE);
if(stack_mem != TX_NULL)
{
/* Secure stack has been allocated, save in the stack info struct. */
info_ptr -> tx_thread_secure_stack_limit = stack_mem;
info_ptr -> tx_thread_secure_stack_start = stack_mem + stack_size;
info_ptr -> tx_thread_secure_stack_ptr = info_ptr -> tx_thread_secure_stack_start;
info_ptr -> tx_thread_ptr = thread_ptr;
/* Seal bottom of stack. */
*(ULONG*)info_ptr -> tx_thread_secure_stack_start = TX_THREAD_STACK_SEAL_VALUE;
/* Save secure context id (index + 1, so zero means no secure context) in thread. */
thread_ptr -> tx_thread_secure_stack_context = (VOID *)(secure_context_index + 1);
/* Check if this thread is running by looking at its stack start and PSPLIM_NS */
asm volatile("MRS %0, PSPLIM_NS" : "=r" (psplim_ns)); /* Get PSPLIM_NS register. */
if(((ULONG) thread_ptr -> tx_thread_stack_start & 0xFFFFFFF8) == psplim_ns)
{
/* If this thread is running, set Secure PSP and PSPLIM. */
asm volatile("MSR PSPLIM, %0" :: "r" ((ULONG)(info_ptr -> tx_thread_secure_stack_limit)));
asm volatile("MSR PSP, %0" :: "r" ((ULONG)(info_ptr -> tx_thread_secure_stack_ptr)));
}
}
else
{
TX_DISABLE
/* Stack not allocated, free the info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
status = TX_NO_MEMORY;
}
}
else
{
status = TX_NO_MEMORY;
}
}
return(status);
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_free Cortex-M55/GNU */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function frees a thread's secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* free Compiler's free() function */
/* */
/* CALLED BY */
/* */
/* SVC Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_free(TX_THREAD *thread_ptr)
{
TX_INTERRUPT_SAVE_AREA
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
ULONG ipsr;
INT secure_context_index;
status = TX_SUCCESS;
/* Pickup stack info id from thread. */
secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* Make sure function is called from interrupt (threads should not call). */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
if (ipsr == 0)
{
status = TX_CALLER_ERROR;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
status = TX_THREAD_ERROR;
}
else
{
/* Pickup stack info from static array of secure contexts. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
status = TX_THREAD_ERROR;
}
else
{
/* Free secure stack. */
free(info_ptr -> tx_thread_secure_stack_limit);
TX_DISABLE
/* Free info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
/* Clear secure context from thread. */
thread_ptr -> tx_thread_secure_stack_context = 0;
}
}
return(status);
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_save Cortex-M55/GNU */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function saves context of the secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* PendSV Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 06-02-2021 Scott Larson Fix stack pointer save, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
void _tx_thread_secure_stack_context_save(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
ULONG sp;
ULONG ipsr;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
if (ipsr == 0)
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Check that stack pointer is in range */
asm volatile("MRS %0, PSP" : "=r" (sp)); /* Get PSP register. */
if ((sp < (ULONG)info_ptr -> tx_thread_secure_stack_limit) ||
(sp > (ULONG)info_ptr -> tx_thread_secure_stack_start))
{
return;
}
/* Save stack pointer. */
info_ptr -> tx_thread_secure_stack_ptr = (VOID *) sp;
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
asm volatile("MSR PSPLIM, %0" :: "r" (0));
asm volatile("MSR PSP, %0" :: "r" (0));
return;
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_restore Cortex-M55/GNU */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function restores context of the secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* PendSV Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
void _tx_thread_secure_stack_context_restore(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
ULONG ipsr;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
if (ipsr == 0)
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Set stack pointer and limit. */
asm volatile("MSR PSPLIM, %0" :: "r" ((ULONG)info_ptr -> tx_thread_secure_stack_limit));
asm volatile("MSR PSP, %0" :: "r" ((ULONG)info_ptr -> tx_thread_secure_stack_ptr));
return;
}
#endif


@ -0,0 +1,85 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_allocate Cortex-M55/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to allocate a secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* stack_size Size of secure stack to */
/* allocate */
/* */
/* OUTPUT */
/* */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* SVC 1 */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_secure_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_secure_stack_allocate
.thumb_func
.type _tx_thread_secure_stack_allocate, function
_tx_thread_secure_stack_allocate:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
MRS r3, PRIMASK // Save interrupt mask
CPSIE i // Enable interrupts for SVC call
SVC 1
CMP r3, #0 // If interrupts enabled, just return
BEQ _alloc_return_interrupt_enabled
CPSID i // Otherwise, disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
_alloc_return_interrupt_enabled:
BX lr
.end

View File

@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_free Cortex-M55/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to free a secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* SVC 2 */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_secure_stack_free(TX_THREAD *thread_ptr)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_secure_stack_free
.thumb_func
.type _tx_thread_secure_stack_free, function
_tx_thread_secure_stack_free:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
MRS r3, PRIMASK // Save interrupt mask
CPSIE i // Enable interrupts for SVC call
SVC 2
CMP r3, #0 // If interrupts enabled, just return
BEQ _free_return_interrupt_enabled
CPSID i // Otherwise, disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
_free_return_interrupt_enabled:
BX lr
.end

View File

@ -0,0 +1,79 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_initialize Cortex-M55/GNU */
/* 6.1.7 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to initialize a secure stack. */
/* */
/* INPUT */
/* */
/* none */
/* */
/* OUTPUT */
/* */
/* none */
/* */
/* CALLS */
/* */
/* SVC 3 */
/* */
/* CALLED BY */
/* */
/* TX_INITIALIZE_KERNEL_ENTER_EXTENSION */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* */
/**************************************************************************/
// VOID _tx_thread_secure_stack_initialize(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_secure_stack_initialize
.thumb_func
.type _tx_thread_secure_stack_initialize, function
_tx_thread_secure_stack_initialize:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
CPSIE i // Enable interrupts for SVC call
SVC 3
CPSID i // Disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
BX lr
.end

View File

@ -0,0 +1,140 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_stack_build Cortex-M55/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function builds a stack frame on the supplied thread's stack. */
/* The stack frame results in a fake interrupt return to the supplied */
/* function pointer. */
/* */
/* INPUT */
/* */
/* thread_ptr Pointer to thread control blk */
/* function_ptr Pointer to return function */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_thread_create Create thread service */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_stack_build
.thumb_func
.type _tx_thread_stack_build, function
_tx_thread_stack_build:
/* Build a fake interrupt frame. The form of the fake interrupt stack
on the Cortex-M should look like the following after it is built:
Stack Top:
LR Interrupted LR (LR at time of PENDSV)
r4 Initial value for r4
r5 Initial value for r5
r6 Initial value for r6
r7 Initial value for r7
r8 Initial value for r8
r9 Initial value for r9
r10 Initial value for r10
r11 Initial value for r11
r0 Initial value for r0 (Hardware stack starts here!!)
r1 Initial value for r1
r2 Initial value for r2
r3 Initial value for r3
r12 Initial value for r12
lr Initial value for lr
pc Initial value for pc
xPSR Initial value for xPSR
Stack Bottom: (higher memory address) */
LDR r2, [r0, #16] // Pickup end of stack area
BIC r2, r2, #0x7 // Align frame for 8-byte alignment
SUB r2, r2, #68 // Subtract frame size
#ifdef TX_SINGLE_MODE_SECURE
LDR r3, =0xFFFFFFFD // Build initial LR value for secure mode
#else
LDR r3, =0xFFFFFFBC // Build initial LR value to return to non-secure PSP
#endif
STR r3, [r2, #0] // Save on the stack
/* Actually build the stack frame. */
MOV r3, #0 // Build initial register value
STR r3, [r2, #4] // Store initial r4
STR r3, [r2, #8] // Store initial r5
STR r3, [r2, #12] // Store initial r6
STR r3, [r2, #16] // Store initial r7
STR r3, [r2, #20] // Store initial r8
STR r3, [r2, #24] // Store initial r9
STR r3, [r2, #28] // Store initial r10
STR r3, [r2, #32] // Store initial r11
/* Hardware stack follows. */
STR r3, [r2, #36] // Store initial r0
STR r3, [r2, #40] // Store initial r1
STR r3, [r2, #44] // Store initial r2
STR r3, [r2, #48] // Store initial r3
STR r3, [r2, #52] // Store initial r12
MOV r3, #0xFFFFFFFF // Poison EXC_RETURN value
STR r3, [r2, #56] // Store initial lr
STR r1, [r2, #60] // Store initial pc
MOV r3, #0x01000000 // Only T-bit need be set
STR r3, [r2, #64] // Store initial xPSR
/* Setup stack pointer. */
// thread_ptr -> tx_thread_stack_ptr = r2;
STR r2, [r0, #8] // Save stack pointer in thread's
// control block
BX lr // Return to caller
// }
.end

View File

@ -0,0 +1,96 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_system_return Cortex-M55/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is target processor specific. It is used to transfer */
/* control from a thread back to the ThreadX system. Only a */
/* minimal context is saved since the compiler assumes temp registers */
/*    are going to get clobbered by a function call anyway.               */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_thread_schedule Thread scheduling loop */
/* */
/* CALLED BY */
/* */
/* ThreadX components */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_system_return(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_system_return
.thumb_func
.type _tx_thread_system_return, function
_tx_thread_system_return:
/* Return to real scheduler via PendSV. Note that this routine is often
       replaced with in-line assembly in tx_port.h to improve performance.  */
MOV r0, #0x10000000 // Load PENDSVSET bit
MOV r1, #0xE000E000 // Load NVIC base
STR r0, [r1, #0xD04] // Set PENDSVBIT in ICSR
MRS r0, IPSR // Pickup IPSR
CMP r0, #0 // Is it a thread returning?
BNE _isr_context // If ISR, skip interrupt enable
#ifdef TX_PORT_USE_BASEPRI
MRS r1, BASEPRI // Thread context returning, pickup BASEPRI
MOV r0, #0
MSR BASEPRI, r0 // Enable interrupts
MSR BASEPRI, r1 // Restore original interrupt posture
#else
MRS r1, PRIMASK // Thread context returning, pickup PRIMASK
CPSIE i // Enable interrupts
MSR PRIMASK, r1 // Restore original interrupt posture
#endif
_isr_context:
BX lr // Return to caller
// }
.end

View File

@ -0,0 +1,243 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Timer */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_timer_interrupt Cortex-M55/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function processes the hardware timer interrupt. This */
/* processing includes incrementing the system clock and checking for */
/* time slice and/or timer expiration. If either is found, the */
/* expiration functions are called. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_timer_expiration_process Timer expiration processing */
/* _tx_thread_time_slice Time slice interrupted thread */
/* */
/* CALLED BY */
/* */
/* interrupt vector */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_timer_interrupt(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_timer_interrupt
.thumb_func
.type _tx_timer_interrupt, function
_tx_timer_interrupt:
/* Upon entry to this routine, it is assumed that the compiler scratch registers are available
for use. */
/* Increment the system clock. */
// _tx_timer_system_clock++;
LDR r1, =_tx_timer_system_clock // Pickup address of system clock
LDR r0, [r1, #0] // Pickup system clock
ADD r0, r0, #1 // Increment system clock
STR r0, [r1, #0] // Store new system clock
/* Test for time-slice expiration. */
// if (_tx_timer_time_slice)
// {
LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
LDR r2, [r3, #0] // Pickup time-slice
CBZ r2, __tx_timer_no_time_slice // Is it non-active?
// Yes, skip time-slice processing
/* Decrement the time_slice. */
// _tx_timer_time_slice--;
SUB r2, r2, #1 // Decrement the time-slice
STR r2, [r3, #0] // Store new time-slice value
/* Check for expiration. */
    // if (_tx_timer_time_slice == 0)
CBNZ r2, __tx_timer_no_time_slice // Has it expired?
// No, skip expiration processing
/* Set the time-slice expired flag. */
// _tx_timer_expired_time_slice = TX_TRUE;
LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
MOV r0, #1 // Build expired value
STR r0, [r3, #0] // Set time-slice expiration flag
// }
__tx_timer_no_time_slice:
/* Test for timer expiration. */
// if (*_tx_timer_current_ptr)
// {
LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
LDR r0, [r1, #0] // Pickup current timer
LDR r2, [r0, #0] // Pickup timer list entry
CBZ r2, __tx_timer_no_timer // Is there anything in the list?
// No, just increment the timer
/* Set expiration flag. */
// _tx_timer_expired = TX_TRUE;
LDR r3, =_tx_timer_expired // Pickup expiration flag address
MOV r2, #1 // Build expired value
STR r2, [r3, #0] // Set expired flag
B __tx_timer_done // Finished timer processing
// }
// else
// {
__tx_timer_no_timer:
/* No timer expired, increment the timer pointer. */
// _tx_timer_current_ptr++;
ADD r0, r0, #4 // Move to next timer
/* Check for wrap-around. */
// if (_tx_timer_current_ptr == _tx_timer_list_end)
LDR r3, =_tx_timer_list_end // Pickup addr of timer list end
LDR r2, [r3, #0] // Pickup list end
CMP r0, r2 // Are we at list end?
BNE __tx_timer_skip_wrap // No, skip wrap-around logic
/* Wrap to beginning of list. */
// _tx_timer_current_ptr = _tx_timer_list_start;
LDR r3, =_tx_timer_list_start // Pickup addr of timer list start
LDR r0, [r3, #0] // Set current pointer to list start
__tx_timer_skip_wrap:
STR r0, [r1, #0] // Store new current timer pointer
// }
__tx_timer_done:
/* See if anything has expired. */
// if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
// {
LDR r3, =_tx_timer_expired_time_slice // Pickup addr of expired flag
LDR r2, [r3, #0] // Pickup time-slice expired flag
CBNZ r2, __tx_something_expired // Did a time-slice expire?
// If non-zero, time-slice expired
LDR r1, =_tx_timer_expired // Pickup addr of other expired flag
LDR r0, [r1, #0] // Pickup timer expired flag
CBZ r0, __tx_timer_nothing_expired // Did a timer expire?
// No, nothing expired
__tx_something_expired:
PUSH {r0, lr} // Save the lr register on the stack
// and save r0 just to keep 8-byte alignment
/* Did a timer expire? */
// if (_tx_timer_expired)
// {
LDR r1, =_tx_timer_expired // Pickup addr of expired flag
LDR r0, [r1, #0] // Pickup timer expired flag
CBZ r0, __tx_timer_dont_activate // Check for timer expiration
// If not set, skip timer activation
/* Process timer expiration. */
// _tx_timer_expiration_process();
BL _tx_timer_expiration_process // Call the timer expiration handling routine
// }
__tx_timer_dont_activate:
/* Did time slice expire? */
// if (_tx_timer_expired_time_slice)
// {
LDR r3, =_tx_timer_expired_time_slice // Pickup addr of time-slice expired
LDR r2, [r3, #0] // Pickup the actual flag
CBZ r2, __tx_timer_not_ts_expiration // See if the flag is set
// No, skip time-slice processing
/* Time slice interrupted thread. */
// _tx_thread_time_slice();
BL _tx_thread_time_slice // Call time-slice processing
LDR r0, =_tx_thread_preempt_disable // Build address of preempt disable flag
LDR r1, [r0] // Is the preempt disable flag set?
CBNZ r1, __tx_timer_skip_time_slice // Yes, skip the PendSV logic
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r1, [r0] // Pickup the current thread pointer
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
LDR r3, [r2] // Pickup the execute thread pointer
LDR r0, =0xE000ED04 // Build address of control register
LDR r2, =0x10000000 // Build value for PendSV bit
CMP r1, r3 // Are they the same?
BEQ __tx_timer_skip_time_slice // If the same, there was no time-slice performed
STR r2, [r0] // Not the same, issue the PendSV for preemption
__tx_timer_skip_time_slice:
// }
__tx_timer_not_ts_expiration:
    POP     {r0, lr}                                // Recover lr register (r0 is just there for
                                                    //   the 8-byte stack alignment)
// }
__tx_timer_nothing_expired:
DSB // Complete all memory access
BX lr // Return to caller
// }
.end

View File

@ -0,0 +1,119 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#define TX_SOURCE_CODE
/* Include necessary system files. */
#include "tx_api.h"
#include "tx_initialize.h"
#include "tx_thread.h"
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_allocate Cortex-M55 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function checks for errors in the secure stack allocate */
/* function call. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* stack_size Size of secure stack to */
/* allocate */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* _tx_thread_secure_stack_allocate Actual stack alloc function */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txe_thread_secure_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
{
#if defined(TX_SINGLE_MODE_SECURE) || defined(TX_SINGLE_MODE_NON_SECURE)
return(TX_FEATURE_NOT_ENABLED);
#else
UINT status;
/* Default status to success. */
status = TX_SUCCESS;
/* Check for an invalid thread pointer. */
if (thread_ptr == TX_NULL)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Now check for invalid thread ID. */
else if (thread_ptr -> tx_thread_id != TX_THREAD_ID)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Check for interrupt call. */
if (TX_THREAD_GET_SYSTEM_STATE() != ((ULONG) 0))
{
/* Is call from an interrupt and not initialization? */
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
{
/* Invalid caller of this function, return appropriate error code. */
status = TX_CALLER_ERROR;
}
}
/* Determine if everything is okay. */
if (status == TX_SUCCESS)
{
/* Call actual secure stack allocate function. */
status = _tx_thread_secure_stack_allocate(thread_ptr, stack_size);
}
/* Return completion status. */
return(status);
#endif
}

View File

@ -0,0 +1,120 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#define TX_SOURCE_CODE
/* Include necessary system files. */
#include "tx_api.h"
#include "tx_initialize.h"
#include "tx_thread.h"
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _txe_thread_secure_stack_free Cortex-M55 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function checks for errors in the secure stack free */
/* function call. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* _tx_thread_secure_stack_free Actual stack free function */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txe_thread_secure_stack_free(TX_THREAD *thread_ptr)
{
#if defined(TX_SINGLE_MODE_SECURE) || defined(TX_SINGLE_MODE_NON_SECURE)
return(TX_FEATURE_NOT_ENABLED);
#else
UINT status;
/* Default status to success. */
status = TX_SUCCESS;
/* Check for an invalid thread pointer. */
if (thread_ptr == TX_NULL)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Now check for invalid thread ID. */
else if (thread_ptr -> tx_thread_id != TX_THREAD_ID)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Check for interrupt call. */
if (TX_THREAD_GET_SYSTEM_STATE() != ((ULONG) 0))
{
/* Is call from an interrupt and not initialization? */
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
{
/* Invalid caller of this function, return appropriate error code. */
status = TX_CALLER_ERROR;
}
}
/* Determine if everything is okay. */
if (status == TX_SUCCESS)
{
        /* Call actual secure stack free function. */
status = _tx_thread_secure_stack_free(thread_ptr);
}
/* Return completion status. */
return(status);
#endif
}

View File

@ -0,0 +1,649 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Port Specific */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M55/IAR */
/* 6.1.11 */
/* */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This file contains data type definitions that make the ThreadX */
/* real-time kernel function identically on a variety of different */
/* processor architectures. For example, the size or number of bits */
/* in an "int" data type vary between microprocessor architectures and */
/* even C compilers for the same microprocessor. ThreadX does not */
/* directly use native C data types. Instead, ThreadX creates its */
/* own special types that can be mapped to actual data types by this */
/* file to guarantee consistency in the interface and functionality. */
/* */
/* This file replaces the previous Cortex-M55 files. It unifies */
/* the Cortex-M55 compilers into one common file. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 03-02-2021 Scott Larson Modified comment(s), added */
/* ULONG64_DEFINED, */
/* resulting in version 6.1.5 */
/* 06-02-2021 Scott Larson Modified comment(s), removed */
/* unneeded header file, funcs */
/* set_control and get_control */
/* changed to inline, */
/* added symbol to enable */
/* stack error handler, */
/* resulting in version 6.1.7 */
/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* 01-31-2022 Scott Larson Modified comment(s), unified */
/* this file across compilers, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
/* 04-25-2022 Scott Larson Modified comments and added */
/* volatile to registers, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
#ifndef TX_PORT_H
#define TX_PORT_H
/* Determine if the optional ThreadX user define file should be used. */
#ifdef TX_INCLUDE_USER_DEFINE_FILE
/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
#endif /* TX_INCLUDE_USER_DEFINE_FILE */
/* Define compiler library include files. */
#include <stdlib.h>
#include <string.h>
#ifdef __ICCARM__
#include <intrinsics.h> /* IAR Intrinsics */
#define __asm__ __asm /* Define to make all inline asm from each compiler look similar */
#define _tx_control_get __get_CONTROL
#define _tx_control_set __set_CONTROL
#define _tx_ipsr_get __get_IPSR
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#endif /* __ICCARM__ */
#ifdef __ARMCOMPILER_VERSION
#include <arm_compat.h>
#endif
/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
typedef unsigned char UCHAR;
typedef int INT;
typedef unsigned int UINT;
typedef long LONG;
typedef unsigned long ULONG;
typedef unsigned long long ULONG64;
typedef short SHORT;
typedef unsigned short USHORT;
#define ULONG64_DEFINED
/* Function prototypes for this port. */
struct TX_THREAD_STRUCT;
UINT _txe_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *thread_ptr, ULONG stack_size);
UINT _txe_thread_secure_stack_free(struct TX_THREAD_STRUCT *thread_ptr);
UINT _tx_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *tx_thread, ULONG stack_size);
UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
/* Define the system API mappings based on the error checking
selected by the user. Note: this section is only applicable to
application source code, hence the conditional that turns off this
stuff when the include file is processed by the ThreadX source. */
#ifndef TX_SOURCE_CODE
/* Determine if error checking is desired. If so, map API functions
to the appropriate error checking front-ends. Otherwise, map API
functions to the core functions that actually perform the work.
Note: error checking is enabled by default. */
#ifdef TX_DISABLE_ERROR_CHECKING
/* Services without error checking. */
#define tx_thread_secure_stack_allocate _tx_thread_secure_stack_allocate
#define tx_thread_secure_stack_free _tx_thread_secure_stack_free
#else
/* Services with error checking. */
#define tx_thread_secure_stack_allocate _txe_thread_secure_stack_allocate
#define tx_thread_secure_stack_free _txe_thread_secure_stack_free
#endif /* TX_DISABLE_ERROR_CHECKING */
#endif /* TX_SOURCE_CODE */
/* This port has a usage fault handler in _tx_initialize_low_level for stack exceptions. */
#define TX_PORT_THREAD_STACK_ERROR_HANDLING
/* Define the priority levels for ThreadX. Legal values range
from 32 to 1024 and MUST be evenly divisible by 32. */
#ifndef TX_MAX_PRIORITIES
#define TX_MAX_PRIORITIES 32
#endif
/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
thread creation is less than this value, the thread create call will return an error. */
#ifndef TX_MINIMUM_STACK
#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
#endif
/* Define the system timer thread's default stack size and priority. These are only applicable
if TX_TIMER_PROCESS_IN_ISR is not defined. */
#ifndef TX_TIMER_THREAD_STACK_SIZE
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
#ifndef TX_TIMER_THREAD_PRIORITY
#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
/* Define various constants for the ThreadX Cortex-M port. */
#define TX_INT_DISABLE 1 /* Disable interrupts */
#define TX_INT_ENABLE 0 /* Enable interrupts */
/* Define the clock source for trace event entry time stamp. The following two items are port specific.
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
#endif
/* Define the port specific options for the _tx_build_options variable. This variable indicates
how the ThreadX library was built. */
#define TX_PORT_SPECIFIC_BUILD_OPTIONS (0)
/* Define the in-line initialization constant so that modules with in-line
initialization capabilities can prevent their initialization from being
a function call. */
#ifdef TX_MISRA_ENABLE
#define TX_DISABLE_INLINE
#else
#define TX_INLINE_INITIALIZATION
#endif
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
logic. */
#ifndef TX_MISRA_ENABLE
#ifdef TX_ENABLE_STACK_CHECKING
#undef TX_DISABLE_STACK_FILLING
#endif
#endif
/* Define the TX_THREAD control block extensions for this port. The main reason
for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
#define TX_THREAD_EXTENSION_0
#define TX_THREAD_EXTENSION_1
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
/* IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context; \
VOID *tx_thread_iar_tls_pointer;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_iar_tls_pointer;
#endif
#else
/* No IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
#define TX_BLOCK_POOL_EXTENSION
#define TX_BYTE_POOL_EXTENSION
#define TX_EVENT_FLAGS_GROUP_EXTENSION
#define TX_MUTEX_EXTENSION
#define TX_QUEUE_EXTENSION
#define TX_SEMAPHORE_EXTENSION
#define TX_TIMER_EXTENSION
/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
#define TX_THREAD_USER_EXTENSION
#endif
/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
tx_thread_shell_entry, and tx_thread_terminate. */
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
void *_tx_iar_create_per_thread_tls_area(void);
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
void __iar_Initlocks(void);
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = _tx_iar_create_per_thread_tls_area();
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0); \
if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0);
#endif
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION do {__iar_Initlocks();} while(0);
#else /* No IAR library support. */
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* Define the size of the secure stack for the timer thread and use the extension to allocate the secure stack. */
#define TX_TIMER_THREAD_SECURE_STACK_SIZE 256
#define TX_TIMER_INITIALIZE_EXTENSION(status) _tx_thread_secure_stack_allocate(&_tx_timer_thread, TX_TIMER_THREAD_SECURE_STACK_SIZE);
#endif
#if defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__)
#ifdef TX_MISRA_ENABLE
ULONG _tx_misra_control_get(void);
void _tx_misra_control_set(ULONG value);
ULONG _tx_misra_fpccr_get(void);
void _tx_misra_vfp_touch(void);
#else /* TX_MISRA_ENABLE not defined */
#ifdef __GNUC__ /* GCC and ARM Compiler 6 */
__attribute__( ( always_inline ) ) static inline ULONG _tx_control_get(void)
{
ULONG control_value;
__asm__ volatile (" MRS %0,CONTROL ": "=r" (control_value) );
return(control_value);
}
__attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG control_value)
{
__asm__ volatile (" MSR CONTROL,%0": : "r" (control_value): "memory" );
}
#endif /* __GNUC__ */
/* Touch VFP register in order to flush. Works for AC6/GCC/IAR compilers. */
#define TX_VFP_TOUCH() __asm__ volatile ("VMOV.F32 s0, s0");
#endif /* TX_MISRA_ENABLE */
/* A completed thread falls into _thread_shell_entry and we can simply deactivate the FPU via CONTROL.FPCA
in order to ensure no lazy stacking will occur. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
}
#else
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
}
#endif
/* A thread can be terminated by another thread, so we first check if it's self-terminating and not in an ISR.
If so, deactivate the FPU via CONTROL.FPCA. Otherwise we are in an interrupt or another thread is terminating
this one, so if the FPCCR.LSPACT bit is set, we need to save the CONTROL.FPCA state, touch the FPU to flush
the lazy FPU save, then restore the CONTROL.FPCA state. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
} \
} \
}
#else
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = _tx_misra_fpccr_get(); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
_tx_misra_vfp_touch(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
} \
} \
}
#endif
#else /* No VFP in use */
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
#endif /* defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__) */
/* Define the ThreadX object creation extensions for the remaining objects. */
#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
/* Define the ThreadX object deletion extensions for the remaining objects. */
#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
/* Define the get system state macro. */
#ifndef TX_THREAD_GET_SYSTEM_STATE
#ifndef TX_MISRA_ENABLE
#if defined(__GNUC__) /* GCC and AC6 */
__attribute__( ( always_inline ) ) static inline UINT _tx_ipsr_get(void)
{
UINT ipsr_value;
__asm__ volatile (" MRS %0,IPSR ": "=r" (ipsr_value) );
return(ipsr_value);
}
#endif /* GCC and AC6 IPSR_get function. */
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_ipsr_get())
#else /* TX_MISRA_ENABLE is defined, use MISRA function. */
ULONG _tx_misra_ipsr_get(VOID);
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_misra_ipsr_get())
#endif /* TX_MISRA_ENABLE */
#endif /* TX_THREAD_GET_SYSTEM_STATE */
/* Define the check for whether or not to call the _tx_thread_system_return function. A non-zero value
indicates that _tx_thread_system_return should not be called. This overrides the definition in tx_thread.h
   for Cortex-M so we don't waste time checking the _tx_thread_system_state variable, which is always
zero after initialization for Cortex-M ports. */
#ifndef TX_THREAD_SYSTEM_RETURN_CHECK
#define TX_THREAD_SYSTEM_RETURN_CHECK(c) (c) = ((ULONG) _tx_thread_preempt_disable);
#endif
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* Initialize secure stacks for threads calling secure functions. */
extern void _tx_thread_secure_stack_initialize(void);
#define TX_INITIALIZE_KERNEL_ENTER_EXTENSION _tx_thread_secure_stack_initialize();
#endif
/* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
prevent early scheduling on Cortex-M parts. */
#define TX_PORT_SPECIFIC_POST_INITIALIZATION _tx_thread_preempt_disable++;
#ifndef TX_DISABLE_INLINE
/* Define the TX_LOWEST_SET_BIT_CALCULATE macro for each compiler. */
#ifdef __ICCARM__ /* IAR Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = (UINT) __CLZ(__RBIT((m)));
#elif defined(__GNUC__) /* GCC and AC6 Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) __asm__ volatile (" RBIT %0,%1 ": "=r" (m) : "r" (m) ); \
__asm__ volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) );
#else
#error "Compiler not supported."
#endif
/* Define the interrupt disable/restore macros. */
__attribute__( ( always_inline ) ) static inline UINT __get_interrupt_posture(void)
{
UINT posture;
#ifdef TX_PORT_USE_BASEPRI
__asm__ volatile ("MRS %0, BASEPRI ": "=r" (posture));
#else
__asm__ volatile ("MRS %0, PRIMASK ": "=r" (posture));
#endif
return(posture);
}
#ifdef TX_PORT_USE_BASEPRI
__attribute__( ( always_inline ) ) static inline void __set_basepri_value(UINT basepri_value)
{
__asm__ volatile ("MSR BASEPRI,%0 ": : "r" (basepri_value));
}
#else
__attribute__( ( always_inline ) ) static inline void __enable_interrupts(void)
{
__asm__ volatile ("CPSIE i": : : "memory");
}
#endif
__attribute__( ( always_inline ) ) static inline void __restore_interrupt(UINT int_posture)
{
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(int_posture);
#else
__asm__ volatile ("MSR PRIMASK,%0": : "r" (int_posture): "memory");
#endif
}
__attribute__( ( always_inline ) ) static inline UINT __disable_interrupts(void)
{
UINT int_posture;
int_posture = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(TX_PORT_BASEPRI);
#else
__asm__ volatile ("CPSID i" : : : "memory");
#endif
return(int_posture);
}
__attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_inline(void)
{
UINT interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
*((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_tx_ipsr_get() == 0)
{
interrupt_save = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(0);
#else
__enable_interrupts();
#endif
__restore_interrupt(interrupt_save);
}
}
#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
#define TX_DISABLE interrupt_save = __disable_interrupts();
#define TX_RESTORE __restore_interrupt(interrupt_save);
/* Redefine _tx_thread_system_return for improved performance. */
#define _tx_thread_system_return _tx_thread_system_return_inline
#else /* TX_DISABLE_INLINE is defined */
UINT _tx_thread_interrupt_disable(VOID);
VOID _tx_thread_interrupt_restore(UINT previous_posture);
#define TX_INTERRUPT_SAVE_AREA register UINT interrupt_save;
#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
#endif /* TX_DISABLE_INLINE */
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
"Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M55/IAR Version 6.1.10 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
#else
extern CHAR _tx_version_id[];
#endif
#endif
#endif

View File

@ -0,0 +1,61 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* COMPONENT DEFINITION RELEASE */
/* */
/* tx_secure_interface.h PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This file defines the ThreadX secure thread stack components, */
/* including data types and external references. */
/* It is assumed that tx_api.h and tx_port.h have already been */
/* included. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
#ifndef TX_SECURE_INTERFACE_H
#define TX_SECURE_INTERFACE_H
/* Define internal secure thread stack function prototypes. */
extern UINT _tx_thread_secure_mode_stack_initialize(void);
extern UINT _tx_thread_secure_mode_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size);
extern UINT _tx_thread_secure_mode_stack_free(TX_THREAD *thread_ptr);
extern void _tx_thread_secure_stack_initialize(void);
extern void _tx_thread_secure_stack_context_save(TX_THREAD *thread_ptr);
extern void _tx_thread_secure_stack_context_restore(TX_THREAD *thread_ptr);
#endif

View File

@ -0,0 +1,221 @@
Microsoft's Azure RTOS ThreadX for Cortex-M55
Using the IAR Tools
1. Building the ThreadX run-time Library
Import all ThreadX common and port-specific source files into an IAR project.
Configure the project to build a library rather than an executable. This
results in the ThreadX run-time library file tx.a, which is needed by
the application.
Files tx_thread_stack_error_handler.c and tx_thread_stack_error_notify.c
replace the common files of the same name.
2. Demonstration System
No demonstration is provided because the IAR EWARM 8.50 simulator does
not simulate the Cortex-M55 correctly.
3. System Initialization
The entry point in ThreadX for the Cortex-M55 using IAR tools is at label
__iar_program_start. This is defined within the IAR compiler's startup code.
In addition, this is where all static and global preset C variable
initialization processing takes place.
The ThreadX tx_initialize_low_level.s file is responsible for setting up
various system data structures and a periodic timer interrupt source.
The _tx_initialize_low_level function inside of tx_initialize_low_level.s
also determines the first available address for use by the application, which
is supplied as the sole input parameter to your application definition function,
tx_application_define. To accomplish this, a section is created in
tx_initialize_low_level.s called FREE_MEM, which must be located after all
other RAM sections in memory.
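The following is a minimal sketch of tx_application_define that places a ThreadX byte pool
at the supplied address; the pool object, its name, and the 4096-byte size are illustrative
assumptions only, not part of this port:

TX_BYTE_POOL demo_pool;     /* hypothetical pool object */

void tx_application_define(void *first_unused_memory)
{
    /* Use the first available RAM address (start of FREE_MEM) as the pool start. */
    tx_byte_pool_create(&demo_pool, "demo byte pool", first_unused_memory, 4096);

    /* Create application threads, queues, semaphores, etc. here. */
}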
4. Register Usage and Stack Frames
The following defines the saved context stack frames for context switches
that occur as a result of interrupt handling or from thread-level API calls.
All suspended threads have the same stack frame in the Cortex-M55 version of
ThreadX. The top of the suspended thread's stack is pointed to by
tx_thread_stack_ptr in the associated thread control block TX_THREAD.
Non-FPU Stack Frame:
Stack Offset Stack Contents
0x00 LR Interrupted LR (LR at time of PENDSV)
0x04 r4 Software stacked GP registers
0x08 r5
0x0C r6
0x10 r7
0x14 r8
0x18 r9
0x1C r10
0x20 r11
0x24 r0 Hardware stacked registers
0x28 r1
0x2C r2
0x30 r3
0x34 r12
0x38 lr
0x3C pc
0x40 xPSR
FPU Stack Frame (only interrupted thread with FPU enabled):
Stack Offset Stack Contents
0x00 LR Interrupted LR (LR at time of PENDSV)
0x04 s16 Software stacked FPU registers
0x08 s17
0x0C s18
0x10 s19
0x14 s20
0x18 s21
0x1C s22
0x20 s23
0x24 s24
0x28 s25
0x2C s26
0x30 s27
0x34 s28
0x38 s29
0x3C s30
0x40 s31
0x44 r4 Software stacked registers
0x48 r5
0x4C r6
0x50 r7
0x54 r8
0x58 r9
0x5C r10
0x60 r11
0x64 r0 Hardware stacked registers
0x68 r1
0x6C r2
0x70 r3
0x74 r12
0x78 lr
0x7C pc
0x80 xPSR
0x84 s0 Hardware stacked FPU registers
0x88 s1
0x8C s2
0x90 s3
0x94 s4
0x98 s5
0x9C s6
0xA0 s7
0xA4 s8
0xA8 s9
0xAC s10
0xB0 s11
0xB4 s12
0xB8 s13
0xBC s14
0xC0 s15
0xC4 fpscr
5. Improving Performance
To make ThreadX and the application(s) run faster, you can enable
all compiler optimizations.
In addition, you can eliminate the ThreadX basic API error checking by
compiling your application code with the symbol TX_DISABLE_ERROR_CHECKING
defined.
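For example, when the build uses TX_INCLUDE_USER_DEFINE_FILE, the symbol can simply be added
to tx_user.h (a sketch; the symbol may equally be defined on the compiler command line):

/* tx_user.h fragment -- remove basic API error checking from the build. */
#define TX_DISABLE_ERROR_CHECKING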
6. Interrupt Handling
The Cortex-M55 vector table starts at the label __vector_table and is typically defined in a
startup.s file (or similar). The application may modify the vector area according to its needs.
6.1 Managed Interrupts
ISRs for Cortex-M using the IAR tools can be written completely in C (or assembly
language) without any calls to _tx_thread_context_save or _tx_thread_context_restore.
These ISRs are allowed access to the ThreadX API that is available to ISRs.
ISRs written in C will take the form (where "your_C_isr" is an entry in the vector table):
void your_C_isr(void)
{
/* ISR processing goes here, including any needed function calls. */
}
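For example, an ISR that wakes a processing thread might look like the following sketch; the
semaphore name and its creation elsewhere (e.g. in tx_application_define) are assumptions for
illustration:

extern TX_SEMAPHORE isr_semaphore;      /* hypothetical semaphore created by the application */

void your_C_isr(void)
{
    /* Clear the device interrupt source here (device-specific). */

    /* Wake the processing thread; tx_semaphore_put may be called from an ISR. */
    tx_semaphore_put(&isr_semaphore);
}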
ISRs written in assembly language will take the form:
PUBLIC your_assembly_isr
your_assembly_isr:
PUSH {r0, lr}
; ISR processing goes here, including any needed function calls.
POP {r0, lr}
BX lr
7. IAR Thread-safe Library Support
Thread-safe support for the IAR tools is easily enabled by building the ThreadX library
and the application with TX_ENABLE_IAR_LIBRARY_SUPPORT. Also, the linker control file
should have the following line added (if not already in place):
initialize by copy with packing = none { section __DLIB_PERTHREAD }; // Required in a multi-threaded application
The project options "General Options -> Library Configuration" should also have the
"Enable thread support in library" box selected.
8. VFP Support
ThreadX for Cortex-M55 provides automatic ("lazy") VFP support, which means that application threads
can simply use the VFP and ThreadX automatically maintains the VFP registers as part of the thread
context.
9. Revision History
For generic code revision information, please refer to the readme_threadx_generic.txt
file, which is included in your distribution. The following details the revision
information associated with this specific port of ThreadX:
06-02-2021 Release 6.1.7 changes:
tx_thread_secure_stack_initialize.s New file
tx_thread_schedule.s Added secure stack initialize to SVC handler
tx_thread_secure_stack.c Fixed stack pointer save, initialize in handler mode
04-02-2021 Release 6.1.6 changes:
tx_port.h Updated macro definition
tx_thread_schedule.s Added low power support
03-02-2021 The following files were changed/added for version 6.1.5:
tx_port.h Added ULONG64_DEFINED
09-30-2020 Initial ThreadX 6.1 version for Cortex-M55 using IAR's ARM tools.
Copyright(c) 1996-2020 Microsoft Corporation
https://azure.com/rtos

View File

@ -0,0 +1,804 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** IAR Multithreaded Library Support */
/** */
/**************************************************************************/
/**************************************************************************/
#define TX_SOURCE_CODE
/* Define IAR library for tools prior to version 8. */
#if (__VER__ < 8000000)
/* IAR version 7 and below. */
/* Include necessary system files. */
#include "tx_api.h"
#include "tx_initialize.h"
#include "tx_thread.h"
#include "tx_mutex.h"
/* This implementation requires that the following macros are defined in the
tx_port.h file and <yvals.h> is included with the following code segments:
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#endif
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_iar_tls_pointer;
#else
#define TX_THREAD_EXTENSION_2
#endif
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = __iar_dlib_perthread_allocate();
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) __iar_dlib_perthread_deallocate(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL;
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION __iar_dlib_perthread_access(0);
#else
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#endif
This should be done automatically if TX_ENABLE_IAR_LIBRARY_SUPPORT is defined while building the ThreadX library and the
application.
Finally, the project options General Options -> Library Configuration should have the "Enable thread support in library" box selected.
*/
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#if _MULTI_THREAD
TX_MUTEX __tx_iar_system_lock_mutexes[_MAX_LOCK];
UINT __tx_iar_system_lock_next_free_mutex = 0;
/* Define error counters, just for debug purposes. */
UINT __tx_iar_system_lock_no_mutexes;
UINT __tx_iar_system_lock_internal_errors;
UINT __tx_iar_system_lock_isr_caller;
/* Define the TLS access function for the IAR library. */
void _DLIB_TLS_MEMORY *__iar_dlib_perthread_access(void _DLIB_TLS_MEMORY *symbp)
{
char _DLIB_TLS_MEMORY *p = 0;
/* Is there a current thread? */
if (_tx_thread_current_ptr)
p = (char _DLIB_TLS_MEMORY *) _tx_thread_current_ptr -> tx_thread_iar_tls_pointer;
else
p = (void _DLIB_TLS_MEMORY *) __segment_begin("__DLIB_PERTHREAD");
p += __IAR_DLIB_PERTHREAD_SYMBOL_OFFSET(symbp);
return (void _DLIB_TLS_MEMORY *) p;
}
/* Define mutexes for IAR library. */
void __iar_system_Mtxinit(__iar_Rmtx *m)
{
UINT i;
UINT status;
TX_MUTEX *mutex_ptr;
/* First, find a free mutex in the list. */
for (i = 0; i < _MAX_LOCK; i++)
{
/* Setup a pointer to the start of the next free mutex. */
mutex_ptr = &__tx_iar_system_lock_mutexes[__tx_iar_system_lock_next_free_mutex++];
/* Check for wrap-around on the next free mutex. */
if (__tx_iar_system_lock_next_free_mutex >= _MAX_LOCK)
{
/* Yes, set the free index back to 0. */
__tx_iar_system_lock_next_free_mutex = 0;
}
/* Is this mutex free? */
if (mutex_ptr -> tx_mutex_id != TX_MUTEX_ID)
{
/* Yes, this mutex is free, get out of the loop! */
break;
}
}
/* Determine if a free mutex was found. */
if (i >= _MAX_LOCK)
{
/* Error! No more free mutexes! */
/* Increment the no mutexes error counter. */
__tx_iar_system_lock_no_mutexes++;
/* Set return pointer to NULL. */
*m = TX_NULL;
/* Return. */
return;
}
/* Now create the ThreadX mutex for the IAR library. */
status = _tx_mutex_create(mutex_ptr, "IAR System Library Lock", TX_NO_INHERIT);
/* Determine if the creation was successful. */
if (status == TX_SUCCESS)
{
/* Yes, successful creation, return mutex pointer. */
*m = (VOID *) mutex_ptr;
}
else
{
/* Increment the internal error counter. */
__tx_iar_system_lock_internal_errors++;
/* Return a NULL pointer to indicate an error. */
*m = TX_NULL;
}
}
void __iar_system_Mtxdst(__iar_Rmtx *m)
{
/* Simply delete the mutex. */
_tx_mutex_delete((TX_MUTEX *) *m);
}
void __iar_system_Mtxlock(__iar_Rmtx *m)
{
UINT status;
/* Determine the caller's context. Mutex locks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Get the mutex. */
status = _tx_mutex_get((TX_MUTEX *) *m, TX_WAIT_FOREVER);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_system_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_system_lock_isr_caller++;
}
}
void __iar_system_Mtxunlock(__iar_Rmtx *m)
{
UINT status;
/* Determine the caller's context. Mutex unlocks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Release the mutex. */
status = _tx_mutex_put((TX_MUTEX *) *m);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_system_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_system_lock_isr_caller++;
}
}
#if _DLIB_FILE_DESCRIPTOR
TX_MUTEX __tx_iar_file_lock_mutexes[_MAX_FLOCK];
UINT __tx_iar_file_lock_next_free_mutex = 0;
/* Define error counters, just for debug purposes. */
UINT __tx_iar_file_lock_no_mutexes;
UINT __tx_iar_file_lock_internal_errors;
UINT __tx_iar_file_lock_isr_caller;
void __iar_file_Mtxinit(__iar_Rmtx *m)
{
UINT i;
UINT status;
TX_MUTEX *mutex_ptr;
/* First, find a free mutex in the list. */
for (i = 0; i < _MAX_FLOCK; i++)
{
/* Setup a pointer to the start of the next free mutex. */
mutex_ptr = &__tx_iar_file_lock_mutexes[__tx_iar_file_lock_next_free_mutex++];
/* Check for wrap-around on the next free mutex. */
if (__tx_iar_file_lock_next_free_mutex >= _MAX_FLOCK)
{
/* Yes, set the free index back to 0. */
__tx_iar_file_lock_next_free_mutex = 0;
}
/* Is this mutex free? */
if (mutex_ptr -> tx_mutex_id != TX_MUTEX_ID)
{
/* Yes, this mutex is free, get out of the loop! */
break;
}
}
/* Determine if a free mutex was found. */
if (i >= _MAX_FLOCK)
{
/* Error! No more free mutexes! */
/* Increment the no mutexes error counter. */
__tx_iar_file_lock_no_mutexes++;
/* Set return pointer to NULL. */
*m = TX_NULL;
/* Return. */
return;
}
/* Now create the ThreadX mutex for the IAR library. */
status = _tx_mutex_create(mutex_ptr, "IAR File Library Lock", TX_NO_INHERIT);
/* Determine if the creation was successful. */
if (status == TX_SUCCESS)
{
/* Yes, successful creation, return mutex pointer. */
*m = (VOID *) mutex_ptr;
}
else
{
/* Increment the internal error counter. */
__tx_iar_file_lock_internal_errors++;
/* Return a NULL pointer to indicate an error. */
*m = TX_NULL;
}
}
void __iar_file_Mtxdst(__iar_Rmtx *m)
{
/* Simply delete the mutex. */
_tx_mutex_delete((TX_MUTEX *) *m);
}
void __iar_file_Mtxlock(__iar_Rmtx *m)
{
UINT status;
/* Determine the caller's context. Mutex locks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Get the mutex. */
status = _tx_mutex_get((TX_MUTEX *) *m, TX_WAIT_FOREVER);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_file_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_file_lock_isr_caller++;
}
}
void __iar_file_Mtxunlock(__iar_Rmtx *m)
{
UINT status;
/* Determine the caller's context. Mutex unlocks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Release the mutex. */
status = _tx_mutex_put((TX_MUTEX *) *m);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_file_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_file_lock_isr_caller++;
}
}
#endif /* _DLIB_FILE_DESCRIPTOR */
#endif /* _MULTI_THREAD */
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#else /* IAR version 8 and above. */
/* Include necessary system files. */
#include "tx_api.h"
#include "tx_initialize.h"
#include "tx_thread.h"
#include "tx_mutex.h"
/* This implementation requires that the following macros are defined in the
tx_port.h file and <yvals.h> is included with the following code segments:
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#endif
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_iar_tls_pointer;
#else
#define TX_THREAD_EXTENSION_2
#endif
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
void *_tx_iar_create_per_thread_tls_area(void);
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
void __iar_Initlocks(void);
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = _tx_iar_create_per_thread_tls_area();
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0);
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION do {__iar_Initlocks();} while(0);
#else
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#endif
This should be done automatically if TX_ENABLE_IAR_LIBRARY_SUPPORT is defined while building the ThreadX library and the
application.
Finally, the project options General Options -> Library Configuration should have the "Enable thread support in library" box selected.
*/
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <DLib_threads.h>
void * __aeabi_read_tp();
void* _tx_iar_create_per_thread_tls_area();
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
#pragma section="__iar_tls$$DATA"
/* Define the TLS access function for the IAR library. */
void * __aeabi_read_tp(void)
{
void *p = 0;
TX_THREAD *thread_ptr = _tx_thread_current_ptr;
if (thread_ptr)
{
p = thread_ptr->tx_thread_iar_tls_pointer;
}
else
{
p = __section_begin("__iar_tls$$DATA");
}
return p;
}
/* Define the TLS creation and destruction to use malloc/free. */
void* _tx_iar_create_per_thread_tls_area()
{
UINT tls_size = __iar_tls_size();
/* Get memory for TLS. */
void *p = malloc(tls_size);
/* Initialize TLS-area and run constructors for objects in TLS */
__iar_tls_init(p);
return p;
}
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr)
{
/* Destroy objects living in TLS */
__call_thread_dtors();
free(tls_ptr);
}
#ifndef _MAX_LOCK
#define _MAX_LOCK 4
#endif
static TX_MUTEX __tx_iar_system_lock_mutexes[_MAX_LOCK];
static UINT __tx_iar_system_lock_next_free_mutex = 0;
/* Define error counters, just for debug purposes. */
UINT __tx_iar_system_lock_no_mutexes;
UINT __tx_iar_system_lock_internal_errors;
UINT __tx_iar_system_lock_isr_caller;
/* Define mutexes for IAR library. */
void __iar_system_Mtxinit(__iar_Rmtx *m)
{
UINT i;
UINT status;
TX_MUTEX *mutex_ptr;
/* First, find a free mutex in the list. */
for (i = 0; i < _MAX_LOCK; i++)
{
/* Setup a pointer to the start of the next free mutex. */
mutex_ptr = &__tx_iar_system_lock_mutexes[__tx_iar_system_lock_next_free_mutex++];
/* Check for wrap-around on the next free mutex. */
if (__tx_iar_system_lock_next_free_mutex >= _MAX_LOCK)
{
/* Yes, set the free index back to 0. */
__tx_iar_system_lock_next_free_mutex = 0;
}
/* Is this mutex free? */
if (mutex_ptr -> tx_mutex_id != TX_MUTEX_ID)
{
/* Yes, this mutex is free, get out of the loop! */
break;
}
}
/* Determine if a free mutex was found. */
if (i >= _MAX_LOCK)
{
/* Error! No more free mutexes! */
/* Increment the no mutexes error counter. */
__tx_iar_system_lock_no_mutexes++;
/* Set return pointer to NULL. */
*m = TX_NULL;
/* Return. */
return;
}
/* Now create the ThreadX mutex for the IAR library. */
status = _tx_mutex_create(mutex_ptr, "IAR System Library Lock", TX_NO_INHERIT);
/* Determine if the creation was successful. */
if (status == TX_SUCCESS)
{
/* Yes, successful creation, return mutex pointer. */
*m = (VOID *) mutex_ptr;
}
else
{
/* Increment the internal error counter. */
__tx_iar_system_lock_internal_errors++;
/* Return a NULL pointer to indicate an error. */
*m = TX_NULL;
}
}
void __iar_system_Mtxdst(__iar_Rmtx *m)
{
/* Simply delete the mutex. */
_tx_mutex_delete((TX_MUTEX *) *m);
}
void __iar_system_Mtxlock(__iar_Rmtx *m)
{
if (*m)
{
UINT status;
/* Determine the caller's context. Mutex locks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Get the mutex. */
status = _tx_mutex_get((TX_MUTEX *) *m, TX_WAIT_FOREVER);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_system_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_system_lock_isr_caller++;
}
}
}
void __iar_system_Mtxunlock(__iar_Rmtx *m)
{
if (*m)
{
UINT status;
/* Determine the caller's context. Mutex unlocks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Release the mutex. */
status = _tx_mutex_put((TX_MUTEX *) *m);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_system_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_system_lock_isr_caller++;
}
}
}
#if _DLIB_FILE_DESCRIPTOR
#include <stdio.h> /* Added to get access to FOPEN_MAX */
#ifndef _MAX_FLOCK
#define _MAX_FLOCK FOPEN_MAX /* Define _MAX_FLOCK as the maximum number of open files */
#endif
TX_MUTEX __tx_iar_file_lock_mutexes[_MAX_FLOCK];
UINT __tx_iar_file_lock_next_free_mutex = 0;
/* Define error counters, just for debug purposes. */
UINT __tx_iar_file_lock_no_mutexes;
UINT __tx_iar_file_lock_internal_errors;
UINT __tx_iar_file_lock_isr_caller;
void __iar_file_Mtxinit(__iar_Rmtx *m)
{
UINT i;
UINT status;
TX_MUTEX *mutex_ptr;
/* First, find a free mutex in the list. */
for (i = 0; i < _MAX_FLOCK; i++)
{
/* Setup a pointer to the start of the next free mutex. */
mutex_ptr = &__tx_iar_file_lock_mutexes[__tx_iar_file_lock_next_free_mutex++];
/* Check for wrap-around on the next free mutex. */
if (__tx_iar_file_lock_next_free_mutex >= _MAX_FLOCK)
{
/* Yes, set the free index back to 0. */
__tx_iar_file_lock_next_free_mutex = 0;
}
/* Is this mutex free? */
if (mutex_ptr -> tx_mutex_id != TX_MUTEX_ID)
{
/* Yes, this mutex is free, get out of the loop! */
break;
}
}
/* Determine if a free mutex was found. */
if (i >= _MAX_FLOCK)
{
/* Error! No more free mutexes! */
/* Increment the no mutexes error counter. */
__tx_iar_file_lock_no_mutexes++;
/* Set return pointer to NULL. */
*m = TX_NULL;
/* Return. */
return;
}
/* Now create the ThreadX mutex for the IAR library. */
status = _tx_mutex_create(mutex_ptr, "IAR File Library Lock", TX_NO_INHERIT);
/* Determine if the creation was successful. */
if (status == TX_SUCCESS)
{
/* Yes, successful creation, return mutex pointer. */
*m = (VOID *) mutex_ptr;
}
else
{
/* Increment the internal error counter. */
__tx_iar_file_lock_internal_errors++;
/* Return a NULL pointer to indicate an error. */
*m = TX_NULL;
}
}
void __iar_file_Mtxdst(__iar_Rmtx *m)
{
/* Simply delete the mutex. */
_tx_mutex_delete((TX_MUTEX *) *m);
}
void __iar_file_Mtxlock(__iar_Rmtx *m)
{
UINT status;
/* Determine the caller's context. Mutex locks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Get the mutex. */
status = _tx_mutex_get((TX_MUTEX *) *m, TX_WAIT_FOREVER);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_file_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_file_lock_isr_caller++;
}
}
void __iar_file_Mtxunlock(__iar_Rmtx *m)
{
UINT status;
/* Determine the caller's context. Mutex unlocks are only available from initialization and
threads. */
if ((_tx_thread_system_state == 0) || (_tx_thread_system_state >= TX_INITIALIZE_IN_PROGRESS))
{
/* Release the mutex. */
status = _tx_mutex_put((TX_MUTEX *) *m);
/* Check the status of the mutex release. */
if (status)
{
/* Internal error, increment the counter. */
__tx_iar_file_lock_internal_errors++;
}
}
else
{
/* Increment the ISR caller error. */
__tx_iar_file_lock_isr_caller++;
}
}
#endif /* _DLIB_FILE_DESCRIPTOR */
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#endif /* IAR version 8 and above. */

@ -0,0 +1,237 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Initialize */
/** */
/**************************************************************************/
/**************************************************************************/
EXTERN _tx_thread_system_stack_ptr
EXTERN _tx_initialize_unused_memory
EXTERN _tx_timer_interrupt
EXTERN __main
EXTERN __vector_table
EXTERN _tx_thread_current_ptr
EXTERN _tx_thread_stack_error_handler
SYSTEM_CLOCK EQU 96000000
SYSTICK_CYCLES EQU ((SYSTEM_CLOCK / 100) -1)
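// Note: with SYSTEM_CLOCK at 96 MHz, the reload value is 96000000/100 - 1 = 959999,
// which produces a 100 Hz (10 ms) ThreadX timer tick. Adjust SYSTEM_CLOCK to match
// the actual core clock of the target board.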
RSEG FREE_MEM:DATA
PUBLIC __tx_free_memory_start
__tx_free_memory_start
DS32 4
SECTION `.text`:CODE:NOROOT(2)
THUMB
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_initialize_low_level Cortex-M55/IAR */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for any low-level processor */
/* initialization, including setting up interrupt vectors, setting */
/* up a periodic timer interrupt source, saving the system stack */
/* pointer for use in ISR processing later, and finding the first */
/* available RAM memory address for tx_application_define. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_initialize_low_level(VOID)
// {
PUBLIC _tx_initialize_low_level
_tx_initialize_low_level:
/* Disable interrupts during ThreadX initialization. */
CPSID i
/* Set base of available memory to end of non-initialised RAM area. */
LDR r0, =_tx_initialize_unused_memory // Build address of unused memory pointer
LDR r1, =__tx_free_memory_start // Build first free address
STR r1, [r0] // Setup first unused memory pointer
/* Setup Vector Table Offset Register. */
MOV r0, #0xE000E000 // Build address of NVIC registers
LDR r1, =__vector_table // Pickup address of vector table
STR r1, [r0, #0xD08] // Set vector table address
/* Enable the cycle count register. */
// LDR r0, =0xE0001000 // Build address of DWT register
// LDR r1, [r0] // Pickup the current value
// ORR r1, r1, #1 // Set the CYCCNTENA bit
// STR r1, [r0] // Enable the cycle count register
/* Set system stack pointer from vector value. */
LDR r0, =_tx_thread_system_stack_ptr // Build address of system stack pointer
LDR r1, =__vector_table // Pickup address of vector table
LDR r1, [r1] // Pickup reset stack pointer
STR r1, [r0] // Save system stack pointer
/* Configure SysTick. */
MOV r0, #0xE000E000 // Build address of NVIC registers
LDR r1, =SYSTICK_CYCLES
STR r1, [r0, #0x14] // Setup SysTick Reload Value
MOV r1, #0x7 // Build SysTick Control Enable Value
STR r1, [r0, #0x10] // Setup SysTick Control
/* Configure handler priorities. */
LDR r1, =0x00000000 // Rsrv, UsgF, BusF, MemM
STR r1, [r0, #0xD18] // Setup System Handlers 4-7 Priority Registers
LDR r1, =0xFF000000 // SVCl, Rsrv, Rsrv, Rsrv
STR r1, [r0, #0xD1C] // Setup System Handlers 8-11 Priority Registers
// Note: SVC must be lowest priority, which is 0xFF
LDR r1, =0x40FF0000 // SysT, PnSV, Rsrv, DbgM
STR r1, [r0, #0xD20] // Setup System Handlers 12-15 Priority Registers
// Note: PnSV must be lowest priority, which is 0xFF
/* Return to caller. */
BX lr
// }
/* Define shells for each of the unused vectors. */
PUBLIC __tx_BadHandler
__tx_BadHandler:
B __tx_BadHandler
PUBLIC __tx_IntHandler
__tx_IntHandler:
// VOID InterruptHandler (VOID)
// {
PUSH {r0,lr} // Save LR (and dummy r0 to maintain stack alignment)
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_enter // Call the ISR enter function
#endif
/* Do interrupt handler work here */
/* .... */
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_exit // Call the ISR exit function
#endif
POP {r0,lr}
BX lr
// }
PUBLIC __tx_SysTickHandler
PUBLIC SysTick_Handler
SysTick_Handler:
__tx_SysTickHandler:
// VOID TimerInterruptHandler (VOID)
// {
PUSH {r0,lr} // Save LR (and dummy r0 to maintain stack alignment)
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_enter // Call the ISR enter function
#endif
BL _tx_timer_interrupt
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_exit // Call the ISR exit function
#endif
POP {r0,lr}
BX lr
// }
PUBLIC HardFault_Handler
HardFault_Handler:
B HardFault_Handler
PUBLIC UsageFault_Handler
UsageFault_Handler:
CPSID i // Disable interrupts
// Check for stack limit fault
LDR r0, =0xE000ED28 // CFSR address
LDR r1,[r0] // Pick up CFSR
TST r1, #0x00100000 // Check for Stack Overflow
_unhandled_usage_loop
BEQ _unhandled_usage_loop // If not stack overflow then loop
// Handle stack overflow
STR r1, [r0] // Clear CFSR flag(s)
#ifdef __ARMVFP__
LDR r0, =0xE000EF34 // Cleanup FPU context: Load FPCCR address
LDR r1, [r0] // Load FPCCR
BIC r1, r1, #1 // Clear the lazy preservation active bit
STR r1, [r0] // Store the value
#endif
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r0,[r0] // Pick up current thread pointer
PUSH {r0,lr} // Save LR (and r0 to maintain stack alignment)
BL _tx_thread_stack_error_handler // Call ThreadX/user handler
POP {r0,lr} // Restore LR and dummy reg
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
// Call the thread exit function to indicate the thread is no longer executing.
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
#endif
MOV r1, #0 // Build NULL value
LDR r0, =_tx_thread_current_ptr // Pickup address of current thread pointer
STR r1, [r0] // Clear current thread pointer
// Return from UsageFault_Handler exception
LDR r0, =0xE000ED04 // Load ICSR
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
CPSIE i // Enable interrupts
BX lr // Return from exception
PUBLIC __tx_NMIHandler
__tx_NMIHandler:
B __tx_NMIHandler
PUBLIC __tx_DBGHandler
__tx_DBGHandler:
B __tx_DBGHandler
END
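The free-memory pointer established above is what the kernel passes to the application at startup.
As a usage sketch (standard ThreadX startup flow; the byte pool name and size are illustrative, not
part of this port):

    #include "tx_api.h"

    TX_BYTE_POOL    heap_pool;              /* illustrative object */

    VOID tx_application_define(VOID *first_unused_memory)
    {
        /* first_unused_memory corresponds to __tx_free_memory_start set up in
           _tx_initialize_low_level.  Carve kernel objects out of this area.  */
        tx_byte_pool_create(&heap_pool, "heap", first_unused_memory, 8192);
    }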

@ -0,0 +1,762 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** ThreadX MISRA Compliance */
/** */
/**************************************************************************/
/**************************************************************************/
#define SHT_PROGBITS 0x1
EXTERN __aeabi_memset
EXTERN _tx_thread_current_ptr
EXTERN _tx_thread_interrupt_disable
EXTERN _tx_thread_interrupt_restore
EXTERN _tx_thread_stack_analyze
EXTERN _tx_thread_stack_error_handler
EXTERN _tx_thread_system_state
#ifdef TX_ENABLE_EVENT_TRACE
EXTERN _tx_trace_buffer_current_ptr
EXTERN _tx_trace_buffer_end_ptr
EXTERN _tx_trace_buffer_start_ptr
EXTERN _tx_trace_event_enable_bits
EXTERN _tx_trace_full_notify_function
EXTERN _tx_trace_header_ptr
#endif
PUBLIC _tx_misra_always_true
PUBLIC _tx_misra_block_pool_to_uchar_pointer_convert
PUBLIC _tx_misra_byte_pool_to_uchar_pointer_convert
PUBLIC _tx_misra_char_to_uchar_pointer_convert
PUBLIC _tx_misra_const_char_to_char_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
PUBLIC _tx_misra_entry_to_uchar_pointer_convert
#endif
PUBLIC _tx_misra_indirect_void_to_uchar_pointer_convert
PUBLIC _tx_misra_memset
PUBLIC _tx_misra_message_copy
#ifdef TX_ENABLE_EVENT_TRACE
PUBLIC _tx_misra_object_to_uchar_pointer_convert
#endif
PUBLIC _tx_misra_pointer_to_ulong_convert
PUBLIC _tx_misra_status_get
PUBLIC _tx_misra_thread_stack_check
#ifdef TX_ENABLE_EVENT_TRACE
PUBLIC _tx_misra_time_stamp_get
#endif
PUBLIC _tx_misra_timer_indirect_to_void_pointer_convert
PUBLIC _tx_misra_timer_pointer_add
PUBLIC _tx_misra_timer_pointer_dif
#ifdef TX_ENABLE_EVENT_TRACE
PUBLIC _tx_misra_trace_event_insert
#endif
PUBLIC _tx_misra_uchar_pointer_add
PUBLIC _tx_misra_uchar_pointer_dif
PUBLIC _tx_misra_uchar_pointer_sub
PUBLIC _tx_misra_uchar_to_align_type_pointer_convert
PUBLIC _tx_misra_uchar_to_block_pool_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
PUBLIC _tx_misra_uchar_to_entry_pointer_convert
PUBLIC _tx_misra_uchar_to_header_pointer_convert
#endif
PUBLIC _tx_misra_uchar_to_indirect_byte_pool_pointer_convert
PUBLIC _tx_misra_uchar_to_indirect_uchar_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
PUBLIC _tx_misra_uchar_to_object_pointer_convert
#endif
PUBLIC _tx_misra_uchar_to_void_pointer_convert
PUBLIC _tx_misra_ulong_pointer_add
PUBLIC _tx_misra_ulong_pointer_dif
PUBLIC _tx_misra_ulong_pointer_sub
PUBLIC _tx_misra_ulong_to_pointer_convert
PUBLIC _tx_misra_ulong_to_thread_pointer_convert
PUBLIC _tx_misra_user_timer_pointer_get
PUBLIC _tx_misra_void_to_block_pool_pointer_convert
PUBLIC _tx_misra_void_to_byte_pool_pointer_convert
PUBLIC _tx_misra_void_to_event_flags_pointer_convert
PUBLIC _tx_misra_void_to_indirect_uchar_pointer_convert
PUBLIC _tx_misra_void_to_mutex_pointer_convert
PUBLIC _tx_misra_void_to_queue_pointer_convert
PUBLIC _tx_misra_void_to_semaphore_pointer_convert
PUBLIC _tx_misra_void_to_thread_pointer_convert
PUBLIC _tx_misra_void_to_uchar_pointer_convert
PUBLIC _tx_misra_void_to_ulong_pointer_convert
PUBLIC _tx_misra_ipsr_get
PUBLIC _tx_misra_control_get
PUBLIC _tx_misra_control_set
#ifdef __ARMVFP__
PUBLIC _tx_misra_fpccr_get
PUBLIC _tx_misra_vfp_touch
#endif
PUBLIC _tx_misra_event_flags_group_not_used
PUBLIC _tx_misra_event_flags_set_notify_not_used
PUBLIC _tx_misra_queue_not_used
PUBLIC _tx_misra_queue_send_notify_not_used
PUBLIC _tx_misra_semaphore_not_used
PUBLIC _tx_misra_semaphore_put_notify_not_used
PUBLIC _tx_misra_thread_entry_exit_notify_not_used
PUBLIC _tx_misra_thread_not_used
PUBLIC _tx_version_id
SECTION `.data`:DATA:REORDER:NOROOT(2)
DATA
// 51 CHAR _tx_version_id[100] = "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX 6.1 MISRA C Compliant *";
_tx_version_id:
DC8 43H, 6FH, 70H, 79H, 72H, 69H, 67H, 68H
DC8 74H, 20H, 28H, 63H, 29H, 20H, 31H, 39H
DC8 39H, 36H, 2DH, 32H, 30H, 31H, 38H, 20H
DC8 45H, 78H, 70H, 72H, 65H, 73H, 73H, 20H
DC8 4CH, 6FH, 67H, 69H, 63H, 20H, 49H, 6EH
DC8 63H, 2EH, 20H, 2AH, 20H, 54H, 68H, 72H
DC8 65H, 61H, 64H, 58H, 20H, 36H, 2EH, 31H
DC8 20H, 4DH, 49H, 53H, 52H, 41H, 20H, 43H
DC8 20H, 43H, 6FH, 6DH, 70H, 6CH, 69H, 61H
DC8 6EH, 74H, 20H, 2AH, 0
DC8 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_memset(VOID *ptr, UINT value, UINT size); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_memset:
PUSH {R4,LR}
MOVS R4,R0
MOVS R0,R2
MOVS R2,R1
MOVS R1,R0
MOVS R0,R4
BL __aeabi_memset
POP {R4,PC} // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UCHAR *_tx_misra_uchar_pointer_add(UCHAR *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_uchar_pointer_add:
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UCHAR *_tx_misra_uchar_pointer_sub(UCHAR *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_uchar_pointer_sub:
RSBS R1,R1,#+0
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_uchar_pointer_dif:
SUBS R0,R0,R1
BX LR // return
/************************************************************************************************************************************/
/************************************************************************************************************************************/
/** */
/** This single function serves all of the below prototypes. */
/** */
/** ULONG _tx_misra_pointer_to_ulong_convert(VOID *ptr); */
/** VOID *_tx_misra_ulong_to_pointer_convert(ULONG input); */
/** UCHAR **_tx_misra_indirect_void_to_uchar_pointer_convert(VOID **return_ptr); */
/** UCHAR **_tx_misra_uchar_to_indirect_uchar_pointer_convert(UCHAR *pointer); */
/** UCHAR *_tx_misra_block_pool_to_uchar_pointer_convert(TX_BLOCK_POOL *pool); */
/** TX_BLOCK_POOL *_tx_misra_void_to_block_pool_pointer_convert(VOID *pointer); */
/** UCHAR *_tx_misra_void_to_uchar_pointer_convert(VOID *pointer); */
/** TX_BLOCK_POOL *_tx_misra_uchar_to_block_pool_pointer_convert(UCHAR *pointer); */
/** UCHAR **_tx_misra_void_to_indirect_uchar_pointer_convert(VOID *pointer); */
/** TX_BYTE_POOL *_tx_misra_void_to_byte_pool_pointer_convert(VOID *pointer); */
/** UCHAR *_tx_misra_byte_pool_to_uchar_pointer_convert(TX_BYTE_POOL *pool); */
/** ALIGN_TYPE *_tx_misra_uchar_to_align_type_pointer_convert(UCHAR *pointer); */
/** TX_BYTE_POOL **_tx_misra_uchar_to_indirect_byte_pool_pointer_convert(UCHAR *pointer); */
/** TX_EVENT_FLAGS_GROUP *_tx_misra_void_to_event_flags_pointer_convert(VOID *pointer); */
/** ULONG *_tx_misra_void_to_ulong_pointer_convert(VOID *pointer); */
/** TX_MUTEX *_tx_misra_void_to_mutex_pointer_convert(VOID *pointer); */
/** TX_QUEUE *_tx_misra_void_to_queue_pointer_convert(VOID *pointer); */
/** TX_SEMAPHORE *_tx_misra_void_to_semaphore_pointer_convert(VOID *pointer); */
/** VOID *_tx_misra_uchar_to_void_pointer_convert(UCHAR *pointer); */
/** TX_THREAD *_tx_misra_ulong_to_thread_pointer_convert(ULONG value); */
/** VOID *_tx_misra_timer_indirect_to_void_pointer_convert(TX_TIMER_INTERNAL **pointer); */
/** CHAR *_tx_misra_const_char_to_char_pointer_convert(const char *pointer); */
/** TX_THREAD *_tx_misra_void_to_thread_pointer_convert(void *pointer); */
/** UCHAR *_tx_misra_object_to_uchar_pointer_convert(TX_TRACE_OBJECT_ENTRY *pointer); */
/** TX_TRACE_OBJECT_ENTRY *_tx_misra_uchar_to_object_pointer_convert(UCHAR *pointer); */
/** TX_TRACE_HEADER *_tx_misra_uchar_to_header_pointer_convert(UCHAR *pointer); */
/** TX_TRACE_BUFFER_ENTRY *_tx_misra_uchar_to_entry_pointer_convert(UCHAR *pointer); */
/** UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer); */
/** UCHAR *_tx_misra_char_to_uchar_pointer_convert(CHAR *pointer); */
/** VOID _tx_misra_event_flags_group_not_used(TX_EVENT_FLAGS_GROUP *group_ptr); */
/** VOID _tx_misra_event_flags_set_notify_not_used(VOID (*events_set_notify)(TX_EVENT_FLAGS_GROUP *notify_group_ptr)); */
/** VOID _tx_misra_queue_not_used(TX_QUEUE *queue_ptr); */
/** VOID _tx_misra_queue_send_notify_not_used(VOID (*queue_send_notify)(TX_QUEUE *notify_queue_ptr)); */
/** VOID _tx_misra_semaphore_not_used(TX_SEMAPHORE *semaphore_ptr); */
/** VOID _tx_misra_semaphore_put_notify_not_used(VOID (*semaphore_put_notify)(TX_SEMAPHORE *notify_semaphore_ptr)); */
/** VOID _tx_misra_thread_not_used(TX_THREAD *thread_ptr); */
/** VOID _tx_misra_thread_entry_exit_notify_not_used(VOID (*thread_entry_exit_notify)(TX_THREAD *notify_thread_ptr, UINT id)); */
/** */
/************************************************************************************************************************************/
/************************************************************************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_pointer_to_ulong_convert:
_tx_misra_ulong_to_pointer_convert:
_tx_misra_indirect_void_to_uchar_pointer_convert:
_tx_misra_uchar_to_indirect_uchar_pointer_convert:
_tx_misra_block_pool_to_uchar_pointer_convert:
_tx_misra_void_to_block_pool_pointer_convert:
_tx_misra_void_to_uchar_pointer_convert:
_tx_misra_uchar_to_block_pool_pointer_convert:
_tx_misra_void_to_indirect_uchar_pointer_convert:
_tx_misra_void_to_byte_pool_pointer_convert:
_tx_misra_byte_pool_to_uchar_pointer_convert:
_tx_misra_uchar_to_align_type_pointer_convert:
_tx_misra_uchar_to_indirect_byte_pool_pointer_convert:
_tx_misra_void_to_event_flags_pointer_convert:
_tx_misra_void_to_ulong_pointer_convert:
_tx_misra_void_to_mutex_pointer_convert:
_tx_misra_void_to_queue_pointer_convert:
_tx_misra_void_to_semaphore_pointer_convert:
_tx_misra_uchar_to_void_pointer_convert:
_tx_misra_ulong_to_thread_pointer_convert:
_tx_misra_timer_indirect_to_void_pointer_convert:
_tx_misra_const_char_to_char_pointer_convert:
_tx_misra_void_to_thread_pointer_convert:
#ifdef TX_ENABLE_EVENT_TRACE
_tx_misra_object_to_uchar_pointer_convert:
_tx_misra_uchar_to_object_pointer_convert:
_tx_misra_uchar_to_header_pointer_convert:
_tx_misra_uchar_to_entry_pointer_convert:
_tx_misra_entry_to_uchar_pointer_convert:
#endif
_tx_misra_char_to_uchar_pointer_convert:
_tx_misra_event_flags_group_not_used:
_tx_misra_event_flags_set_notify_not_used:
_tx_misra_queue_not_used:
_tx_misra_queue_send_notify_not_used:
_tx_misra_semaphore_not_used:
_tx_misra_semaphore_put_notify_not_used:
_tx_misra_thread_entry_exit_notify_not_used:
_tx_misra_thread_not_used:
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG *_tx_misra_ulong_pointer_add(ULONG *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_ulong_pointer_add:
ADD R0,R0,R1, LSL #+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_ulong_pointer_sub:
MVNS R2,#+3
MULS R1,R2,R1
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_ulong_pointer_dif:
SUBS R0,R0,R1
ASRS R0,R0,#+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_message_copy(ULONG **source, ULONG **destination, */
/** UINT size); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_message_copy:
PUSH {R4,R5}
LDR R3,[R0, #+0]
LDR R4,[R1, #+0]
LDR R5,[R3, #+0]
STR R5,[R4, #+0]
ADDS R4,R4,#+4
ADDS R3,R3,#+4
CMP R2,#+2
BCC.N ??_tx_misra_message_copy_0
SUBS R2,R2,#+1
B.N ??_tx_misra_message_copy_1
??_tx_misra_message_copy_2:
LDR R5,[R3, #+0]
STR R5,[R4, #+0]
ADDS R4,R4,#+4
ADDS R3,R3,#+4
SUBS R2,R2,#+1
??_tx_misra_message_copy_1:
CMP R2,#+0
BNE.N ??_tx_misra_message_copy_2
??_tx_misra_message_copy_0:
STR R3,[R0, #+0]
STR R4,[R1, #+0]
POP {R4,R5}
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_timer_pointer_dif(TX_TIMER_INTERNAL **ptr1, */
/** TX_TIMER_INTERNAL **ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_timer_pointer_dif:
SUBS R0,R0,R1
ASRS R0,R0,#+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** TX_TIMER_INTERNAL **_tx_misra_timer_pointer_add(TX_TIMER_INTERNAL */
/** **ptr1, ULONG size); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_timer_pointer_add:
ADD R0,R0,R1, LSL #+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_user_timer_pointer_get(TX_TIMER_INTERNAL */
/** *internal_timer, TX_TIMER **user_timer); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_user_timer_pointer_get:
SUBS R0,#8
STR R0,[R1, #+0]
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_thread_stack_check(TX_THREAD *thread_ptr, */
/** VOID **highest_stack); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_thread_stack_check:
PUSH {R3-R5,LR}
MOVS R4,R0
MOVS R5,R1
BL _tx_thread_interrupt_disable
CMP R4,#+0
BEQ.N ??_tx_misra_thread_stack_check_0
LDR R1,[R4, #+0]
LDR.N R2,??DataTable2 // 0x54485244
CMP R1,R2
BNE.N ??_tx_misra_thread_stack_check_0
LDR R1,[R4, #+8]
LDR R2,[R5, #+0]
CMP R1,R2
BCS.N ??_tx_misra_thread_stack_check_1
LDR R1,[R4, #+8]
STR R1,[R5, #+0]
??_tx_misra_thread_stack_check_1:
LDR R1,[R4, #+12]
LDR R1,[R1, #+0]
CMP R1,#-269488145
BNE.N ??_tx_misra_thread_stack_check_2
LDR R1,[R4, #+16]
LDR R1,[R1, #+1]
CMP R1,#-269488145
BNE.N ??_tx_misra_thread_stack_check_2
LDR R1,[R5, #+0]
LDR R2,[R4, #+12]
CMP R1,R2
BCS.N ??_tx_misra_thread_stack_check_3
??_tx_misra_thread_stack_check_2:
BL _tx_thread_interrupt_restore
MOVS R0,R4
BL _tx_thread_stack_error_handler
BL _tx_thread_interrupt_disable
??_tx_misra_thread_stack_check_3:
LDR R1,[R5, #+0]
LDR R1,[R1, #-4]
CMP R1,#-269488145
BEQ.N ??_tx_misra_thread_stack_check_0
BL _tx_thread_interrupt_restore
MOVS R0,R4
BL _tx_thread_stack_analyze
BL _tx_thread_interrupt_disable
??_tx_misra_thread_stack_check_0:
BL _tx_thread_interrupt_restore
POP {R0,R4,R5,PC} // return
#ifdef TX_ENABLE_EVENT_TRACE
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_trace_event_insert(ULONG event_id, */
/** VOID *info_field_1, ULONG info_field_2, ULONG info_field_3, */
/** ULONG info_field_4, ULONG filter, ULONG time_stamp); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_trace_event_insert:
PUSH {R3-R7,LR}
LDR.N R4,??DataTable2_1
LDR R4,[R4, #+0]
CMP R4,#+0
BEQ.N ??_tx_misra_trace_event_insert_0
LDR.N R5,??DataTable2_2
LDR R5,[R5, #+0]
LDR R6,[SP, #+28]
TST R5,R6
BEQ.N ??_tx_misra_trace_event_insert_0
LDR.N R5,??DataTable2_3
LDR R5,[R5, #+0]
LDR.N R6,??DataTable2_4
LDR R6,[R6, #+0]
CMP R5,#+0
BNE.N ??_tx_misra_trace_event_insert_1
LDR R5,[R6, #+44]
LDR R7,[R6, #+60]
LSLS R7,R7,#+16
ORRS R7,R7,#0x80000000
ORRS R5,R7,R5
B.N ??_tx_misra_trace_event_insert_2
??_tx_misra_trace_event_insert_1:
CMP R5,#-252645136
BCS.N ??_tx_misra_trace_event_insert_3
MOVS R5,R6
MOVS R6,#-1
B.N ??_tx_misra_trace_event_insert_2
??_tx_misra_trace_event_insert_3:
MOVS R6,#-252645136
MOVS R5,#+0
??_tx_misra_trace_event_insert_2:
STR R6,[R4, #+0]
STR R5,[R4, #+4]
STR R0,[R4, #+8]
LDR R0,[SP, #+32]
STR R0,[R4, #+12]
STR R1,[R4, #+16]
STR R2,[R4, #+20]
STR R3,[R4, #+24]
LDR R0,[SP, #+24]
STR R0,[R4, #+28]
ADDS R4,R4,#+32
LDR.N R0,??DataTable2_5
LDR R0,[R0, #+0]
CMP R4,R0
BCC.N ??_tx_misra_trace_event_insert_4
LDR.N R0,??DataTable2_6
LDR R4,[R0, #+0]
LDR.N R0,??DataTable2_1
STR R4,[R0, #+0]
LDR.N R0,??DataTable2_7
LDR R0,[R0, #+0]
STR R4,[R0, #+32]
LDR.N R0,??DataTable2_8
LDR R0,[R0, #+0]
CMP R0,#+0
BEQ.N ??_tx_misra_trace_event_insert_0
LDR.N R0,??DataTable2_7
LDR R0,[R0, #+0]
LDR.N R1,??DataTable2_8
LDR R1,[R1, #+0]
BLX R1
B.N ??_tx_misra_trace_event_insert_0
??_tx_misra_trace_event_insert_4:
LDR.N R0,??DataTable2_1
STR R4,[R0, #+0]
LDR.N R0,??DataTable2_7
LDR R0,[R0, #+0]
STR R4,[R0, #+32]
??_tx_misra_trace_event_insert_0:
POP {R0,R4-R7,PC} // return
SECTION `.text`:CODE:NOROOT(2)
SECTION_TYPE SHT_PROGBITS, 0
DATA
??DataTable2_1:
DC32 _tx_trace_buffer_current_ptr
SECTION `.text`:CODE:NOROOT(2)
SECTION_TYPE SHT_PROGBITS, 0
DATA
??DataTable2_2:
DC32 _tx_trace_event_enable_bits
SECTION `.text`:CODE:NOROOT(2)
SECTION_TYPE SHT_PROGBITS, 0
DATA
??DataTable2_5:
DC32 _tx_trace_buffer_end_ptr
SECTION `.text`:CODE:NOROOT(2)
SECTION_TYPE SHT_PROGBITS, 0
DATA
??DataTable2_6:
DC32 _tx_trace_buffer_start_ptr
SECTION `.text`:CODE:NOROOT(2)
SECTION_TYPE SHT_PROGBITS, 0
DATA
??DataTable2_7:
DC32 _tx_trace_header_ptr
SECTION `.text`:CODE:NOROOT(2)
SECTION_TYPE SHT_PROGBITS, 0
DATA
??DataTable2_8:
DC32 _tx_trace_full_notify_function
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_time_stamp_get(VOID); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_time_stamp_get:
MOVS R0,#+0
BX LR // return
#endif
SECTION `.text`:CODE:NOROOT(2)
SECTION_TYPE SHT_PROGBITS, 0
DATA
??DataTable2:
DC32 0x54485244
SECTION `.text`:CODE:NOROOT(2)
SECTION_TYPE SHT_PROGBITS, 0
DATA
??DataTable2_3:
DC32 _tx_thread_system_state
SECTION `.text`:CODE:NOROOT(2)
SECTION_TYPE SHT_PROGBITS, 0
DATA
??DataTable2_4:
DC32 _tx_thread_current_ptr
/**************************************************************************/
/**************************************************************************/
/** */
/** UINT _tx_misra_always_true(void); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_always_true:
MOVS R0,#+1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UINT _tx_misra_status_get(UINT status); */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_status_get:
MOVS R0,#+0
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_ipsr_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_ipsr_get:
MRS R0, IPSR
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_control_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_control_get:
MRS R0, CONTROL
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** void _tx_misra_control_set(ULONG value); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_control_set:
MSR CONTROL, R0
BX LR // return
#ifdef __ARMVFP__
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_fpccr_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
SECTION `.text`:CODE:NOROOT(2)
THUMB
_tx_misra_fpccr_get:
LDR r0, =0xE000EF34 // Build FPCCR address
LDR r0, [r0] // Load FPCCR value
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** void _tx_misra_vfp_touch(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
SECTION `.text`:CODE:NOROOT(1)
THUMB
_tx_misra_vfp_touch:
vmov.f32 s0, s0
BX LR // return
#endif
SECTION `.iar_vfe_header`:DATA:NOALLOC:NOROOT(2)
SECTION_TYPE SHT_PROGBITS, 0
DATA
DC32 0
END

@ -0,0 +1,77 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
EXTERN _tx_execution_isr_exit
SECTION `.text`:CODE:NOROOT(2)
THUMB
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore Cortex-M55/IAR */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is not needed for Cortex-M. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* [_tx_execution_isr_exit] Execution profiling ISR exit */
/* */
/* CALLED BY */
/* */
/* ISRs Interrupt Service Routines */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
// {
PUBLIC _tx_thread_context_restore
_tx_thread_context_restore:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
PUSH {r0, lr} // Save return address
BL _tx_execution_isr_exit // Call the ISR exit function
POP {r0, lr} // Recover return address
#endif
BX lr
// }
END

@ -0,0 +1,77 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
EXTERN _tx_execution_isr_enter
SECTION `.text`:CODE:NOROOT(2)
THUMB
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_save Cortex-M55/IAR */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is not needed for Cortex-M. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* [_tx_execution_isr_enter] Execution profiling ISR enter */
/* */
/* CALLED BY */
/* */
/* ISRs */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_save(VOID)
// {
PUBLIC _tx_thread_context_save
_tx_thread_context_save:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR enter function to indicate an ISR is starting. */
PUSH {r0, lr} // Save return address
BL _tx_execution_isr_enter // Call the ISR enter function
POP {r0, lr} // Recover return address
#endif
BX lr
// }
END

@ -0,0 +1,78 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(2)
THUMB
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_control Cortex-M55/IAR */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for changing the interrupt lockout */
/* posture of the system. */
/* */
/* INPUT */
/* */
/* new_posture New interrupt lockout posture */
/* */
/* OUTPUT */
/* */
/* old_posture Old interrupt lockout posture */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_interrupt_control(UINT new_posture)
// {
PUBLIC _tx_thread_interrupt_control
_tx_thread_interrupt_control:
#ifdef TX_PORT_USE_BASEPRI
MRS r1, BASEPRI // Pickup current interrupt posture
MSR BASEPRI, r0 // Apply the new interrupt posture
MOV r0, r1 // Transfer old to return register
#else
MRS r1, PRIMASK // Pickup current interrupt lockout
MSR PRIMASK, r0 // Apply the new interrupt lockout
MOV r0, r1 // Transfer old to return register
#endif
BX lr // Return to caller
// }
END
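As a usage sketch (tx_interrupt_control is the standard ThreadX service that maps to the routine
above; the variable name is illustrative):

    UINT    old_posture;

    old_posture = tx_interrupt_control(TX_INT_DISABLE);    /* lock out interrupts        */
    /* ... short critical section ... */
    tx_interrupt_control(old_posture);                      /* restore previous posture   */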

@ -0,0 +1,78 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(2)
THUMB
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_disable Cortex-M55/IAR */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for disabling interrupts and returning */
/* the previous interrupt lockout posture. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* old_posture Old interrupt lockout posture */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_interrupt_disable(VOID)
// {
PUBLIC _tx_thread_interrupt_disable
_tx_thread_interrupt_disable:
/* Return current interrupt lockout posture. */
#ifdef TX_PORT_USE_BASEPRI
MRS r0, BASEPRI
LDR r1, =TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
MRS r0, PRIMASK
CPSID i
#endif
BX lr
// }
END
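By default this port masks interrupts with PRIMASK. As a configuration sketch (the specific BASEPRI
value is illustrative and must match the application's interrupt priority scheme), BASEPRI-based
lockout can instead be selected when building the port, for example in tx_port.h:

    #define TX_PORT_USE_BASEPRI
    #define TX_PORT_BASEPRI   0x40      /* mask priority values 0x40-0xFF; 0x00-0x3F may still preempt */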

@ -0,0 +1,75 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(2)
THUMB
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_restore Cortex-M55/IAR */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for restoring the previous */
/* interrupt lockout posture. */
/* */
/* INPUT */
/* */
/* previous_posture Previous interrupt posture */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_interrupt_restore(UINT previous_posture)
// {
PUBLIC _tx_thread_interrupt_restore
_tx_thread_interrupt_restore:
/* Restore previous interrupt lockout posture. */
#ifdef TX_PORT_USE_BASEPRI
MSR BASEPRI, r0
#else
MSR PRIMASK, r0
#endif
BX lr
// }
END
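
The disable and restore routines above are meant to be used as a pair from application code: the posture returned by _tx_thread_interrupt_disable must later be handed back to _tx_thread_interrupt_restore. A minimal C usage sketch (the extern prototypes below simply mirror the two port functions listed above; they are not declared by tx_api.h):

#include "tx_api.h"

extern UINT _tx_thread_interrupt_disable(VOID);
extern VOID _tx_thread_interrupt_restore(UINT previous_posture);

static volatile ULONG shared_counter;

void increment_shared_counter(void)
{
    UINT old_posture;

    old_posture = _tx_thread_interrupt_disable();    /* Lock out interrupts, keep old posture */
    shared_counter++;                                /* Short critical section                */
    _tx_thread_interrupt_restore(old_posture);       /* Restore the saved posture             */
}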

View File

@ -0,0 +1,379 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
EXTERN _tx_thread_current_ptr
EXTERN _tx_thread_execute_ptr
EXTERN _tx_timer_time_slice
EXTERN _tx_thread_system_stack_ptr
EXTERN _tx_thread_preempt_disable
EXTERN _tx_execution_thread_enter
EXTERN _tx_execution_thread_exit
EXTERN _tx_thread_secure_stack_context_restore
EXTERN _tx_thread_secure_stack_context_save
EXTERN _tx_thread_secure_mode_stack_allocate
EXTERN _tx_thread_secure_mode_stack_free
EXTERN _tx_thread_secure_mode_stack_initialize
#ifdef TX_LOW_POWER
EXTERN tx_low_power_enter
EXTERN tx_low_power_exit
#endif
SECTION `.text`:CODE:NOROOT(2)
THUMB
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M55/IAR */
/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function waits for a thread control block pointer to appear in */
/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
/* in the variable, the corresponding thread is resumed. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 04-02-2021 Scott Larson Modified comment(s), added */
/* low power code, */
/* resulting in version 6.1.6 */
/* 06-02-2021 Scott Larson Added secure stack initialize */
/* in SVC handler, */
/* resulting in version 6.1.7 */
/* 04-25-2022 Scott Larson Added BASEPRI support, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
// {
PUBLIC _tx_thread_schedule
_tx_thread_schedule:
/* This function should only ever be called on Cortex-M
from the first schedule request. Subsequent scheduling occurs
from the PendSV handling routine below. */
/* Clear the preempt-disable flag to enable rescheduling after initialization on Cortex-M targets. */
MOV r0, #0 // Build value for TX_FALSE
LDR r2, =_tx_thread_preempt_disable // Build address of preempt disable flag
STR r0, [r2, #0] // Clear preempt disable flag
#ifdef __ARMVFP__
/* Clear CONTROL.FPCA bit so VFP registers aren't unnecessarily stacked. */
MRS r0, CONTROL // Pickup current CONTROL register
BIC r0, r0, #4 // Clear the FPCA bit
MSR CONTROL, r0 // Setup new CONTROL register
#endif
/* Enable interrupts */
CPSIE i
/* Enter the scheduler for the first time. */
MOV r0, #0x10000000 // Load PENDSVSET bit
MOV r1, #0xE000E000 // Load NVIC base
STR r0, [r1, #0xD04] // Set PENDSVBIT in ICSR
DSB // Complete all memory accesses
ISB // Flush pipeline
/* Wait here for the PendSV to take place. */
__tx_wait_here:
B __tx_wait_here // Wait for the PendSV to happen
// }
/* Generic context switching PendSV handler. */
PUBLIC PendSV_Handler
PendSV_Handler:
__tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
#ifdef TX_PORT_USE_BASEPRI
MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r0
#else
CPSIE i // Enable interrupts
#endif /* TX_PORT_USE_BASEPRI */
#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
MOV r3, #0 // Build NULL value
LDR r1, [r0] // Pickup current thread pointer
/* Determine if there is a current thread to finish preserving. */
CBZ r1, __tx_ts_new // If NULL, skip preservation
/* Recover PSP and preserve current thread context. */
STR r3, [r0] // Set _tx_thread_current_ptr to NULL
MRS r12, PSP // Pickup PSP pointer (thread's stack pointer)
STMDB r12!, {r4-r11} // Save its remaining registers
#ifdef __ARMVFP__
TST LR, #0x10 // Determine if the VFP extended frame is present
BNE _skip_vfp_save
VSTMDB r12!,{s16-s31} // Yes, save additional VFP registers
_skip_vfp_save:
#endif
LDR r4, =_tx_timer_time_slice // Build address of time-slice variable
STMDB r12!, {LR} // Save LR on the stack
STR r12, [r1, #8] // Save the thread stack pointer
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// Save secure context
LDR r5, [r1,#0x90] // Load secure stack index
CBZ r5, _skip_secure_save // Skip save if there is no secure context
PUSH {r0,r1,r2,r3} // Save scratch registers
MOV r0, r1 // Move thread ptr to r0
BL _tx_thread_secure_stack_context_save // Save secure stack
POP {r0,r1,r2,r3} // Restore scratch registers
_skip_secure_save:
#endif
/* Determine if time-slice is active. If it isn't, skip time handling processing. */
LDR r5, [r4] // Pickup current time-slice
CBZ r5, __tx_ts_new // If not active, skip processing
/* Time-slice is active, save the current thread's time-slice and clear the global time-slice variable. */
STR r5, [r1, #24] // Save current time-slice
/* Clear the global time-slice. */
STR r3, [r4] // Clear time-slice
/* Executing thread is now completely preserved!!! */
__tx_ts_new:
/* Now we are looking for a new thread to execute! */
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for execution, make it the current thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
/* Increment the thread run count. */
__tx_ts_restore:
LDR r7, [r1, #4] // Pickup the current thread run count
LDR r4, =_tx_timer_time_slice // Build address of time-slice variable
LDR r5, [r1, #24] // Pickup thread's current time-slice
ADD r7, r7, #1 // Increment the thread run count
STR r7, [r1, #4] // Store the new run count
/* Setup global time-slice with thread's current time-slice. */
STR r5, [r4] // Setup global time-slice
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread entry function to indicate the thread is executing. */
PUSH {r0, r1} // Save r0 and r1
BL _tx_execution_thread_enter // Call the thread execution enter function
POP {r0, r1} // Recover r0 and r1
#endif
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// Restore secure context
LDR r0, [r1,#0x90] // Load secure stack index
CBZ r0, _skip_secure_restore // Skip restore if there is no secure context
PUSH {r0,r1} // Save r1 (and dummy r0)
MOV r0, r1 // Move thread ptr to r0
BL _tx_thread_secure_stack_context_restore // Restore secure stack
POP {r0,r1} // Restore r1 (and dummy r0)
_skip_secure_restore:
#endif
/* Restore the thread context and PSP. */
LDR r12, [r1, #12] // Get stack start
MSR PSPLIM, r12 // Set stack limit
LDR r12, [r1, #8] // Pickup thread's stack pointer
LDMIA r12!, {LR} // Pickup LR
#ifdef __ARMVFP__
TST LR, #0x10 // Determine if the VFP extended frame is present
BNE _skip_vfp_restore // If not, skip VFP restore
VLDMIA r12!, {s16-s31} // Yes, restore additional VFP registers
_skip_vfp_restore:
#endif
LDMIA r12!, {r4-r11} // Recover thread's registers
MSR PSP, r12 // Setup the thread's stack pointer
BX lr // Return to thread!
/* The following is the idle wait processing... in this case, no threads are ready for execution and the
system will simply be idle until an interrupt occurs that makes a thread ready. Note that interrupts
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_LOW_POWER
PUSH {r0-r3}
BL tx_low_power_enter // Possibly enter low power mode
POP {r0-r3}
#endif
#ifdef TX_ENABLE_WFI
DSB // Ensure no outstanding memory transactions
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
#ifdef TX_LOW_POWER
PUSH {r0-r3}
BL tx_low_power_exit // Exit low power mode
POP {r0-r3}
#endif
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
already in the handler! */
__tx_ts_ready:
MOV r7, #0x08000000 // Build clear PendSV value
MOV r8, #0xE000E000 // Build base NVIC address
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
B __tx_ts_restore // Restore the thread
// }
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// SVC_Handler is not needed when ThreadX is running in single mode.
PUBLIC SVC_Handler
SVC_Handler:
TST lr, #0x04 // Determine return stack from EXC_RETURN bit 2
ITE EQ
MRSEQ r0, MSP // Get MSP if return stack is MSP
MRSNE r0, PSP // Get PSP if return stack is PSP
LDR r1, [r0,#24] // Load saved PC from stack
LDRB r1, [r1,#-2] // Load SVC number
CMP r1, #1 // Is it a secure stack allocate request?
BEQ _tx_svc_secure_alloc // Yes, go there
CMP r1, #2 // Is it a secure stack free request?
BEQ _tx_svc_secure_free // Yes, go there
CMP r1, #3 // Is it a secure stack init request?
BEQ _tx_svc_secure_init // Yes, go there
// Unknown SVC argument - just return
BX lr
_tx_svc_secure_alloc:
PUSH {r0,lr} // Save SP and EXC_RETURN
LDM r0, {r0-r3} // Load function parameters from stack
BL _tx_thread_secure_mode_stack_allocate
POP {r12,lr} // Restore SP and EXC_RETURN
STR r0,[r12] // Store function return value
BX lr
_tx_svc_secure_free:
PUSH {r0,lr} // Save SP and EXC_RETURN
LDM r0, {r0-r3} // Load function parameters from stack
BL _tx_thread_secure_mode_stack_free
POP {r12,lr} // Restore SP and EXC_RETURN
STR r0,[r12] // Store function return value
BX lr
_tx_svc_secure_init:
PUSH {r0,lr} // Save SP and EXC_RETURN
BL _tx_thread_secure_mode_stack_initialize
POP {r12,lr} // Restore SP and EXC_RETURN
BX lr
#endif // End of ifndef TX_SINGLE_MODE_SECURE, TX_SINGLE_MODE_NON_SECURE
PUBLIC _tx_vfp_access
_tx_vfp_access:
VMOV.F32 s0, s0 // Simply access the VFP
BX lr // Return to caller
END
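
For reference, the SVC dispatch in the handler above recovers the service number from the immediate encoded in the SVC instruction itself: it loads the stacked return PC from the exception frame (offset 24) and then reads the byte two addresses below it. A hedged C equivalent of that lookup, using the standard Armv8-M hardware-stacked frame layout (the function name is illustrative, not part of ThreadX):

#include <stdint.h>

/* Illustrative sketch only: extract the SVC number the same way the
   assembly handler does.  The hardware frame is r0,r1,r2,r3,r12,lr,pc,xPSR,
   so the stacked PC is element 6; the SVC immediate is the byte at PC - 2. */
static uint8_t svc_number_from_frame(const uint32_t *exception_frame)
{
    uint32_t stacked_pc = exception_frame[6];
    return *((const uint8_t *) (stacked_pc - 2U));
}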

View File

@ -0,0 +1,597 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#include "tx_api.h"
/* If TX_SINGLE_MODE_SECURE or TX_SINGLE_MODE_NON_SECURE is defined,
no secure stack functionality is needed. */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_SOURCE_CODE
#include <cmsis_compiler.h> /* For intrinsic functions. */
#include "tx_secure_interface.h" /* Interface for NS code. */
/* Minimum size of secure stack. */
#ifndef TX_THREAD_SECURE_STACK_MINIMUM
#define TX_THREAD_SECURE_STACK_MINIMUM 256
#endif
/* Maximum size of secure stack. */
#ifndef TX_THREAD_SECURE_STACK_MAXIMUM
#define TX_THREAD_SECURE_STACK_MAXIMUM 1024
#endif
/* 8 bytes added to stack size to "seal" stack. */
#define TX_THREAD_STACK_SEAL_SIZE 8
#define TX_THREAD_STACK_SEAL_VALUE 0xFEF5EDA5
/* max number of Secure context */
#ifndef TX_MAX_SECURE_CONTEXTS
#define TX_MAX_SECURE_CONTEXTS 32
#endif
#define TX_INVALID_SECURE_CONTEXT_IDX (-1)
/* Secure stack info struct to hold stack start, stack limit,
current stack pointer, and pointer to owning thread.
This will be allocated for each thread with a secure stack. */
typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
{
VOID *tx_thread_secure_stack_ptr; /* Thread's secure stack current pointer */
VOID *tx_thread_secure_stack_start; /* Thread's secure stack start address */
VOID *tx_thread_secure_stack_limit; /* Thread's secure stack limit */
TX_THREAD *tx_thread_ptr; /* Keep track of thread for error handling */
INT tx_next_free_index; /* Next free index of free secure context */
} TX_THREAD_SECURE_STACK_INFO;
/* Static secure contexts */
static TX_THREAD_SECURE_STACK_INFO tx_thread_secure_context[TX_MAX_SECURE_CONTEXTS];
/* Head of free secure context */
static INT tx_head_free_index = 0U;
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_initialize Cortex-M55/IAR */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function initializes secure mode to use PSP stack. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* status Completion status */
/* */
/* CALLS */
/* */
/* __get_CONTROL Intrinsic to get CONTROL */
/* __set_CONTROL Intrinsic to set CONTROL */
/* __set_PSPLIM Intrinsic to set PSP limit */
/* __set_PSP Intrinsic to set PSP */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 06-02-2021 Scott Larson Change name, execute in */
/* handler mode, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_initialize(void)
{
UINT status;
INT index;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
{
status = TX_CALLER_ERROR;
}
else
{
/* Set secure mode to use PSP. */
__set_CONTROL(__get_CONTROL() | 2);
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
__set_PSPLIM(0);
__set_PSP(0);
for (index = 0; index < TX_MAX_SECURE_CONTEXTS; index++)
{
/* Check last index and mark next free to invalid index */
if(index == (TX_MAX_SECURE_CONTEXTS - 1))
{
tx_thread_secure_context[index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
tx_thread_secure_context[index].tx_next_free_index = index + 1;
}
}
status = TX_SUCCESS;
}
return status;
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_allocate Cortex-M55/IAR */
/* 6.1.11a */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function allocates a thread's secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* stack_size Size of stack to allocate */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_SIZE_ERROR Invalid stack size */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* __get_IPSR Intrinsic to get IPSR */
/* malloc Compiler's malloc function */
/* __set_PSPLIM Intrinsic to set PSP limit */
/* __set_PSP Intrinsic to set PSP */
/* __TZ_get_PSPLIM_NS Intrinsic to get NS PSP */
/* */
/* CALLED BY */
/* */
/* SVC Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* added stack sealing, */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* 05-02-2022 Scott Larson Modified comment(s), added */
/* TX_INTERRUPT_SAVE_AREA, */
/* resulting in version 6.1.11a*/
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
{
TX_INTERRUPT_SAVE_AREA
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
UCHAR *stack_mem;
INT secure_context_index;
status = TX_SUCCESS;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
{
status = TX_CALLER_ERROR;
}
else if (stack_size < TX_THREAD_SECURE_STACK_MINIMUM || stack_size > TX_THREAD_SECURE_STACK_MAXIMUM)
{
status = TX_SIZE_ERROR;
}
/* Check if thread already has secure stack allocated. */
else if (thread_ptr -> tx_thread_secure_stack_context != 0)
{
status = TX_THREAD_ERROR;
}
else
{
TX_DISABLE
/* Allocate free index for secure stack info. */
if(tx_head_free_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
secure_context_index = tx_head_free_index;
tx_head_free_index = tx_thread_secure_context[tx_head_free_index].tx_next_free_index;
tx_thread_secure_context[secure_context_index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
secure_context_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
TX_RESTORE
if(secure_context_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
info_ptr = &tx_thread_secure_context[secure_context_index];
/* If stack info allocated, allocate a stack & seal. */
stack_mem = malloc(stack_size + TX_THREAD_STACK_SEAL_SIZE);
if(stack_mem != TX_NULL)
{
/* Secure stack has been allocated, save in the stack info struct. */
info_ptr -> tx_thread_secure_stack_limit = stack_mem;
info_ptr -> tx_thread_secure_stack_start = stack_mem + stack_size;
info_ptr -> tx_thread_secure_stack_ptr = info_ptr -> tx_thread_secure_stack_start;
info_ptr -> tx_thread_ptr = thread_ptr;
/* Seal bottom of stack. */
*(ULONG*)info_ptr -> tx_thread_secure_stack_start = TX_THREAD_STACK_SEAL_VALUE;
/* Save secure context id (i.e non-zero base index) in thread. */
thread_ptr -> tx_thread_secure_stack_context = (VOID *)(secure_context_index + 1);
/* Check if this thread is running by looking at its stack start and PSPLIM_NS */
if(((ULONG) thread_ptr -> tx_thread_stack_start & 0xFFFFFFF8) == __TZ_get_PSPLIM_NS())
{
/* If this thread is running, set Secure PSP and PSPLIM. */
__set_PSPLIM((ULONG)(info_ptr -> tx_thread_secure_stack_limit));
__set_PSP((ULONG)(info_ptr -> tx_thread_secure_stack_ptr));
}
}
else
{
TX_DISABLE
/* Stack not allocated, free the info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
status = TX_NO_MEMORY;
}
}
else
{
status = TX_NO_MEMORY;
}
}
return(status);
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_free Cortex-M55/IAR */
/* 6.1.11a */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function frees a thread's secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* __get_IPSR Intrinsic to get IPSR */
/* free Compiler's free() function */
/* */
/* CALLED BY */
/* */
/* SVC Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* 05-02-2022 Scott Larson Modified comment(s), added */
/* TX_INTERRUPT_SAVE_AREA, */
/* resulting in version 6.1.11a*/
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_free(TX_THREAD *thread_ptr)
{
TX_INTERRUPT_SAVE_AREA
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
INT secure_context_index;
status = TX_SUCCESS;
/* Pickup stack info id from thread. */
secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
{
status = TX_CALLER_ERROR;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
status = TX_THREAD_ERROR;
}
else
{
/* Pickup stack info from static array of secure contexts. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
status = TX_THREAD_ERROR;
}
else
{
/* Free secure stack. */
free(info_ptr -> tx_thread_secure_stack_limit);
TX_DISABLE
/* Free info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
/* Clear secure context from thread. */
thread_ptr -> tx_thread_secure_stack_context = 0;
}
}
return(status);
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_save Cortex-M55/IAR */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function saves context of the secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* __get_IPSR Intrinsic to get IPSR */
/* __get_PSP Intrinsic to get PSP */
/* __set_PSPLIM Intrinsic to set PSP limit */
/* __set_PSP Intrinsic to set PSP */
/* */
/* CALLED BY */
/* */
/* PendSV Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 06-02-2021 Scott Larson Fix stack pointer save, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
void _tx_thread_secure_stack_context_save(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
ULONG sp;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
if (__get_IPSR() == 0)
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Check that stack pointer is in range */
sp = __get_PSP();
if ((sp < (ULONG)info_ptr -> tx_thread_secure_stack_limit) ||
(sp > (ULONG)info_ptr -> tx_thread_secure_stack_start))
{
return;
}
/* Save stack pointer. */
info_ptr -> tx_thread_secure_stack_ptr = (VOID *) sp;
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
__set_PSPLIM(0);
__set_PSP(0);
return;
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_restore Cortex-M55/IAR */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function restores context of the secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* __get_IPSR Intrinsic to get IPSR */
/* __set_PSPLIM Intrinsic to set PSP limit */
/* __set_PSP Intrinsic to set PSP */
/* */
/* CALLED BY */
/* */
/* PendSV Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
void _tx_thread_secure_stack_context_restore(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
if (__get_IPSR() == 0)
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Set stack pointer and limit. */
__set_PSPLIM((ULONG)info_ptr -> tx_thread_secure_stack_limit);
__set_PSP ((ULONG)info_ptr -> tx_thread_secure_stack_ptr);
return;
}
#endif
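
The secure context pool in this file is managed as a singly linked free list threaded through tx_next_free_index, with tx_head_free_index pointing at the first free slot. A small standalone C sketch of the same allocate/release pattern (simplified, no interrupt locking, hypothetical names):

#include <stdio.h>

#define MAX_CONTEXTS    8
#define INVALID_INDEX (-1)

static int next_free[MAX_CONTEXTS];
static int head_free;

static void pool_init(void)
{
    int i;
    for (i = 0; i < MAX_CONTEXTS; i++)
        next_free[i] = (i == MAX_CONTEXTS - 1) ? INVALID_INDEX : i + 1;
    head_free = 0;
}

static int pool_alloc(void)                  /* Pop the head of the free list      */
{
    int index = head_free;
    if (index != INVALID_INDEX)
    {
        head_free = next_free[index];
        next_free[index] = INVALID_INDEX;
    }
    return index;
}

static void pool_release(int index)          /* Push the slot back on the free list */
{
    next_free[index] = head_free;
    head_free = index;
}

int main(void)
{
    pool_init();
    int a = pool_alloc();
    int b = pool_alloc();
    pool_release(a);
    printf("allocated %d and %d, then released %d\n", a, b, a);
    return 0;
}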

View File

@ -0,0 +1,80 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(2)
THUMB
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_allocate Cortex-M55/IAR */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to allocate a secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* stack_size Size of secure stack to */
/* allocate */
/* */
/* OUTPUT */
/* */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* SVC 1 */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_secure_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
// {
EXPORT _tx_thread_secure_stack_allocate
_tx_thread_secure_stack_allocate:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
MRS r3, PRIMASK // Save interrupt mask
CPSIE i // Enable interrupts for SVC call
SVC 1
CMP r3, #0 // If interrupts enabled, just return
BEQ _alloc_return_interrupt_enabled
CPSID i // Otherwise, disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
_alloc_return_interrupt_enabled
BX lr
END

View File

@ -0,0 +1,78 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(2)
THUMB
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_free Cortex-M55/IAR */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to free a secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* SVC 2 */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_secure_stack_free(TX_THREAD *thread_ptr)
// {
EXPORT _tx_thread_secure_stack_free
_tx_thread_secure_stack_free:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
MRS r3, PRIMASK // Save interrupt mask
CPSIE i // Enable interrupts for SVC call
SVC 2
CMP r3, #0 // If interrupts enabled, just return
BEQ _free_return_interrupt_enabled
CPSID i // Otherwise, disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
_free_return_interrupt_enabled
BX lr
END

View File

@ -0,0 +1,74 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(2)
THUMB
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_initialize Cortex-M55/IAR */
/* 6.1.7 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to initialize a secure stack. */
/* */
/* INPUT */
/* */
/* none */
/* */
/* OUTPUT */
/* */
/* none */
/* */
/* CALLS */
/* */
/* SVC 3 */
/* */
/* CALLED BY */
/* */
/* TX_INITIALIZE_KERNEL_ENTER_EXTENSION */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* */
/**************************************************************************/
// VOID _tx_thread_secure_stack_initialize(VOID)
// {
EXPORT _tx_thread_secure_stack_initialize
_tx_thread_secure_stack_initialize:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
CPSIE i // Enable interrupts for SVC call
SVC 3
CPSID i // Disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
BX lr
END

View File

@ -0,0 +1,135 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(2)
THUMB
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_stack_build Cortex-M55/IAR */
/* 6.1.8 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function builds a stack frame on the supplied thread's stack. */
/* The stack frame results in a fake interrupt return to the supplied */
/* function pointer. */
/* */
/* INPUT */
/* */
/* thread_ptr Pointer to thread control blk */
/* function_ptr Pointer to return function */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_thread_create Create thread service */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
// {
PUBLIC _tx_thread_stack_build
_tx_thread_stack_build:
/* Build a fake interrupt frame. The form of the fake interrupt stack
on the Cortex-M should look like the following after it is built:
Stack Top:
LR Interrupted LR (LR at time of PENDSV)
r4 Initial value for r4
r5 Initial value for r5
r6 Initial value for r6
r7 Initial value for r7
r8 Initial value for r8
r9 Initial value for r9
r10 Initial value for r10
r11 Initial value for r11
r0 Initial value for r0 (Hardware stack starts here!!)
r1 Initial value for r1
r2 Initial value for r2
r3 Initial value for r3
r12 Initial value for r12
lr Initial value for lr
pc Initial value for pc
xPSR Initial value for xPSR
Stack Bottom: (higher memory address) */
LDR r2, [r0, #16] // Pickup end of stack area
BIC r2, r2, #0x7 // Align frame for 8-byte alignment
SUB r2, r2, #68 // Subtract frame size
#ifdef TX_SINGLE_MODE_SECURE
LDR r3, =0xFFFFFFFD // Build initial LR value for secure mode
#else
LDR r3, =0xFFFFFFBC // Build initial LR value to return to non-secure PSP
#endif
STR r3, [r2, #0] // Save on the stack
/* Actually build the stack frame. */
MOV r3, #0 // Build initial register value
STR r3, [r2, #4] // Store initial r4
STR r3, [r2, #8] // Store initial r5
STR r3, [r2, #12] // Store initial r6
STR r3, [r2, #16] // Store initial r7
STR r3, [r2, #20] // Store initial r8
STR r3, [r2, #24] // Store initial r9
STR r3, [r2, #28] // Store initial r10
STR r3, [r2, #32] // Store initial r11
/* Hardware stack follows. */
STR r3, [r2, #36] // Store initial r0
STR r3, [r2, #40] // Store initial r1
STR r3, [r2, #44] // Store initial r2
STR r3, [r2, #48] // Store initial r3
STR r3, [r2, #52] // Store initial r12
MOV r3, #0xFFFFFFFF // Poison EXC_RETURN value
STR r3, [r2, #56] // Store initial lr
STR r1, [r2, #60] // Store initial pc
MOV r3, #0x01000000 // Only T-bit need be set
STR r3, [r2, #64] // Store initial xPSR
/* Setup stack pointer. */
// thread_ptr -> tx_thread_stack_ptr = r2;
STR r2, [r0, #8] // Save stack pointer in thread's
// control block
BX lr // Return to caller
// }
END
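
The frame built above can also be pictured as a C structure laid over the 68 bytes reserved below the aligned stack top. The struct below only illustrates the offsets used by the STR instructions; it is not a type defined by ThreadX:

#include <stdint.h>

/* Illustrative layout of the fake interrupt frame built by _tx_thread_stack_build. */
typedef struct
{
    uint32_t exc_return;        /* offset  0: EXC_RETURN used on first dispatch      */
    uint32_t r4_to_r11[8];      /* offsets 4..32: software-saved r4-r11              */
    uint32_t r0_to_r3[4];       /* offsets 36..48: hardware-stacked r0-r3            */
    uint32_t r12;               /* offset 52                                         */
    uint32_t lr;                /* offset 56: poisoned with 0xFFFFFFFF               */
    uint32_t pc;                /* offset 60: thread entry function                  */
    uint32_t xpsr;              /* offset 64: 0x01000000, only the Thumb bit set     */
} initial_stack_frame_t;        /* 68 bytes total, matching the SUB r2, r2, #68      */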

View File

@ -0,0 +1,91 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
SECTION `.text`:CODE:NOROOT(2)
THUMB
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_system_return Cortex-M55/IAR */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is target processor specific. It is used to transfer */
/* control from a thread back to the ThreadX system. Only a */
/* minimal context is saved since the compiler assumes temp registers */
/* are going to get clobbered by a function call anyway. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_thread_schedule Thread scheduling loop */
/* */
/* CALLED BY */
/* */
/* ThreadX components */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_system_return(VOID)
// {
PUBLIC _tx_thread_system_return
_tx_thread_system_return:
/* Return to real scheduler via PendSV. Note that this routine is often
replaced with in-line assembly in tx_port.h to improve performance. */
MOV r0, #0x10000000 // Load PENDSVSET bit
MOV r1, #0xE000E000 // Load NVIC base
STR r0, [r1, #0xD04] // Set PENDSVBIT in ICSR
MRS r0, IPSR // Pickup IPSR
CMP r0, #0 // Is it a thread returning?
BNE _isr_context // If ISR, skip interrupt enable
#ifdef TX_PORT_USE_BASEPRI
MRS r1, BASEPRI // Thread context returning, pickup BASEPRI
MOV r0, #0
MSR BASEPRI, r0 // Enable interrupts
MSR BASEPRI, r1 // Restore original interrupt posture
#else
MRS r1, PRIMASK // Thread context returning, pickup PRIMASK
CPSIE i // Enable interrupts
MSR PRIMASK, r1 // Restore original interrupt posture
#endif
_isr_context:
BX lr // Return to caller
// }
END
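
The three instructions that request the context switch have a direct C equivalent: write the PENDSVSET bit into the ICSR register at 0xE000ED04. A hedged sketch, assuming a GCC/Clang-style toolchain for the inline barriers:

#include <stdint.h>

#define ICSR        (*(volatile uint32_t *) 0xE000ED04UL)   /* Interrupt Control and State Register */
#define PENDSVSET   (1UL << 28)                             /* Same bit as the 0x10000000 constant  */

static inline void request_context_switch(void)
{
    ICSR = PENDSVSET;                       /* Pend the PendSV exception              */
    __asm volatile ("dsb" ::: "memory");    /* Complete the write before continuing   */
    __asm volatile ("isb" ::: "memory");
}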

View File

@ -0,0 +1,251 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Timer */
/** */
/**************************************************************************/
/**************************************************************************/
EXTERN _tx_timer_time_slice
EXTERN _tx_timer_system_clock
EXTERN _tx_timer_current_ptr
EXTERN _tx_timer_list_start
EXTERN _tx_timer_list_end
EXTERN _tx_timer_expired_time_slice
EXTERN _tx_timer_expired
EXTERN _tx_thread_time_slice
EXTERN _tx_timer_expiration_process
EXTERN _tx_thread_current_ptr
EXTERN _tx_thread_execute_ptr
EXTERN _tx_thread_preempt_disable
SECTION `.text`:CODE:NOROOT(2)
THUMB
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_timer_interrupt Cortex-M55/IAR */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function processes the hardware timer interrupt. This */
/* processing includes incrementing the system clock and checking for */
/* time slice and/or timer expiration. If either is found, the */
/* expiration functions are called. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_timer_expiration_process Timer expiration processing */
/* _tx_thread_time_slice Time slice interrupted thread */
/* */
/* CALLED BY */
/* */
/* interrupt vector */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_timer_interrupt(VOID)
// {
PUBLIC _tx_timer_interrupt
_tx_timer_interrupt:
/* Upon entry to this routine, it is assumed that the compiler scratch registers are available
for use. */
/* Increment the system clock. */
// _tx_timer_system_clock++;
LDR r1, =_tx_timer_system_clock // Pickup address of system clock
LDR r0, [r1, #0] // Pickup system clock
ADD r0, r0, #1 // Increment system clock
STR r0, [r1, #0] // Store new system clock
/* Test for time-slice expiration. */
// if (_tx_timer_time_slice)
// {
LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
LDR r2, [r3, #0] // Pickup time-slice
CBZ r2, __tx_timer_no_time_slice // Is it non-active?
// Yes, skip time-slice processing
/* Decrement the time_slice. */
// _tx_timer_time_slice--;
SUB r2, r2, #1 // Decrement the time-slice
STR r2, [r3, #0] // Store new time-slice value
/* Check for expiration. */
// if (_tx_timer_time_slice == 0)
CBNZ r2, __tx_timer_no_time_slice // Has it expired?
// No, skip expiration processing
/* Set the time-slice expired flag. */
// _tx_timer_expired_time_slice = TX_TRUE;
LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
MOV r0, #1 // Build expired value
STR r0, [r3, #0] // Set time-slice expiration flag
// }
__tx_timer_no_time_slice:
/* Test for timer expiration. */
// if (*_tx_timer_current_ptr)
// {
LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
LDR r0, [r1, #0] // Pickup current timer
LDR r2, [r0, #0] // Pickup timer list entry
CBZ r2, __tx_timer_no_timer // Is there anything in the list?
// No, just increment the timer
/* Set expiration flag. */
// _tx_timer_expired = TX_TRUE;
LDR r3, =_tx_timer_expired // Pickup expiration flag address
MOV r2, #1 // Build expired value
STR r2, [r3, #0] // Set expired flag
B __tx_timer_done // Finished timer processing
// }
// else
// {
__tx_timer_no_timer:
/* No timer expired, increment the timer pointer. */
// _tx_timer_current_ptr++;
ADD r0, r0, #4 // Move to next timer
/* Check for wrap-around. */
// if (_tx_timer_current_ptr == _tx_timer_list_end)
LDR r3, =_tx_timer_list_end // Pickup addr of timer list end
LDR r2, [r3, #0] // Pickup list end
CMP r0, r2 // Are we at list end?
BNE __tx_timer_skip_wrap // No, skip wrap-around logic
/* Wrap to beginning of list. */
// _tx_timer_current_ptr = _tx_timer_list_start;
LDR r3, =_tx_timer_list_start // Pickup addr of timer list start
LDR r0, [r3, #0] // Set current pointer to list start
__tx_timer_skip_wrap:
STR r0, [r1, #0] // Store new current timer pointer
// }
__tx_timer_done:
/* See if anything has expired. */
// if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
// {
LDR r3, =_tx_timer_expired_time_slice // Pickup addr of expired flag
LDR r2, [r3, #0] // Pickup time-slice expired flag
CBNZ r2, __tx_something_expired // Did a time-slice expire?
// If non-zero, time-slice expired
LDR r1, =_tx_timer_expired // Pickup addr of other expired flag
LDR r0, [r1, #0] // Pickup timer expired flag
CBZ r0, __tx_timer_nothing_expired // Did a timer expire?
// No, nothing expired
__tx_something_expired:
PUSH {r0, lr} // Save the lr register on the stack
// and save r0 just to keep 8-byte alignment
/* Did a timer expire? */
// if (_tx_timer_expired)
// {
LDR r1, =_tx_timer_expired // Pickup addr of expired flag
LDR r0, [r1, #0] // Pickup timer expired flag
CBZ r0, __tx_timer_dont_activate // Check for timer expiration
// If not set, skip timer activation
/* Process timer expiration. */
// _tx_timer_expiration_process();
BL _tx_timer_expiration_process // Call the timer expiration handling routine
// }
__tx_timer_dont_activate:
/* Did time slice expire? */
// if (_tx_timer_expired_time_slice)
// {
LDR r3, =_tx_timer_expired_time_slice // Pickup addr of time-slice expired
LDR r2, [r3, #0] // Pickup the actual flag
CBZ r2, __tx_timer_not_ts_expiration // See if the flag is set
// No, skip time-slice processing
/* Time slice interrupted thread. */
// _tx_thread_time_slice();
BL _tx_thread_time_slice // Call time-slice processing
LDR r0, =_tx_thread_preempt_disable // Build address of preempt disable flag
LDR r1, [r0] // Is the preempt disable flag set?
CBNZ r1, __tx_timer_skip_time_slice // Yes, skip the PendSV logic
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r1, [r0] // Pickup the current thread pointer
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
LDR r3, [r2] // Pickup the execute thread pointer
LDR r0, =0xE000ED04 // Build address of control register
LDR r2, =0x10000000 // Build value for PendSV bit
CMP r1, r3 // Are they the same?
BEQ __tx_timer_skip_time_slice // If the same, there was no time-slice performed
STR r2, [r0] // Not the same, issue the PendSV for preemption
__tx_timer_skip_time_slice:
// }
__tx_timer_not_ts_expiration:
POP {r0, lr} // Recover lr register (r0 is just there for
// the 8-byte stack alignment)
// }
__tx_timer_nothing_expired:
DSB // Complete all memory access
BX lr // Return to caller
// }
END
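
_tx_timer_interrupt is designed to be called from the periodic tick interrupt; on the Cortex-M ports that is normally SysTick, configured by tx_initialize_low_level. A hedged sketch of wiring it in from C, assuming the vector table entry uses the CMSIS-style name SysTick_Handler:

#include "tx_api.h"

extern VOID _tx_timer_interrupt(VOID);      /* ThreadX periodic timer processing, defined above */

void SysTick_Handler(void)                  /* Handler name assumed from the vector table       */
{
    _tx_timer_interrupt();                  /* Advance the system clock, timers and time-slice  */
}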

View File

@ -0,0 +1,119 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#define TX_SOURCE_CODE
/* Include necessary system files. */
#include "tx_api.h"
#include "tx_initialize.h"
#include "tx_thread.h"
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_allocate Cortex-M55 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function checks for errors in the secure stack allocate */
/* function call. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* stack_size Size of secure stack to */
/* allocate */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* _tx_thread_secure_stack_allocate Actual stack alloc function */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txe_thread_secure_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
{
#if defined(TX_SINGLE_MODE_SECURE) || defined(TX_SINGLE_MODE_NON_SECURE)
return(TX_FEATURE_NOT_ENABLED);
#else
UINT status;
/* Default status to success. */
status = TX_SUCCESS;
/* Check for an invalid thread pointer. */
if (thread_ptr == TX_NULL)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Now check for invalid thread ID. */
else if (thread_ptr -> tx_thread_id != TX_THREAD_ID)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Check for interrupt call. */
if (TX_THREAD_GET_SYSTEM_STATE() != ((ULONG) 0))
{
/* Is call from an interrupt and not initialization? */
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
{
/* Invalid caller of this function, return appropriate error code. */
status = TX_CALLER_ERROR;
}
}
/* Determine if everything is okay. */
if (status == TX_SUCCESS)
{
/* Call actual secure stack allocate function. */
status = _tx_thread_secure_stack_allocate(thread_ptr, stack_size);
}
/* Return completion status. */
return(status);
#endif
}

View File

@ -0,0 +1,120 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#define TX_SOURCE_CODE
/* Include necessary system files. */
#include "tx_api.h"
#include "tx_initialize.h"
#include "tx_thread.h"
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _txe_thread_secure_stack_free Cortex-M55 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function checks for errors in the secure stack free */
/* function call. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* _tx_thread_secure_stack_free Actual stack free function */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txe_thread_secure_stack_free(TX_THREAD *thread_ptr)
{
#if defined(TX_SINGLE_MODE_SECURE) || defined(TX_SINGLE_MODE_NON_SECURE)
return(TX_FEATURE_NOT_ENABLED);
#else
UINT status;
/* Default status to success. */
status = TX_SUCCESS;
/* Check for an invalid thread pointer. */
if (thread_ptr == TX_NULL)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Now check for invalid thread ID. */
else if (thread_ptr -> tx_thread_id != TX_THREAD_ID)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Check for interrupt call. */
if (TX_THREAD_GET_SYSTEM_STATE() != ((ULONG) 0))
{
/* Is call from an interrupt and not initialization? */
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
{
/* Invalid caller of this function, return appropriate error code. */
status = TX_CALLER_ERROR;
}
}
/* Determine if everything is okay. */
if (status == TX_SUCCESS)
{
/* Call actual secure stack free function. */
status = _tx_thread_secure_stack_free(thread_ptr);
}
/* Return completion status. */
return(status);
#endif
}
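
Taken together, the two error-checking wrappers give application code a simple call sequence: allocate a secure stack after the thread has been created, and free it before the thread is deleted. A hedged usage sketch (the prototypes are the ones declared in tx_port.h; the thread name and stack size are illustrative):

#include "tx_api.h"

extern TX_THREAD  demo_thread;      /* Assumed to have been created elsewhere */

void demo_secure_stack_usage(void)
{
    UINT status;

    /* Request a 512-byte secure-side stack; sizes outside
       TX_THREAD_SECURE_STACK_MINIMUM..MAXIMUM return TX_SIZE_ERROR. */
    status = _txe_thread_secure_stack_allocate(&demo_thread, 512);

    if (status == TX_SUCCESS)
    {
        /* ... the thread may now call secure functions that need a secure stack ... */

        /* Release the secure stack when it is no longer needed. */
        status = _txe_thread_secure_stack_free(&demo_thread);
    }
}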

View File

@ -0,0 +1,21 @@
target_sources(${PROJECT_NAME} PRIVATE
${CMAKE_CURRENT_LIST_DIR}/src/txe_thread_secure_stack_allocate.c
${CMAKE_CURRENT_LIST_DIR}/src/txe_thread_secure_stack_free.c
${CMAKE_CURRENT_LIST_DIR}/src/tx_initialize_low_level.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_context_restore.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_context_save.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_interrupt_control.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_interrupt_disable.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_interrupt_restore.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_schedule.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_secure_stack.c
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_secure_stack_allocate.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_secure_stack_free.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_stack_build.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_system_return.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_timer_interrupt.S
)
target_include_directories(${PROJECT_NAME} PUBLIC
inc
)

View File

@ -0,0 +1,649 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Port Specific */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M85/AC6 */
/* 6.1.11 */
/* */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This file contains data type definitions that make the ThreadX */
/* real-time kernel function identically on a variety of different */
/* processor architectures. For example, the size or number of bits */
/* in an "int" data type vary between microprocessor architectures and */
/* even C compilers for the same microprocessor. ThreadX does not */
/* directly use native C data types. Instead, ThreadX creates its */
/* own special types that can be mapped to actual data types by this */
/* file to guarantee consistency in the interface and functionality. */
/* */
/* This file replaces the previous Cortex-M85 files. It unifies */
/* the Cortex-M85 compilers into one common file. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 03-02-2021 Scott Larson Modified comment(s), added */
/* ULONG64_DEFINED, */
/* resulting in version 6.1.5 */
/* 06-02-2021 Scott Larson Modified comment(s), removed */
/* unneeded header file, funcs */
/* set_control and get_control */
/* changed to inline, */
/* added symbol to enable */
/* stack error handler, */
/* resulting in version 6.1.7 */
/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* 01-31-2022 Scott Larson Modified comment(s), unified */
/* this file across compilers, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
/* 04-25-2022 Scott Larson Modified comments and added */
/* volatile to registers, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
#ifndef TX_PORT_H
#define TX_PORT_H
/* Determine if the optional ThreadX user define file should be used. */
#ifdef TX_INCLUDE_USER_DEFINE_FILE
/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
#endif /* TX_INCLUDE_USER_DEFINE_FILE */
/* Define compiler library include files. */
#include <stdlib.h>
#include <string.h>
#ifdef __ICCARM__
#include <intrinsics.h> /* IAR Intrinsics */
#define __asm__ __asm /* Define to make all inline asm from each compiler look similar */
#define _tx_control_get __get_CONTROL
#define _tx_control_set __set_CONTROL
#define _tx_ipsr_get __get_IPSR
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#endif /* __ICCARM__ */
#ifdef __ARMCOMPILER_VERSION
#include <arm_compat.h>
#endif
/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
typedef unsigned char UCHAR;
typedef int INT;
typedef unsigned int UINT;
typedef long LONG;
typedef unsigned long ULONG;
typedef unsigned long long ULONG64;
typedef short SHORT;
typedef unsigned short USHORT;
#define ULONG64_DEFINED
/* Function prototypes for this port. */
struct TX_THREAD_STRUCT;
UINT _txe_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *thread_ptr, ULONG stack_size);
UINT _txe_thread_secure_stack_free(struct TX_THREAD_STRUCT *thread_ptr);
UINT _tx_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *tx_thread, ULONG stack_size);
UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
/* Define the system API mappings based on the error checking
selected by the user. Note: this section is only applicable to
application source code, hence the conditional that turns off this
stuff when the include file is processed by the ThreadX source. */
#ifndef TX_SOURCE_CODE
/* Determine if error checking is desired. If so, map API functions
to the appropriate error checking front-ends. Otherwise, map API
functions to the core functions that actually perform the work.
Note: error checking is enabled by default. */
#ifdef TX_DISABLE_ERROR_CHECKING
/* Services without error checking. */
#define tx_thread_secure_stack_allocate _tx_thread_secure_stack_allocate
#define tx_thread_secure_stack_free _tx_thread_secure_stack_free
#else
/* Services with error checking. */
#define tx_thread_secure_stack_allocate _txe_thread_secure_stack_allocate
#define tx_thread_secure_stack_free _txe_thread_secure_stack_free
#endif /* TX_DISABLE_ERROR_CHECKING */
#endif /* TX_SOURCE_CODE */
/* This port has a usage fault handler in _tx_initialize_low_level for stack exceptions. */
#define TX_PORT_THREAD_STACK_ERROR_HANDLING
/* Define the priority levels for ThreadX. Legal values range
from 32 to 1024 and MUST be evenly divisible by 32. */
#ifndef TX_MAX_PRIORITIES
#define TX_MAX_PRIORITIES 32
#endif
/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
thread creation is less than this value, the thread create call will return an error. */
#ifndef TX_MINIMUM_STACK
#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
#endif
/* Define the system timer thread's default stack size and priority. These are only applicable
if TX_TIMER_PROCESS_IN_ISR is not defined. */
#ifndef TX_TIMER_THREAD_STACK_SIZE
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
#ifndef TX_TIMER_THREAD_PRIORITY
#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
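/* Example (illustrative): both values can be overridden before this point, either
   on the compiler command line or in tx_user.h when TX_INCLUDE_USER_DEFINE_FILE
   is defined:

   #define TX_TIMER_THREAD_STACK_SIZE   2048
   #define TX_TIMER_THREAD_PRIORITY     1
*/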
/* Define various constants for the ThreadX Cortex-M port. */
#define TX_INT_DISABLE 1 /* Disable interrupts */
#define TX_INT_ENABLE 0 /* Enable interrupts */
/* Define the clock source for trace event entry time stamp. The following two items are port specific.
 For example, if the time source is at the address 0x0a800024 and is 16 bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
#endif
/* Define the port specific options for the _tx_build_options variable. This variable indicates
how the ThreadX library was built. */
#define TX_PORT_SPECIFIC_BUILD_OPTIONS (0)
/* Define the in-line initialization constant so that modules with in-line
initialization capabilities can prevent their initialization from being
a function call. */
#ifdef TX_MISRA_ENABLE
#define TX_DISABLE_INLINE
#else
#define TX_INLINE_INITIALIZATION
#endif
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
logic. */
#ifndef TX_MISRA_ENABLE
#ifdef TX_ENABLE_STACK_CHECKING
#undef TX_DISABLE_STACK_FILLING
#endif
#endif
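/* Example sketch (application code, names hypothetical): build the ThreadX library
   and the application with TX_ENABLE_STACK_CHECKING defined, then register a
   run-time callback so stack errors are reported to the application:

       void    my_stack_error_handler(TX_THREAD *thread_ptr)
       {
           Log the faulting thread and take recovery action here.
       }

       tx_thread_stack_error_notify(my_stack_error_handler);
*/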
/* Define the TX_THREAD control block extensions for this port. The main reason
for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
#define TX_THREAD_EXTENSION_0
#define TX_THREAD_EXTENSION_1
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
/* IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context; \
VOID *tx_thread_iar_tls_pointer;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_iar_tls_pointer;
#endif
#else
/* No IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
#define TX_BLOCK_POOL_EXTENSION
#define TX_BYTE_POOL_EXTENSION
#define TX_EVENT_FLAGS_GROUP_EXTENSION
#define TX_MUTEX_EXTENSION
#define TX_QUEUE_EXTENSION
#define TX_SEMAPHORE_EXTENSION
#define TX_TIMER_EXTENSION
/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
#define TX_THREAD_USER_EXTENSION
#endif
/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
tx_thread_shell_entry, and tx_thread_terminate. */
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
void *_tx_iar_create_per_thread_tls_area(void);
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
void __iar_Initlocks(void);
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = _tx_iar_create_per_thread_tls_area();
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0); \
if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0);
#endif
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION do {__iar_Initlocks();} while(0);
#else /* No IAR library support. */
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* Define the size of the secure stack for the timer thread and use the extension to allocate the secure stack. */
#define TX_TIMER_THREAD_SECURE_STACK_SIZE 256
#define TX_TIMER_INITIALIZE_EXTENSION(status) _tx_thread_secure_stack_allocate(&_tx_timer_thread, TX_TIMER_THREAD_SECURE_STACK_SIZE);
#endif
#if defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__)
#ifdef TX_MISRA_ENABLE
ULONG _tx_misra_control_get(void);
void _tx_misra_control_set(ULONG value);
ULONG _tx_misra_fpccr_get(void);
void _tx_misra_vfp_touch(void);
#else /* TX_MISRA_ENABLE not defined */
#ifdef __GNUC__ /* GCC and ARM Compiler 6 */
__attribute__( ( always_inline ) ) static inline ULONG _tx_control_get(void)
{
ULONG control_value;
__asm__ volatile (" MRS %0,CONTROL ": "=r" (control_value) );
return(control_value);
}
__attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG control_value)
{
__asm__ volatile (" MSR CONTROL,%0": : "r" (control_value): "memory" );
}
#endif /* __GNUC__ */
/* Touch VFP register in order to flush. Works for AC6/GCC/IAR compilers. */
#define TX_VFP_TOUCH() __asm__ volatile ("VMOV.F32 s0, s0");
#endif /* TX_MISRA_ENABLE */
/* A completed thread falls into _thread_shell_entry and we can simply deactivate the FPU via CONTROL.FPCA
in order to ensure no lazy stacking will occur. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
}
#else
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
}
#endif
/* A thread can be terminated by another thread, so we first check if it's self-terminating and not in an ISR.
If so, deactivate the FPU via CONTROL.FPCA. Otherwise we are in an interrupt or another thread is terminating
this one, so if the FPCCR.LSPACT bit is set, we need to save the CONTROL.FPCA state, touch the FPU to flush
the lazy FPU save, then restore the CONTROL.FPCA state. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
} \
} \
}
#else
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = _tx_misra_fpccr_get(); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
_tx_misra_vfp_touch(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
} \
} \
}
#endif
#else /* No VFP in use */
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
#endif /* defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__) */
/* Define the ThreadX object creation extensions for the remaining objects. */
#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
/* Define the ThreadX object deletion extensions for the remaining objects. */
#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
/* Define the get system state macro. */
#ifndef TX_THREAD_GET_SYSTEM_STATE
#ifndef TX_MISRA_ENABLE
#if defined(__GNUC__) /* GCC and AC6 */
__attribute__( ( always_inline ) ) static inline UINT _tx_ipsr_get(void)
{
UINT ipsr_value;
__asm__ volatile (" MRS %0,IPSR ": "=r" (ipsr_value) );
return(ipsr_value);
}
#endif /* GCC and AC6 IPSR_get function. */
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_ipsr_get())
#else /* TX_MISRA_ENABLE is defined, use MISRA function. */
ULONG _tx_misra_ipsr_get(VOID);
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_misra_ipsr_get())
#endif /* TX_MISRA_ENABLE */
#endif /* TX_THREAD_GET_SYSTEM_STATE */
/* Define the check for whether or not to call the _tx_thread_system_return function. A non-zero value
indicates that _tx_thread_system_return should not be called. This overrides the definition in tx_thread.h
for Cortex-M so that we don't waste time checking the _tx_thread_system_state variable that is always
zero after initialization for Cortex-M ports. */
#ifndef TX_THREAD_SYSTEM_RETURN_CHECK
#define TX_THREAD_SYSTEM_RETURN_CHECK(c) (c) = ((ULONG) _tx_thread_preempt_disable);
#endif
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* Initialize secure stacks for threads calling secure functions. */
extern void _tx_thread_secure_stack_initialize(void);
#define TX_INITIALIZE_KERNEL_ENTER_EXTENSION _tx_thread_secure_stack_initialize();
#endif
/* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
prevent early scheduling on Cortex-M parts. */
#define TX_PORT_SPECIFIC_POST_INITIALIZATION _tx_thread_preempt_disable++;
#ifndef TX_DISABLE_INLINE
/* Define the TX_LOWEST_SET_BIT_CALCULATE macro for each compiler. */
#ifdef __ICCARM__ /* IAR Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = (UINT) __CLZ(__RBIT((m)));
#elif defined(__GNUC__) /* GCC and AC6 Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) __asm__ volatile (" RBIT %0,%1 ": "=r" (m) : "r" (m) ); \
__asm__ volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) );
#else
#error "Compiler not supported."
#endif
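/* Example (illustrative): with m = 0x00000008 the macro above computes b = 3, the
   bit number of the lowest set bit. Note that the GCC/AC6 form overwrites m with
   its bit-reversed value as a side effect. */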
/* Define the interrupt disable/restore macros. */
__attribute__( ( always_inline ) ) static inline UINT __get_interrupt_posture(void)
{
UINT posture;
#ifdef TX_PORT_USE_BASEPRI
__asm__ volatile ("MRS %0, BASEPRI ": "=r" (posture));
#else
__asm__ volatile ("MRS %0, PRIMASK ": "=r" (posture));
#endif
return(posture);
}
#ifdef TX_PORT_USE_BASEPRI
__attribute__( ( always_inline ) ) static inline void __set_basepri_value(UINT basepri_value)
{
__asm__ volatile ("MSR BASEPRI,%0 ": : "r" (basepri_value));
}
#else
__attribute__( ( always_inline ) ) static inline void __enable_interrupts(void)
{
__asm__ volatile ("CPSIE i": : : "memory");
}
#endif
__attribute__( ( always_inline ) ) static inline void __restore_interrupt(UINT int_posture)
{
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(int_posture);
#else
__asm__ volatile ("MSR PRIMASK,%0": : "r" (int_posture): "memory");
#endif
}
__attribute__( ( always_inline ) ) static inline UINT __disable_interrupts(void)
{
UINT int_posture;
int_posture = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(TX_PORT_BASEPRI);
#else
__asm__ volatile ("CPSID i" : : : "memory");
#endif
return(int_posture);
}
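/* Note (illustrative): by default the functions above mask all interrupts via
   PRIMASK. Building with TX_PORT_USE_BASEPRI defined (and TX_PORT_BASEPRI set to
   a preemption priority value, for example 0x40) raises BASEPRI instead, masking
   only interrupts at that priority value and numerically higher; interrupts left
   enabled this way must not use ThreadX services. */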
__attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_inline(void)
{
UINT interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
*((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_tx_ipsr_get() == 0)
{
interrupt_save = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(0);
#else
__enable_interrupts();
#endif
__restore_interrupt(interrupt_save);
}
}
#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
#define TX_DISABLE interrupt_save = __disable_interrupts();
#define TX_RESTORE __restore_interrupt(interrupt_save);
/* Redefine _tx_thread_system_return for improved performance. */
#define _tx_thread_system_return _tx_thread_system_return_inline
#else /* TX_DISABLE_INLINE is defined */
UINT _tx_thread_interrupt_disable(VOID);
VOID _tx_thread_interrupt_restore(UINT previous_posture);
#define TX_INTERRUPT_SAVE_AREA register UINT interrupt_save;
#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
#endif /* TX_DISABLE_INLINE */
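/* Usage sketch (illustrative): the classic ThreadX critical-section pattern built
   from the macros above, valid in both the inline and non-inline configurations:

       TX_INTERRUPT_SAVE_AREA

       TX_DISABLE
       Touch the protected kernel or application data here.
       TX_RESTORE
*/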
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
"Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M85/AC6 Version 6.1.10 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
#else
extern CHAR _tx_version_id[];
#endif
#endif
#endif

View File

@ -0,0 +1,61 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* COMPONENT DEFINITION RELEASE */
/* */
/* tx_secure_interface.h PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This file defines the ThreadX secure thread stack components, */
/* including data types and external references. */
/* It is assumed that tx_api.h and tx_port.h have already been */
/* included. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
#ifndef TX_SECURE_INTERFACE_H
#define TX_SECURE_INTERFACE_H
/* Define internal secure thread stack function prototypes. */
extern UINT _tx_thread_secure_mode_stack_initialize(void);
extern UINT _tx_thread_secure_mode_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size);
extern UINT _tx_thread_secure_mode_stack_free(TX_THREAD *thread_ptr);
extern void _tx_thread_secure_stack_initialize(void);
extern void _tx_thread_secure_stack_context_save(TX_THREAD *thread_ptr);
extern void _tx_thread_secure_stack_context_restore(TX_THREAD *thread_ptr);
#endif

View File

@ -0,0 +1,228 @@
Microsoft's Azure RTOS ThreadX for Cortex-M85
Using the AC6 Tools in Keil uVision
1. Import the ThreadX Projects
In order to build the ThreadX library and the ThreadX demonstration, first open
the AzureRTOS.uvmpw workspace (located in the "example_build" directory)
in Keil.
2. Building the ThreadX run-time Library
Building the ThreadX library is easy; simply set the ThreadX_Library project
as active, and then build the library. You should now observe the compilation
and assembly of the ThreadX library. This project build produces the ThreadX
library file ThreadX_Library.lib.
Files tx_thread_stack_error_handler.c and tx_thread_stack_error_notify.c
replace the common files of the same name.
3. Demonstration System
The ThreadX demonstration is designed to execute under the Keil debugger on the
FVP_MPS2_Cortex-M85_MDK simulator.
Building the demonstration is easy; simply select the "Batch Build" button.
You should now observe the compilation and assembly of the ThreadX demonstration in
both the demo_secure_zone and demo_threadx_non-secure_zone projects.
Then click the Start/Stop Debug Session button to start the simulator and begin debugging.
You are now ready to execute the ThreadX demonstration.
4. System Initialization
The entry point in ThreadX for the Cortex-M85 using AC6 tools uses the standard AC6
Cortex-M85 reset sequence. From the reset vector the C runtime will be initialized.
The ThreadX tx_initialize_low_level.s file is responsible for setting up
various system data structures, the vector area, and a periodic timer interrupt
source.
In addition, _tx_initialize_low_level determines the first available
address for use by the application, which is supplied as the sole input
parameter to your application definition function, tx_application_define.
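For reference, a minimal application definition function might look like the
following sketch (the byte pool object and its size are illustrative and not part
of this port):

TX_BYTE_POOL    demo_pool;

void    tx_application_define(void *first_unused_memory)
{
    /* first_unused_memory is the address determined by _tx_initialize_low_level.
       Create application objects here, typically carving their memory from a
       byte pool placed at that address.  */
    tx_byte_pool_create(&demo_pool, "demo pool", first_unused_memory, 8192);
}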
5. Register Usage and Stack Frames
The following defines the saved context stack frames for context switches
that occur as a result of interrupt handling or from thread-level API calls.
All suspended threads have the same stack frame in the Cortex-M85 version of
ThreadX. The top of the suspended thread's stack is pointed to by
tx_thread_stack_ptr in the associated thread control block TX_THREAD.
Non-FPU Stack Frame:
Stack Offset Stack Contents
0x00 LR Interrupted LR (LR at time of PENDSV)
0x04 r4 Software stacked GP registers
0x08 r5
0x0C r6
0x10 r7
0x14 r8
0x18 r9
0x1C r10
0x20 r11
0x24 r0 Hardware stacked registers
0x28 r1
0x2C r2
0x30 r3
0x34 r12
0x38 lr
0x3C pc
0x40 xPSR
FPU Stack Frame (only interrupted thread with FPU enabled):
Stack Offset Stack Contents
0x00 LR Interrupted LR (LR at time of PENDSV)
0x04 s16 Software stacked FPU registers
0x08 s17
0x0C s18
0x10 s19
0x14 s20
0x18 s21
0x1C s22
0x20 s23
0x24 s24
0x28 s25
0x2C s26
0x30 s27
0x34 s28
0x38 s29
0x3C s30
0x40 s31
0x44 r4 Software stacked registers
0x48 r5
0x4C r6
0x50 r7
0x54 r8
0x58 r9
0x5C r10
0x60 r11
0x64 r0 Hardware stacked registers
0x68 r1
0x6C r2
0x70 r3
0x74 r12
0x78 lr
0x7C pc
0x80 xPSR
0x84 s0 Hardware stacked FPU registers
0x88 s1
0x8C s2
0x90 s3
0x94 s4
0x98 s5
0x9C s6
0xA0 s7
0xA4 s8
0xA8 s9
0xAC s10
0xB0 s11
0xB4 s12
0xB8 s13
0xBC s14
0xC0 s15
0xC4 fpscr
6. Improving Performance
To make ThreadX and the application(s) run faster, you can enable
all compiler optimizations.
In addition, you can eliminate the ThreadX basic API error checking by
compiling your application code with the symbol TX_DISABLE_ERROR_CHECKING
defined.
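For example, with the AC6 tools this can be done on the compiler command line:

    -DTX_DISABLE_ERROR_CHECKING

or by defining the symbol in tx_user.h when TX_INCLUDE_USER_DEFINE_FILE is used.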
7. Interrupt Handling
ThreadX provides complete and high-performance interrupt handling for Cortex-M85
targets. The requirements are defined in the
following sub-sections:
7.1 Vector Area
The Cortex-M85 vectors start at the label __Vectors or similar. The application may modify
the vector area according to its needs. There is code in tx_initialize_low_level() that will
configure the vector base register.
7.2 Managed Interrupts
ISRs can be written completely in C (or assembly language) without any calls to
_tx_thread_context_save or _tx_thread_context_restore. These ISRs are allowed access to the
ThreadX API that is available to ISRs.
ISRs written in C will take the form (where "your_C_isr" is an entry in the vector table):
void your_C_isr(void)
{
/* ISR processing goes here, including any needed function calls. */
}
ISRs written in assembly language will take the form:
.global your_assembly_isr
.thumb_func
your_assembly_isr:
; VOID your_assembly_isr(VOID)
; {
PUSH {r0, lr}
;
; /* Do interrupt handler work here */
; /* BL <your interrupt routine in C> */
POP {r0, lr}
BX lr
; }
Note: the Cortex-M85 requires exception handlers to be thumb labels; this implies bit 0 must be set.
To accomplish this, the declaration of the label has to be preceded by the assembler directive
.thumb_func to instruct the linker to create thumb labels. The label __tx_IntHandler needs to
be inserted in the correct location in the interrupt vector table. This table is typically
located in either your runtime startup file or in the tx_initialize_low_level.s file.
8. FPU Support
ThreadX for Cortex-M85 provides automatic ("lazy") VFP support, which means that application threads
can simply use the VFP and ThreadX automatically maintains the VFP registers as part of the thread
context.
9. Revision History
For generic code revision information, please refer to the readme_threadx_generic.txt
file, which is included in your distribution. The following details the revision
information associated with this specific port of ThreadX:
06-02-2021 Release 6.1.7 changes:
tx_port.h Remove unneeded include file
tx_thread_secure_stack_initialize.S New file
tx_thread_schedule.S Added secure stack initialize to SVC handler
tx_thread_secure_stack.c Fixed stack pointer save, initialize in handler mode
04-02-2021 Release 6.1.6 changes:
tx_port.h Updated macro definition
tx_thread_schedule.s Added low power support
03-02-2021 The following files were changed/added for version 6.1.5:
tx_port.h Added ULONG64_DEFINED
09-30-2020 Initial ThreadX 6.1 version for Cortex-M85 using AC6 tools.
Copyright (c) 1996-2020 Microsoft Corporation
https://azure.com/rtos

View File

@ -0,0 +1,278 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Initialize */
/** */
/**************************************************************************/
/**************************************************************************/
SYSTEM_CLOCK = 6000000
SYSTICK_CYCLES = ((SYSTEM_CLOCK / 100) -1)
/* Setup the stack and heap areas. */
STACK_SIZE = 0x00000400
HEAP_SIZE = 0x00000000
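/* Note (assumption: a 100Hz ThreadX tick is desired): SYSTICK_CYCLES above is
   SYSTEM_CLOCK/100 - 1, so with SYSTEM_CLOCK = 6000000 the SysTick reload value
   produces a 10ms periodic tick. Change SYSTEM_CLOCK to match the actual core
   clock of the target if it differs. */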
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_initialize_low_level Cortex-M85/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for any low-level processor */
/* initialization, including setting up interrupt vectors, setting */
/* up a periodic timer interrupt source, saving the system stack */
/* pointer for use in ISR processing later, and finding the first */
/* available RAM memory address for tx_application_define. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_initialize_low_level(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_initialize_low_level
.thumb_func
.type _tx_initialize_low_level, function
_tx_initialize_low_level:
/* Disable interrupts during ThreadX initialization. */
CPSID i
/* Set base of available memory to end of non-initialised RAM area. */
LDR r0, =_tx_initialize_unused_memory // Build address of unused memory pointer
LDR r1, =Image$$ARM_LIB_STACK$$ZI$$Limit // Build first free address
ADD r1, r1, #4 //
STR r1, [r0] // Setup first unused memory pointer
/* Setup Vector Table Offset Register. */
MOV r0, #0xE000E000 // Build address of NVIC registers
LDR r1, =__Vectors // Pickup address of vector table
STR r1, [r0, #0xD08] // Set vector table address
/* Enable the cycle count register. */
LDR r0, =0xE0001000 // Build address of DWT register
LDR r1, [r0] // Pickup the current value
ORR r1, r1, #1 // Set the CYCCNTENA bit
STR r1, [r0] // Enable the cycle count register
/* Set system stack pointer from vector value. */
LDR r0, =_tx_thread_system_stack_ptr // Build address of system stack pointer
LDR r1, =__Vectors // Pickup address of vector table
LDR r1, [r1] // Pickup reset stack pointer
STR r1, [r0] // Save system stack pointer
/* Configure SysTick. */
MOV r0, #0xE000E000 // Build address of NVIC registers
LDR r1, =SYSTICK_CYCLES
STR r1, [r0, #0x14] // Setup SysTick Reload Value
MOV r1, #0x7 // Build SysTick Control Enable Value
STR r1, [r0, #0x10] // Setup SysTick Control
/* Configure handler priorities. */
LDR r1, =0x00000000 // Rsrv, UsgF, BusF, MemM
STR r1, [r0, #0xD18] // Setup System Handlers 4-7 Priority Registers
LDR r1, =0xFF000000 // SVCl, Rsrv, Rsrv, Rsrv
STR r1, [r0, #0xD1C] // Setup System Handlers 8-11 Priority Registers
// Note: SVC must be lowest priority, which is 0xFF
LDR r1, =0x40FF0000 // SysT, PnSV, Rsrv, DbgM
STR r1, [r0, #0xD20] // Setup System Handlers 12-15 Priority Registers
// Note: PnSV must be lowest priority, which is 0xFF
/* Return to caller. */
BX lr
// }
/* Define shells for each of the unused vectors. */
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_BadHandler
.thumb_func
.type __tx_BadHandler, function
__tx_BadHandler:
B __tx_BadHandler
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_IntHandler
.thumb_func
.type __tx_IntHandler, function
__tx_IntHandler:
// VOID InterruptHandler (VOID)
// {
PUSH {r0,lr} // Save LR (and dummy r0 to maintain stack alignment)
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_enter // Call the ISR enter function
#endif
/* Do interrupt handler work here */
/* .... */
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_exit // Call the ISR exit function
#endif
POP {r0,lr}
BX LR
// }
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global SysTick_Handler
.thumb_func
.type SysTick_Handler, function
SysTick_Handler:
// VOID TimerInterruptHandler (VOID)
// {
PUSH {r0,lr} // Save LR (and dummy r0 to maintain stack alignment)
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_enter // Call the ISR enter function
#endif
BL _tx_timer_interrupt
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_exit // Call the ISR exit function
#endif
POP {r0,lr}
BX LR
// }
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global HardFault_Handler
.thumb_func
.type HardFault_Handler, function
HardFault_Handler:
B HardFault_Handler
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global UsageFault_Handler
.thumb_func
.type UsageFault_Handler, function
UsageFault_Handler:
CPSID i // Disable interrupts
// Check for stack limit fault
LDR r0, =0xE000ED28 // CFSR address
LDR r1,[r0] // Pick up CFSR
TST r1, #0x00100000 // Check for Stack Overflow
_unhandled_usage_loop:
BEQ _unhandled_usage_loop // If not stack overflow then loop
// Handle stack overflow
STR r1, [r0] // Clear CFSR flag(s)
#ifdef __ARM_PCS_VFP
LDR r0, =0xE000EF34 // Cleanup FPU context: Load FPCCR address
LDR r1, [r0] // Load FPCCR
BIC r1, r1, #1 // Clear the lazy preservation active bit
STR r1, [r0] // Store the value
#endif
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r0,[r0] // Pick up current thread pointer
PUSH {r0,lr} // Save LR (and r0 to maintain stack alignment)
BL _tx_thread_stack_error_handler // Call ThreadX/user handler
POP {r0,lr} // Restore LR and dummy reg
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
// Call the thread exit function to indicate the thread is no longer executing.
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
#endif
MOV r1, #0 // Build NULL value
LDR r0, =_tx_thread_current_ptr // Pickup address of current thread pointer
STR r1, [r0] // Clear current thread pointer
// Return from UsageFault_Handler exception
LDR r0, =0xE000ED04 // Load ICSR
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
CPSIE i // Enable interrupts
BX lr // Return from exception
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_NMIHandler
.thumb_func
.type __tx_NMIHandler, function
__tx_NMIHandler:
B __tx_NMIHandler
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_DBGHandler
.thumb_func
.type __tx_DBGHandler, function
__tx_DBGHandler:
B __tx_DBGHandler
.end

View File

@ -0,0 +1,719 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** ThreadX MISRA Compliance */
/** */
/**************************************************************************/
/**************************************************************************/
#define SHT_PROGBITS 0x1
.global __aeabi_memset
.global _tx_thread_current_ptr
.global _tx_thread_interrupt_disable
.global _tx_thread_interrupt_restore
.global _tx_thread_stack_analyze
.global _tx_thread_stack_error_handler
.global _tx_thread_system_state
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_trace_buffer_current_ptr
.global _tx_trace_buffer_end_ptr
.global _tx_trace_buffer_start_ptr
.global _tx_trace_event_enable_bits
.global _tx_trace_full_notify_function
.global _tx_trace_header_ptr
#endif
.global _tx_misra_always_true
.global _tx_misra_block_pool_to_uchar_pointer_convert
.global _tx_misra_byte_pool_to_uchar_pointer_convert
.global _tx_misra_char_to_uchar_pointer_convert
.global _tx_misra_const_char_to_char_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_entry_to_uchar_pointer_convert
#endif
.global _tx_misra_indirect_void_to_uchar_pointer_convert
.global _tx_misra_memset
.global _tx_misra_message_copy
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_object_to_uchar_pointer_convert
#endif
.global _tx_misra_pointer_to_ulong_convert
.global _tx_misra_status_get
.global _tx_misra_thread_stack_check
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_time_stamp_get
#endif
.global _tx_misra_timer_indirect_to_void_pointer_convert
.global _tx_misra_timer_pointer_add
.global _tx_misra_timer_pointer_dif
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_trace_event_insert
#endif
.global _tx_misra_uchar_pointer_add
.global _tx_misra_uchar_pointer_dif
.global _tx_misra_uchar_pointer_sub
.global _tx_misra_uchar_to_align_type_pointer_convert
.global _tx_misra_uchar_to_block_pool_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_uchar_to_entry_pointer_convert
.global _tx_misra_uchar_to_header_pointer_convert
#endif
.global _tx_misra_uchar_to_indirect_byte_pool_pointer_convert
.global _tx_misra_uchar_to_indirect_uchar_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_uchar_to_object_pointer_convert
#endif
.global _tx_misra_uchar_to_void_pointer_convert
.global _tx_misra_ulong_pointer_add
.global _tx_misra_ulong_pointer_dif
.global _tx_misra_ulong_pointer_sub
.global _tx_misra_ulong_to_pointer_convert
.global _tx_misra_ulong_to_thread_pointer_convert
.global _tx_misra_user_timer_pointer_get
.global _tx_misra_void_to_block_pool_pointer_convert
.global _tx_misra_void_to_byte_pool_pointer_convert
.global _tx_misra_void_to_event_flags_pointer_convert
.global _tx_misra_void_to_indirect_uchar_pointer_convert
.global _tx_misra_void_to_mutex_pointer_convert
.global _tx_misra_void_to_queue_pointer_convert
.global _tx_misra_void_to_semaphore_pointer_convert
.global _tx_misra_void_to_thread_pointer_convert
.global _tx_misra_void_to_uchar_pointer_convert
.global _tx_misra_void_to_ulong_pointer_convert
.global _tx_misra_ipsr_get
.global _tx_misra_control_get
.global _tx_misra_control_set
#ifdef __ARM_FP
.global _tx_misra_fpccr_get
.global _tx_misra_vfp_touch
#endif
.global _tx_misra_event_flags_group_not_used
.global _tx_misra_event_flags_set_notify_not_used
.global _tx_misra_queue_not_used
.global _tx_misra_queue_send_notify_not_used
.global _tx_misra_semaphore_not_used
.global _tx_misra_semaphore_put_notify_not_used
.global _tx_misra_thread_entry_exit_notify_not_used
.global _tx_misra_thread_not_used
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_memset(VOID *ptr, UINT value, UINT size); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.align 4
.syntax unified
.thumb_func
_tx_misra_memset:
PUSH {R4,LR}
MOVS R4,R0
MOVS R0,R2
MOVS R2,R1
MOVS R1,R0
MOVS R0,R4
BL __aeabi_memset
POP {R4,PC} // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UCHAR *_tx_misra_uchar_pointer_add(UCHAR *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_add:
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UCHAR *_tx_misra_uchar_pointer_sub(UCHAR *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_sub:
RSBS R1,R1,#+0
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_dif:
SUBS R0,R0,R1
BX LR // return
/************************************************************************************************************************************/
/************************************************************************************************************************************/
/** */
/** This single function serves all of the below prototypes. */
/** */
/** ULONG _tx_misra_pointer_to_ulong_convert(VOID *ptr); */
/** VOID *_tx_misra_ulong_to_pointer_convert(ULONG input); */
/** UCHAR **_tx_misra_indirect_void_to_uchar_pointer_convert(VOID **return_ptr); */
/** UCHAR **_tx_misra_uchar_to_indirect_uchar_pointer_convert(UCHAR *pointer); */
/** UCHAR *_tx_misra_block_pool_to_uchar_pointer_convert(TX_BLOCK_POOL *pool); */
/** TX_BLOCK_POOL *_tx_misra_void_to_block_pool_pointer_convert(VOID *pointer); */
/** UCHAR *_tx_misra_void_to_uchar_pointer_convert(VOID *pointer); */
/** TX_BLOCK_POOL *_tx_misra_uchar_to_block_pool_pointer_convert(UCHAR *pointer); */
/** UCHAR **_tx_misra_void_to_indirect_uchar_pointer_convert(VOID *pointer); */
/** TX_BYTE_POOL *_tx_misra_void_to_byte_pool_pointer_convert(VOID *pointer); */
/** UCHAR *_tx_misra_byte_pool_to_uchar_pointer_convert(TX_BYTE_POOL *pool); */
/** ALIGN_TYPE *_tx_misra_uchar_to_align_type_pointer_convert(UCHAR *pointer); */
/** TX_BYTE_POOL **_tx_misra_uchar_to_indirect_byte_pool_pointer_convert(UCHAR *pointer); */
/** TX_EVENT_FLAGS_GROUP *_tx_misra_void_to_event_flags_pointer_convert(VOID *pointer); */
/** ULONG *_tx_misra_void_to_ulong_pointer_convert(VOID *pointer); */
/** TX_MUTEX *_tx_misra_void_to_mutex_pointer_convert(VOID *pointer); */
/** TX_QUEUE *_tx_misra_void_to_queue_pointer_convert(VOID *pointer); */
/** TX_SEMAPHORE *_tx_misra_void_to_semaphore_pointer_convert(VOID *pointer); */
/** VOID *_tx_misra_uchar_to_void_pointer_convert(UCHAR *pointer); */
/** TX_THREAD *_tx_misra_ulong_to_thread_pointer_convert(ULONG value); */
/** VOID *_tx_misra_timer_indirect_to_void_pointer_convert(TX_TIMER_INTERNAL **pointer); */
/** CHAR *_tx_misra_const_char_to_char_pointer_convert(const char *pointer); */
/** TX_THREAD *_tx_misra_void_to_thread_pointer_convert(void *pointer); */
/** UCHAR *_tx_misra_object_to_uchar_pointer_convert(TX_TRACE_OBJECT_ENTRY *pointer); */
/** TX_TRACE_OBJECT_ENTRY *_tx_misra_uchar_to_object_pointer_convert(UCHAR *pointer); */
/** TX_TRACE_HEADER *_tx_misra_uchar_to_header_pointer_convert(UCHAR *pointer); */
/** TX_TRACE_BUFFER_ENTRY *_tx_misra_uchar_to_entry_pointer_convert(UCHAR *pointer); */
/** UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer); */
/** UCHAR *_tx_misra_char_to_uchar_pointer_convert(CHAR *pointer); */
/** VOID _tx_misra_event_flags_group_not_used(TX_EVENT_FLAGS_GROUP *group_ptr); */
/** VOID _tx_misra_event_flags_set_notify_not_used(VOID (*events_set_notify)(TX_EVENT_FLAGS_GROUP *notify_group_ptr)); */
/** VOID _tx_misra_queue_not_used(TX_QUEUE *queue_ptr); */
/** VOID _tx_misra_queue_send_notify_not_used(VOID (*queue_send_notify)(TX_QUEUE *notify_queue_ptr)); */
/** VOID _tx_misra_semaphore_not_used(TX_SEMAPHORE *semaphore_ptr); */
/** VOID _tx_misra_semaphore_put_notify_not_used(VOID (*semaphore_put_notify)(TX_SEMAPHORE *notify_semaphore_ptr)); */
/** VOID _tx_misra_thread_not_used(TX_THREAD *thread_ptr); */
/** VOID _tx_misra_thread_entry_exit_notify_not_used(VOID (*thread_entry_exit_notify)(TX_THREAD *notify_thread_ptr, UINT id)); */
/** */
/************************************************************************************************************************************/
/************************************************************************************************************************************/
.text
.thumb_func
_tx_misra_pointer_to_ulong_convert:
_tx_misra_ulong_to_pointer_convert:
_tx_misra_indirect_void_to_uchar_pointer_convert:
_tx_misra_uchar_to_indirect_uchar_pointer_convert:
_tx_misra_block_pool_to_uchar_pointer_convert:
_tx_misra_void_to_block_pool_pointer_convert:
_tx_misra_void_to_uchar_pointer_convert:
_tx_misra_uchar_to_block_pool_pointer_convert:
_tx_misra_void_to_indirect_uchar_pointer_convert:
_tx_misra_void_to_byte_pool_pointer_convert:
_tx_misra_byte_pool_to_uchar_pointer_convert:
_tx_misra_uchar_to_align_type_pointer_convert:
_tx_misra_uchar_to_indirect_byte_pool_pointer_convert:
_tx_misra_void_to_event_flags_pointer_convert:
_tx_misra_void_to_ulong_pointer_convert:
_tx_misra_void_to_mutex_pointer_convert:
_tx_misra_void_to_queue_pointer_convert:
_tx_misra_void_to_semaphore_pointer_convert:
_tx_misra_uchar_to_void_pointer_convert:
_tx_misra_ulong_to_thread_pointer_convert:
_tx_misra_timer_indirect_to_void_pointer_convert:
_tx_misra_const_char_to_char_pointer_convert:
_tx_misra_void_to_thread_pointer_convert:
#ifdef TX_ENABLE_EVENT_TRACE
_tx_misra_object_to_uchar_pointer_convert:
_tx_misra_uchar_to_object_pointer_convert:
_tx_misra_uchar_to_header_pointer_convert:
_tx_misra_uchar_to_entry_pointer_convert:
_tx_misra_entry_to_uchar_pointer_convert:
#endif
_tx_misra_char_to_uchar_pointer_convert:
_tx_misra_event_flags_group_not_used:
_tx_misra_event_flags_set_notify_not_used:
_tx_misra_queue_not_used:
_tx_misra_queue_send_notify_not_used:
_tx_misra_semaphore_not_used:
_tx_misra_semaphore_put_notify_not_used:
_tx_misra_thread_entry_exit_notify_not_used:
_tx_misra_thread_not_used:
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG *_tx_misra_ulong_pointer_add(ULONG *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_add:
ADD R0,R0,R1, LSL #+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_sub:
MVNS R2,#+3
MULS R1,R2,R1
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_dif:
SUBS R0,R0,R1
ASRS R0,R0,#+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_message_copy(ULONG **source, ULONG **destination, */
/** UINT size); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_message_copy:
PUSH {R4,R5}
LDR R3,[R0, #+0]
LDR R4,[R1, #+0]
LDR R5,[R3, #+0]
STR R5,[R4, #+0]
ADDS R4,R4,#+4
ADDS R3,R3,#+4
CMP R2,#+2
BCC.N _tx_misra_message_copy_0
SUBS R2,R2,#+1
B.N _tx_misra_message_copy_1
_tx_misra_message_copy_2:
LDR R5,[R3, #+0]
STR R5,[R4, #+0]
ADDS R4,R4,#+4
ADDS R3,R3,#+4
SUBS R2,R2,#+1
_tx_misra_message_copy_1:
CMP R2,#+0
BNE.N _tx_misra_message_copy_2
_tx_misra_message_copy_0:
STR R3,[R0, #+0]
STR R4,[R1, #+0]
POP {R4,R5}
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_timer_pointer_dif(TX_TIMER_INTERNAL **ptr1, */
/** TX_TIMER_INTERNAL **ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_timer_pointer_dif:
SUBS R0,R0,R1
ASRS R0,R0,#+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** TX_TIMER_INTERNAL **_tx_misra_timer_pointer_add(TX_TIMER_INTERNAL */
/** **ptr1, ULONG size); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_timer_pointer_add:
ADD R0,R0,R1, LSL #+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_user_timer_pointer_get(TX_TIMER_INTERNAL */
/** *internal_timer, TX_TIMER **user_timer); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_user_timer_pointer_get:
SUBS R0,#8
STR R0,[R1, #+0]
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_thread_stack_check(TX_THREAD *thread_ptr, */
/** VOID **highest_stack); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_thread_stack_check:
PUSH {R3-R5,LR}
MOVS R4,R0
MOVS R5,R1
BL _tx_thread_interrupt_disable
CMP R4,#+0
BEQ.N _tx_misra_thread_stack_check_0
LDR R1,[R4, #+0]
LDR R2,=0x54485244
CMP R1,R2
BNE.N _tx_misra_thread_stack_check_0
LDR R1,[R4, #+8]
LDR R2,[R5, #+0]
CMP R1,R2
BCS.N _tx_misra_thread_stack_check_1
LDR R1,[R4, #+8]
STR R1,[R5, #+0]
_tx_misra_thread_stack_check_1:
LDR R1,[R4, #+12]
LDR R1,[R1, #+0]
CMP R1,#-269488145
BNE.N _tx_misra_thread_stack_check_2
LDR R1,[R4, #+16]
LDR R1,[R1, #+1]
CMP R1,#-269488145
BNE.N _tx_misra_thread_stack_check_2
LDR R1,[R5, #+0]
LDR R2,[R4, #+12]
CMP R1,R2
BCS.N _tx_misra_thread_stack_check_3
_tx_misra_thread_stack_check_2:
BL _tx_thread_interrupt_restore
MOVS R0,R4
BL _tx_thread_stack_error_handler
BL _tx_thread_interrupt_disable
_tx_misra_thread_stack_check_3:
LDR R1,[R5, #+0]
LDR R1,[R1, #-4]
CMP R1,#-269488145
BEQ.N _tx_misra_thread_stack_check_0
BL _tx_thread_interrupt_restore
MOVS R0,R4
BL _tx_thread_stack_analyze
BL _tx_thread_interrupt_disable
_tx_misra_thread_stack_check_0:
BL _tx_thread_interrupt_restore
POP {R0,R4,R5,PC} // return
#ifdef TX_ENABLE_EVENT_TRACE
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_trace_event_insert(ULONG event_id, */
/** VOID *info_field_1, ULONG info_field_2, ULONG info_field_3, */
/** ULONG info_field_4, ULONG filter, ULONG time_stamp); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_trace_event_insert:
PUSH {R3-R7,LR}
LDR.N R4,DataTable2_1
LDR R4,[R4, #+0]
CMP R4,#+0
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R5,DataTable2_2
LDR R5,[R5, #+0]
LDR R6,[SP, #+28]
TST R5,R6
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R5,DataTable2_3
LDR R5,[R5, #+0]
LDR.N R6,DataTable2_4
LDR R6,[R6, #+0]
CMP R5,#+0
BNE.N _tx_misra_trace_event_insert_1
LDR R5,[R6, #+44]
LDR R7,[R6, #+60]
LSLS R7,R7,#+16
ORRS R7,R7,#0x80000000
ORRS R5,R7,R5
B.N _tx_misra_trace_event_insert_2
_tx_misra_trace_event_insert_1:
CMP R5,#-252645136
BCS.N _tx_misra_trace_event_insert_3
MOVS R5,R6
MOVS R6,#-1
B.N _tx_misra_trace_event_insert_2
_tx_misra_trace_event_insert_3:
MOVS R6,#-252645136
MOVS R5,#+0
_tx_misra_trace_event_insert_2:
STR R6,[R4, #+0]
STR R5,[R4, #+4]
STR R0,[R4, #+8]
LDR R0,[SP, #+32]
STR R0,[R4, #+12]
STR R1,[R4, #+16]
STR R2,[R4, #+20]
STR R3,[R4, #+24]
LDR R0,[SP, #+24]
STR R0,[R4, #+28]
ADDS R4,R4,#+32
LDR.N R0,DataTable2_5
LDR R0,[R0, #+0]
CMP R4,R0
BCC.N _tx_misra_trace_event_insert_4
LDR.N R0,DataTable2_6
LDR R4,[R0, #+0]
LDR.N R0,DataTable2_1
STR R4,[R0, #+0]
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
STR R4,[R0, #+32]
LDR.N R0,DataTable2_8
LDR R0,[R0, #+0]
CMP R0,#+0
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
LDR.N R1,DataTable2_8
LDR R1,[R1, #+0]
BLX R1
B.N _tx_misra_trace_event_insert_0
_tx_misra_trace_event_insert_4:
LDR.N R0,DataTable2_1
STR R4,[R0, #+0]
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
STR R4,[R0, #+32]
_tx_misra_trace_event_insert_0:
POP {R0,R4-R7,PC} // return
.data
DataTable2_1:
.word _tx_trace_buffer_current_ptr
.data
DataTable2_2:
.word _tx_trace_event_enable_bits
.data
DataTable2_5:
.word _tx_trace_buffer_end_ptr
.data
DataTable2_6:
.word _tx_trace_buffer_start_ptr
.data
DataTable2_7:
.word _tx_trace_header_ptr
.data
DataTable2_8:
.word _tx_trace_full_notify_function
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_time_stamp_get(VOID); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_time_stamp_get:
MOVS R0,#+0
BX LR // return
#endif
.data
DataTable2_3:
.word _tx_thread_system_state
.data
DataTable2_4:
.word _tx_thread_current_ptr
/**************************************************************************/
/**************************************************************************/
/** */
/** UINT _tx_misra_always_true(void); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_always_true:
MOVS R0,#+1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UINT _tx_misra_status_get(UINT status); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_status_get:
MOVS R0,#+0
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_ipsr_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_ipsr_get:
MRS R0, IPSR
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_control_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_control_get:
MRS R0, CONTROL
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** void _tx_misra_control_set(ULONG value); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_control_set:
MSR CONTROL, R0
BX LR // return
#ifdef __ARM_FP
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_fpccr_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_fpccr_get:
LDR r0, =0xE000EF34 // Build FPCCR address
LDR r0, [r0] // Load FPCCR value
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** void _tx_misra_vfp_touch(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_vfp_touch:
vmov.f32 s0, s0
BX LR // return
#endif
.data
.word 0

View File

@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
.global _tx_execution_isr_exit
#endif
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore Cortex-M85/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is not needed for Cortex-M; it is a stub that only invokes the execution profiling ISR exit hook when enabled. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* [_tx_execution_isr_exit] Execution profiling ISR exit */
/* */
/* CALLED BY */
/* */
/* ISRs Interrupt Service Routines */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_context_restore
.thumb_func
.type _tx_thread_context_restore, function
_tx_thread_context_restore:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
PUSH {r0, lr} // Save return address
BL _tx_execution_isr_exit // Call the ISR exit function
POP {r0, lr} // Recover return address
#endif
BX lr
// }
.end

View File

@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
.global _tx_execution_isr_enter
#endif
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_save Cortex-M85/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is not needed for Cortex-M; it is a stub that only invokes the execution profiling ISR enter hook when enabled. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* [_tx_execution_isr_enter] Execution profiling ISR enter */
/* */
/* CALLED BY */
/* */
/* ISRs */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_save(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_context_save
.thumb_func
.type _tx_thread_context_save, function
_tx_thread_context_save:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR enter function to indicate an ISR is starting. */
PUSH {r0, lr} // Save return address
BL _tx_execution_isr_enter // Call the ISR enter function
POP {r0, lr} // Recover return address
#endif
BX lr
// }
.end
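Taken together, the two stubs above exist so that interrupt code written against the classic ThreadX ISR template still builds and links on Cortex-M. A minimal sketch, assuming a hypothetical peripheral handler name; the save/restore calls do nothing here unless execution profiling is enabled.
#include "tx_api.h"

VOID _tx_thread_context_save(VOID);         /* Stubs shown above */
VOID _tx_thread_context_restore(VOID);

void my_peripheral_irq_handler(void)        /* Hypothetical vector entry */
{
    _tx_thread_context_save();              /* ISR-enter hook (profiling only) */

    /* ... application interrupt processing, e.g. tx_semaphore_put() ... */

    _tx_thread_context_restore();           /* ISR-exit hook (profiling only) */
}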

View File

@ -0,0 +1,82 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_control Cortex-M85/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for changing the interrupt lockout */
/* posture of the system. */
/* */
/* INPUT */
/* */
/* new_posture New interrupt lockout posture */
/* */
/* OUTPUT */
/* */
/* old_posture Old interrupt lockout posture */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_interrupt_control(UINT new_posture)
// {
.section .text
.balign 4
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_interrupt_control
.thumb_func
.type _tx_thread_interrupt_control, function
_tx_thread_interrupt_control:
#ifdef TX_PORT_USE_BASEPRI
MRS r1, BASEPRI // Pickup current interrupt posture
MSR BASEPRI, r0 // Apply the new interrupt posture
MOV r0, r1 // Transfer old to return register
#else
MRS r1, PRIMASK // Pickup current interrupt lockout
MSR PRIMASK, r0 // Apply the new interrupt lockout
MOV r0, r1 // Transfer old to return register
#endif
BX lr // Return to caller
// }
.end
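A hedged usage sketch: application code normally reaches the routine above through the tx_interrupt_control service. TX_INT_DISABLE and the returned posture value come from the port definitions; the shared flag below is made up for illustration.
#include "tx_api.h"

static volatile UINT shared_flag;

VOID set_shared_flag(VOID)
{
    UINT old_posture;

    old_posture = tx_interrupt_control(TX_INT_DISABLE);    /* Lock out interrupts */
    shared_flag = 1;                                        /* Touch shared state */
    tx_interrupt_control(old_posture);                      /* Restore prior posture */
}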

View File

@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_disable Cortex-M85/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for disabling interrupts and returning */
/* the previous interrupt lockout posture. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* old_posture Old interrupt lockout posture */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_interrupt_disable(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_interrupt_disable
.thumb_func
.type _tx_thread_interrupt_disable, function
_tx_thread_interrupt_disable:
/* Return current interrupt lockout posture. */
#ifdef TX_PORT_USE_BASEPRI
MRS r0, BASEPRI
LDR r1, =TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
MRS r0, PRIMASK
CPSID i
#endif
BX lr
// }
.end

View File

@ -0,0 +1,80 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_restore Cortex-M85/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for restoring the previous */
/* interrupt lockout posture. */
/* */
/* INPUT */
/* */
/* previous_posture Previous interrupt posture */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_interrupt_restore(UINT previous_posture)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_interrupt_restore
.thumb_func
.type _tx_thread_interrupt_restore, function
_tx_thread_interrupt_restore:
/* Restore previous interrupt lockout posture. */
#ifdef TX_PORT_USE_BASEPRI
MSR BASEPRI, r0
#else
MSR PRIMASK, r0
#endif
BX lr
// }
.end
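For completeness, a minimal sketch of the disable/restore pairing these two routines provide. TX_INTERRUPT_SAVE_AREA, TX_DISABLE and TX_RESTORE are the ThreadX port macros (the secure-stack source later in this commit uses them); depending on configuration they may expand to inline MRS/MSR sequences rather than calls into the functions above, and the counter is made up.
#include "tx_api.h"

static ULONG protected_counter;

VOID protected_increment(VOID)
{
TX_INTERRUPT_SAVE_AREA

    TX_DISABLE                          /* Disable interrupts, remember the old posture */
    protected_counter++;                /* Critical section */
    TX_RESTORE                          /* Put the saved posture back */
}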

View File

@ -0,0 +1,391 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
.global _tx_execution_thread_enter
.global _tx_execution_thread_exit
#endif
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M85/AC6 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function waits for a thread control block pointer to appear in */
/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
/* in the variable, the corresponding thread is resumed. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 04-02-2021 Scott Larson Modified comment(s), added */
/* low power code, */
/* resulting in version 6.1.6 */
/* 06-02-2021 Scott Larson Added secure stack initialize */
/* in SVC handler, */
/* resulting in version 6.1.7 */
/* 04-25-2022 Scott Larson Added BASEPRI support, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_schedule
.thumb_func
.type _tx_thread_schedule, function
_tx_thread_schedule:
/* This function should only ever be called on Cortex-M
from the first schedule request. Subsequent scheduling occurs
from the PendSV handling routine below. */
/* Clear the preempt-disable flag to enable rescheduling after initialization on Cortex-M targets. */
MOV r0, #0 // Build value for TX_FALSE
LDR r2, =_tx_thread_preempt_disable // Build address of preempt disable flag
STR r0, [r2, #0] // Clear preempt disable flag
#ifdef __ARM_PCS_VFP
/* Clear CONTROL.FPCA bit so VFP registers aren't unnecessarily stacked. */
MRS r0, CONTROL // Pickup current CONTROL register
BIC r0, r0, #4 // Clear the FPCA bit
MSR CONTROL, r0 // Setup new CONTROL register
#endif
/* Enable interrupts */
CPSIE i
/* Enter the scheduler for the first time. */
MOV r0, #0x10000000 // Load PENDSVSET bit
MOV r1, #0xE000E000 // Load NVIC base
STR r0, [r1, #0xD04] // Set PENDSVBIT in ICSR
DSB // Complete all memory accesses
ISB // Flush pipeline
/* Wait here for the PendSV to take place. */
__tx_wait_here:
B __tx_wait_here // Wait for the PendSV to happen
// }
/* Generic context switching PendSV handler. */
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global PendSV_Handler
.thumb_func
.type PendSV_Handler, function
/* Get current thread value and new thread pointer. */
PendSV_Handler:
__tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
#ifdef TX_PORT_USE_BASEPRI
MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r0
#else
CPSIE i // Enable interrupts
#endif /* TX_PORT_USE_BASEPRI */
#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
MOV r3, #0 // Build NULL value
LDR r1, [r0] // Pickup current thread pointer
/* Determine if there is a current thread to finish preserving. */
CBZ r1, __tx_ts_new // If NULL, skip preservation
/* Recover PSP and preserve current thread context. */
STR r3, [r0] // Set _tx_thread_current_ptr to NULL
MRS r12, PSP // Pickup PSP pointer (thread's stack pointer)
STMDB r12!, {r4-r11} // Save its remaining registers
#ifdef __ARM_PCS_VFP
TST LR, #0x10 // Determine if the VFP extended frame is present
BNE _skip_vfp_save
VSTMDB r12!,{s16-s31} // Yes, save additional VFP registers
_skip_vfp_save:
#endif
LDR r4, =_tx_timer_time_slice // Build address of time-slice variable
STMDB r12!, {LR} // Save LR on the stack
STR r12, [r1, #8] // Save the thread stack pointer
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// Save secure context
LDR r5, [r1,#0x90] // Load secure stack index
CBZ r5, _skip_secure_save // Skip save if there is no secure context
PUSH {r0,r1,r2,r3} // Save scratch registers
MOV r0, r1 // Move thread ptr to r0
BL _tx_thread_secure_stack_context_save // Save secure stack
POP {r0,r1,r2,r3} // Restore secure registers
_skip_secure_save:
#endif
/* Determine if time-slice is active. If it isn't, skip time handling processing. */
LDR r5, [r4] // Pickup current time-slice
CBZ r5, __tx_ts_new // If not active, skip processing
/* Time-slice is active, save the current thread's time-slice and clear the global time-slice variable. */
STR r5, [r1, #24] // Save current time-slice
/* Clear the global time-slice. */
STR r3, [r4] // Clear time-slice
/* Executing thread is now completely preserved!!! */
__tx_ts_new:
/* Now we are looking for a new thread to execute! */
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for execution; make it the current thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
/* Increment the thread run count. */
__tx_ts_restore:
LDR r7, [r1, #4] // Pickup the current thread run count
LDR r4, =_tx_timer_time_slice // Build address of time-slice variable
LDR r5, [r1, #24] // Pickup thread's current time-slice
ADD r7, r7, #1 // Increment the thread run count
STR r7, [r1, #4] // Store the new run count
/* Setup global time-slice with thread's current time-slice. */
STR r5, [r4] // Setup global time-slice
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread entry function to indicate the thread is executing. */
PUSH {r0, r1} // Save r0 and r1
BL _tx_execution_thread_enter // Call the thread execution enter function
POP {r0, r1} // Recover r0 and r1
#endif
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// Restore secure context
LDR r0, [r1,#0x90] // Load secure stack index
CBZ r0, _skip_secure_restore // Skip restore if there is no secure context
PUSH {r0,r1} // Save r1 (and dummy r0)
MOV r0, r1 // Move thread ptr to r0
BL _tx_thread_secure_stack_context_restore // Restore secure stack
POP {r0,r1} // Restore r1 (and dummy r0)
_skip_secure_restore:
#endif
/* Restore the thread context and PSP. */
LDR r12, [r1, #12] // Get stack start
MSR PSPLIM, r12 // Set stack limit
LDR r12, [r1, #8] // Pickup thread's stack pointer
LDMIA r12!, {LR} // Pickup LR
#ifdef __ARM_PCS_VFP
TST LR, #0x10 // Determine if the VFP extended frame is present
BNE _skip_vfp_restore // If not, skip VFP restore
VLDMIA r12!, {s16-s31} // Yes, restore additional VFP registers
_skip_vfp_restore:
#endif
LDMIA r12!, {r4-r11} // Recover thread's registers
MSR PSP, r12 // Setup the thread's stack pointer
BX lr // Return to thread!
/* The following is the idle wait processing... in this case, no threads are ready for execution and the
system will simply be idle until an interrupt occurs that makes a thread ready. Note that interrupts
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_LOW_POWER
PUSH {r0-r3}
BL tx_low_power_enter // Possibly enter low power mode
POP {r0-r3}
#endif
#ifdef TX_ENABLE_WFI
DSB // Ensure no outstanding memory transactions
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
#ifdef TX_LOW_POWER
PUSH {r0-r3}
BL tx_low_power_exit // Exit low power mode
POP {r0-r3}
#endif
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
already in the handler! */
__tx_ts_ready:
MOV r7, #0x08000000 // Build clear PendSV value
MOV r8, #0xE000E000 // Build base NVIC address
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
B __tx_ts_restore // Restore the thread
// }
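A compilable, heavily simplified C sketch of the PendSV flow above, for orientation only. The two static helpers stand in for the register save/restore done in assembly and are not real ThreadX functions; the globals and the run-count/time-slice fields are the ones the handler actually references.
#include "tx_api.h"

extern TX_THREAD  *_tx_thread_current_ptr;
extern TX_THREAD  *_tx_thread_execute_ptr;
extern ULONG       _tx_timer_time_slice;

static void save_thread_context(TX_THREAD *t)    { (void)t; /* STMDB r4-r11, FP regs, LR */ }
static void restore_thread_context(TX_THREAD *t) { (void)t; /* PSPLIM, LDMIA, PSP */ }

void pendsv_flow_sketch(void)                    /* Hypothetical name */
{
    TX_THREAD *thread = _tx_thread_current_ptr;

    if (thread != TX_NULL)                       /* Preserve the outgoing thread */
    {
        _tx_thread_current_ptr = TX_NULL;
        save_thread_context(thread);
        if (_tx_timer_time_slice != 0)           /* Remember any remaining time-slice */
        {
            thread -> tx_thread_time_slice = _tx_timer_time_slice;
            _tx_timer_time_slice = 0;
        }
    }

    while ((thread = _tx_thread_execute_ptr) == TX_NULL)
    {
        /* Idle: the assembly optionally executes WFI here until a thread is ready. */
    }

    _tx_thread_current_ptr = thread;             /* Make it the current thread */
    thread -> tx_thread_run_count++;             /* Account for the dispatch */
    _tx_timer_time_slice = thread -> tx_thread_time_slice;
    restore_thread_context(thread);              /* Return to the thread via PSP */
}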
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// SVC_Handler is not needed when ThreadX is running in single mode.
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global SVC_Handler
.thumb_func
.type SVC_Handler, function
SVC_Handler:
TST lr, #0x04 // Determine return stack from EXC_RETURN bit 2
ITE EQ
MRSEQ r0, MSP // Get MSP if return stack is MSP
MRSNE r0, PSP // Get PSP if return stack is PSP
LDR r1, [r0,#24] // Load saved PC from stack
LDRB r1, [r1,#-2] // Load SVC number
CMP r1, #1 // Is it a secure stack allocate request?
BEQ _tx_svc_secure_alloc // Yes, go there
CMP r1, #2 // Is it a secure stack free request?
BEQ _tx_svc_secure_free // Yes, go there
CMP r1, #3 // Is it a secure stack init request?
BEQ _tx_svc_secure_init // Yes, go there
// Unknown SVC argument - just return
BX lr
_tx_svc_secure_alloc:
PUSH {r0,lr} // Save SP and EXC_RETURN
LDM r0, {r0-r3} // Load function parameters from stack
BL _tx_thread_secure_mode_stack_allocate
POP {r12,lr} // Restore SP and EXC_RETURN
STR r0,[r12] // Store function return value
BX lr
_tx_svc_secure_free:
PUSH {r0,lr} // Save SP and EXC_RETURN
LDM r0, {r0-r3} // Load function parameters from stack
BL _tx_thread_secure_mode_stack_free
POP {r12,lr} // Restore SP and EXC_RETURN
STR r0,[r12] // Store function return value
BX lr
_tx_svc_secure_init:
PUSH {r0,lr} // Save SP and EXC_RETURN
BL _tx_thread_secure_mode_stack_initialize
POP {r12,lr} // Restore SP and EXC_RETURN
BX lr
#endif // End of ifndef TX_SINGLE_MODE_SECURE, TX_SINGLE_MODE_NON_SECURE
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_vfp_access
.thumb_func
.type _tx_vfp_access, function
_tx_vfp_access:
VMOV.F32 s0, s0 // Simply access the VFP
BX lr // Return to caller
.end

View File

@ -0,0 +1,592 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#include "tx_api.h"
/* If TX_SINGLE_MODE_SECURE or TX_SINGLE_MODE_NON_SECURE is defined,
no secure stack functionality is needed. */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_SOURCE_CODE
#include "cmsis_compiler.h" /* For intrinsic functions. */
#include "tx_secure_interface.h" /* Interface for NS code. */
/* Minimum size of secure stack. */
#ifndef TX_THREAD_SECURE_STACK_MINIMUM
#define TX_THREAD_SECURE_STACK_MINIMUM 256
#endif
/* Maximum size of secure stack. */
#ifndef TX_THREAD_SECURE_STACK_MAXIMUM
#define TX_THREAD_SECURE_STACK_MAXIMUM 1024
#endif
/* 8 bytes added to stack size to "seal" stack. */
#define TX_THREAD_STACK_SEAL_SIZE 8
#define TX_THREAD_STACK_SEAL_VALUE 0xFEF5EDA5
/* Maximum number of secure contexts. */
#ifndef TX_MAX_SECURE_CONTEXTS
#define TX_MAX_SECURE_CONTEXTS 32
#endif
#define TX_INVALID_SECURE_CONTEXT_IDX (-1)
/* Secure stack info struct to hold stack start, stack limit,
current stack pointer, and pointer to owning thread.
This will be allocated for each thread with a secure stack. */
typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
{
VOID *tx_thread_secure_stack_ptr; /* Thread's secure stack current pointer */
VOID *tx_thread_secure_stack_start; /* Thread's secure stack start address */
VOID *tx_thread_secure_stack_limit; /* Thread's secure stack limit */
TX_THREAD *tx_thread_ptr; /* Keep track of thread for error handling */
INT tx_next_free_index; /* Next free index of free secure context */
} TX_THREAD_SECURE_STACK_INFO;
/* Static secure contexts */
static TX_THREAD_SECURE_STACK_INFO tx_thread_secure_context[TX_MAX_SECURE_CONTEXTS];
/* Head of the free secure context list. */
static INT tx_head_free_index = 0;
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_initialize Cortex-M85/AC6 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function initializes secure mode to use PSP stack. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* status */
/* */
/* CALLS */
/* */
/* __get_CONTROL Intrinsic to get CONTROL */
/* __set_CONTROL Intrinsic to set CONTROL */
/* __set_PSPLIM Intrinsic to set PSP limit */
/* __set_PSP Intrinsic to set PSP */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 06-02-2021 Scott Larson Modified comment(s), and */
/* changed name, execute in */
/* handler mode, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_initialize(void)
{
UINT status;
INT index;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
{
status = TX_CALLER_ERROR;
}
else
{
/* Set secure mode to use PSP. */
__set_CONTROL(__get_CONTROL() | 2);
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
__set_PSPLIM(0);
__set_PSP(0);
for (index = 0; index < TX_MAX_SECURE_CONTEXTS; index++)
{
/* Last entry: mark its next-free index as invalid to terminate the list. */
if(index == (TX_MAX_SECURE_CONTEXTS - 1))
{
tx_thread_secure_context[index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
tx_thread_secure_context[index].tx_next_free_index = index + 1;
}
}
status = TX_SUCCESS;
}
return status;
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_allocate Cortex-M85/AC6 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function allocates a thread's secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* stack_size Size of stack to allocate */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_SIZE_ERROR Invalid stack size */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* __get_IPSR Intrinsic to get IPSR */
/* malloc Compiler's malloc function */
/* __set_PSPLIM Intrinsic to set PSP limit */
/* __set_PSP Intrinsic to set PSP */
/* __TZ_get_PSPLIM_NS Intrinsic to get NS PSPLIM */
/* */
/* CALLED BY */
/* */
/* SVC Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* added stack sealing, */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
{
TX_INTERRUPT_SAVE_AREA
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
UCHAR *stack_mem;
INT secure_context_index;
status = TX_SUCCESS;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
{
status = TX_CALLER_ERROR;
}
else if (stack_size < TX_THREAD_SECURE_STACK_MINIMUM || stack_size > TX_THREAD_SECURE_STACK_MAXIMUM)
{
status = TX_SIZE_ERROR;
}
/* Check if thread already has secure stack allocated. */
else if (thread_ptr -> tx_thread_secure_stack_context != 0)
{
status = TX_THREAD_ERROR;
}
else
{
TX_DISABLE
/* Allocate free index for secure stack info. */
if(tx_head_free_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
secure_context_index = tx_head_free_index;
tx_head_free_index = tx_thread_secure_context[tx_head_free_index].tx_next_free_index;
tx_thread_secure_context[secure_context_index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
secure_context_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
TX_RESTORE
if(secure_context_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
info_ptr = &tx_thread_secure_context[secure_context_index];
/* If stack info allocated, allocate a stack & seal. */
stack_mem = malloc(stack_size + TX_THREAD_STACK_SEAL_SIZE);
if(stack_mem != TX_NULL)
{
/* Secure stack has been allocated, save in the stack info struct. */
info_ptr -> tx_thread_secure_stack_limit = stack_mem;
info_ptr -> tx_thread_secure_stack_start = stack_mem + stack_size;
info_ptr -> tx_thread_secure_stack_ptr = info_ptr -> tx_thread_secure_stack_start;
info_ptr -> tx_thread_ptr = thread_ptr;
/* Seal bottom of stack. */
*(ULONG*)info_ptr -> tx_thread_secure_stack_start = TX_THREAD_STACK_SEAL_VALUE;
/* Save secure context id (i.e non-zero base index) in thread. */
thread_ptr -> tx_thread_secure_stack_context = (VOID *)(secure_context_index + 1);
/* Check if this thread is running by looking at its stack start and PSPLIM_NS */
if(((ULONG) thread_ptr -> tx_thread_stack_start & 0xFFFFFFF8) == __TZ_get_PSPLIM_NS())
{
/* If this thread is running, set Secure PSP and PSPLIM. */
__set_PSPLIM((ULONG)(info_ptr -> tx_thread_secure_stack_limit));
__set_PSP((ULONG)(info_ptr -> tx_thread_secure_stack_ptr));
}
}
else
{
TX_DISABLE
/* Stack not allocated, free the info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
status = TX_NO_MEMORY;
}
}
else
{
status = TX_NO_MEMORY;
}
}
return(status);
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_free Cortex-M85/AC6 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function frees a thread's secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* __get_IPSR Intrinsic to get IPSR */
/* free Compiler's free() function */
/* */
/* CALLED BY */
/* */
/* SVC Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_free(TX_THREAD *thread_ptr)
{
TX_INTERRUPT_SAVE_AREA
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
INT secure_context_index;
status = TX_SUCCESS;
/* Pickup stack info id from thread. */
secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
{
status = TX_CALLER_ERROR;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
status = TX_THREAD_ERROR;
}
else
{
/* Pickup stack info from static array of secure contexts. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
status = TX_THREAD_ERROR;
}
else
{
/* Free secure stack. */
free(info_ptr -> tx_thread_secure_stack_limit);
TX_DISABLE
/* Free info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
/* Clear secure context from thread. */
thread_ptr -> tx_thread_secure_stack_context = 0;
}
}
return(status);
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_save Cortex-M85/AC6 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function saves context of the secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* __get_IPSR Intrinsic to get IPSR */
/* __get_PSP Intrinsic to get PSP */
/* __set_PSPLIM Intrinsic to set PSP limit */
/* __set_PSP Intrinsic to set PSP */
/* */
/* CALLED BY */
/* */
/* PendSV Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 06-02-2021 Scott Larson Fix stack pointer save, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
void _tx_thread_secure_stack_context_save(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
ULONG sp;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
if (__get_IPSR() == 0)
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Check that stack pointer is in range */
sp = __get_PSP();
if ((sp < (ULONG)info_ptr -> tx_thread_secure_stack_limit) ||
(sp > (ULONG)info_ptr -> tx_thread_secure_stack_start))
{
return;
}
/* Save stack pointer. */
info_ptr -> tx_thread_secure_stack_ptr = (VOID *) sp;
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
__set_PSPLIM(0);
__set_PSP(0);
return;
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_restore Cortex-M85/AC6 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function restores context of the secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* __get_IPSR Intrinsic to get IPSR */
/* __set_PSPLIM Intrinsic to set PSP limit */
/* __set_PSP Intrinsic to set PSP */
/* */
/* CALLED BY */
/* */
/* PendSV Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
void _tx_thread_secure_stack_context_restore(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
if (__get_IPSR() == 0)
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Set stack pointer and limit. */
__set_PSPLIM((ULONG)info_ptr -> tx_thread_secure_stack_limit);
__set_PSP ((ULONG)info_ptr -> tx_thread_secure_stack_ptr);
return;
}
#endif

View File

@ -0,0 +1,85 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_allocate Cortex-M85/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to allocate a secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* stack_size Size of secure stack to */
/* allocate */
/* */
/* OUTPUT */
/* */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* SVC 1 */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_secure_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_secure_stack_allocate
.thumb_func
.type _tx_thread_secure_stack_allocate, function
_tx_thread_secure_stack_allocate:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
MRS r3, PRIMASK // Save interrupt mask
CPSIE i // Enable interrupts for SVC call
SVC 1
CMP r3, #0 // If interrupts enabled, just return
BEQ _alloc_return_interrupt_enabled
CPSID i // Otherwise, disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
_alloc_return_interrupt_enabled:
BX lr
.end
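Illustrative application usage of the veneer above (the thread name and the 512-byte size are made up): a thread requests a secure-side stack through SVC 1 before calling into secure code, and can later release it with tx_thread_secure_stack_free (SVC 2).
#include "tx_api.h"

extern TX_THREAD worker_thread;          /* Created elsewhere with tx_thread_create */

VOID worker_secure_setup(VOID)
{
    UINT status;

    status = tx_thread_secure_stack_allocate(&worker_thread, 512);
    if (status == TX_SUCCESS)
    {
        /* The thread may now call CMSE non-secure-callable functions. */
    }
    else
    {
        /* TX_SIZE_ERROR, TX_NO_MEMORY, ... -- handle as appropriate. */
    }
}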

View File

@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_free Cortex-M85/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to free a secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* SVC 2 */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// UINT _tx_thread_secure_stack_free(TX_THREAD *thread_ptr)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_secure_stack_free
.thumb_func
.type _tx_thread_secure_stack_free, function
_tx_thread_secure_stack_free:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
MRS r3, PRIMASK // Save interrupt mask
CPSIE i // Enable interrupts for SVC call
SVC 2
CMP r3, #0 // If interrupts enabled, just return
BEQ _free_return_interrupt_enabled
CPSID i // Otherwise, disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
_free_return_interrupt_enabled:
BX lr
.end

View File

@ -0,0 +1,79 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_initialize Cortex-M85/AC6 */
/* 6.1.7 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to initialize a secure stack. */
/* */
/* INPUT */
/* */
/* none */
/* */
/* OUTPUT */
/* */
/* none */
/* */
/* CALLS */
/* */
/* SVC 3 */
/* */
/* CALLED BY */
/* */
/* TX_INITIALIZE_KERNEL_ENTER_EXTENSION */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* */
/**************************************************************************/
// VOID _tx_thread_secure_stack_initialize(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_secure_stack_initialize
.thumb_func
.type _tx_thread_secure_stack_initialize, function
_tx_thread_secure_stack_initialize:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
CPSIE i // Enable interrupts for SVC call
SVC 3
CPSID i // Disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
BX lr
.end
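One plausible wiring, consistent with the CALLED BY note above: the port's tx_port.h hooks kernel entry so the secure-stack bookkeeping is initialized before the scheduler starts. Treat the exact macro body as an assumption and check the port's tx_port.h for the authoritative definition.
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_INITIALIZE_KERNEL_ENTER_EXTENSION    _tx_thread_secure_stack_initialize();
#endif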

View File

@ -0,0 +1,140 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_stack_build Cortex-M85/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function builds a stack frame on the supplied thread's stack. */
/* The stack frame results in a fake interrupt return to the supplied */
/* function pointer. */
/* */
/* INPUT */
/* */
/* thread_ptr Pointer to thread control blk */
/* function_ptr Pointer to return function */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_thread_create Create thread service */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_stack_build
.thumb_func
.type _tx_thread_stack_build, function
_tx_thread_stack_build:
/* Build a fake interrupt frame. The form of the fake interrupt stack
on the Cortex-M should look like the following after it is built:
Stack Top:
LR Interrupted LR (LR at time of PENDSV)
r4 Initial value for r4
r5 Initial value for r5
r6 Initial value for r6
r7 Initial value for r7
r8 Initial value for r8
r9 Initial value for r9
r10 Initial value for r10
r11 Initial value for r11
r0 Initial value for r0 (Hardware stack starts here!!)
r1 Initial value for r1
r2 Initial value for r2
r3 Initial value for r3
r12 Initial value for r12
lr Initial value for lr
pc Initial value for pc
xPSR Initial value for xPSR
Stack Bottom: (higher memory address) */
LDR r2, [r0, #16] // Pickup end of stack area
BIC r2, r2, #0x7 // Align frame for 8-byte alignment
SUB r2, r2, #68 // Subtract frame size
#ifdef TX_SINGLE_MODE_SECURE
LDR r3, =0xFFFFFFFD // Build initial LR value for secure mode
#else
LDR r3, =0xFFFFFFBC // Build initial LR value to return to non-secure PSP
#endif
STR r3, [r2, #0] // Save on the stack
/* Actually build the stack frame. */
MOV r3, #0 // Build initial register value
STR r3, [r2, #4] // Store initial r4
STR r3, [r2, #8] // Store initial r5
STR r3, [r2, #12] // Store initial r6
STR r3, [r2, #16] // Store initial r7
STR r3, [r2, #20] // Store initial r8
STR r3, [r2, #24] // Store initial r9
STR r3, [r2, #28] // Store initial r10
STR r3, [r2, #32] // Store initial r11
/* Hardware stack follows. */
STR r3, [r2, #36] // Store initial r0
STR r3, [r2, #40] // Store initial r1
STR r3, [r2, #44] // Store initial r2
STR r3, [r2, #48] // Store initial r3
STR r3, [r2, #52] // Store initial r12
MOV r3, #0xFFFFFFFF // Poison EXC_RETURN value
STR r3, [r2, #56] // Store initial lr
STR r1, [r2, #60] // Store initial pc
MOV r3, #0x01000000 // Only T-bit need be set
STR r3, [r2, #64] // Store initial xPSR
/* Setup stack pointer. */
// thread_ptr -> tx_thread_stack_ptr = r2;
STR r2, [r0, #8] // Save stack pointer in thread's
// control block
BX lr // Return to caller
// }
.end
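Illustrative only: the 68-byte fake frame built above, viewed as a C struct. The type name is hypothetical; the member offsets mirror the STR instructions and the layout comment in _tx_thread_stack_build.
#include "tx_api.h"

typedef struct
{
    ULONG lr_exc_return;     /* offset  0: EXC_RETURN consumed when PendSV restores the thread */
    ULONG r4_to_r11[8];      /* offset  4: software-saved registers */
    ULONG r0_to_r3[4];       /* offset 36: hardware-stacked registers */
    ULONG r12;               /* offset 52 */
    ULONG lr;                /* offset 56: poisoned with 0xFFFFFFFF */
    ULONG pc;                /* offset 60: thread entry point (function_ptr) */
    ULONG xpsr;              /* offset 64: 0x01000000, only the T-bit set */
} TX_INITIAL_STACK_FRAME_SKETCH;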

View File

@ -0,0 +1,96 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_system_return Cortex-M85/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is target processor specific. It is used to transfer */
/* control from a thread back to the ThreadX system. Only a */
/* minimal context is saved since the compiler assumes temp registers */
/* are going to get clobbered by a function call anyway. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_thread_schedule Thread scheduling loop */
/* */
/* CALLED BY */
/* */
/* ThreadX components */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_system_return(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_system_return
.thumb_func
.type _tx_thread_system_return, function
_tx_thread_system_return:
/* Return to real scheduler via PendSV. Note that this routine is often
replaced with in-line assembly in tx_port.h to improve performance. */
MOV r0, #0x10000000 // Load PENDSVSET bit
MOV r1, #0xE000E000 // Load NVIC base
STR r0, [r1, #0xD04] // Set PENDSVBIT in ICSR
MRS r0, IPSR // Pickup IPSR
CMP r0, #0 // Is it a thread returning?
BNE _isr_context // If ISR, skip interrupt enable
#ifdef TX_PORT_USE_BASEPRI
MRS r1, BASEPRI // Thread context returning, pickup BASEPRI
MOV r0, #0
MSR BASEPRI, r0 // Enable interrupts
MSR BASEPRI, r1 // Restore original interrupt posture
#else
MRS r1, PRIMASK // Thread context returning, pickup PRIMASK
CPSIE i // Enable interrupts
MSR PRIMASK, r1 // Restore original interrupt posture
#endif
_isr_context:
BX lr // Return to caller
// }
.end

View File

@ -0,0 +1,243 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Timer */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_timer_interrupt Cortex-M85/AC6 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function processes the hardware timer interrupt. This */
/* processing includes incrementing the system clock and checking for */
/* time slice and/or timer expiration. If either is found, the */
/* expiration functions are called. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_timer_expiration_process Timer expiration processing */
/* _tx_thread_time_slice Time slice interrupted thread */
/* */
/* CALLED BY */
/* */
/* interrupt vector */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
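A minimal C sketch of the "interrupt vector" caller mentioned above: a periodic tick handler (SysTick on Cortex-M) simply invokes the ThreadX timer ISR. The shipped port normally wires this up in tx_initialize_low_level, so treat the handler below as an illustration rather than the port's actual code.
#include "tx_api.h"

VOID _tx_timer_interrupt(VOID);      /* Implemented by the assembly that follows */

void SysTick_Handler(void)
{
    _tx_timer_interrupt();
}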
// VOID _tx_timer_interrupt(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_timer_interrupt
.thumb_func
.type _tx_timer_interrupt, function
_tx_timer_interrupt:
/* Upon entry to this routine, it is assumed that the compiler scratch registers are available
for use. */
/* Increment the system clock. */
// _tx_timer_system_clock++;
LDR r1, =_tx_timer_system_clock // Pickup address of system clock
LDR r0, [r1, #0] // Pickup system clock
ADD r0, r0, #1 // Increment system clock
STR r0, [r1, #0] // Store new system clock
/* Test for time-slice expiration. */
// if (_tx_timer_time_slice)
// {
LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
LDR r2, [r3, #0] // Pickup time-slice
CBZ r2, __tx_timer_no_time_slice // Is it non-active?
// Yes, skip time-slice processing
/* Decrement the time_slice. */
// _tx_timer_time_slice--;
SUB r2, r2, #1 // Decrement the time-slice
STR r2, [r3, #0] // Store new time-slice value
/* Check for expiration. */
// if (_tx_timer_time_slice == 0)
CBNZ r2, __tx_timer_no_time_slice // Has it expired?
// No, skip expiration processing
/* Set the time-slice expired flag. */
// _tx_timer_expired_time_slice = TX_TRUE;
LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
MOV r0, #1 // Build expired value
STR r0, [r3, #0] // Set time-slice expiration flag
// }
__tx_timer_no_time_slice:
/* Test for timer expiration. */
// if (*_tx_timer_current_ptr)
// {
LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
LDR r0, [r1, #0] // Pickup current timer
LDR r2, [r0, #0] // Pickup timer list entry
CBZ r2, __tx_timer_no_timer // Is there anything in the list?
// No, just increment the timer
/* Set expiration flag. */
// _tx_timer_expired = TX_TRUE;
LDR r3, =_tx_timer_expired // Pickup expiration flag address
MOV r2, #1 // Build expired value
STR r2, [r3, #0] // Set expired flag
B __tx_timer_done // Finished timer processing
// }
// else
// {
__tx_timer_no_timer:
/* No timer expired, increment the timer pointer. */
// _tx_timer_current_ptr++;
ADD r0, r0, #4 // Move to next timer
/* Check for wrap-around. */
// if (_tx_timer_current_ptr == _tx_timer_list_end)
LDR r3, =_tx_timer_list_end // Pickup addr of timer list end
LDR r2, [r3, #0] // Pickup list end
CMP r0, r2 // Are we at list end?
BNE __tx_timer_skip_wrap // No, skip wrap-around logic
/* Wrap to beginning of list. */
// _tx_timer_current_ptr = _tx_timer_list_start;
LDR r3, =_tx_timer_list_start // Pickup addr of timer list start
LDR r0, [r3, #0] // Set current pointer to list start
__tx_timer_skip_wrap:
STR r0, [r1, #0] // Store new current timer pointer
// }
__tx_timer_done:
/* See if anything has expired. */
// if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
// {
LDR r3, =_tx_timer_expired_time_slice // Pickup addr of expired flag
LDR r2, [r3, #0] // Pickup time-slice expired flag
CBNZ r2, __tx_something_expired // Did a time-slice expire?
// If non-zero, time-slice expired
LDR r1, =_tx_timer_expired // Pickup addr of other expired flag
LDR r0, [r1, #0] // Pickup timer expired flag
CBZ r0, __tx_timer_nothing_expired // Did a timer expire?
// No, nothing expired
__tx_something_expired:
PUSH {r0, lr} // Save the lr register on the stack
// and save r0 just to keep 8-byte alignment
/* Did a timer expire? */
// if (_tx_timer_expired)
// {
LDR r1, =_tx_timer_expired // Pickup addr of expired flag
LDR r0, [r1, #0] // Pickup timer expired flag
CBZ r0, __tx_timer_dont_activate // Check for timer expiration
// If not set, skip timer activation
/* Process timer expiration. */
// _tx_timer_expiration_process();
BL _tx_timer_expiration_process // Call the timer expiration handling routine
// }
__tx_timer_dont_activate:
/* Did time slice expire? */
// if (_tx_timer_expired_time_slice)
// {
LDR r3, =_tx_timer_expired_time_slice // Pickup addr of time-slice expired
LDR r2, [r3, #0] // Pickup the actual flag
CBZ r2, __tx_timer_not_ts_expiration // See if the flag is set
// No, skip time-slice processing
/* Time slice interrupted thread. */
// _tx_thread_time_slice();
BL _tx_thread_time_slice // Call time-slice processing
LDR r0, =_tx_thread_preempt_disable // Build address of preempt disable flag
LDR r1, [r0] // Is the preempt disable flag set?
CBNZ r1, __tx_timer_skip_time_slice // Yes, skip the PendSV logic
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r1, [r0] // Pickup the current thread pointer
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
LDR r3, [r2] // Pickup the execute thread pointer
LDR r0, =0xE000ED04 // Build address of control register
LDR r2, =0x10000000 // Build value for PendSV bit
CMP r1, r3 // Are they the same?
BEQ __tx_timer_skip_time_slice // If the same, there was no time-slice performed
STR r2, [r0] // Not the same, issue the PendSV for preemption
__tx_timer_skip_time_slice:
// }
__tx_timer_not_ts_expiration:
POP {r0, lr} // Recover lr register (r0 is just there for
// the 8-byte stack alignment)
// }
__tx_timer_nothing_expired:
DSB // Complete all memory access
BX lr // Return to caller
// }
.end
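
Note: the commented C fragments embedded in the assembly above correspond roughly to the logic sketched below. This is a simplified sketch using the ThreadX internal names referenced in those comments, not the shipped implementation; in particular, the real routine also pends PendSV when the time-slice results in a different thread being selected.

#define TX_SOURCE_CODE
#include "tx_api.h"
#include "tx_timer.h"
#include "tx_thread.h"

/* Simplified sketch of the flow implemented by the assembly above.  */
VOID _tx_timer_interrupt_sketch(VOID)
{
    /* Increment the system clock.  */
    _tx_timer_system_clock++;

    /* Test for time-slice expiration.  */
    if (_tx_timer_time_slice)
    {
        _tx_timer_time_slice--;
        if (_tx_timer_time_slice == 0)
            _tx_timer_expired_time_slice = TX_TRUE;
    }

    /* Test for timer expiration at the current timer list position.  */
    if (*_tx_timer_current_ptr)
    {
        _tx_timer_expired = TX_TRUE;
    }
    else
    {
        /* Nothing expired: advance the timer pointer and wrap at the list end.  */
        _tx_timer_current_ptr++;
        if (_tx_timer_current_ptr == _tx_timer_list_end)
            _tx_timer_current_ptr = _tx_timer_list_start;
    }

    /* Process whatever expired.  */
    if (_tx_timer_expired)
        _tx_timer_expiration_process();
    if (_tx_timer_expired_time_slice)
        _tx_thread_time_slice();
}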

View File

@ -0,0 +1,119 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#define TX_SOURCE_CODE
/* Include necessary system files. */
#include "tx_api.h"
#include "tx_initialize.h"
#include "tx_thread.h"
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _txe_thread_secure_stack_allocate Cortex-M85 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function checks for errors in the secure stack allocate */
/* function call. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* stack_size Size of secure stack to */
/* allocate */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* _tx_thread_secure_stack_allocate Actual stack alloc function */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txe_thread_secure_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
{
#if defined(TX_SINGLE_MODE_SECURE) || defined(TX_SINGLE_MODE_NON_SECURE)
return(TX_FEATURE_NOT_ENABLED);
#else
UINT status;
/* Default status to success. */
status = TX_SUCCESS;
/* Check for an invalid thread pointer. */
if (thread_ptr == TX_NULL)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Now check for invalid thread ID. */
else if (thread_ptr -> tx_thread_id != TX_THREAD_ID)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Check for interrupt call. */
if (TX_THREAD_GET_SYSTEM_STATE() != ((ULONG) 0))
{
/* Is call from an interrupt and not initialization? */
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
{
/* Invalid caller of this function, return appropriate error code. */
status = TX_CALLER_ERROR;
}
}
/* Determine if everything is okay. */
if (status == TX_SUCCESS)
{
/* Call actual secure stack allocate function. */
status = _tx_thread_secure_stack_allocate(thread_ptr, stack_size);
}
/* Return completion status. */
return(status);
#endif
}
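
As a usage sketch (the thread object and the 256-byte secure stack size below are assumptions for illustration, not part of this port), an application built for the non-secure zone typically allocates a secure stack for a thread that calls secure functions and frees it when the thread no longer needs it:

#include "tx_api.h"

/* Illustrative only: "my_thread" is assumed to have been created earlier
   with tx_thread_create; the 256-byte secure stack size is also an assumption.  */
extern TX_THREAD my_thread;

UINT give_thread_a_secure_stack(void)
{
UINT status;

    /* Allocate a secure-side stack so the thread may call secure functions.  */
    status = tx_thread_secure_stack_allocate(&my_thread, 256);
    if (status != TX_SUCCESS)
    {
        return(status);     /* TX_THREAD_ERROR, TX_CALLER_ERROR, or the allocate status.  */
    }

    /* ... the thread runs and calls into the secure world ...  */

    /* Release the secure stack when it is no longer needed.  */
    return(tx_thread_secure_stack_free(&my_thread));
}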

View File

@ -0,0 +1,120 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#define TX_SOURCE_CODE
/* Include necessary system files. */
#include "tx_api.h"
#include "tx_initialize.h"
#include "tx_thread.h"
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _txe_thread_secure_stack_free Cortex-M85 */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function checks for errors in the secure stack free */
/* function call. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* _tx_thread_secure_stack_free Actual stack free function */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txe_thread_secure_stack_free(TX_THREAD *thread_ptr)
{
#if defined(TX_SINGLE_MODE_SECURE) || defined(TX_SINGLE_MODE_NON_SECURE)
return(TX_FEATURE_NOT_ENABLED);
#else
UINT status;
/* Default status to success. */
status = TX_SUCCESS;
/* Check for an invalid thread pointer. */
if (thread_ptr == TX_NULL)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Now check for invalid thread ID. */
else if (thread_ptr -> tx_thread_id != TX_THREAD_ID)
{
/* Thread pointer is invalid, return appropriate error code. */
status = TX_THREAD_ERROR;
}
/* Check for interrupt call. */
if (TX_THREAD_GET_SYSTEM_STATE() != ((ULONG) 0))
{
/* Is call from an interrupt and not initialization? */
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
{
/* Invalid caller of this function, return appropriate error code. */
status = TX_CALLER_ERROR;
}
}
/* Determine if everything is okay. */
if (status == TX_SUCCESS)
{
/* Call actual secure stack free function. */
status = _tx_thread_secure_stack_free(thread_ptr);
}
/* Return completion status. */
return(status);
#endif
}

View File

@ -0,0 +1,21 @@
target_sources(${PROJECT_NAME} PRIVATE
${CMAKE_CURRENT_LIST_DIR}/src/txe_thread_secure_stack_allocate.c
${CMAKE_CURRENT_LIST_DIR}/src/txe_thread_secure_stack_free.c
${CMAKE_CURRENT_LIST_DIR}/src/tx_initialize_low_level.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_context_restore.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_context_save.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_interrupt_control.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_interrupt_disable.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_interrupt_restore.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_schedule.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_secure_stack.c
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_secure_stack_allocate.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_secure_stack_free.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_stack_build.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_system_return.S
${CMAKE_CURRENT_LIST_DIR}/src/tx_timer_interrupt.S
)
target_include_directories(${PROJECT_NAME} PUBLIC
inc
)

View File

@ -0,0 +1,649 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Port Specific */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M85/GNU */
/* 6.1.11 */
/* */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This file contains data type definitions that make the ThreadX */
/* real-time kernel function identically on a variety of different */
/* processor architectures. For example, the size or number of bits */
/* in an "int" data type vary between microprocessor architectures and */
/* even C compilers for the same microprocessor. ThreadX does not */
/* directly use native C data types. Instead, ThreadX creates its */
/* own special types that can be mapped to actual data types by this */
/* file to guarantee consistency in the interface and functionality. */
/* */
/* This file replaces the previous Cortex-M85 files. It unifies */
/* the Cortex-M85 compilers into one common file. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 03-02-2021 Scott Larson Modified comment(s), added */
/* ULONG64_DEFINED, */
/* resulting in version 6.1.5 */
/* 06-02-2021 Scott Larson Modified comment(s), removed */
/* unneeded header file, funcs */
/* set_control and get_control */
/* changed to inline, */
/* added symbol to enable */
/* stack error handler, */
/* resulting in version 6.1.7 */
/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* 01-31-2022 Scott Larson Modified comment(s), unified */
/* this file across compilers, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
/* 04-25-2022 Scott Larson Modified comments and added */
/* volatile to registers, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
#ifndef TX_PORT_H
#define TX_PORT_H
/* Determine if the optional ThreadX user define file should be used. */
#ifdef TX_INCLUDE_USER_DEFINE_FILE
/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
#endif /* TX_INCLUDE_USER_DEFINE_FILE */
/* Define compiler library include files. */
#include <stdlib.h>
#include <string.h>
#ifdef __ICCARM__
#include <intrinsics.h> /* IAR Intrinsics */
#define __asm__ __asm /* Define to make all inline asm from each compiler look similar */
#define _tx_control_get __get_CONTROL
#define _tx_control_set __set_CONTROL
#define _tx_ipsr_get __get_IPSR
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#endif /* __ICCARM__ */
#ifdef __ARMCOMPILER_VERSION
#include <arm_compat.h>
#endif
/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
typedef unsigned char UCHAR;
typedef int INT;
typedef unsigned int UINT;
typedef long LONG;
typedef unsigned long ULONG;
typedef unsigned long long ULONG64;
typedef short SHORT;
typedef unsigned short USHORT;
#define ULONG64_DEFINED
/* Function prototypes for this port. */
struct TX_THREAD_STRUCT;
UINT _txe_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *thread_ptr, ULONG stack_size);
UINT _txe_thread_secure_stack_free(struct TX_THREAD_STRUCT *thread_ptr);
UINT _tx_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *tx_thread, ULONG stack_size);
UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
/* Define the system API mappings based on the error checking
selected by the user. Note: this section is only applicable to
application source code, hence the conditional that turns off this
stuff when the include file is processed by the ThreadX source. */
#ifndef TX_SOURCE_CODE
/* Determine if error checking is desired. If so, map API functions
to the appropriate error checking front-ends. Otherwise, map API
functions to the core functions that actually perform the work.
Note: error checking is enabled by default. */
#ifdef TX_DISABLE_ERROR_CHECKING
/* Services without error checking. */
#define tx_thread_secure_stack_allocate _tx_thread_secure_stack_allocate
#define tx_thread_secure_stack_free _tx_thread_secure_stack_free
#else
/* Services with error checking. */
#define tx_thread_secure_stack_allocate _txe_thread_secure_stack_allocate
#define tx_thread_secure_stack_free _txe_thread_secure_stack_free
#endif /* TX_DISABLE_ERROR_CHECKING */
#endif /* TX_SOURCE_CODE */
/* This port has a usage fault handler in _tx_initialize_low_level for stack exceptions. */
#define TX_PORT_THREAD_STACK_ERROR_HANDLING
/* Define the priority levels for ThreadX. Legal values range
from 32 to 1024 and MUST be evenly divisible by 32. */
#ifndef TX_MAX_PRIORITIES
#define TX_MAX_PRIORITIES 32
#endif
/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
thread creation is less than this value, the thread create call will return an error. */
#ifndef TX_MINIMUM_STACK
#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
#endif
/* Define the system timer thread's default stack size and priority. These are only applicable
if TX_TIMER_PROCESS_IN_ISR is not defined. */
#ifndef TX_TIMER_THREAD_STACK_SIZE
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
#ifndef TX_TIMER_THREAD_PRIORITY
#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
/* Define various constants for the ThreadX Cortex-M port. */
#define TX_INT_DISABLE 1 /* Disable interrupts */
#define TX_INT_ENABLE 0 /* Enable interrupts */
/* Define the clock source for trace event entry time stamp. The following two items are port specific.
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
#endif
/* Define the port specific options for the _tx_build_options variable. This variable indicates
how the ThreadX library was built. */
#define TX_PORT_SPECIFIC_BUILD_OPTIONS (0)
/* Define the in-line initialization constant so that modules with in-line
initialization capabilities can prevent their initialization from being
a function call. */
#ifdef TX_MISRA_ENABLE
#define TX_DISABLE_INLINE
#else
#define TX_INLINE_INITIALIZATION
#endif
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
logic. */
#ifndef TX_MISRA_ENABLE
#ifdef TX_ENABLE_STACK_CHECKING
#undef TX_DISABLE_STACK_FILLING
#endif
#endif
/* Define the TX_THREAD control block extensions for this port. The main reason
for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
#define TX_THREAD_EXTENSION_0
#define TX_THREAD_EXTENSION_1
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
/* IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context; \
VOID *tx_thread_iar_tls_pointer;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_iar_tls_pointer;
#endif
#else
/* No IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
#define TX_BLOCK_POOL_EXTENSION
#define TX_BYTE_POOL_EXTENSION
#define TX_EVENT_FLAGS_GROUP_EXTENSION
#define TX_MUTEX_EXTENSION
#define TX_QUEUE_EXTENSION
#define TX_SEMAPHORE_EXTENSION
#define TX_TIMER_EXTENSION
/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
#define TX_THREAD_USER_EXTENSION
#endif
/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
tx_thread_shell_entry, and tx_thread_terminate. */
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
void *_tx_iar_create_per_thread_tls_area(void);
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
void __iar_Initlocks(void);
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = _tx_iar_create_per_thread_tls_area();
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0); \
if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0);
#endif
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION do {__iar_Initlocks();} while(0);
#else /* No IAR library support. */
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* Define the size of the secure stack for the timer thread and use the extension to allocate the secure stack. */
#define TX_TIMER_THREAD_SECURE_STACK_SIZE 256
#define TX_TIMER_INITIALIZE_EXTENSION(status) _tx_thread_secure_stack_allocate(&_tx_timer_thread, TX_TIMER_THREAD_SECURE_STACK_SIZE);
#endif
#if defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__)
#ifdef TX_MISRA_ENABLE
ULONG _tx_misra_control_get(void);
void _tx_misra_control_set(ULONG value);
ULONG _tx_misra_fpccr_get(void);
void _tx_misra_vfp_touch(void);
#else /* TX_MISRA_ENABLE not defined */
#ifdef __GNUC__ /* GCC and ARM Compiler 6 */
__attribute__( ( always_inline ) ) static inline ULONG _tx_control_get(void)
{
ULONG control_value;
__asm__ volatile (" MRS %0,CONTROL ": "=r" (control_value) );
return(control_value);
}
__attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG control_value)
{
__asm__ volatile (" MSR CONTROL,%0": : "r" (control_value): "memory" );
}
#endif /* __GNUC__ */
/* Touch VFP register in order to flush. Works for AC6/GCC/IAR compilers. */
#define TX_VFP_TOUCH() __asm__ volatile ("VMOV.F32 s0, s0");
#endif /* TX_MISRA_ENABLE */
/* A completed thread falls into _thread_shell_entry and we can simply deactivate the FPU via CONTROL.FPCA
in order to ensure no lazy stacking will occur. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
}
#else
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
}
#endif
/* A thread can be terminated by another thread, so we first check if it's self-terminating and not in an ISR.
If so, deactivate the FPU via CONTROL.FPCA. Otherwise we are in an interrupt or another thread is terminating
this one, so if the FPCCR.LSPACT bit is set, we need to save the CONTROL.FPCA state, touch the FPU to flush
the lazy FPU save, then restore the CONTROL.FPCA state. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
} \
} \
}
#else
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = _tx_misra_fpccr_get(); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
_tx_misra_vfp_touch(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
} \
} \
}
#endif
#else /* No VFP in use */
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
#endif /* defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__) */
/* Define the ThreadX object creation extensions for the remaining objects. */
#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
/* Define the ThreadX object deletion extensions for the remaining objects. */
#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
/* Define the get system state macro. */
#ifndef TX_THREAD_GET_SYSTEM_STATE
#ifndef TX_MISRA_ENABLE
#if defined(__GNUC__) /* GCC and AC6 */
__attribute__( ( always_inline ) ) static inline UINT _tx_ipsr_get(void)
{
UINT ipsr_value;
__asm__ volatile (" MRS %0,IPSR ": "=r" (ipsr_value) );
return(ipsr_value);
}
#endif /* GCC and AC6 IPSR_get function. */
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_ipsr_get())
#else /* TX_MISRA_ENABLE is defined, use MISRA function. */
ULONG _tx_misra_ipsr_get(VOID);
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_misra_ipsr_get())
#endif /* TX_MISRA_ENABLE */
#endif /* TX_THREAD_GET_SYSTEM_STATE */
/* Define the check for whether or not to call the _tx_thread_system_return function. A non-zero value
indicates that _tx_thread_system_return should not be called. This overrides the definition in tx_thread.h
for Cortex-M so we don't waste time checking the _tx_thread_system_state variable that is always
zero after initialization for Cortex-M ports. */
#ifndef TX_THREAD_SYSTEM_RETURN_CHECK
#define TX_THREAD_SYSTEM_RETURN_CHECK(c) (c) = ((ULONG) _tx_thread_preempt_disable);
#endif
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* Initialize secure stacks for threads calling secure functions. */
extern void _tx_thread_secure_stack_initialize(void);
#define TX_INITIALIZE_KERNEL_ENTER_EXTENSION _tx_thread_secure_stack_initialize();
#endif
/* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
prevent early scheduling on Cortex-M parts. */
#define TX_PORT_SPECIFIC_POST_INITIALIZATION _tx_thread_preempt_disable++;
#ifndef TX_DISABLE_INLINE
/* Define the TX_LOWEST_SET_BIT_CALCULATE macro for each compiler. */
#ifdef __ICCARM__ /* IAR Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = (UINT) __CLZ(__RBIT((m)));
#elif defined(__GNUC__) /* GCC and AC6 Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) __asm__ volatile (" RBIT %0,%1 ": "=r" (m) : "r" (m) ); \
__asm__ volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) );
#else
#error "Compiler not supported."
#endif
/* Define the interrupt disable/restore macros. */
__attribute__( ( always_inline ) ) static inline UINT __get_interrupt_posture(void)
{
UINT posture;
#ifdef TX_PORT_USE_BASEPRI
__asm__ volatile ("MRS %0, BASEPRI ": "=r" (posture));
#else
__asm__ volatile ("MRS %0, PRIMASK ": "=r" (posture));
#endif
return(posture);
}
#ifdef TX_PORT_USE_BASEPRI
__attribute__( ( always_inline ) ) static inline void __set_basepri_value(UINT basepri_value)
{
__asm__ volatile ("MSR BASEPRI,%0 ": : "r" (basepri_value));
}
#else
__attribute__( ( always_inline ) ) static inline void __enable_interrupts(void)
{
__asm__ volatile ("CPSIE i": : : "memory");
}
#endif
__attribute__( ( always_inline ) ) static inline void __restore_interrupt(UINT int_posture)
{
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(int_posture);
#else
__asm__ volatile ("MSR PRIMASK,%0": : "r" (int_posture): "memory");
#endif
}
__attribute__( ( always_inline ) ) static inline UINT __disable_interrupts(void)
{
UINT int_posture;
int_posture = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(TX_PORT_BASEPRI);
#else
__asm__ volatile ("CPSID i" : : : "memory");
#endif
return(int_posture);
}
__attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_inline(void)
{
UINT interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
*((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_tx_ipsr_get() == 0)
{
interrupt_save = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(0);
#else
__enable_interrupts();
#endif
__restore_interrupt(interrupt_save);
}
}
#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
#define TX_DISABLE interrupt_save = __disable_interrupts();
#define TX_RESTORE __restore_interrupt(interrupt_save);
/* Redefine _tx_thread_system_return for improved performance. */
#define _tx_thread_system_return _tx_thread_system_return_inline
#else /* TX_DISABLE_INLINE is defined */
UINT _tx_thread_interrupt_disable(VOID);
VOID _tx_thread_interrupt_restore(UINT previous_posture);
#define TX_INTERRUPT_SAVE_AREA register UINT interrupt_save;
#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
#endif /* TX_DISABLE_INLINE */
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
"Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M85/GNU Version 6.1.10 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
#else
extern CHAR _tx_version_id[];
#endif
#endif
#endif
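
As an illustration of how the interrupt lockout macros defined above are used (the protected counter below is a made-up example, not part of this port):

#include "tx_api.h"

static ULONG protected_counter;     /* Hypothetical data shared with an ISR.  */

VOID increment_protected_counter(VOID)
{
TX_INTERRUPT_SAVE_AREA              /* Declares the interrupt_save variable.  */

    TX_DISABLE                      /* Disable (or mask via BASEPRI) interrupts.  */
    protected_counter++;            /* Access the shared data safely.  */
    TX_RESTORE                      /* Restore the saved interrupt posture.  */
}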

View File

@ -0,0 +1,61 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* COMPONENT DEFINITION RELEASE */
/* */
/* tx_secure_interface.h PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This file defines the ThreadX secure thread stack components, */
/* including data types and external references. */
/* It is assumed that tx_api.h and tx_port.h have already been */
/* included. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
#ifndef TX_SECURE_INTERFACE_H
#define TX_SECURE_INTERFACE_H
/* Define internal secure thread stack function prototypes. */
extern UINT _tx_thread_secure_mode_stack_initialize(void);
extern UINT _tx_thread_secure_mode_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size);
extern UINT _tx_thread_secure_mode_stack_free(TX_THREAD *thread_ptr);
extern void _tx_thread_secure_stack_initialize(void);
extern void _tx_thread_secure_stack_context_save(TX_THREAD *thread_ptr);
extern void _tx_thread_secure_stack_context_restore(TX_THREAD *thread_ptr);
#endif

View File

@ -0,0 +1,210 @@
Microsoft's Azure RTOS ThreadX for Cortex-M85
Using the GNU Tools
1. Building the ThreadX run-time Library
Import all ThreadX common and port-specific source files into a GNU project.
Configure the project to build a library rather than an executable. This
results in the ThreadX run-time library file tx.a, which is needed by
the application.
Files tx_thread_stack_error_handler.c and tx_thread_stack_error_notify.c
replace the common files of the same name.
2. Demonstration System
No demonstration project is provided.
3. System Initialization
The entry point in ThreadX for the Cortex-M85 using GNU tools uses the standard GNU
Cortex-M85 reset sequence. From the reset vector the C runtime will be initialized.
The ThreadX tx_initialize_low_level.S file is responsible for setting up
various system data structures, the vector area, and a periodic timer interrupt
source.
In addition, _tx_initialize_low_level determines the first available
address for use by the application, which is supplied as the sole input
parameter to your application definition function, tx_application_define.
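A minimal sketch of tx_application_define using that address follows; the byte pool,
thread names, stack sizes, and priorities are illustrative assumptions only.

#include "tx_api.h"

/* Illustrative only: object names, sizes, and priorities are assumptions.  */
TX_BYTE_POOL byte_pool_0;
TX_THREAD    thread_0;

void thread_0_entry(ULONG input);

void tx_application_define(void *first_unused_memory)
{
CHAR    *stack_ptr;

    /* Carve application objects out of the memory that
       _tx_initialize_low_level reported as unused.  */
    tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, 8192);

    /* Allocate a thread stack from the pool and create the first thread.  */
    tx_byte_allocate(&byte_pool_0, (VOID **) &stack_ptr, 1024, TX_NO_WAIT);
    tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
                     stack_ptr, 1024, 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
}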
4. Register Usage and Stack Frames
The following defines the saved context stack frames for context switches
that occur as a result of interrupt handling or from thread-level API calls.
All suspended threads have the same stack frame in the Cortex-M85 version of
ThreadX. The top of the suspended thread's stack is pointed to by
tx_thread_stack_ptr in the associated thread control block TX_THREAD.
Non-FPU Stack Frame:
Stack Offset Stack Contents
0x00 LR Interrupted LR (LR at time of PENDSV)
0x04 r4 Software stacked GP registers
0x08 r5
0x0C r6
0x10 r7
0x14 r8
0x18 r9
0x1C r10
0x20 r11
0x24 r0 Hardware stacked registers
0x28 r1
0x2C r2
0x30 r3
0x34 r12
0x38 lr
0x3C pc
0x40 xPSR
FPU Stack Frame (only interrupted thread with FPU enabled):
Stack Offset Stack Contents
0x00 LR Interrupted LR (LR at time of PENDSV)
0x04 s16 Software stacked FPU registers
0x08 s17
0x0C s18
0x10 s19
0x14 s20
0x18 s21
0x1C s22
0x20 s23
0x24 s24
0x28 s25
0x2C s26
0x30 s27
0x34 s28
0x38 s29
0x3C s30
0x40 s31
0x44 r4 Software stacked registers
0x48 r5
0x4C r6
0x50 r7
0x54 r8
0x58 r9
0x5C r10
0x60 r11
0x64 r0 Hardware stacked registers
0x68 r1
0x6C r2
0x70 r3
0x74 r12
0x78 lr
0x7C pc
0x80 xPSR
0x84 s0 Hardware stacked FPU registers
0x88 s1
0x8C s2
0x90 s3
0x94 s4
0x98 s5
0x9C s6
0xA0 s7
0xA4 s8
0xA8 s9
0xAC s10
0xB0 s11
0xB4 s12
0xB8 s13
0xBC s14
0xC0 s15
0xC4 fpscr
5. Improving Performance
To make ThreadX and the application(s) run faster, you can enable
all compiler optimizations.
In addition, you can eliminate the ThreadX basic API error checking by
compiling your application code with the symbol TX_DISABLE_ERROR_CHECKING
defined. For example, the symbol can be placed in tx_user.h (when
TX_INCLUDE_USER_DEFINE_FILE is used) or passed on the compiler command line;
a minimal sketch is shown below.
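/* tx_user.h (or -DTX_DISABLE_ERROR_CHECKING on the command line).
   This maps the tx_* service names directly to the _tx_* worker functions,
   bypassing the _txe_* error-checking shells.  */
#define TX_DISABLE_ERROR_CHECKING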
6. Interrupt Handling
ThreadX provides complete and high-performance interrupt handling for Cortex-M85
targets. There is a certain set of requirements that are defined in the
following sub-sections:
6.1 Vector Area
The Cortex-M85 vectors start at the label __tx_vectors or similar. The application may modify
the vector area according to its needs. There is code in tx_initialize_low_level() that will
configure the vector base register.
6.2 Managed Interrupts
ISRs can be written completely in C (or assembly language) without any calls to
_tx_thread_context_save or _tx_thread_context_restore. These ISRs are allowed access to the
ThreadX API that is available to ISRs.
ISRs written in C will take the form (where "your_C_isr" is an entry in the vector table):
void your_C_isr(void)
{
/* ISR processing goes here, including any needed function calls. */
}
ISRs written in assembly language will take the form:
.global your_assembly_isr
.thumb_func
your_assembly_isr:
PUSH {r0, lr}
;
; /* Do interrupt handler work here */
; /* BL <your interrupt routine in C> */
POP {r0, lr}
BX lr
Note: the Cortex-M85 requires exception handlers to be thumb labels; this implies bit 0 is set.
To accomplish this, the declaration of the label has to be preceded by the assembler directive
.thumb_func to instruct the linker to create thumb labels. The label __tx_IntHandler needs to
be inserted in the correct location in the interrupt vector table. This table is typically
located in either your runtime startup file or in the tx_initialize_low_level.S file.
7. FPU Support
ThreadX for Cortex-M85 provides automatic ("lazy") VFP support, which means that application threads
can simply use the VFP and ThreadX automatically maintains the VFP registers as part of the thread
context.
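For instance, a thread entry can use floating-point arithmetic directly; no explicit
save/restore calls are needed because the port stacks the VFP registers lazily. The
entry function below is purely illustrative.

#include "tx_api.h"

/* Illustrative thread entry: the FPU context is preserved automatically.  */
VOID filter_thread_entry(ULONG input)
{
float   accumulator = 0.0f;

    while (1)
    {
        accumulator = (accumulator * 0.9f) + ((float) input * 0.1f);
        tx_thread_sleep(10);        /* Sleep for 10 timer ticks.  */
    }
}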
8. Revision History
For generic code revision information, please refer to the readme_threadx_generic.txt
file, which is included in your distribution. The following details the revision
information associated with this specific port of ThreadX:
06-02-2021 Release 6.1.7 changes:
tx_thread_secure_stack_initialize.S New file
tx_thread_schedule.S Added secure stack initialize to SVC handler
tx_thread_secure_stack.c Fixed stack pointer save, initialize in handler mode
04-02-2021 Release 6.1.6 changes:
tx_port.h Updated macro definition
tx_thread_schedule.s Added low power support
03-02-2021 The following files were changed/added for version 6.1.5:
tx_port.h Added ULONG64_DEFINED
09-30-2020 Initial ThreadX 6.1 version for Cortex-M85 using GNU tools.
Copyright(c) 1996-2020 Microsoft Corporation
https://azure.com/rtos

View File

@ -0,0 +1,278 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Initialize */
/** */
/**************************************************************************/
/**************************************************************************/
SYSTEM_CLOCK = 6000000
SYSTICK_CYCLES = ((SYSTEM_CLOCK / 100) -1)
/* Setup the stack and heap areas. */
STACK_SIZE = 0x00000400
HEAP_SIZE = 0x00000000
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_initialize_low_level Cortex-M85/GNU */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for any low-level processor */
/* initialization, including setting up interrupt vectors, setting */
/* up a periodic timer interrupt source, saving the system stack */
/* pointer for use in ISR processing later, and finding the first */
/* available RAM memory address for tx_application_define. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
// VOID _tx_initialize_low_level(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_initialize_low_level
.thumb_func
.type _tx_initialize_low_level, function
_tx_initialize_low_level:
/* Disable interrupts during ThreadX initialization. */
CPSID i
/* Set base of available memory to end of non-initialised RAM area. */
LDR r0, =_tx_initialize_unused_memory // Build address of unused memory pointer
LDR r1, =__RAM_segment_used_end__ // Build first free address
ADD r1, r1, #4 //
STR r1, [r0] // Setup first unused memory pointer
/* Setup Vector Table Offset Register. */
MOV r0, #0xE000E000 // Build address of NVIC registers
LDR r1, =_vectors // Pickup address of vector table
STR r1, [r0, #0xD08] // Set vector table address
/* Enable the cycle count register. */
LDR r0, =0xE0001000 // Build address of DWT register
LDR r1, [r0] // Pickup the current value
ORR r1, r1, #1 // Set the CYCCNTENA bit
STR r1, [r0] // Enable the cycle count register
/* Set system stack pointer from vector value. */
LDR r0, =_tx_thread_system_stack_ptr // Build address of system stack pointer
LDR r1, =_vectors // Pickup address of vector table
LDR r1, [r1] // Pickup reset stack pointer
STR r1, [r0] // Save system stack pointer
/* Configure SysTick. */
MOV r0, #0xE000E000 // Build address of NVIC registers
LDR r1, =SYSTICK_CYCLES
STR r1, [r0, #0x14] // Setup SysTick Reload Value
MOV r1, #0x7 // Build SysTick Control Enable Value
STR r1, [r0, #0x10] // Setup SysTick Control
/* Configure handler priorities. */
LDR r1, =0x00000000 // Rsrv, UsgF, BusF, MemM
STR r1, [r0, #0xD18] // Setup System Handlers 4-7 Priority Registers
LDR r1, =0xFF000000 // SVCl, Rsrv, Rsrv, Rsrv
STR r1, [r0, #0xD1C] // Setup System Handlers 8-11 Priority Registers
// Note: SVC must be lowest priority, which is 0xFF
LDR r1, =0x40FF0000 // SysT, PnSV, Rsrv, DbgM
STR r1, [r0, #0xD20] // Setup System Handlers 12-15 Priority Registers
// Note: PnSV must be lowest priority, which is 0xFF
/* Return to caller. */
BX lr
// }
/* Define shells for each of the unused vectors. */
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_BadHandler
.thumb_func
.type __tx_BadHandler, function
__tx_BadHandler:
B __tx_BadHandler
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_IntHandler
.thumb_func
.type __tx_IntHandler, function
__tx_IntHandler:
// VOID InterruptHandler (VOID)
// {
PUSH {r0,lr} // Save LR (and dummy r0 to maintain stack alignment)
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_enter // Call the ISR enter function
#endif
/* Do interrupt handler work here */
/* .... */
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_exit // Call the ISR exit function
#endif
POP {r0,lr}
BX lr
// }
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global SysTick_Handler
.thumb_func
.type SysTick_Handler, function
SysTick_Handler:
// VOID TimerInterruptHandler (VOID)
// {
PUSH {r0,lr} // Save LR (and dummy r0 to maintain stack alignment)
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_enter // Call the ISR enter function
#endif
BL _tx_timer_interrupt
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
BL _tx_execution_isr_exit // Call the ISR exit function
#endif
POP {r0,lr}
BX lr
// }
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global HardFault_Handler
.thumb_func
.type HardFault_Handler, function
HardFault_Handler:
B HardFault_Handler
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global UsageFault_Handler
.thumb_func
.type UsageFault_Handler, function
UsageFault_Handler:
CPSID i // Disable interrupts
// Check for stack limit fault
LDR r0, =0xE000ED28 // CFSR address
LDR r1,[r0] // Pick up CFSR
TST r1, #0x00100000 // Check for Stack Overflow
_unhandled_usage_loop:
BEQ _unhandled_usage_loop // If not stack overflow then loop
// Handle stack overflow
STR r1, [r0] // Clear CFSR flag(s)
#ifdef __ARM_FP
LDR r0, =0xE000EF34 // Cleanup FPU context: Load FPCCR address
LDR r1, [r0] // Load FPCCR
BIC r1, r1, #1 // Clear the lazy preservation active bit
STR r1, [r0] // Store the value
#endif
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r0,[r0] // Pick up current thread pointer
PUSH {r0,lr} // Save LR (and r0 to maintain stack alignment)
BL _tx_thread_stack_error_handler // Call ThreadX/user handler
POP {r0,lr} // Restore LR and dummy reg
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
// Call the thread exit function to indicate the thread is no longer executing.
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
#endif
MOV r1, #0 // Build NULL value
LDR r0, =_tx_thread_current_ptr // Pickup address of current thread pointer
STR r1, [r0] // Clear current thread pointer
// Return from UsageFault_Handler exception
LDR r0, =0xE000ED04 // Load ICSR
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
CPSIE i // Enable interrupts
BX lr // Return from exception
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_NMIHandler
.thumb_func
.type __tx_NMIHandler, function
__tx_NMIHandler:
B __tx_NMIHandler
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global __tx_DBGHandler
.thumb_func
.type __tx_DBGHandler, function
__tx_DBGHandler:
B __tx_DBGHandler
.end

View File

@ -0,0 +1,719 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** ThreadX MISRA Compliance */
/** */
/**************************************************************************/
/**************************************************************************/
#define SHT_PROGBITS 0x1
.global __aeabi_memset
.global _tx_thread_current_ptr
.global _tx_thread_interrupt_disable
.global _tx_thread_interrupt_restore
.global _tx_thread_stack_analyze
.global _tx_thread_stack_error_handler
.global _tx_thread_system_state
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_trace_buffer_current_ptr
.global _tx_trace_buffer_end_ptr
.global _tx_trace_buffer_start_ptr
.global _tx_trace_event_enable_bits
.global _tx_trace_full_notify_function
.global _tx_trace_header_ptr
#endif
.global _tx_misra_always_true
.global _tx_misra_block_pool_to_uchar_pointer_convert
.global _tx_misra_byte_pool_to_uchar_pointer_convert
.global _tx_misra_char_to_uchar_pointer_convert
.global _tx_misra_const_char_to_char_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_entry_to_uchar_pointer_convert
#endif
.global _tx_misra_indirect_void_to_uchar_pointer_convert
.global _tx_misra_memset
.global _tx_misra_message_copy
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_object_to_uchar_pointer_convert
#endif
.global _tx_misra_pointer_to_ulong_convert
.global _tx_misra_status_get
.global _tx_misra_thread_stack_check
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_time_stamp_get
#endif
.global _tx_misra_timer_indirect_to_void_pointer_convert
.global _tx_misra_timer_pointer_add
.global _tx_misra_timer_pointer_dif
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_trace_event_insert
#endif
.global _tx_misra_uchar_pointer_add
.global _tx_misra_uchar_pointer_dif
.global _tx_misra_uchar_pointer_sub
.global _tx_misra_uchar_to_align_type_pointer_convert
.global _tx_misra_uchar_to_block_pool_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_uchar_to_entry_pointer_convert
.global _tx_misra_uchar_to_header_pointer_convert
#endif
.global _tx_misra_uchar_to_indirect_byte_pool_pointer_convert
.global _tx_misra_uchar_to_indirect_uchar_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_uchar_to_object_pointer_convert
#endif
.global _tx_misra_uchar_to_void_pointer_convert
.global _tx_misra_ulong_pointer_add
.global _tx_misra_ulong_pointer_dif
.global _tx_misra_ulong_pointer_sub
.global _tx_misra_ulong_to_pointer_convert
.global _tx_misra_ulong_to_thread_pointer_convert
.global _tx_misra_user_timer_pointer_get
.global _tx_misra_void_to_block_pool_pointer_convert
.global _tx_misra_void_to_byte_pool_pointer_convert
.global _tx_misra_void_to_event_flags_pointer_convert
.global _tx_misra_void_to_indirect_uchar_pointer_convert
.global _tx_misra_void_to_mutex_pointer_convert
.global _tx_misra_void_to_queue_pointer_convert
.global _tx_misra_void_to_semaphore_pointer_convert
.global _tx_misra_void_to_thread_pointer_convert
.global _tx_misra_void_to_uchar_pointer_convert
.global _tx_misra_void_to_ulong_pointer_convert
.global _tx_misra_ipsr_get
.global _tx_misra_control_get
.global _tx_misra_control_set
#ifdef __ARM_FP
.global _tx_misra_fpccr_get
.global _tx_misra_vfp_touch
#endif
.global _tx_misra_event_flags_group_not_used
.global _tx_misra_event_flags_set_notify_not_used
.global _tx_misra_queue_not_used
.global _tx_misra_queue_send_notify_not_used
.global _tx_misra_semaphore_not_used
.global _tx_misra_semaphore_put_notify_not_used
.global _tx_misra_thread_entry_exit_notify_not_used
.global _tx_misra_thread_not_used
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_memset(VOID *ptr, UINT value, UINT size); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.align 4
.syntax unified
.thumb_func
_tx_misra_memset:
PUSH {R4,LR}
MOVS R4,R0
MOVS R0,R2
MOVS R2,R1
MOVS R1,R0
MOVS R0,R4
BL __aeabi_memset
POP {R4,PC} // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UCHAR *_tx_misra_uchar_pointer_add(UCHAR *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_add:
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UCHAR *_tx_misra_uchar_pointer_sub(UCHAR *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_sub:
RSBS R1,R1,#+0
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_dif:
SUBS R0,R0,R1
BX LR // return
/************************************************************************************************************************************/
/************************************************************************************************************************************/
/** */
/** This single function serves all of the below prototypes. */
/** */
/** ULONG _tx_misra_pointer_to_ulong_convert(VOID *ptr); */
/** VOID *_tx_misra_ulong_to_pointer_convert(ULONG input); */
/** UCHAR **_tx_misra_indirect_void_to_uchar_pointer_convert(VOID **return_ptr); */
/** UCHAR **_tx_misra_uchar_to_indirect_uchar_pointer_convert(UCHAR *pointer); */
/** UCHAR *_tx_misra_block_pool_to_uchar_pointer_convert(TX_BLOCK_POOL *pool); */
/** TX_BLOCK_POOL *_tx_misra_void_to_block_pool_pointer_convert(VOID *pointer); */
/** UCHAR *_tx_misra_void_to_uchar_pointer_convert(VOID *pointer); */
/** TX_BLOCK_POOL *_tx_misra_uchar_to_block_pool_pointer_convert(UCHAR *pointer); */
/** UCHAR **_tx_misra_void_to_indirect_uchar_pointer_convert(VOID *pointer); */
/** TX_BYTE_POOL *_tx_misra_void_to_byte_pool_pointer_convert(VOID *pointer); */
/** UCHAR *_tx_misra_byte_pool_to_uchar_pointer_convert(TX_BYTE_POOL *pool); */
/** ALIGN_TYPE *_tx_misra_uchar_to_align_type_pointer_convert(UCHAR *pointer); */
/** TX_BYTE_POOL **_tx_misra_uchar_to_indirect_byte_pool_pointer_convert(UCHAR *pointer); */
/** TX_EVENT_FLAGS_GROUP *_tx_misra_void_to_event_flags_pointer_convert(VOID *pointer); */
/** ULONG *_tx_misra_void_to_ulong_pointer_convert(VOID *pointer); */
/** TX_MUTEX *_tx_misra_void_to_mutex_pointer_convert(VOID *pointer); */
/** TX_QUEUE *_tx_misra_void_to_queue_pointer_convert(VOID *pointer); */
/** TX_SEMAPHORE *_tx_misra_void_to_semaphore_pointer_convert(VOID *pointer); */
/** VOID *_tx_misra_uchar_to_void_pointer_convert(UCHAR *pointer); */
/** TX_THREAD *_tx_misra_ulong_to_thread_pointer_convert(ULONG value); */
/** VOID *_tx_misra_timer_indirect_to_void_pointer_convert(TX_TIMER_INTERNAL **pointer); */
/** CHAR *_tx_misra_const_char_to_char_pointer_convert(const char *pointer); */
/** TX_THREAD *_tx_misra_void_to_thread_pointer_convert(void *pointer); */
/** UCHAR *_tx_misra_object_to_uchar_pointer_convert(TX_TRACE_OBJECT_ENTRY *pointer); */
/** TX_TRACE_OBJECT_ENTRY *_tx_misra_uchar_to_object_pointer_convert(UCHAR *pointer); */
/** TX_TRACE_HEADER *_tx_misra_uchar_to_header_pointer_convert(UCHAR *pointer); */
/** TX_TRACE_BUFFER_ENTRY *_tx_misra_uchar_to_entry_pointer_convert(UCHAR *pointer); */
/** UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer); */
/** UCHAR *_tx_misra_char_to_uchar_pointer_convert(CHAR *pointer); */
/** VOID _tx_misra_event_flags_group_not_used(TX_EVENT_FLAGS_GROUP *group_ptr); */
/** VOID _tx_misra_event_flags_set_notify_not_used(VOID (*events_set_notify)(TX_EVENT_FLAGS_GROUP *notify_group_ptr)); */
/** VOID _tx_misra_queue_not_used(TX_QUEUE *queue_ptr); */
/** VOID _tx_misra_queue_send_notify_not_used(VOID (*queue_send_notify)(TX_QUEUE *notify_queue_ptr)); */
/** VOID _tx_misra_semaphore_not_used(TX_SEMAPHORE *semaphore_ptr); */
/** VOID _tx_misra_semaphore_put_notify_not_used(VOID (*semaphore_put_notify)(TX_SEMAPHORE *notify_semaphore_ptr)); */
/** VOID _tx_misra_thread_not_used(TX_THREAD *thread_ptr); */
/** VOID _tx_misra_thread_entry_exit_notify_not_used(VOID (*thread_entry_exit_notify)(TX_THREAD *notify_thread_ptr, UINT id)); */
/** */
/************************************************************************************************************************************/
/************************************************************************************************************************************/
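/* Reading aid (not part of the original file): every prototype listed above is an
   identity operation at the machine level -- under AAPCS the first argument and the
   return value both live in r0, so one shared BX LR body serves all of the aliases
   below.  A hedged C-level sketch of what any one of them does:

       UCHAR *_tx_misra_void_to_uchar_pointer_convert(VOID *pointer)
       {
           return (UCHAR *)pointer;     /* nothing to do beyond returning r0 */
       }
*/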
.text
.thumb_func
_tx_misra_pointer_to_ulong_convert:
_tx_misra_ulong_to_pointer_convert:
_tx_misra_indirect_void_to_uchar_pointer_convert:
_tx_misra_uchar_to_indirect_uchar_pointer_convert:
_tx_misra_block_pool_to_uchar_pointer_convert:
_tx_misra_void_to_block_pool_pointer_convert:
_tx_misra_void_to_uchar_pointer_convert:
_tx_misra_uchar_to_block_pool_pointer_convert:
_tx_misra_void_to_indirect_uchar_pointer_convert:
_tx_misra_void_to_byte_pool_pointer_convert:
_tx_misra_byte_pool_to_uchar_pointer_convert:
_tx_misra_uchar_to_align_type_pointer_convert:
_tx_misra_uchar_to_indirect_byte_pool_pointer_convert:
_tx_misra_void_to_event_flags_pointer_convert:
_tx_misra_void_to_ulong_pointer_convert:
_tx_misra_void_to_mutex_pointer_convert:
_tx_misra_void_to_queue_pointer_convert:
_tx_misra_void_to_semaphore_pointer_convert:
_tx_misra_uchar_to_void_pointer_convert:
_tx_misra_ulong_to_thread_pointer_convert:
_tx_misra_timer_indirect_to_void_pointer_convert:
_tx_misra_const_char_to_char_pointer_convert:
_tx_misra_void_to_thread_pointer_convert:
#ifdef TX_ENABLE_EVENT_TRACE
_tx_misra_object_to_uchar_pointer_convert:
_tx_misra_uchar_to_object_pointer_convert:
_tx_misra_uchar_to_header_pointer_convert:
_tx_misra_uchar_to_entry_pointer_convert:
_tx_misra_entry_to_uchar_pointer_convert:
#endif
_tx_misra_char_to_uchar_pointer_convert:
_tx_misra_event_flags_group_not_used:
_tx_misra_event_flags_set_notify_not_used:
_tx_misra_queue_not_used:
_tx_misra_queue_send_notify_not_used:
_tx_misra_semaphore_not_used:
_tx_misra_semaphore_put_notify_not_used:
_tx_misra_thread_entry_exit_notify_not_used:
_tx_misra_thread_not_used:
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG *_tx_misra_ulong_pointer_add(ULONG *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_add:
ADD R0,R0,R1, LSL #+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_sub:
MVNS R2,#+3
MULS R1,R2,R1
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_dif:
SUBS R0,R0,R1
ASRS R0,R0,#+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_message_copy(ULONG **source, ULONG **destination, */
/** UINT size); */
/** */
/**************************************************************************/
/**************************************************************************/
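/* Hedged C equivalent of the loop below (for readability only; the assembly is
   authoritative).  The first word is copied unconditionally, then size-1 more words,
   and both indirect pointers are written back advanced:

       ULONG *src = *source;
       ULONG *dst = *destination;
       *dst++ = *src++;                 /* first message word */
       while (size >= 2)
       {
           *dst++ = *src++;             /* remaining message words */
           size--;
       }
       *source = src;
       *destination = dst;
*/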
.text
.thumb_func
_tx_misra_message_copy:
PUSH {R4,R5}
LDR R3,[R0, #+0]
LDR R4,[R1, #+0]
LDR R5,[R3, #+0]
STR R5,[R4, #+0]
ADDS R4,R4,#+4
ADDS R3,R3,#+4
CMP R2,#+2
BCC.N _tx_misra_message_copy_0
SUBS R2,R2,#+1
B.N _tx_misra_message_copy_1
_tx_misra_message_copy_2:
LDR R5,[R3, #+0]
STR R5,[R4, #+0]
ADDS R4,R4,#+4
ADDS R3,R3,#+4
SUBS R2,R2,#+1
_tx_misra_message_copy_1:
CMP R2,#+0
BNE.N _tx_misra_message_copy_2
_tx_misra_message_copy_0:
STR R3,[R0, #+0]
STR R4,[R1, #+0]
POP {R4,R5}
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_timer_pointer_dif(TX_TIMER_INTERNAL **ptr1, */
/** TX_TIMER_INTERNAL **ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_timer_pointer_dif:
SUBS R0,R0,R1
ASRS R0,R0,#+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** TX_TIMER_INTERNAL **_tx_misra_timer_pointer_add(TX_TIMER_INTERNAL */
/** **ptr1, ULONG size); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_timer_pointer_add:
ADD R0,R0,R1, LSL #+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_user_timer_pointer_get(TX_TIMER_INTERNAL */
/** *internal_timer, TX_TIMER **user_timer); */
/** */
/**************************************************************************/
/**************************************************************************/
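/* Reading aid: the SUBS #8 below assumes the port's TX_TIMER layout, in which the
   embedded TX_TIMER_INTERNAL field starts 8 bytes into the TX_TIMER control block.
   Hedged C sketch of the same derivation:

       *user_timer = (TX_TIMER *)((UCHAR *)internal_timer - 8);
*/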
.text
.thumb_func
_tx_misra_user_timer_pointer_get:
SUBS R0,#8
STR R0,[R1, #+0]
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_thread_stack_check(TX_THREAD *thread_ptr, */
/** VOID **highest_stack); */
/** */
/**************************************************************************/
/**************************************************************************/
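/* Reading aid (comments only): r4 holds thread_ptr and r5 holds highest_stack.  The
   constant 0x54485244 is the TX_THREAD_ID validity word, and the immediate
   -269488145 is 0xEFEFEFEF, the TX_STACK_FILL pattern checked at both ends of the
   stack to detect overflow before _tx_thread_stack_error_handler is called.  */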
.text
.thumb_func
_tx_misra_thread_stack_check:
PUSH {R3-R5,LR}
MOVS R4,R0
MOVS R5,R1
BL _tx_thread_interrupt_disable
CMP R4,#+0
BEQ.N _tx_misra_thread_stack_check_0
LDR R1,[R4, #+0]
LDR R2,=0x54485244
CMP R1,R2
BNE.N _tx_misra_thread_stack_check_0
LDR R1,[R4, #+8]
LDR R2,[R5, #+0]
CMP R1,R2
BCS.N _tx_misra_thread_stack_check_1
LDR R1,[R4, #+8]
STR R1,[R5, #+0]
_tx_misra_thread_stack_check_1:
LDR R1,[R4, #+12]
LDR R1,[R1, #+0]
CMP R1,#-269488145
BNE.N _tx_misra_thread_stack_check_2
LDR R1,[R4, #+16]
LDR R1,[R1, #+1]
CMP R1,#-269488145
BNE.N _tx_misra_thread_stack_check_2
LDR R1,[R5, #+0]
LDR R2,[R4, #+12]
CMP R1,R2
BCS.N _tx_misra_thread_stack_check_3
_tx_misra_thread_stack_check_2:
BL _tx_thread_interrupt_restore
MOVS R0,R4
BL _tx_thread_stack_error_handler
BL _tx_thread_interrupt_disable
_tx_misra_thread_stack_check_3:
LDR R1,[R5, #+0]
LDR R1,[R1, #-4]
CMP R1,#-269488145
BEQ.N _tx_misra_thread_stack_check_0
BL _tx_thread_interrupt_restore
MOVS R0,R4
BL _tx_thread_stack_analyze
BL _tx_thread_interrupt_disable
_tx_misra_thread_stack_check_0:
BL _tx_thread_interrupt_restore
POP {R0,R4,R5,PC} // return
#ifdef TX_ENABLE_EVENT_TRACE
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_trace_event_insert(ULONG event_id, */
/** VOID *info_field_1, ULONG info_field_2, ULONG info_field_3, */
/** ULONG info_field_4, ULONG filter, ULONG time_stamp); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_trace_event_insert:
PUSH {R3-R7,LR}
LDR.N R4,DataTable2_1
LDR R4,[R4, #+0]
CMP R4,#+0
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R5,DataTable2_2
LDR R5,[R5, #+0]
LDR R6,[SP, #+28]
TST R5,R6
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R5,DataTable2_3
LDR R5,[R5, #+0]
LDR.N R6,DataTable2_4
LDR R6,[R6, #+0]
CMP R5,#+0
BNE.N _tx_misra_trace_event_insert_1
LDR R5,[R6, #+44]
LDR R7,[R6, #+60]
LSLS R7,R7,#+16
ORRS R7,R7,#0x80000000
ORRS R5,R7,R5
B.N _tx_misra_trace_event_insert_2
_tx_misra_trace_event_insert_1:
CMP R5,#-252645136
BCS.N _tx_misra_trace_event_insert_3
MOVS R5,R6
MOVS R6,#-1
B.N _tx_misra_trace_event_insert_2
_tx_misra_trace_event_insert_3:
MOVS R6,#-252645136
MOVS R5,#+0
_tx_misra_trace_event_insert_2:
STR R6,[R4, #+0]
STR R5,[R4, #+4]
STR R0,[R4, #+8]
LDR R0,[SP, #+32]
STR R0,[R4, #+12]
STR R1,[R4, #+16]
STR R2,[R4, #+20]
STR R3,[R4, #+24]
LDR R0,[SP, #+24]
STR R0,[R4, #+28]
ADDS R4,R4,#+32
LDR.N R0,DataTable2_5
LDR R0,[R0, #+0]
CMP R4,R0
BCC.N _tx_misra_trace_event_insert_4
LDR.N R0,DataTable2_6
LDR R4,[R0, #+0]
LDR.N R0,DataTable2_1
STR R4,[R0, #+0]
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
STR R4,[R0, #+32]
LDR.N R0,DataTable2_8
LDR R0,[R0, #+0]
CMP R0,#+0
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
LDR.N R1,DataTable2_8
LDR R1,[R1, #+0]
BLX R1
B.N _tx_misra_trace_event_insert_0
_tx_misra_trace_event_insert_4:
LDR.N R0,DataTable2_1
STR R4,[R0, #+0]
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
STR R4,[R0, #+32]
_tx_misra_trace_event_insert_0:
POP {R0,R4-R7,PC} // return
.data
DataTable2_1:
.word _tx_trace_buffer_current_ptr
.data
DataTable2_2:
.word _tx_trace_event_enable_bits
.data
DataTable2_5:
.word _tx_trace_buffer_end_ptr
.data
DataTable2_6:
.word _tx_trace_buffer_start_ptr
.data
DataTable2_7:
.word _tx_trace_header_ptr
.data
DataTable2_8:
.word _tx_trace_full_notify_function
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_time_stamp_get(VOID); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_time_stamp_get:
MOVS R0,#+0
BX LR // return
#endif
.data
DataTable2_3:
.word _tx_thread_system_state
.data
DataTable2_4:
.word _tx_thread_current_ptr
/**************************************************************************/
/**************************************************************************/
/** */
/** UINT _tx_misra_always_true(void); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_always_true:
MOVS R0,#+1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UINT _tx_misra_status_get(UINT status); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_status_get:
MOVS R0,#+0
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_ipsr_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_ipsr_get:
MRS R0, IPSR
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_control_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_control_get:
MRS R0, CONTROL
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** void _tx_misra_control_set(ULONG value); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_control_set:
MSR CONTROL, R0
BX LR // return
#ifdef __ARM_FP
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_fpccr_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_fpccr_get:
LDR r0, =0xE000EF34 // Build FPCCR address
LDR r0, [r0] // Load FPCCR value
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** void _tx_misra_vfp_touch(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_vfp_touch:
vmov.f32 s0, s0
BX LR // return
#endif
.data
.word 0


@@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
.global _tx_execution_isr_exit
#endif
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore Cortex-M85/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is not needed for Cortex-M. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* [_tx_execution_isr_exit] Execution profiling ISR exit */
/* */
/* CALLED BY */
/* */
/* ISRs Interrupt Service Routines */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_context_restore
.thumb_func
.type _tx_thread_context_restore, function
_tx_thread_context_restore:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
PUSH {r0, lr} // Save return address
BL _tx_execution_isr_exit // Call the ISR exit function
POP {r0, lr} // Recover return address
#endif
BX lr
// }
.end


@@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
.global _tx_execution_isr_enter
#endif
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_save Cortex-M85/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is not needed for Cortex-M. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* [_tx_execution_isr_enter] Execution profiling ISR enter */
/* */
/* CALLED BY */
/* */
/* ISRs */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_save(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_context_save
.thumb_func
.type _tx_thread_context_save, function
_tx_thread_context_save:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR enter function to indicate an ISR is starting. */
PUSH {r0, lr} // Save return address
BL _tx_execution_isr_enter // Call the ISR enter function
POP {r0, lr} // Recover return address
#endif
BX lr
// }
.end


@@ -0,0 +1,82 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_control Cortex-M85/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for changing the interrupt lockout */
/* posture of the system. */
/* */
/* INPUT */
/* */
/* new_posture New interrupt lockout posture */
/* */
/* OUTPUT */
/* */
/* old_posture Old interrupt lockout posture */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
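/* Hedged usage sketch (application level, not part of this file): the routine is
   normally reached through the tx_interrupt_control service mapping in tx_api.h:

       UINT old_posture = tx_interrupt_control(TX_INT_DISABLE);   // lock out interrupts
       ...short critical section...
       tx_interrupt_control(old_posture);                         // restore prior posture
*/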
// UINT _tx_thread_interrupt_control(UINT new_posture)
// {
.section .text
.balign 4
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_interrupt_control
.thumb_func
.type _tx_thread_interrupt_control, function
_tx_thread_interrupt_control:
#ifdef TX_PORT_USE_BASEPRI
MRS r1, BASEPRI // Pickup current interrupt posture
MSR BASEPRI, r0 // Apply the new interrupt posture
MOV r0, r1 // Transfer old to return register
#else
MRS r1, PRIMASK // Pickup current interrupt lockout
MSR PRIMASK, r0 // Apply the new interrupt lockout
MOV r0, r1 // Transfer old to return register
#endif
BX lr // Return to caller
// }
.end


@@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_disable Cortex-M85/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for disabling interrupts and returning */
/* the previous interrupt lockout posture. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* old_posture Old interrupt lockout posture */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
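/* Note on the two paths below: with TX_PORT_USE_BASEPRI defined, only interrupts
   whose priority value is numerically greater than or equal to TX_PORT_BASEPRI are
   masked, so higher-urgency IRQs keep running ("zero latency" interrupts); without
   it, PRIMASK masks every configurable interrupt.  A hedged build-time example (the
   value is an assumption, not a project default):

       -DTX_PORT_USE_BASEPRI -DTX_PORT_BASEPRI=0x40
*/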
// UINT _tx_thread_interrupt_disable(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_interrupt_disable
.thumb_func
.type _tx_thread_interrupt_disable, function
_tx_thread_interrupt_disable:
/* Return current interrupt lockout posture. */
#ifdef TX_PORT_USE_BASEPRI
MRS r0, BASEPRI
LDR r1, =TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
MRS r0, PRIMASK
CPSID i
#endif
BX lr
// }
.end


@@ -0,0 +1,80 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_restore Cortex-M85/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for restoring the previous */
/* interrupt lockout posture. */
/* */
/* INPUT */
/* */
/* previous_posture Previous interrupt posture */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
// VOID _tx_thread_interrupt_restore(UINT previous_posture)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_interrupt_restore
.thumb_func
.type _tx_thread_interrupt_restore, function
_tx_thread_interrupt_restore:
/* Restore previous interrupt lockout posture. */
#ifdef TX_PORT_USE_BASEPRI
MSR BASEPRI, r0
#else
MSR PRIMASK, r0
#endif
BX lr
// }
.end


@@ -0,0 +1,391 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M85/GNU */
/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function waits for a thread control block pointer to appear in */
/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
/* in the variable, the corresponding thread is resumed. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 04-02-2021 Scott Larson Modified comment(s), added */
/* low power code, */
/* resulting in version 6.1.6 */
/* 06-02-2021 Scott Larson Added secure stack initialize */
/* in SVC handler, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
/* 04-25-2022 Scott Larson Added BASEPRI support, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_schedule
.thumb_func
.type _tx_thread_schedule, function
_tx_thread_schedule:
/* This function should only ever be called on Cortex-M
from the first schedule request. Subsequent scheduling occurs
from the PendSV handling routine below. */
/* Clear the preempt-disable flag to enable rescheduling after initialization on Cortex-M targets. */
MOV r0, #0 // Build value for TX_FALSE
LDR r2, =_tx_thread_preempt_disable // Build address of preempt disable flag
STR r0, [r2, #0] // Clear preempt disable flag
#ifdef __ARM_FP
/* Clear CONTROL.FPCA bit so VFP registers aren't unnecessarily stacked. */
MRS r0, CONTROL // Pickup current CONTROL register
BIC r0, r0, #4 // Clear the FPCA bit
MSR CONTROL, r0 // Setup new CONTROL register
#endif
/* Enable interrupts */
CPSIE i
/* Enter the scheduler for the first time. */
MOV r0, #0x10000000 // Load PENDSVSET bit
MOV r1, #0xE000E000 // Load NVIC base
STR r0, [r1, #0xD04] // Set PENDSVBIT in ICSR
DSB // Complete all memory accesses
ISB // Flush pipeline
/* Wait here for the PendSV to take place. */
__tx_wait_here:
B __tx_wait_here // Wait for the PendSV to happen
// }
/* Generic context switching PendSV handler. */
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global PendSV_Handler
.thumb_func
.type PendSV_Handler, function
/* Get current thread value and new thread pointer. */
PendSV_Handler:
__tx_ts_handler:
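/* Reading aid (assumed from the loads/stores in this handler, not a public
   contract): the TX_THREAD offsets used below are +4 run count, +8 stack pointer,
   +12 stack start (programmed into PSPLIM), +24 time-slice, and +0x90 the secure
   stack context index.  */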
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
#ifdef TX_PORT_USE_BASEPRI
MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r0
#else
CPSIE i // Enable interrupts
#endif /* TX_PORT_USE_BASEPRI */
#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
MOV r3, #0 // Build NULL value
LDR r1, [r0] // Pickup current thread pointer
/* Determine if there is a current thread to finish preserving. */
CBZ r1, __tx_ts_new // If NULL, skip preservation
/* Recover PSP and preserve current thread context. */
STR r3, [r0] // Set _tx_thread_current_ptr to NULL
MRS r12, PSP // Pickup PSP pointer (thread's stack pointer)
STMDB r12!, {r4-r11} // Save its remaining registers
#ifdef __ARM_FP
TST LR, #0x10 // Determine if the VFP extended frame is present
BNE _skip_vfp_save
VSTMDB r12!,{s16-s31} // Yes, save additional VFP registers
_skip_vfp_save:
#endif
LDR r4, =_tx_timer_time_slice // Build address of time-slice variable
STMDB r12!, {LR} // Save LR on the stack
STR r12, [r1, #8] // Save the thread stack pointer
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// Save secure context
LDR r5, [r1,#0x90] // Load secure stack index
CBZ r5, _skip_secure_save // Skip save if there is no secure context
PUSH {r0,r1,r2,r3} // Save scratch registers
MOV r0, r1 // Move thread ptr to r0
BL _tx_thread_secure_stack_context_save // Save secure stack
POP {r0,r1,r2,r3} // Restore scratch registers
_skip_secure_save:
#endif
/* Determine if time-slice is active. If it isn't, skip time-slice processing. */
LDR r5, [r4] // Pickup current time-slice
CBZ r5, __tx_ts_new // If not active, skip processing
/* Time-slice is active, save the current thread's time-slice and clear the global time-slice variable. */
STR r5, [r1, #24] // Save current time-slice
/* Clear the global time-slice. */
STR r3, [r4] // Clear time-slice
/* Executing thread is now completely preserved!!! */
__tx_ts_new:
/* Now we are looking for a new thread to execute! */
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready to execute; make it the current thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
/* Increment the thread run count. */
__tx_ts_restore:
LDR r7, [r1, #4] // Pickup the current thread run count
LDR r4, =_tx_timer_time_slice // Build address of time-slice variable
LDR r5, [r1, #24] // Pickup thread's current time-slice
ADD r7, r7, #1 // Increment the thread run count
STR r7, [r1, #4] // Store the new run count
/* Setup global time-slice with thread's current time-slice. */
STR r5, [r4] // Setup global time-slice
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread entry function to indicate the thread is executing. */
PUSH {r0, r1} // Save r0 and r1
BL _tx_execution_thread_enter // Call the thread execution enter function
POP {r0, r1} // Recover r0 and r1
#endif
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// Restore secure context
LDR r0, [r1,#0x90] // Load secure stack index
CBZ r0, _skip_secure_restore // Skip restore if there is no secure context
PUSH {r0,r1} // Save r1 (and dummy r0)
MOV r0, r1 // Move thread ptr to r0
BL _tx_thread_secure_stack_context_restore // Restore secure stack
POP {r0,r1} // Restore r1 (and dummy r0)
_skip_secure_restore:
#endif
/* Restore the thread context and PSP. */
LDR r12, [r1, #12] // Get stack start
MSR PSPLIM, r12 // Set stack limit
LDR r12, [r1, #8] // Pickup thread's stack pointer
LDMIA r12!, {LR} // Pickup LR
#ifdef __ARM_FP
TST LR, #0x10 // Determine if the VFP extended frame is present
BNE _skip_vfp_restore // If not, skip VFP restore
VLDMIA r12!, {s16-s31} // Yes, restore additional VFP registers
_skip_vfp_restore:
#endif
LDMIA r12!, {r4-r11} // Recover thread's registers
MSR PSP, r12 // Setup the thread's stack pointer
BX lr // Return to thread!
/* The following is the idle wait processing... in this case, no threads are ready for execution and the
system will simply be idle until an interrupt occurs that makes a thread ready. Note that interrupts
are disabled to allow use of WFI for waiting for a thread to arrive. */
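/* Note: tx_low_power_enter/tx_low_power_exit are referenced only when TX_LOW_POWER
   is defined and are expected to be provided by the low-power utility or the
   application, while TX_ENABLE_WFI gates the DSB/WFI/ISB sequence below.  */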
__tx_ts_wait:
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_LOW_POWER
PUSH {r0-r3}
BL tx_low_power_enter // Possibly enter low power mode
POP {r0-r3}
#endif
#ifdef TX_ENABLE_WFI
DSB // Ensure no outstanding memory transactions
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
#ifdef TX_LOW_POWER
PUSH {r0-r3}
BL tx_low_power_exit // Exit low power mode
POP {r0-r3}
#endif
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
already in the handler! */
__tx_ts_ready:
MOV r7, #0x08000000 // Build clear PendSV value
MOV r8, #0xE000E000 // Build base NVIC address
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
B __tx_ts_restore // Restore the thread
// }
#if (!defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE))
// SVC_Handler is not needed when ThreadX is running in single mode.
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global SVC_Handler
.thumb_func
.type SVC_Handler, function
SVC_Handler:
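/* Dispatch note: the stacked PC points just past the SVC instruction, and the SVC
   immediate is encoded in the low byte of that 16-bit opcode, so loading the byte at
   PC-2 recovers the request number (1 = allocate, 2 = free, 3 = initialize).  */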
TST lr, #0x04 // Determine return stack from EXC_RETURN bit 2
ITE EQ
MRSEQ r0, MSP // Get MSP if return stack is MSP
MRSNE r0, PSP // Get PSP if return stack is PSP
LDR r1, [r0,#24] // Load saved PC from stack
LDRB r1, [r1,#-2] // Load SVC number
CMP r1, #1 // Is it a secure stack allocate request?
BEQ _tx_svc_secure_alloc // Yes, go there
CMP r1, #2 // Is it a secure stack free request?
BEQ _tx_svc_secure_free // Yes, go there
CMP r1, #3 // Is it a secure stack init request?
BEQ _tx_svc_secure_init // Yes, go there
// Unknown SVC argument - just return
BX lr
_tx_svc_secure_alloc:
PUSH {r0,lr} // Save SP and EXC_RETURN
LDM r0, {r0-r3} // Load function parameters from stack
BL _tx_thread_secure_mode_stack_allocate
POP {r12,lr} // Restore SP and EXC_RETURN
STR r0,[r12] // Store function return value
BX lr
_tx_svc_secure_free:
PUSH {r0,lr} // Save SP and EXC_RETURN
LDM r0, {r0-r3} // Load function parameters from stack
BL _tx_thread_secure_mode_stack_free
POP {r12,lr} // Restore SP and EXC_RETURN
STR r0,[r12] // Store function return value
BX lr
_tx_svc_secure_init:
PUSH {r0,lr} // Save SP and EXC_RETURN
BL _tx_thread_secure_mode_stack_initialize
POP {r12,lr} // Restore SP and EXC_RETURN
BX lr
#endif // End of ifndef TX_SINGLE_MODE_SECURE, TX_SINGLE_MODE_NON_SECURE
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_vfp_access
.thumb_func
.type _tx_vfp_access, function
_tx_vfp_access:
#if TX_ENABLE_FPU_SUPPORT
VMOV.F32 s0, s0 // Simply access the VFP
#endif
BX lr // Return to caller
.end


@@ -0,0 +1,596 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#include "tx_api.h"
/* If TX_SINGLE_MODE_SECURE or TX_SINGLE_MODE_NON_SECURE is defined,
no secure stack functionality is needed. */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_SOURCE_CODE
#include "tx_secure_interface.h" /* Interface for NS code. */
/* Minimum size of secure stack. */
#ifndef TX_THREAD_SECURE_STACK_MINIMUM
#define TX_THREAD_SECURE_STACK_MINIMUM 256
#endif
/* Maximum size of secure stack. */
#ifndef TX_THREAD_SECURE_STACK_MAXIMUM
#define TX_THREAD_SECURE_STACK_MAXIMUM 1024
#endif
/* 8 bytes added to stack size to "seal" stack. */
#define TX_THREAD_STACK_SEAL_SIZE 8
#define TX_THREAD_STACK_SEAL_VALUE 0xFEF5EDA5
/* Maximum number of secure contexts. */
#ifndef TX_MAX_SECURE_CONTEXTS
#define TX_MAX_SECURE_CONTEXTS 32
#endif
#define TX_INVALID_SECURE_CONTEXT_IDX (-1)
/* Secure stack info struct to hold stack start, stack limit,
current stack pointer, and pointer to owning thread.
This will be allocated for each thread with a secure stack. */
typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
{
VOID *tx_thread_secure_stack_ptr; /* Thread's secure stack current pointer */
VOID *tx_thread_secure_stack_start; /* Thread's secure stack start address */
VOID *tx_thread_secure_stack_limit; /* Thread's secure stack limit */
TX_THREAD *tx_thread_ptr; /* Keep track of thread for error handling */
INT tx_next_free_index; /* Next free index of free secure context */
} TX_THREAD_SECURE_STACK_INFO;
/* Static secure contexts */
static TX_THREAD_SECURE_STACK_INFO tx_thread_secure_context[TX_MAX_SECURE_CONTEXTS];
/* Head of free secure context */
static INT tx_head_free_index = 0;
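/* Illustrative note: the contexts form a singly linked free list threaded through
   tx_next_free_index.  After _tx_thread_secure_mode_stack_initialize runs, the list
   reads head -> 0 -> 1 -> ... -> (TX_MAX_SECURE_CONTEXTS - 1) -> -1; allocation pops
   the head under TX_DISABLE and freeing pushes the released index back on. */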
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_initialize Cortex-M85/GNU */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function initializes secure mode to use PSP stack. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* status */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 06-02-2021 Scott Larson Change name, execute in */
/* handler mode, */
/* disable optimizations, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry, optimize(0)))
UINT _tx_thread_secure_mode_stack_initialize(void)
{
UINT status;
ULONG control;
ULONG ipsr;
INT index;
/* Make sure function is called from interrupt (threads should not call). */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
if (ipsr == 0)
{
status = TX_CALLER_ERROR;
}
else
{
/* Set secure mode to use PSP. */
asm volatile("MRS %0, CONTROL" : "=r" (control)); /* Get CONTROL register. */
control |= 2; /* Use PSP. */
asm volatile("MSR CONTROL, %0" :: "r" (control)); /* Set CONTROL register. */
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
asm volatile("MSR PSPLIM, %0" :: "r" (0));
asm volatile("MSR PSP, %0" :: "r" (0));
for (index = 0; index < TX_MAX_SECURE_CONTEXTS; index++)
{
/* For the last index, mark the next free index as invalid. */
if(index == (TX_MAX_SECURE_CONTEXTS - 1))
{
tx_thread_secure_context[index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
tx_thread_secure_context[index].tx_next_free_index = index + 1;
}
}
status = TX_SUCCESS;
}
return status;
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_allocate Cortex-M85/GNU */
/* 6.1.11a */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function allocates a thread's secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* stack_size Size of stack to allocate */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_SIZE_ERROR Invalid stack size */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* malloc Compiler's malloc function */
/* */
/* CALLED BY */
/* */
/* SVC Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* added stack sealing, */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* 05-02-2022 Scott Larson Modified comment(s), added */
/* TX_INTERRUPT_SAVE_AREA, */
/* resulting in version 6.1.11a*/
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
{
TX_INTERRUPT_SAVE_AREA
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
UCHAR *stack_mem;
ULONG ipsr;
ULONG psplim_ns;
INT secure_context_index;
status = TX_SUCCESS;
/* Make sure function is called from interrupt (threads should not call). */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
if (ipsr == 0)
{
status = TX_CALLER_ERROR;
}
else if (stack_size < TX_THREAD_SECURE_STACK_MINIMUM || stack_size > TX_THREAD_SECURE_STACK_MAXIMUM)
{
status = TX_SIZE_ERROR;
}
/* Check if thread already has secure stack allocated. */
else if (thread_ptr -> tx_thread_secure_stack_context != 0)
{
status = TX_THREAD_ERROR;
}
else
{
TX_DISABLE
/* Allocate free index for secure stack info. */
if(tx_head_free_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
secure_context_index = tx_head_free_index;
tx_head_free_index = tx_thread_secure_context[tx_head_free_index].tx_next_free_index;
tx_thread_secure_context[secure_context_index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
secure_context_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
TX_RESTORE
if(secure_context_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
info_ptr = &tx_thread_secure_context[secure_context_index];
/* If stack info allocated, allocate a stack & seal. */
stack_mem = malloc(stack_size + TX_THREAD_STACK_SEAL_SIZE);
if(stack_mem != TX_NULL)
{
/* Secure stack has been allocated, save in the stack info struct. */
info_ptr -> tx_thread_secure_stack_limit = stack_mem;
info_ptr -> tx_thread_secure_stack_start = stack_mem + stack_size;
info_ptr -> tx_thread_secure_stack_ptr = info_ptr -> tx_thread_secure_stack_start;
info_ptr -> tx_thread_ptr = thread_ptr;
/* Seal bottom of stack. */
*(ULONG*)info_ptr -> tx_thread_secure_stack_start = TX_THREAD_STACK_SEAL_VALUE;
/* Save the secure context id (i.e., the one-based index) in the thread. */
thread_ptr -> tx_thread_secure_stack_context = (VOID *)(secure_context_index + 1);
/* Check if this thread is running by looking at its stack start and PSPLIM_NS */
asm volatile("MRS %0, PSPLIM_NS" : "=r" (psplim_ns)); /* Get PSPLIM_NS register. */
if(((ULONG) thread_ptr -> tx_thread_stack_start & 0xFFFFFFF8) == psplim_ns)
{
/* If this thread is running, set Secure PSP and PSPLIM. */
asm volatile("MSR PSPLIM, %0" :: "r" ((ULONG)(info_ptr -> tx_thread_secure_stack_limit)));
asm volatile("MSR PSP, %0" :: "r" ((ULONG)(info_ptr -> tx_thread_secure_stack_ptr)));
}
}
else
{
TX_DISABLE
/* Stack not allocated, free the info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
status = TX_NO_MEMORY;
}
}
else
{
status = TX_NO_MEMORY;
}
}
return(status);
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_free Cortex-M85/GNU */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function frees a thread's secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* TX_THREAD_ERROR Invalid thread pointer */
/* TX_CALLER_ERROR Invalid caller of function */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* free Compiler's free() function */
/* */
/* CALLED BY */
/* */
/* SVC Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_free(TX_THREAD *thread_ptr)
{
TX_INTERRUPT_SAVE_AREA
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
ULONG ipsr;
INT secure_context_index;
status = TX_SUCCESS;
/* Pickup stack info id from thread. */
secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* Make sure function is called from interrupt (threads should not call). */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
if (ipsr == 0)
{
status = TX_CALLER_ERROR;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
status = TX_THREAD_ERROR;
}
else
{
/* Pickup stack info from static array of secure contexts. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
status = TX_THREAD_ERROR;
}
else
{
/* Free secure stack. */
free(info_ptr -> tx_thread_secure_stack_limit);
TX_DISABLE
/* Free info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
/* Clear secure context from thread. */
thread_ptr -> tx_thread_secure_stack_context = 0;
}
}
return(status);
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_save Cortex-M85/GNU */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function saves context of the secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* PendSV Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 06-02-2021 Scott Larson Fix stack pointer save, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
void _tx_thread_secure_stack_context_save(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
ULONG sp;
ULONG ipsr;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
if (ipsr == 0)
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Check that stack pointer is in range */
asm volatile("MRS %0, PSP" : "=r" (sp)); /* Get PSP register. */
if ((sp < (ULONG)info_ptr -> tx_thread_secure_stack_limit) ||
(sp > (ULONG)info_ptr -> tx_thread_secure_stack_start))
{
return;
}
/* Save stack pointer. */
info_ptr -> tx_thread_secure_stack_ptr = (VOID *) sp;
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
asm volatile("MSR PSPLIM, %0" :: "r" (0));
asm volatile("MSR PSP, %0" :: "r" (0));
return;
}
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_restore Cortex-M85/GNU */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function restores context of the secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* PendSV Handler */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comments(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
void _tx_thread_secure_stack_context_restore(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
ULONG ipsr;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
if (ipsr == 0)
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Set stack pointer and limit. */
asm volatile("MSR PSPLIM, %0" :: "r" ((ULONG)info_ptr -> tx_thread_secure_stack_limit));
asm volatile("MSR PSP, %0" :: "r" ((ULONG)info_ptr -> tx_thread_secure_stack_ptr));
return;
}
#endif


@@ -0,0 +1,85 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_allocate Cortex-M85/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to allocate a secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* stack_size Size of secure stack to */
/* allocate */
/* */
/* OUTPUT */
/* */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* SVC 1 */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
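/* Hedged usage sketch (application code on the non-secure side; names taken from the
   public tx_api.h mapping, the 512-byte size is illustrative only):

       UINT status = tx_thread_secure_stack_allocate(&my_thread, 512);
       if (status == TX_SUCCESS)
       {
           ...the thread may now call into secure (CMSE) services...
       }
*/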
// UINT _tx_thread_secure_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_secure_stack_allocate
.thumb_func
.type _tx_thread_secure_stack_allocate, function
_tx_thread_secure_stack_allocate:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
MRS r3, PRIMASK // Save interrupt mask
CPSIE i // Enable interrupts for SVC call
SVC 1
CMP r3, #0 // If interrupts enabled, just return
BEQ _alloc_return_interrupt_enabled
CPSID i // Otherwise, disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
_alloc_return_interrupt_enabled:
BX lr
.end


@@ -0,0 +1,83 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_free Cortex-M85/GNU */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to free a secure stack. */
/* */
/* INPUT */
/* */
/* thread_ptr Thread control block pointer */
/* */
/* OUTPUT */
/* */
/* status Actual completion status */
/* */
/* CALLS */
/* */
/* SVC 2 */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
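/* Hedged usage sketch (application code; name taken from the public tx_api.h
   mapping): call once the thread no longer needs secure calls, typically before
   deleting it:

       UINT status = tx_thread_secure_stack_free(&my_thread);
*/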
// UINT _tx_thread_secure_stack_free(TX_THREAD *thread_ptr)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_secure_stack_free
.thumb_func
.type _tx_thread_secure_stack_free, function
_tx_thread_secure_stack_free:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
MRS r3, PRIMASK // Save interrupt mask
CPSIE i // Enable interrupts for SVC call
SVC 2
CMP r3, #0 // If interrupts enabled, just return
BEQ _free_return_interrupt_enabled
CPSID i // Otherwise, disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
_free_return_interrupt_enabled:
BX lr
.end


@@ -0,0 +1,79 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_initialize Cortex-M85/GNU */
/* 6.1.7 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function enters the SVC handler to initialize a secure stack. */
/* */
/* INPUT */
/* */
/* none */
/* */
/* OUTPUT */
/* */
/* none */
/* */
/* CALLS */
/* */
/* SVC 3 */
/* */
/* CALLED BY */
/* */
/* TX_INITIALIZE_KERNEL_ENTER_EXTENSION */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* */
/**************************************************************************/
// VOID _tx_thread_secure_stack_initialize(VOID)
// {
.section .text
.balign 4
.syntax unified
.eabi_attribute Tag_ABI_align_preserved, 1
.global _tx_thread_secure_stack_initialize
.thumb_func
.type _tx_thread_secure_stack_initialize, function
_tx_thread_secure_stack_initialize:
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
CPSIE i // Enable interrupts for SVC call
SVC 3
CPSID i // Disable interrupts
#else
MOV r0, #0xFF // Feature not enabled
#endif
BX lr
.end

Some files were not shown because too many files have changed in this diff.