Use AzureRTOS ThreadX

2023-03-05 21:24:12 +01:00
parent f92a5ff28d
commit 2cadbff590
419 changed files with 89874 additions and 19575 deletions


@@ -0,0 +1,729 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Port Specific */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M7/GNU */
/* 6.1.12 */
/* */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This file contains data type definitions that make the ThreadX */
/* real-time kernel function identically on a variety of different */
/* processor architectures. For example, the size or number of bits */
/* in an "int" data type vary between microprocessor architectures and */
/* even C compilers for the same microprocessor. ThreadX does not */
/* directly use native C data types. Instead, ThreadX creates its */
/* own special types that can be mapped to actual data types by this */
/* file to guarantee consistency in the interface and functionality. */
/* */
/* This file replaces the previous Cortex-M3/M4/M7 files. It unifies */
/* the ARMv7-M architecture and compilers into one common file. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* 01-31-2022 Scott Larson Modified comments, updated */
/* typedef to fix misra */
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
/* 04-25-2022 Scott Larson Modified comments and added */
/* volatile to registers, */
/* resulting in version 6.1.11 */
/* 07-29-2022 Scott Larson Modified comments and */
/* described BASEPRI usage, */
/* resulting in version 6.1.12 */
/* */
/**************************************************************************/
#ifndef TX_PORT_H
#define TX_PORT_H
/* Determine if the optional ThreadX user define file should be used. */
#ifdef TX_INCLUDE_USER_DEFINE_FILE
/* Yes, include the user defines in tx_user.h. The defines in this file may
alternatively be defined on the command line. */
#include "tx_user.h"
#endif
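/* Illustrative sketch: the same options can also be supplied by the build system instead of
   tx_user.h, e.g. on the compiler command line (the symbols appear in this file; the values
   are examples only):

       -DTX_INCLUDE_USER_DEFINE_FILE
   or
       -DTX_MAX_PRIORITIES=64 -DTX_TIMER_PROCESS_IN_ISR
*/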
/* Define compiler library include files. */
#include <stdlib.h>
#include <string.h>
#ifdef __ICCARM__
#include <intrinsics.h> /* IAR Intrinsics */
#define __asm__ __asm /* Define to make all inline asm look similar */
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#endif
#endif /* __ICCARM__ */
#ifdef __ghs__
#include <arm_ghs.h>
#include "tx_ghs.h"
#endif /* __ghs__ */
#if !defined(__GNUC__) && !defined(__CC_ARM)
#define __get_control_value __get_CONTROL
#define __set_control_value __set_CONTROL
#endif
#ifndef __GNUC__
#define __get_ipsr_value __get_IPSR
#endif
/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
typedef unsigned char UCHAR;
typedef int INT;
typedef unsigned int UINT;
typedef long LONG;
typedef unsigned long ULONG;
typedef unsigned long long ULONG64;
typedef short SHORT;
typedef unsigned short USHORT;
#define ULONG64_DEFINED
/* Define the priority levels for ThreadX. Legal values range
from 32 to 1024 and MUST be evenly divisible by 32. */
#ifndef TX_MAX_PRIORITIES
#define TX_MAX_PRIORITIES 32
#endif
/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
thread creation is less than this value, the thread create call will return an error. */
#ifndef TX_MINIMUM_STACK
#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
#endif
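/* Illustrative sketch (names and sizes are example values; my_entry is an assumed
   VOID my_entry(ULONG) thread entry function): creating a thread whose stack satisfies
   the minimum above.

       static TX_THREAD my_thread;
       static UCHAR     my_stack[1024];    // comfortably above TX_MINIMUM_STACK (200)

       UINT status = tx_thread_create(&my_thread, "example", my_entry, 0,
                                      my_stack, sizeof(my_stack),
                                      16, 16, TX_NO_TIME_SLICE, TX_AUTO_START);

   Passing a stack_size smaller than TX_MINIMUM_STACK makes tx_thread_create return an
   error (TX_SIZE_ERROR). */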
/* Define the system timer thread's default stack size and priority. These are only applicable
if TX_TIMER_PROCESS_IN_ISR is not defined. */
#ifndef TX_TIMER_THREAD_STACK_SIZE
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
#ifndef TX_TIMER_THREAD_PRIORITY
#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
/* By default, ThreadX for Cortex-M uses the PRIMASK register to enable/disable interrupts.
If using BASEPRI is desired, define the following two symbols for both C and assembly files:
TX_PORT_USE_BASEPRI - This tells ThreadX to use BASEPRI instead of PRIMASK.
TX_PORT_BASEPRI = (priority_mask << (8 - number_priority_bits)) - this defines the maximum priority level to mask.
Any interrupt with a higher priority than priority_mask will not be masked and will therefore still run.
*/
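/* Illustrative sketch (assumes a device with 3 implemented priority bits; the values are
   examples): to mask priority level 5 and all numerically higher (less urgent) levels, both
   the C and assembly builds could define

       #define TX_PORT_USE_BASEPRI
       #define TX_PORT_BASEPRI    (5 << (8 - 3))    // 0xA0

   Interrupts configured with priority values 0-4 would then remain unmasked. */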
/* Define various constants for the ThreadX Cortex-M port. */
#define TX_INT_DISABLE 1 /* Disable interrupts */
#define TX_INT_ENABLE 0 /* Enable interrupts */
/* Define the clock source for the trace event entry time stamp. The following two items are port specific.
For example, if the time source is at address 0x0a800024 and is 16 bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
#endif
#ifdef __ghs__
/* Define constants for Green Hills EventAnalyzer. */
/* Define the number of ticks per second. This informs the EventAnalyzer what the timestamps
represent. By default, this is set to 1,000,000, i.e., one tick every microsecond. */
#define TX_EL_TICKS_PER_SECOND 1000000
/* Define the method of how to get the upper and lower 32-bits of the time stamp. By default, simply
simulate the time-stamp source with a counter. */
#define read_tbu() _tx_el_time_base_upper
#define read_tbl() ++_tx_el_time_base_lower
#endif /* __ghs__ */
/* Define the port specific options for the _tx_build_options variable. This variable indicates
how the ThreadX library was built. */
#define TX_PORT_SPECIFIC_BUILD_OPTIONS (0)
/* Define the in-line initialization constant so that modules with in-line
initialization capabilities can prevent their initialization from being
a function call. */
#ifdef TX_MISRA_ENABLE
#define TX_DISABLE_INLINE
#else
#define TX_INLINE_INITIALIZATION
#endif
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
logic. */
#ifndef TX_MISRA_ENABLE
#ifdef TX_ENABLE_STACK_CHECKING
#undef TX_DISABLE_STACK_FILLING
#endif
#endif
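/* Illustrative sketch (the handler name is an example): with TX_ENABLE_STACK_CHECKING defined
   for the build, the application can also register a callback through the standard ThreadX
   service tx_thread_stack_error_notify:

       static VOID my_stack_error_handler(TX_THREAD *thread_ptr)
       {
           // thread_ptr identifies the thread whose stack was corrupted or exhausted.
       }

       tx_thread_stack_error_notify(my_stack_error_handler);
*/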
/* Define the TX_THREAD control block extensions for this port. The main reason
for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
#define TX_THREAD_EXTENSION_0
#define TX_THREAD_EXTENSION_1
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_iar_tls_pointer;
#elif defined(__ghs__)
#define TX_THREAD_EXTENSION_2 VOID * tx_thread_eh_globals; \
int Errno; /* errno. */ \
char * strtok_saved_pos; /* strtok() position. */
#else
#define TX_THREAD_EXTENSION_2
#endif
#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
#define TX_BLOCK_POOL_EXTENSION
#define TX_BYTE_POOL_EXTENSION
#define TX_EVENT_FLAGS_GROUP_EXTENSION
#define TX_MUTEX_EXTENSION
#define TX_QUEUE_EXTENSION
#define TX_SEMAPHORE_EXTENSION
#define TX_TIMER_EXTENSION
/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
#define TX_THREAD_USER_EXTENSION
#endif
/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
tx_thread_shell_entry, and tx_thread_terminate. */
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#if (__VER__ < 8000000)
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = __iar_dlib_perthread_allocate();
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) __iar_dlib_perthread_deallocate(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL;
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION __iar_dlib_perthread_access(0);
#else
void *_tx_iar_create_per_thread_tls_area(void);
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
void __iar_Initlocks(void);
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = _tx_iar_create_per_thread_tls_area();
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0);
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION do {__iar_Initlocks();} while(0);
#endif
#else
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#endif
#if defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__)
#ifdef TX_MISRA_ENABLE
ULONG _tx_misra_control_get(void);
void _tx_misra_control_set(ULONG value);
ULONG _tx_misra_fpccr_get(void);
void _tx_misra_vfp_touch(void);
#else /* TX_MISRA_ENABLE not defined */
/* Define some helper functions (these are intrinsics in some compilers). */
#ifdef __GNUC__ /* GCC and ARM Compiler 6 */
__attribute__( ( always_inline ) ) static inline ULONG __get_control_value(void)
{
ULONG control_value;
__asm__ volatile (" MRS %0,CONTROL ": "=r" (control_value) );
return(control_value);
}
__attribute__( ( always_inline ) ) static inline void __set_control_value(ULONG control_value)
{
__asm__ volatile (" MSR CONTROL,%0": : "r" (control_value): "memory" );
}
#define TX_VFP_TOUCH() __asm__ volatile ("VMOV.F32 s0, s0");
#elif defined(__CC_ARM) /* ARM Compiler 5 */
__attribute__( ( always_inline ) ) ULONG __get_control_value(void)
{
ULONG control_value;
__asm volatile ("MRS control_value,CONTROL");
return(control_value);
}
__attribute__( ( always_inline ) ) void __set_control_value(ULONG control_value)
{
__asm__ volatile ("MSR CONTROL,control_value");
}
/* Can't access VFP registers with inline asm, so define this in tx_thread_schedule. */
void _tx_vfp_access(void);
#define TX_VFP_TOUCH() _tx_vfp_access();
#elif defined(__ICCARM__) /* IAR */
#define TX_VFP_TOUCH() __asm__ volatile ("VMOV.F32 s0, s0");
#endif /* Helper functions for different compilers */
#endif /* TX_MISRA_ENABLE */
/* A completed thread falls into _tx_thread_shell_entry, and we can simply deactivate the FPU via CONTROL.FPCA
in order to ensure no lazy stacking will occur. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = __get_control_value(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
__set_control_value(_tx_vfp_state); \
}
#else
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
}
#endif
/* A thread can be terminated by another thread, so we first check if it's self-terminating and not in an ISR.
If so, deactivate the FPU via CONTROL.FPCA. Otherwise we are in an interrupt or another thread is terminating
this one, so if the FPCCR.LSPACT bit is set, we need to save the CONTROL.FPCA state, touch the FPU to flush
the lazy FPU save, then restore the CONTROL.FPCA state. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = __get_control_value(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
__set_control_value(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = __get_control_value(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = __get_control_value(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
__set_control_value(_tx_vfp_state); \
} \
} \
} \
}
#else
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = _tx_misra_fpccr_get(); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
_tx_misra_vfp_touch(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
} \
} \
}
#endif
#else /* No VFP in use */
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
#endif /* defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__) */
/* Define the ThreadX object creation extensions for the remaining objects. */
#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
/* Define the ThreadX object deletion extensions for the remaining objects. */
#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
/* Define the get system state macro. */
#ifndef TX_THREAD_GET_SYSTEM_STATE
#ifndef TX_MISRA_ENABLE
#ifdef __CC_ARM /* ARM Compiler 5 */
register unsigned int _ipsr __asm("ipsr");
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _ipsr)
#elif defined(__GNUC__) /* GCC and ARM Compiler 6 */
__attribute__( ( always_inline ) ) static inline unsigned int __get_ipsr_value(void)
{
unsigned int ipsr_value;
__asm__ volatile (" MRS %0,IPSR ": "=r" (ipsr_value) );
return(ipsr_value);
}
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | __get_ipsr_value())
#elif defined(__ICCARM__) /* IAR */
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | __get_IPSR())
#endif /* TX_THREAD_GET_SYSTEM_STATE for different compilers */
#else /* TX_MISRA_ENABLE is defined, use MISRA function. */
ULONG _tx_misra_ipsr_get(VOID);
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_misra_ipsr_get())
#endif /* TX_MISRA_ENABLE */
#endif /* TX_THREAD_GET_SYSTEM_STATE */
/* Define the check for whether or not to call the _tx_thread_system_return function. A non-zero value
indicates that _tx_thread_system_return should not be called. This overrides the definition in tx_thread.h
for Cortex-M so that we don't waste time checking the _tx_thread_system_state variable, which is always
zero after initialization for Cortex-M ports. */
#ifndef TX_THREAD_SYSTEM_RETURN_CHECK
#define TX_THREAD_SYSTEM_RETURN_CHECK(c) (c) = ((ULONG) _tx_thread_preempt_disable);
#endif
/* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
prevent early scheduling on Cortex-M parts. */
#define TX_PORT_SPECIFIC_POST_INITIALIZATION _tx_thread_preempt_disable++;
#ifndef TX_DISABLE_INLINE
/* Define the TX_LOWEST_SET_BIT_CALCULATE macro for each compiler. */
#ifdef __ICCARM__ /* IAR Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = (UINT) __CLZ(__RBIT((m)));
#elif defined(__CC_ARM) /* AC5 Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = (UINT) __clz(__rbit((m)));
#elif defined(__GNUC__) /* GCC and AC6 Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) __asm__ volatile (" RBIT %0,%1 ": "=r" (m) : "r" (m) ); \
__asm__ volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) );
#endif
/* Define the interrupt disable/restore macros for each compiler. */
#if defined(__GNUC__) || defined(__ICCARM__)
/*** GCC/AC6 and IAR ***/
__attribute__( ( always_inline ) ) static inline unsigned int __get_interrupt_posture(void)
{
unsigned int posture;
#ifdef TX_PORT_USE_BASEPRI
__asm__ volatile ("MRS %0, BASEPRI ": "=r" (posture));
#else
__asm__ volatile ("MRS %0, PRIMASK ": "=r" (posture));
#endif
return(posture);
}
#ifdef TX_PORT_USE_BASEPRI
__attribute__( ( always_inline ) ) static inline void __set_basepri_value(unsigned int basepri_value)
{
__asm__ volatile ("MSR BASEPRI,%0 ": : "r" (basepri_value));
}
#else
__attribute__( ( always_inline ) ) static inline void __enable_interrupts(void)
{
__asm__ volatile ("CPSIE i": : : "memory");
}
#endif
__attribute__( ( always_inline ) ) static inline void __restore_interrupt(unsigned int int_posture)
{
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(int_posture);
//__asm__ volatile ("MSR BASEPRI,%0": : "r" (int_posture): "memory");
#else
__asm__ volatile ("MSR PRIMASK,%0": : "r" (int_posture): "memory");
#endif
}
__attribute__( ( always_inline ) ) static inline unsigned int __disable_interrupts(void)
{
unsigned int int_posture;
int_posture = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(TX_PORT_BASEPRI);
#else
__asm__ volatile ("CPSID i" : : : "memory");
#endif
return(int_posture);
}
__attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_inline(void)
{
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
*((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(0);
#else
__enable_interrupts();
#endif
__restore_interrupt(interrupt_save);
}
}
#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
#define TX_DISABLE interrupt_save = __disable_interrupts();
#define TX_RESTORE __restore_interrupt(interrupt_save);
/*** End GCC/AC6 and IAR ***/
#elif defined(__CC_ARM)
/*** AC5 ***/
static __inline unsigned int __get_interrupt_posture(void)
{
unsigned int posture;
#ifdef TX_PORT_USE_BASEPRI
__asm__ volatile ("MRS #posture, BASEPRI");
#else
__asm__ volatile ("MRS #posture, PRIMASK");
#endif
return(posture);
}
#ifdef TX_PORT_USE_BASEPRI
static __inline void __set_basepri_value(unsigned int basepri_value)
{
__asm__ volatile ("MSR BASEPRI, #basepri_value");
}
#endif
static __inline unsigned int __disable_interrupts(void)
{
unsigned int int_posture;
int_posture = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(TX_PORT_BASEPRI);
#else
__asm__ volatile ("CPSID i");
#endif
return(int_posture);
}
static __inline void __restore_interrupt(unsigned int int_posture)
{
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(int_posture);
#else
__asm__ volatile ("MSR PRIMASK, #int_posture");
#endif
}
static void _tx_thread_system_return_inline(void)
{
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
*((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
interrupt_save = __get_interrupt_posture();
__set_basepri_value(0);
__set_basepri_value(interrupt_save);
#else
interrupt_save = __disable_irq();
__enable_irq();
if (interrupt_save != 0)
__disable_irq();
#endif
}
}
#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
#define TX_DISABLE interrupt_save = __disable_interrupts();
#define TX_RESTORE __restore_interrupt(interrupt_save);
/*** End AC5 ***/
#endif /* Interrupt disable/restore macros for each compiler. */
/* Redefine _tx_thread_system_return for improved performance. */
#define _tx_thread_system_return _tx_thread_system_return_inline
#else /* TX_DISABLE_INLINE is defined */
UINT _tx_thread_interrupt_disable(VOID);
VOID _tx_thread_interrupt_restore(UINT previous_posture);
#define TX_INTERRUPT_SAVE_AREA register UINT interrupt_save;
#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
#endif /* TX_DISABLE_INLINE */
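/* Illustrative sketch: regardless of whether the inline or the function-call versions above
   are in effect, critical sections are bracketed with the same three macros, e.g.

       TX_INTERRUPT_SAVE_AREA

       TX_DISABLE
       // ... access protected kernel data ...
       TX_RESTORE
*/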
/* Define FPU extension for the Cortex-M. Each is assumed to be called in the context of the executing
thread. These are no longer needed, but are preserved for backward compatibility only. */
void tx_thread_fpu_enable(void);
void tx_thread_fpu_disable(void);
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
"Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/GNU Version 6.1.12 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
#else
extern CHAR _tx_version_id[];
#endif
#endif
#endif


@@ -0,0 +1,719 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** ThreadX MISRA Compliance */
/** */
/**************************************************************************/
/**************************************************************************/
#define SHT_PROGBITS 0x1
.global __aeabi_memset
.global _tx_thread_current_ptr
.global _tx_thread_interrupt_disable
.global _tx_thread_interrupt_restore
.global _tx_thread_stack_analyze
.global _tx_thread_stack_error_handler
.global _tx_thread_system_state
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_trace_buffer_current_ptr
.global _tx_trace_buffer_end_ptr
.global _tx_trace_buffer_start_ptr
.global _tx_trace_event_enable_bits
.global _tx_trace_full_notify_function
.global _tx_trace_header_ptr
#endif
.global _tx_misra_always_true
.global _tx_misra_block_pool_to_uchar_pointer_convert
.global _tx_misra_byte_pool_to_uchar_pointer_convert
.global _tx_misra_char_to_uchar_pointer_convert
.global _tx_misra_const_char_to_char_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_entry_to_uchar_pointer_convert
#endif
.global _tx_misra_indirect_void_to_uchar_pointer_convert
.global _tx_misra_memset
.global _tx_misra_message_copy
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_object_to_uchar_pointer_convert
#endif
.global _tx_misra_pointer_to_ulong_convert
.global _tx_misra_status_get
.global _tx_misra_thread_stack_check
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_time_stamp_get
#endif
.global _tx_misra_timer_indirect_to_void_pointer_convert
.global _tx_misra_timer_pointer_add
.global _tx_misra_timer_pointer_dif
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_trace_event_insert
#endif
.global _tx_misra_uchar_pointer_add
.global _tx_misra_uchar_pointer_dif
.global _tx_misra_uchar_pointer_sub
.global _tx_misra_uchar_to_align_type_pointer_convert
.global _tx_misra_uchar_to_block_pool_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_uchar_to_entry_pointer_convert
.global _tx_misra_uchar_to_header_pointer_convert
#endif
.global _tx_misra_uchar_to_indirect_byte_pool_pointer_convert
.global _tx_misra_uchar_to_indirect_uchar_pointer_convert
#ifdef TX_ENABLE_EVENT_TRACE
.global _tx_misra_uchar_to_object_pointer_convert
#endif
.global _tx_misra_uchar_to_void_pointer_convert
.global _tx_misra_ulong_pointer_add
.global _tx_misra_ulong_pointer_dif
.global _tx_misra_ulong_pointer_sub
.global _tx_misra_ulong_to_pointer_convert
.global _tx_misra_ulong_to_thread_pointer_convert
.global _tx_misra_user_timer_pointer_get
.global _tx_misra_void_to_block_pool_pointer_convert
.global _tx_misra_void_to_byte_pool_pointer_convert
.global _tx_misra_void_to_event_flags_pointer_convert
.global _tx_misra_void_to_indirect_uchar_pointer_convert
.global _tx_misra_void_to_mutex_pointer_convert
.global _tx_misra_void_to_queue_pointer_convert
.global _tx_misra_void_to_semaphore_pointer_convert
.global _tx_misra_void_to_thread_pointer_convert
.global _tx_misra_void_to_uchar_pointer_convert
.global _tx_misra_void_to_ulong_pointer_convert
.global _tx_misra_ipsr_get
.global _tx_misra_control_get
.global _tx_misra_control_set
#ifdef __ARM_FP
.global _tx_misra_fpccr_get
.global _tx_misra_vfp_touch
#endif
.global _tx_misra_event_flags_group_not_used
.global _tx_misra_event_flags_set_notify_not_used
.global _tx_misra_queue_not_used
.global _tx_misra_queue_send_notify_not_used
.global _tx_misra_semaphore_not_used
.global _tx_misra_semaphore_put_notify_not_used
.global _tx_misra_thread_entry_exit_notify_not_used
.global _tx_misra_thread_not_used
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_memset(VOID *ptr, UINT value, UINT size); */
/** */
/**************************************************************************/
/**************************************************************************/
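/* The arguments arrive as R0 = ptr, R1 = value, R2 = size; the moves below reorder them to
   match the AEABI helper signature __aeabi_memset(dest, size, value). */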
.text
.align 4
.syntax unified
.thumb_func
_tx_misra_memset:
PUSH {R4,LR}
MOVS R4,R0
MOVS R0,R2
MOVS R2,R1
MOVS R1,R0
MOVS R0,R4
BL __aeabi_memset
POP {R4,PC} // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UCHAR *_tx_misra_uchar_pointer_add(UCHAR *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_add:
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UCHAR *_tx_misra_uchar_pointer_sub(UCHAR *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_sub:
RSBS R1,R1,#+0
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_uchar_pointer_dif:
SUBS R0,R0,R1
BX LR // return
/************************************************************************************************************************************/
/************************************************************************************************************************************/
/** */
/** This single function serves all of the below prototypes. */
/** */
/** ULONG _tx_misra_pointer_to_ulong_convert(VOID *ptr); */
/** VOID *_tx_misra_ulong_to_pointer_convert(ULONG input); */
/** UCHAR **_tx_misra_indirect_void_to_uchar_pointer_convert(VOID **return_ptr); */
/** UCHAR **_tx_misra_uchar_to_indirect_uchar_pointer_convert(UCHAR *pointer); */
/** UCHAR *_tx_misra_block_pool_to_uchar_pointer_convert(TX_BLOCK_POOL *pool); */
/** TX_BLOCK_POOL *_tx_misra_void_to_block_pool_pointer_convert(VOID *pointer); */
/** UCHAR *_tx_misra_void_to_uchar_pointer_convert(VOID *pointer); */
/** TX_BLOCK_POOL *_tx_misra_uchar_to_block_pool_pointer_convert(UCHAR *pointer); */
/** UCHAR **_tx_misra_void_to_indirect_uchar_pointer_convert(VOID *pointer); */
/** TX_BYTE_POOL *_tx_misra_void_to_byte_pool_pointer_convert(VOID *pointer); */
/** UCHAR *_tx_misra_byte_pool_to_uchar_pointer_convert(TX_BYTE_POOL *pool); */
/** ALIGN_TYPE *_tx_misra_uchar_to_align_type_pointer_convert(UCHAR *pointer); */
/** TX_BYTE_POOL **_tx_misra_uchar_to_indirect_byte_pool_pointer_convert(UCHAR *pointer); */
/** TX_EVENT_FLAGS_GROUP *_tx_misra_void_to_event_flags_pointer_convert(VOID *pointer); */
/** ULONG *_tx_misra_void_to_ulong_pointer_convert(VOID *pointer); */
/** TX_MUTEX *_tx_misra_void_to_mutex_pointer_convert(VOID *pointer); */
/** TX_QUEUE *_tx_misra_void_to_queue_pointer_convert(VOID *pointer); */
/** TX_SEMAPHORE *_tx_misra_void_to_semaphore_pointer_convert(VOID *pointer); */
/** VOID *_tx_misra_uchar_to_void_pointer_convert(UCHAR *pointer); */
/** TX_THREAD *_tx_misra_ulong_to_thread_pointer_convert(ULONG value); */
/** VOID *_tx_misra_timer_indirect_to_void_pointer_convert(TX_TIMER_INTERNAL **pointer); */
/** CHAR *_tx_misra_const_char_to_char_pointer_convert(const char *pointer); */
/** TX_THREAD *_tx_misra_void_to_thread_pointer_convert(void *pointer); */
/** UCHAR *_tx_misra_object_to_uchar_pointer_convert(TX_TRACE_OBJECT_ENTRY *pointer); */
/** TX_TRACE_OBJECT_ENTRY *_tx_misra_uchar_to_object_pointer_convert(UCHAR *pointer); */
/** TX_TRACE_HEADER *_tx_misra_uchar_to_header_pointer_convert(UCHAR *pointer); */
/** TX_TRACE_BUFFER_ENTRY *_tx_misra_uchar_to_entry_pointer_convert(UCHAR *pointer); */
/** UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer); */
/** UCHAR *_tx_misra_char_to_uchar_pointer_convert(CHAR *pointer); */
/** VOID _tx_misra_event_flags_group_not_used(TX_EVENT_FLAGS_GROUP *group_ptr); */
/** VOID _tx_misra_event_flags_set_notify_not_used(VOID (*events_set_notify)(TX_EVENT_FLAGS_GROUP *notify_group_ptr)); */
/** VOID _tx_misra_queue_not_used(TX_QUEUE *queue_ptr); */
/** VOID _tx_misra_queue_send_notify_not_used(VOID (*queue_send_notify)(TX_QUEUE *notify_queue_ptr)); */
/** VOID _tx_misra_semaphore_not_used(TX_SEMAPHORE *semaphore_ptr); */
/** VOID _tx_misra_semaphore_put_notify_not_used(VOID (*semaphore_put_notify)(TX_SEMAPHORE *notify_semaphore_ptr)); */
/** VOID _tx_misra_thread_not_used(TX_THREAD *thread_ptr); */
/** VOID _tx_misra_thread_entry_exit_notify_not_used(VOID (*thread_entry_exit_notify)(TX_THREAD *notify_thread_ptr, UINT id)); */
/** */
/************************************************************************************************************************************/
/************************************************************************************************************************************/
.text
.thumb_func
_tx_misra_pointer_to_ulong_convert:
_tx_misra_ulong_to_pointer_convert:
_tx_misra_indirect_void_to_uchar_pointer_convert:
_tx_misra_uchar_to_indirect_uchar_pointer_convert:
_tx_misra_block_pool_to_uchar_pointer_convert:
_tx_misra_void_to_block_pool_pointer_convert:
_tx_misra_void_to_uchar_pointer_convert:
_tx_misra_uchar_to_block_pool_pointer_convert:
_tx_misra_void_to_indirect_uchar_pointer_convert:
_tx_misra_void_to_byte_pool_pointer_convert:
_tx_misra_byte_pool_to_uchar_pointer_convert:
_tx_misra_uchar_to_align_type_pointer_convert:
_tx_misra_uchar_to_indirect_byte_pool_pointer_convert:
_tx_misra_void_to_event_flags_pointer_convert:
_tx_misra_void_to_ulong_pointer_convert:
_tx_misra_void_to_mutex_pointer_convert:
_tx_misra_void_to_queue_pointer_convert:
_tx_misra_void_to_semaphore_pointer_convert:
_tx_misra_uchar_to_void_pointer_convert:
_tx_misra_ulong_to_thread_pointer_convert:
_tx_misra_timer_indirect_to_void_pointer_convert:
_tx_misra_const_char_to_char_pointer_convert:
_tx_misra_void_to_thread_pointer_convert:
#ifdef TX_ENABLE_EVENT_TRACE
_tx_misra_object_to_uchar_pointer_convert:
_tx_misra_uchar_to_object_pointer_convert:
_tx_misra_uchar_to_header_pointer_convert:
_tx_misra_uchar_to_entry_pointer_convert:
_tx_misra_entry_to_uchar_pointer_convert:
#endif
_tx_misra_char_to_uchar_pointer_convert:
_tx_misra_event_flags_group_not_used:
_tx_misra_event_flags_set_notify_not_used:
_tx_misra_queue_not_used:
_tx_misra_queue_send_notify_not_used:
_tx_misra_semaphore_not_used:
_tx_misra_semaphore_put_notify_not_used:
_tx_misra_thread_entry_exit_notify_not_used:
_tx_misra_thread_not_used:
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG *_tx_misra_ulong_pointer_add(ULONG *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_add:
ADD R0,R0,R1, LSL #+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_sub:
MVNS R2,#+3
MULS R1,R2,R1
ADD R0,R0,R1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_ulong_pointer_dif:
SUBS R0,R0,R1
ASRS R0,R0,#+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_message_copy(ULONG **source, ULONG **destination, */
/** UINT size); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_message_copy:
PUSH {R4,R5}
LDR R3,[R0, #+0]
LDR R4,[R1, #+0]
LDR R5,[R3, #+0]
STR R5,[R4, #+0]
ADDS R4,R4,#+4
ADDS R3,R3,#+4
CMP R2,#+2
BCC.N _tx_misra_message_copy_0
SUBS R2,R2,#+1
B.N _tx_misra_message_copy_1
_tx_misra_message_copy_2:
LDR R5,[R3, #+0]
STR R5,[R4, #+0]
ADDS R4,R4,#+4
ADDS R3,R3,#+4
SUBS R2,R2,#+1
_tx_misra_message_copy_1:
CMP R2,#+0
BNE.N _tx_misra_message_copy_2
_tx_misra_message_copy_0:
STR R3,[R0, #+0]
STR R4,[R1, #+0]
POP {R4,R5}
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_timer_pointer_dif(TX_TIMER_INTERNAL **ptr1, */
/** TX_TIMER_INTERNAL **ptr2); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_timer_pointer_dif:
SUBS R0,R0,R1
ASRS R0,R0,#+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** TX_TIMER_INTERNAL **_tx_misra_timer_pointer_add(TX_TIMER_INTERNAL */
/** **ptr1, ULONG size); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_timer_pointer_add:
ADD R0,R0,R1, LSL #+2
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_user_timer_pointer_get(TX_TIMER_INTERNAL */
/** *internal_timer, TX_TIMER **user_timer); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_user_timer_pointer_get:
SUBS R0,#8
STR R0,[R1, #+0]
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_thread_stack_check(TX_THREAD *thread_ptr, */
/** VOID **highest_stack); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_thread_stack_check:
PUSH {R3-R5,LR}
MOVS R4,R0
MOVS R5,R1
BL _tx_thread_interrupt_disable
CMP R4,#+0
BEQ.N _tx_misra_thread_stack_check_0
LDR R1,[R4, #+0]
LDR R2,=0x54485244
CMP R1,R2
BNE.N _tx_misra_thread_stack_check_0
LDR R1,[R4, #+8]
LDR R2,[R5, #+0]
CMP R1,R2
BCS.N _tx_misra_thread_stack_check_1
LDR R1,[R4, #+8]
STR R1,[R5, #+0]
_tx_misra_thread_stack_check_1:
LDR R1,[R4, #+12]
LDR R1,[R1, #+0]
CMP R1,#-269488145
BNE.N _tx_misra_thread_stack_check_2
LDR R1,[R4, #+16]
LDR R1,[R1, #+1]
CMP R1,#-269488145
BNE.N _tx_misra_thread_stack_check_2
LDR R1,[R5, #+0]
LDR R2,[R4, #+12]
CMP R1,R2
BCS.N _tx_misra_thread_stack_check_3
_tx_misra_thread_stack_check_2:
BL _tx_thread_interrupt_restore
MOVS R0,R4
BL _tx_thread_stack_error_handler
BL _tx_thread_interrupt_disable
_tx_misra_thread_stack_check_3:
LDR R1,[R5, #+0]
LDR R1,[R1, #-4]
CMP R1,#-269488145
BEQ.N _tx_misra_thread_stack_check_0
BL _tx_thread_interrupt_restore
MOVS R0,R4
BL _tx_thread_stack_analyze
BL _tx_thread_interrupt_disable
_tx_misra_thread_stack_check_0:
BL _tx_thread_interrupt_restore
POP {R0,R4,R5,PC} // return
#ifdef TX_ENABLE_EVENT_TRACE
/**************************************************************************/
/**************************************************************************/
/** */
/** VOID _tx_misra_trace_event_insert(ULONG event_id, */
/** VOID *info_field_1, ULONG info_field_2, ULONG info_field_3, */
/** ULONG info_field_4, ULONG filter, ULONG time_stamp); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_trace_event_insert:
PUSH {R3-R7,LR}
LDR.N R4,DataTable2_1
LDR R4,[R4, #+0]
CMP R4,#+0
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R5,DataTable2_2
LDR R5,[R5, #+0]
LDR R6,[SP, #+28]
TST R5,R6
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R5,DataTable2_3
LDR R5,[R5, #+0]
LDR.N R6,DataTable2_4
LDR R6,[R6, #+0]
CMP R5,#+0
BNE.N _tx_misra_trace_event_insert_1
LDR R5,[R6, #+44]
LDR R7,[R6, #+60]
LSLS R7,R7,#+16
ORRS R7,R7,#0x80000000
ORRS R5,R7,R5
B.N _tx_misra_trace_event_insert_2
_tx_misra_trace_event_insert_1:
CMP R5,#-252645136
BCS.N _tx_misra_trace_event_insert_3
MOVS R5,R6
MOVS R6,#-1
B.N _tx_misra_trace_event_insert_2
_tx_misra_trace_event_insert_3:
MOVS R6,#-252645136
MOVS R5,#+0
_tx_misra_trace_event_insert_2:
STR R6,[R4, #+0]
STR R5,[R4, #+4]
STR R0,[R4, #+8]
LDR R0,[SP, #+32]
STR R0,[R4, #+12]
STR R1,[R4, #+16]
STR R2,[R4, #+20]
STR R3,[R4, #+24]
LDR R0,[SP, #+24]
STR R0,[R4, #+28]
ADDS R4,R4,#+32
LDR.N R0,DataTable2_5
LDR R0,[R0, #+0]
CMP R4,R0
BCC.N _tx_misra_trace_event_insert_4
LDR.N R0,DataTable2_6
LDR R4,[R0, #+0]
LDR.N R0,DataTable2_1
STR R4,[R0, #+0]
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
STR R4,[R0, #+32]
LDR.N R0,DataTable2_8
LDR R0,[R0, #+0]
CMP R0,#+0
BEQ.N _tx_misra_trace_event_insert_0
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
LDR.N R1,DataTable2_8
LDR R1,[R1, #+0]
BLX R1
B.N _tx_misra_trace_event_insert_0
_tx_misra_trace_event_insert_4:
LDR.N R0,DataTable2_1
STR R4,[R0, #+0]
LDR.N R0,DataTable2_7
LDR R0,[R0, #+0]
STR R4,[R0, #+32]
_tx_misra_trace_event_insert_0:
POP {R0,R4-R7,PC} // return
.data
DataTable2_1:
.word _tx_trace_buffer_current_ptr
.data
DataTable2_2:
.word _tx_trace_event_enable_bits
.data
DataTable2_5:
.word _tx_trace_buffer_end_ptr
.data
DataTable2_6:
.word _tx_trace_buffer_start_ptr
.data
DataTable2_7:
.word _tx_trace_header_ptr
.data
DataTable2_8:
.word _tx_trace_full_notify_function
/**************************************************************************/
/**************************************************************************/
/** */
/** ULONG _tx_misra_time_stamp_get(VOID); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_time_stamp_get:
MOVS R0,#+0
BX LR // return
#endif
.data
DataTable2_3:
.word _tx_thread_system_state
.data
DataTable2_4:
.word _tx_thread_current_ptr
/**************************************************************************/
/**************************************************************************/
/** */
/** UINT _tx_misra_always_true(void); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_always_true:
MOVS R0,#+1
BX LR // return
/**************************************************************************/
/**************************************************************************/
/** */
/** UINT _tx_misra_status_get(UINT status); */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.thumb_func
_tx_misra_status_get:
MOVS R0,#+0
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_ipsr_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_ipsr_get:
MRS R0, IPSR
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_control_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_control_get:
MRS R0, CONTROL
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** void _tx_misra_control_set(ULONG value); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_control_set:
MSR CONTROL, R0
BX LR // return
#ifdef __ARM_FP
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** ULONG _tx_misra_fpccr_get(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_fpccr_get:
LDR r0, =0xE000EF34 // Build FPCCR address
LDR r0, [r0] // Load FPCCR value
BX LR // return
/***********************************************************************************************/
/***********************************************************************************************/
/** */
/** void _tx_misra_vfp_touch(void); */
/** */
/***********************************************************************************************/
/***********************************************************************************************/
.text
.thumb_func
_tx_misra_vfp_touch:
vmov.f32 s0, s0
BX LR // return
#endif
.data
.word 0


@@ -0,0 +1,82 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
.global _tx_execution_isr_exit
#endif
.text
.align 4
.syntax unified
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore Cortex-M7/GNU */
/* 6.1.7 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is only needed for legacy applications and it should */
/* not be called in any new development on a Cortex-M. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* [_tx_execution_isr_exit] Execution profiling ISR exit */
/* */
/* CALLED BY */
/* */
/* ISRs Interrupt Service Routines */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
// {
.global _tx_thread_context_restore
.thumb_func
_tx_thread_context_restore:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
PUSH {r0, lr} // Save return address
BL _tx_execution_isr_exit // Call the ISR exit function
POP {r0, lr} // Recover return address
#endif
BX lr
// }


@@ -0,0 +1,80 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.align 4
.syntax unified
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_save Cortex-M7/GNU */
/* 6.1.7 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is only needed for legacy applications and it should */
/* not be called in any new development on a Cortex-M. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* [_tx_execution_isr_enter] Execution profiling ISR enter */
/* */
/* CALLED BY */
/* */
/* ISRs */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_save(VOID)
// {
.global _tx_thread_context_save
.thumb_func
_tx_thread_context_save:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR enter function to indicate an ISR is starting. */
PUSH {r0, lr} // Save return address
BL _tx_execution_isr_enter // Call the ISR enter function
POP {r0, lr} // Recover return address
#endif
/* Context is already saved - just return. */
BX lr
// }


@@ -0,0 +1,79 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
.text 32
.align 4
.syntax unified
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_control Cortex-M7/GNU */
/* 6.1.7 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for changing the interrupt lockout */
/* posture of the system. */
/* */
/* INPUT */
/* */
/* new_posture New interrupt lockout posture */
/* */
/* OUTPUT */
/* */
/* old_posture Old interrupt lockout posture */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* */
/**************************************************************************/
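/* Usage sketch (illustrative only): application C code reaches this routine through the
   tx_api.h service tx_interrupt_control, using the postures defined in tx_port.h:

       UINT old_posture = tx_interrupt_control(TX_INT_DISABLE);
       // ... short critical section ...
       tx_interrupt_control(old_posture);
*/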
// UINT _tx_thread_interrupt_control(UINT new_posture)
// {
.global _tx_thread_interrupt_control
.thumb_func
_tx_thread_interrupt_control:
#ifdef TX_PORT_USE_BASEPRI
MRS r1, BASEPRI // Pickup current interrupt posture
MSR BASEPRI, r0 // Apply the new interrupt posture
MOV r0, r1 // Transfer old to return register
#else
MRS r1, PRIMASK // Pickup current interrupt lockout
MSR PRIMASK, r0 // Apply the new interrupt lockout
MOV r0, r1 // Transfer old to return register
#endif
BX lr // Return to caller
// }


@@ -0,0 +1,79 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
.text 32
.align 4
.syntax unified
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_disable Cortex-M7/GNU */
/* 6.1.7 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for disabling interrupts and returning */
/* the previous interrupt lockout posture. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* old_posture Old interrupt lockout posture */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* */
/**************************************************************************/
// UINT _tx_thread_interrupt_disable(VOID)
// {
.global _tx_thread_interrupt_disable
.thumb_func
_tx_thread_interrupt_disable:
/* Return current interrupt lockout posture. */
#ifdef TX_PORT_USE_BASEPRI
MRS r0, BASEPRI
LDR r1, =TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
MRS r0, PRIMASK
CPSID i
#endif
BX lr
// }


@@ -0,0 +1,76 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
.text 32
.align 4
.syntax unified
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_restore Cortex-M7/GNU */
/* 6.1.7 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for restoring the previous */
/* interrupt lockout posture. */
/* */
/* INPUT */
/* */
/* previous_posture Previous interrupt posture */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* */
/**************************************************************************/
// VOID _tx_thread_interrupt_restore(UINT previous_posture)
// {
.global _tx_thread_interrupt_restore
.thumb_func
_tx_thread_interrupt_restore:
/* Restore previous interrupt lockout posture. */
#ifdef TX_PORT_USE_BASEPRI
MSR BASEPRI, r0
#else
MSR PRIMASK, r0
#endif
BX lr
// }
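/* Pairing sketch (illustrative only; both prototypes are declared in tx_port.h when
   TX_DISABLE_INLINE is defined):

       UINT posture = _tx_thread_interrupt_disable();
       // ... critical section ...
       _tx_thread_interrupt_restore(posture);
*/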


@@ -0,0 +1,325 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
.global _tx_thread_current_ptr
.global _tx_thread_execute_ptr
.global _tx_timer_time_slice
.global _tx_execution_thread_enter
.global _tx_execution_thread_exit
#ifdef TX_LOW_POWER
.global tx_low_power_enter
.global tx_low_power_exit
#endif
.text
.align 4
.syntax unified
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M7/GNU */
/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function waits for a thread control block pointer to appear in */
/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
/* in the variable, the corresponding thread is resumed. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
/* 04-25-2022 Scott Larson Added BASEPRI support, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
// {
.global _tx_thread_schedule
.thumb_func
_tx_thread_schedule:
/* This function should only ever be called on Cortex-M
from the first schedule request. Subsequent scheduling occurs
from the PendSV handling routine below. */
/* Clear the preempt-disable flag to enable rescheduling after initialization on Cortex-M targets. */
MOV r0, #0 // Build value for TX_FALSE
LDR r2, =_tx_thread_preempt_disable // Build address of preempt disable flag
STR r0, [r2, #0] // Clear preempt disable flag
/* Clear CONTROL.FPCA bit so VFP registers aren't unnecessarily stacked. */
#ifdef __ARM_FP
MRS r0, CONTROL // Pickup current CONTROL register
BIC r0, r0, #4 // Clear the FPCA bit
MSR CONTROL, r0 // Setup new CONTROL register
#endif
/* Enable interrupts */
CPSIE i
/* Enter the scheduler for the first time. */
MOV r0, #0x10000000 // Load PENDSVSET bit
MOV r1, #0xE000E000 // Load NVIC base
STR r0, [r1, #0xD04] // Set PENDSVBIT in ICSR
DSB // Complete all memory accesses
ISB // Flush pipeline
/* Wait here for the PendSV to take place. */
__tx_wait_here:
B __tx_wait_here // Wait for the PendSV to happen
// }
/* Generic context switching PendSV handler. */
.global PendSV_Handler
.global __tx_PendSVHandler
.syntax unified
.thumb_func
PendSV_Handler:
.thumb_func
__tx_PendSVHandler:
/* Get current thread value and new thread pointer. */
__tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
#ifdef TX_PORT_USE_BASEPRI
MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r0
#else
CPSIE i // Enable interrupts
#endif /* TX_PORT_USE_BASEPRI */
#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
MOV r3, #0 // Build NULL value
LDR r1, [r0] // Pickup current thread pointer
/* Determine if there is a current thread to finish preserving. */
CBZ r1, __tx_ts_new // If NULL, skip preservation
/* Recover PSP and preserve current thread context. */
STR r3, [r0] // Set _tx_thread_current_ptr to NULL
MRS r12, PSP // Pickup PSP pointer (thread's stack pointer)
STMDB r12!, {r4-r11} // Save its remaining registers
#ifdef __ARM_FP
TST LR, #0x10 // Determine if the VFP extended frame is present
BNE _skip_vfp_save
VSTMDB r12!,{s16-s31} // Yes, save additional VFP registers
_skip_vfp_save:
#endif
LDR r4, =_tx_timer_time_slice // Build address of time-slice variable
STMDB r12!, {LR} // Save LR on the stack
/* Determine if time-slice is active. If it isn't, skip time-slice processing. */
LDR r5, [r4] // Pickup current time-slice
STR r12, [r1, #8] // Save the thread stack pointer
CBZ r5, __tx_ts_new // If not active, skip processing
/* Time-slice is active, save the current thread's time-slice and clear the global time-slice variable. */
STR r5, [r1, #24] // Save current time-slice
/* Clear the global time-slice. */
STR r3, [r4] // Clear time-slice
/* Executing thread is now completely preserved!!! */
__tx_ts_new:
/* Now we are looking for a new thread to execute! */
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready to execute; make it the current thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
/* Increment the thread run count. */
__tx_ts_restore:
LDR r7, [r1, #4] // Pickup the current thread run count
LDR r4, =_tx_timer_time_slice // Build address of time-slice variable
LDR r5, [r1, #24] // Pickup thread's current time-slice
ADD r7, r7, #1 // Increment the thread run count
STR r7, [r1, #4] // Store the new run count
/* Setup global time-slice with thread's current time-slice. */
STR r5, [r4] // Setup global time-slice
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread entry function to indicate the thread is executing. */
PUSH {r0, r1} // Save r0 and r1
BL _tx_execution_thread_enter // Call the thread execution enter function
POP {r0, r1} // Recover r0 and r1
#endif
/* Restore the thread context and PSP. */
LDR r12, [r1, #8] // Pickup thread's stack pointer
LDMIA r12!, {LR} // Pickup LR
#ifdef __ARM_FP
TST LR, #0x10 // Determine if the VFP extended frame is present
BNE _skip_vfp_restore // If not, skip VFP restore
VLDMIA r12!, {s16-s31} // Yes, restore additional VFP registers
_skip_vfp_restore:
#endif
LDMIA r12!, {r4-r11} // Recover thread's registers
MSR PSP, r12 // Setup the thread's stack pointer
/* Return to thread. */
BX lr // Return to thread!
/* The following is the idle wait processing... in this case, no threads are ready for execution and the
system will simply be idle until an interrupt occurs that makes a thread ready. Note that interrupts
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
#ifdef TX_PORT_USE_BASEPRI
LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
MSR BASEPRI, r1
#else
CPSID i // Disable interrupts
#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_LOW_POWER
PUSH {r0-r3}
BL tx_low_power_enter // Possibly enter low power mode
POP {r0-r3}
#endif
#ifdef TX_ENABLE_WFI
DSB // Ensure no outstanding memory transactions
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
#ifdef TX_LOW_POWER
PUSH {r0-r3}
BL tx_low_power_exit // Exit low power mode
POP {r0-r3}
#endif
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
already in the handler! */
__tx_ts_ready:
MOV r7, #0x08000000 // Build clear PendSV value
MOV r8, #0xE000E000 // Build base NVIC address
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
#ifdef TX_PORT_USE_BASEPRI
MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
MSR BASEPRI, r4
#else
CPSIE i // Enable interrupts
#endif
B __tx_ts_restore // Restore the thread
// }
#ifdef __ARM_FP
.global tx_thread_fpu_enable
.thumb_func
tx_thread_fpu_enable:
.global tx_thread_fpu_disable
.thumb_func
tx_thread_fpu_disable:
/* Automatic VFP logic is supported; this function is present only for
backward compatibility purposes and therefore simply returns. */
BX LR // Return to caller
#endif
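/* For reference, a hedged partial overlay of the TX_THREAD fields this handler
   touches, reconstructed only from the offsets used above; the authoritative
   layout is the TX_THREAD definition in tx_api.h, the struct name and padding
   fields are hypothetical, and ULONG/VOID come from tx_port.h. */
// typedef struct TX_THREAD_OFFSETS_SKETCH
// {
//     ULONG field_at_offset_0; /* +0  not referenced by this handler */
//     ULONG tx_thread_run_count; /* +4  incremented at __tx_ts_restore */
//     VOID *tx_thread_stack_ptr; /* +8  thread's saved stack (PSP) frame pointer */
//     ULONG fields_12_to_20[3]; /* +12..+20  not referenced by this handler */
//     ULONG tx_thread_time_slice; /* +24 exchanged with _tx_timer_time_slice */
// } TX_THREAD_OFFSETS_SKETCH;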

View File

@ -0,0 +1,133 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
.text
.align 4
.syntax unified
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_stack_build Cortex-M7/GNU */
/* 6.1.7 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function builds a stack frame on the supplied thread's stack. */
/* The stack frame results in a fake interrupt return to the supplied */
/* function pointer. */
/* */
/* INPUT */
/* */
/* thread_ptr Pointer to thread control blk */
/* function_ptr Pointer to return function */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_thread_create Create thread service */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* */
/**************************************************************************/
// VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
// {
.global _tx_thread_stack_build
.thumb_func
_tx_thread_stack_build:
/* Build a fake interrupt frame. The form of the fake interrupt stack
on the Cortex-M should look like the following after it is built:
Stack Top:
LR Interrupted LR (LR at time of PENDSV)
r4 Initial value for r4
r5 Initial value for r5
r6 Initial value for r6
r7 Initial value for r7
r8 Initial value for r8
r9 Initial value for r9
r10 Initial value for r10
r11 Initial value for r11
r0 Initial value for r0 (Hardware stack starts here!!)
r1 Initial value for r1
r2 Initial value for r2
r3 Initial value for r3
r12 Initial value for r12
lr Initial value for lr
pc Initial value for pc
xPSR Initial value for xPSR
Stack Bottom: (higher memory address) */
LDR r2, [r0, #16] // Pickup end of stack area
BIC r2, r2, #0x7 // Align frame for 8-byte alignment
SUB r2, r2, #68 // Subtract frame size
LDR r3, =0xFFFFFFFD // Build initial LR value
STR r3, [r2, #0] // Save on the stack
/* Actually build the stack frame. */
MOV r3, #0 // Build initial register value
STR r3, [r2, #4] // Store initial r4
STR r3, [r2, #8] // Store initial r5
STR r3, [r2, #12] // Store initial r6
STR r3, [r2, #16] // Store initial r7
STR r3, [r2, #20] // Store initial r8
STR r3, [r2, #24] // Store initial r9
STR r3, [r2, #28] // Store initial r10
STR r3, [r2, #32] // Store initial r11
/* Hardware stack follows. */
STR r3, [r2, #36] // Store initial r0
STR r3, [r2, #40] // Store initial r1
STR r3, [r2, #44] // Store initial r2
STR r3, [r2, #48] // Store initial r3
STR r3, [r2, #52] // Store initial r12
MOV r3, #0xFFFFFFFF // Poison EXC_RETURN value
STR r3, [r2, #56] // Store initial lr
STR r1, [r2, #60] // Store initial pc
MOV r3, #0x01000000 // Only T-bit need be set
STR r3, [r2, #64] // Store initial xPSR
/* Setup stack pointer. */
// thread_ptr -> tx_thread_stack_ptr = r2;
STR r2, [r0, #8] // Save stack pointer in thread's
// control block
BX lr // Return to caller
// }
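/* The 68-byte frame built above, expressed as a hedged C overlay for clarity
   (struct and field names are hypothetical; ULONG comes from tx_port.h): */
// typedef struct INITIAL_STACK_FRAME_SKETCH
// {
//     ULONG exc_return; /* +0  0xFFFFFFFD: thread mode, PSP, no FP frame */
//     ULONG r4_to_r11[8]; /* +4  software-saved registers, zeroed */
//     ULONG r0_to_r3[4]; /* +36 hardware-stacked registers, zeroed */
//     ULONG r12; /* +52 */
//     ULONG lr; /* +56 poisoned with 0xFFFFFFFF */
//     ULONG pc; /* +60 thread entry point (function_ptr) */
//     ULONG xpsr; /* +64 0x01000000, only the T bit set */
// } INITIAL_STACK_FRAME_SKETCH; /* sizeof == 68, matching SUB r2, r2, #68 */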

View File

@ -0,0 +1,93 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
.text 32
.align 4
.syntax unified
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_system_return Cortex-M7/GNU */
/* 6.1.7 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is target processor specific. It is used to transfer */
/* control from a thread back to the ThreadX system. Only a */
/* minimal context is saved since the compiler assumes temp registers */
/* are going to get clobbered by a function call anyway. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_thread_schedule Thread scheduling loop */
/* */
/* CALLED BY */
/* */
/* ThreadX components */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* */
/**************************************************************************/
// VOID _tx_thread_system_return(VOID)
// {
.thumb_func
.global _tx_thread_system_return
_tx_thread_system_return:
/* Return to real scheduler via PendSV. Note that this routine is often
replaced with in-line assembly in tx_port.h to improve performance. */
MOV r0, #0x10000000 // Load PENDSVSET bit
MOV r1, #0xE000E000 // Load NVIC base
STR r0, [r1, #0xD04] // Set PENDSVBIT in ICSR
MRS r0, IPSR // Pickup IPSR
CMP r0, #0 // Is it a thread returning?
BNE _isr_context // If ISR, skip interrupt enable
#ifdef TX_PORT_USE_BASEPRI
MRS r1, BASEPRI // Thread context returning, pickup BASEPRI
MOV r0, #0
MSR BASEPRI, r0 // Enable interrupts
MSR BASEPRI, r1 // Restore original interrupt posture
#else
MRS r1, PRIMASK // Thread context returning, pickup PRIMASK
CPSIE i // Enable interrupts
MSR PRIMASK, r1 // Restore original interrupt posture
#endif
_isr_context:
BX lr // Return to caller
// }
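/* A C-level sketch of the PendSV request performed above, assuming only the
   architectural numbers (ICSR at 0xE000ED04, PENDSVSET is bit 28); the helper
   name is hypothetical and the real port uses the assembly above: */
// #include <stdint.h>
// static inline void pend_context_switch_sketch(void)
// {
//     volatile uint32_t *icsr = (volatile uint32_t *)0xE000ED04UL;
//     *icsr = (1UL << 28); /* PENDSVSET: pend the PendSV exception */
//     /* the first-schedule path in _tx_thread_schedule additionally issues DSB/ISB */
// }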

View File

@ -0,0 +1,255 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Timer */
/** */
/**************************************************************************/
/**************************************************************************/
.global _tx_timer_time_slice
.global _tx_timer_system_clock
.global _tx_timer_current_ptr
.global _tx_timer_list_start
.global _tx_timer_list_end
.global _tx_timer_expired_time_slice
.global _tx_timer_expired
.global _tx_thread_time_slice
.global _tx_timer_expiration_process
.text
.align 4
.syntax unified
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_timer_interrupt Cortex-M7/GNU */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function processes the hardware timer interrupt. This */
/* processing includes incrementing the system clock and checking for */
/* time slice and/or timer expiration. If either is found, the */
/* expiration functions are called. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_timer_expiration_process Timer expiration processing */
/* _tx_thread_time_slice Time slice interrupted thread */
/* */
/* CALLED BY */
/* */
/* interrupt vector */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* 01-31-2022 Scott Larson Modified comment(s), added */
/* TX_NO_TIMER support, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
// VOID _tx_timer_interrupt(VOID)
// {
#ifndef TX_NO_TIMER
.global _tx_timer_interrupt
.thumb_func
_tx_timer_interrupt:
/* Upon entry to this routine, it is assumed that the compiler scratch registers are available
for use. */
/* Increment the system clock. */
// _tx_timer_system_clock++;
LDR r1, =_tx_timer_system_clock // Pickup address of system clock
LDR r0, [r1, #0] // Pickup system clock
ADD r0, r0, #1 // Increment system clock
STR r0, [r1, #0] // Store new system clock
/* Test for time-slice expiration. */
// if (_tx_timer_time_slice)
// {
LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
LDR r2, [r3, #0] // Pickup time-slice
CBZ r2, __tx_timer_no_time_slice // Is it non-active?
// Yes, skip time-slice processing
/* Decrement the time_slice. */
// _tx_timer_time_slice--;
SUB r2, r2, #1 // Decrement the time-slice
STR r2, [r3, #0] // Store new time-slice value
/* Check for expiration. */
// if (_tx_timer_time_slice == 0)
CBNZ r2, __tx_timer_no_time_slice // Has it expired?
// No, skip expiration processing
/* Set the time-slice expired flag. */
// _tx_timer_expired_time_slice = TX_TRUE;
LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
MOV r0, #1 // Build expired value
STR r0, [r3, #0] // Set time-slice expiration flag
// }
__tx_timer_no_time_slice:
/* Test for timer expiration. */
// if (*_tx_timer_current_ptr)
// {
LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
LDR r0, [r1, #0] // Pickup current timer
LDR r2, [r0, #0] // Pickup timer list entry
CBZ r2, __tx_timer_no_timer // Is there anything in the list?
// No, just increment the timer
/* Set expiration flag. */
// _tx_timer_expired = TX_TRUE;
LDR r3, =_tx_timer_expired // Pickup expiration flag address
MOV r2, #1 // Build expired value
STR r2, [r3, #0] // Set expired flag
B __tx_timer_done // Finished timer processing
// }
// else
// {
__tx_timer_no_timer:
/* No timer expired, increment the timer pointer. */
// _tx_timer_current_ptr++;
ADD r0, r0, #4 // Move to next timer
/* Check for wrap-around. */
// if (_tx_timer_current_ptr == _tx_timer_list_end)
LDR r3, =_tx_timer_list_end // Pickup addr of timer list end
LDR r2, [r3, #0] // Pickup list end
CMP r0, r2 // Are we at list end?
BNE __tx_timer_skip_wrap // No, skip wrap-around logic
/* Wrap to beginning of list. */
// _tx_timer_current_ptr = _tx_timer_list_start;
LDR r3, =_tx_timer_list_start // Pickup addr of timer list start
LDR r0, [r3, #0] // Set current pointer to list start
__tx_timer_skip_wrap:
STR r0, [r1, #0] // Store new current timer pointer
// }
__tx_timer_done:
/* See if anything has expired. */
// if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
// {
LDR r3, =_tx_timer_expired_time_slice // Pickup addr of expired flag
LDR r2, [r3, #0] // Pickup time-slice expired flag
CBNZ r2, __tx_something_expired // Did a time-slice expire?
// If non-zero, time-slice expired
LDR r1, =_tx_timer_expired // Pickup addr of other expired flag
LDR r0, [r1, #0] // Pickup timer expired flag
CBZ r0, __tx_timer_nothing_expired // Did a timer expire?
// No, nothing expired
__tx_something_expired:
STMDB sp!, {r0, lr} // Save the lr register on the stack
// and save r0 just to keep 8-byte alignment
/* Did a timer expire? */
// if (_tx_timer_expired)
// {
LDR r1, =_tx_timer_expired // Pickup addr of expired flag
LDR r0, [r1, #0] // Pickup timer expired flag
CBZ r0, __tx_timer_dont_activate // Check for timer expiration
// If not set, skip timer activation
/* Process timer expiration. */
// _tx_timer_expiration_process();
BL _tx_timer_expiration_process // Call the timer expiration handling routine
// }
__tx_timer_dont_activate:
/* Did time slice expire? */
// if (_tx_timer_expired_time_slice)
// {
LDR r3, =_tx_timer_expired_time_slice // Pickup addr of time-slice expired
LDR r2, [r3, #0] // Pickup the actual flag
CBZ r2, __tx_timer_not_ts_expiration // See if the flag is set
// No, skip time-slice processing
/* Time slice interrupted thread. */
// _tx_thread_time_slice();
BL _tx_thread_time_slice // Call time-slice processing
LDR r0, =_tx_thread_preempt_disable // Build address of preempt disable flag
LDR r1, [r0] // Is the preempt disable flag set?
CBNZ r1, __tx_timer_skip_time_slice // Yes, skip the PendSV logic
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r1, [r0] // Pickup the current thread pointer
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
LDR r3, [r2] // Pickup the execute thread pointer
LDR r0, =0xE000ED04 // Build address of control register
LDR r2, =0x10000000 // Build value for PendSV bit
CMP r1, r3 // Are they the same?
BEQ __tx_timer_skip_time_slice // If the same, there was no time-slice performed
STR r2, [r0] // Not the same, issue the PendSV for preemption
__tx_timer_skip_time_slice:
// }
__tx_timer_not_ts_expiration:
LDMIA sp!, {r0, lr} // Recover lr register (r0 is just there for
// the 8-byte stack alignment)
// }
__tx_timer_nothing_expired:
DSB // Complete all memory access
BX lr // Return to caller
// }
#endif
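/* In Cortex-M builds the periodic tick that drives this routine is typically
   generated by SysTick and routed here from the low-level initialization code;
   a minimal C-level sketch, assuming a CMSIS-style vector table entry named
   SysTick_Handler (handler name and wiring are assumptions): */
// VOID _tx_timer_interrupt(VOID); /* provided by this file */
// void SysTick_Handler(void)
// {
//     _tx_timer_interrupt(); /* advance the clock, time-slice, and timer list */
// }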