RTX5: added support for Cortex-A (preliminary)
diff --git a/ARM.CMSIS.pdsc b/ARM.CMSIS.pdsc
index 5724381..7cdf8ac 100644
--- a/ARM.CMSIS.pdsc
+++ b/ARM.CMSIS.pdsc
@@ -13,6 +13,8 @@
- Added ARM Compiler 6 support
- Updated Cortex-A core functions
- Updated Startup and System files
+ CMSIS-RTOS2:
+ - RTX5: added support for Cortex-A
</release>
<release version="5.0.2-dev2">
CMSIS-RTOS2:
@@ -662,8 +664,8 @@
<conditions>
<!-- compiler -->
<condition id="ARMCC6">
- <accept Tcompiler="ARMCC" Toptions="AC6"/>
- <accept Tcompiler="ARMCC" Toptions="AC6LTO"/>
+ <accept Tcompiler="ARMCC" Toptions="AC6"/>
+ <accept Tcompiler="ARMCC" Toptions="AC6LTO"/>
</condition>
<condition id="ARMCC">
<require Tcompiler="ARMCC" Toptions="AC5"/>
@@ -825,7 +827,25 @@
<require Dcore="ARMV8MML" Ddsp="DSP" Dfpu="SP_FPU"/>
</condition>
+ <!-- Cortex-A Devices -->
+ <condition id="RZ_A Device">
+ <description>Renesas RZ_A Device</description>
+ <require Dvendor="Renesas:117"/>
+ <require Dfamily="RZ_A"/>
+ </condition>
+ <condition id="Unknown Cortex-A Device">
+ <description>Unknown Cortex-A Device</description>
+ <require condition="ARMv7-A Device"/>
+ <deny condition="RZ_A Device"/>
+ </condition>
+
<!-- ARMCC compiler -->
+ <condition id="CA_ARMCC">
+ <description>Cortex-A5, Cortex-A7 or Cortex-A9 processor based device for the ARM Compiler</description>
+ <require condition="ARMv7-A Device"/>
+ <require Tcompiler="ARMCC"/>
+ </condition>
+
<condition id="CM0_ARMCC">
<description>Cortex-M0 or Cortex-M0+ or SC000 processor based device for the ARM Compiler</description>
<require condition="CM0"/>
@@ -1157,6 +1177,12 @@
</condition>
<!-- GCC compiler -->
+ <condition id="CA_GCC">
+ <description>Cortex-A5, Cortex-A7 or Cortex-A9 processor based device for the GCC Compiler</description>
+ <require condition="ARMv7-A Device"/>
+ <require Tcompiler="GCC"/>
+ </condition>
+
<condition id="CM0_GCC">
<description>Cortex-M0 or Cortex-M0+ or SC000 processor based device for the GCC Compiler</description>
<require condition="CM0"/>
@@ -1488,6 +1514,12 @@
</condition>
<!-- IAR compiler -->
+ <condition id="CA_IAR">
+ <description>Cortex-A5, Cortex-A7 or Cortex-A9 processor based device for the IAR Compiler</description>
+ <require condition="ARMv7-A Device"/>
+ <require Tcompiler="IAR"/>
+ </condition>
+
<condition id="CM0_IAR">
<description>Cortex-M0 or Cortex-M0+ or SC000 processor based device for the IAR Compiler</description>
<require condition="CM0"/>
@@ -1781,6 +1813,14 @@
</condition>
<condition id="RTOS2 RTX5">
<description>Components required for RTOS2 RTX5</description>
+ <accept condition="ARMv6_7_8-M Device"/>
+ <accept condition="ARMv7-A Device"/>
+ <require condition="ARMCC GCC IAR"/>
+ <require Cclass="CMSIS" Cgroup="CORE"/>
+ <require Cclass="Device" Cgroup="Startup"/>
+ </condition>
+ <condition id="RTOS2 RTX5 Lib">
+ <description>Components required for RTOS2 RTX5 Library</description>
<require condition="ARMv6_7_8-M Device"/>
<require condition="ARMCC GCC IAR"/>
<require Cclass="CMSIS" Cgroup="CORE"/>
@@ -2330,7 +2370,7 @@
</component>
<!-- CMSIS-RTOS2 Keil RTX5 component -->
- <component Cclass="CMSIS" Cgroup="RTOS2" Csub="Keil RTX5" Cvariant="Library" Cversion="5.1.1" Capiversion="2.1.0" condition="RTOS2 RTX5">
+ <component Cclass="CMSIS" Cgroup="RTOS2" Csub="Keil RTX5" Cvariant="Library" Cversion="5.1.1" Capiversion="2.1.0" condition="RTOS2 RTX5 Lib">
<description>CMSIS-RTOS2 RTX5 for Cortex-M, SC000, C300 and ARMv8-M (Library)</description>
<RTE_Components_h>
<!-- the following content goes into file 'RTE_Components.h' -->
@@ -2465,8 +2505,12 @@
<file category="header" name="CMSIS/RTOS2/RTX/Include/rtx_os.h"/>
<!-- RTX configuration -->
- <file category="header" attr="config" name="CMSIS/RTOS2/RTX/Config/RTX_Config.h" version="5.1.0"/>
- <file category="source" attr="config" name="CMSIS/RTOS2/RTX/Config/RTX_Config.c" version="5.1.0"/>
+ <file category="header" attr="config" name="CMSIS/RTOS2/RTX/Config/RTX_Config.h" version="5.1.0"/>
+ <file category="source" attr="config" name="CMSIS/RTOS2/RTX/Config/RTX_Config.c" version="5.1.0" condition="ARMv6_7_8-M Device" />
+ <file category="source" attr="config" name="CMSIS/RTOS2/RTX/Config/Cortex_A/RTX_Config.c" version="5.1.0" condition="Unknown Cortex-A Device"/>
+ <file category="source" attr="config" name="CMSIS/RTOS2/RTX/Config/Renesas/RZ_A/RTX_Config.c" version="5.1.0" condition="RZ_A Device"/>
+
+ <file category="source" attr="config" name="CMSIS/RTOS2/RTX/Config/handlers.c" version="5.1.0" condition="ARMv7-A Device"/>
<!-- RTX templates -->
<file category="source" attr="template" name="CMSIS/RTOS2/RTX/Template/main.c" version="2.0.0" select="CMSIS-RTOS2 'main' function"/>
@@ -2492,9 +2536,11 @@
<file category="source" name="CMSIS/RTOS2/RTX/Source/rtx_msgqueue.c"/>
<file category="source" name="CMSIS/RTOS2/RTX/Source/rtx_system.c"/>
<file category="source" name="CMSIS/RTOS2/RTX/Source/rtx_evr.c"/>
+ <file category="source" name="CMSIS/RTOS2/RTX/Source/rtx_gic.c" condition="ARMv7-A Device"/>
<!-- RTX sources (library configuration) -->
<file category="source" name="CMSIS/RTOS2/RTX/Source/rtx_lib.c"/>
<!-- RTX sources (handlers ARMCC) -->
+ <file category="source" name="CMSIS/RTOS2/RTX/Source/ARM/irq_ca.s" condition="CA_ARMCC"/>
<file category="source" name="CMSIS/RTOS2/RTX/Source/ARM/irq_cm0.s" condition="CM0_ARMCC"/>
<file category="source" name="CMSIS/RTOS2/RTX/Source/ARM/irq_cm3.s" condition="CM3_ARMCC"/>
<file category="source" name="CMSIS/RTOS2/RTX/Source/ARM/irq_cm3.s" condition="CM4_ARMCC"/>
@@ -2508,6 +2554,7 @@
<file category="source" name="CMSIS/RTOS2/RTX/Source/ARM/irq_armv8mml.s" condition="ARMv8MML_ARMCC"/>
<file category="source" name="CMSIS/RTOS2/RTX/Source/ARM/irq_armv8mml.s" condition="ARMv8MML_FP_ARMCC"/>
<!-- RTX sources (handlers GCC) -->
+ <file category="source" name="CMSIS/RTOS2/RTX/Source/GCC/irq_ca.S" condition="CA_GCC"/>
<file category="source" name="CMSIS/RTOS2/RTX/Source/GCC/irq_cm0.S" condition="CM0_GCC"/>
<file category="source" name="CMSIS/RTOS2/RTX/Source/GCC/irq_cm3.S" condition="CM3_GCC"/>
<file category="source" name="CMSIS/RTOS2/RTX/Source/GCC/irq_cm3.S" condition="CM4_GCC"/>
@@ -2521,6 +2568,7 @@
<file category="source" name="CMSIS/RTOS2/RTX/Source/GCC/irq_armv8mml.S" condition="ARMv8MML_GCC"/>
<file category="source" name="CMSIS/RTOS2/RTX/Source/GCC/irq_armv8mml_fp.S" condition="ARMv8MML_FP_GCC"/>
<!-- RTX sources (handlers IAR) -->
+ <file category="source" name="CMSIS/RTOS2/RTX/Source/IAR/irq_ca.s" condition="CA_IAR"/>
<file category="source" name="CMSIS/RTOS2/RTX/Source/IAR/irq_cm0.s" condition="CM0_IAR"/>
<file category="source" name="CMSIS/RTOS2/RTX/Source/IAR/irq_cm3.s" condition="CM3_IAR"/>
<file category="source" name="CMSIS/RTOS2/RTX/Source/IAR/irq_cm3.s" condition="CM4_IAR"/>
diff --git a/CMSIS/RTOS2/RTX/Config/Cortex_A/RTX_Config.c b/CMSIS/RTOS2/RTX/Config/Cortex_A/RTX_Config.c
new file mode 100644
index 0000000..7e640ad
--- /dev/null
+++ b/CMSIS/RTOS2/RTX/Config/Cortex_A/RTX_Config.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * $Revision: V5.1.0
+ *
+ * Project: CMSIS-RTOS RTX
+ * Title: RTX Configuration
+ *
+ * -----------------------------------------------------------------------------
+ */
+
+#include "RTE_Components.h"
+#include CMSIS_device_header
+
+#include "rtx_os.h"
+
+
+// Setup System Timer.
+// \return system timer IRQ number.
+int32_t osRtxSysTimerSetup (void) {
+ // ...
+ return (0);
+}
+
+// Enable System Timer.
+void osRtxSysTimerEnable (void) {
+ // ...
+}
+
+// Disable System Timer.
+void osRtxSysTimerDisable (void) {
+ // ...
+}
+
+// Acknowledge System Timer IRQ.
+void osRtxSysTimerAckIRQ (void) {
+ // ...
+}
+
+// Get System Timer count.
+// \return system timer count.
+uint32_t osRtxSysTimerGetCount (void) {
+ // ...
+ return (0U);
+}
+
+// Get System Timer frequency.
+// \return system timer frequency.
+uint32_t osRtxSysTimerGetFreq (void) {
+ // ...
+ return (1000000U);
+}
+
+
+// OS Idle Thread
+__WEAK __NO_RETURN void osRtxIdleThread (void *argument) {
+ (void)argument;
+
+ for (;;) {}
+}
+
+// OS Error Callback function
+__WEAK uint32_t osRtxErrorNotify (uint32_t code, void *object_id) {
+ (void)object_id;
+
+ switch (code) {
+ case osRtxErrorStackUnderflow:
+ // Stack underflow detected for thread (thread_id=object_id)
+ break;
+ case osRtxErrorISRQueueOverflow:
+ // ISR Queue overflow detected when inserting object (object_id)
+ break;
+ case osRtxErrorTimerQueueOverflow:
+ // User Timer Callback Queue overflow detected for timer (timer_id=object_id)
+ break;
+ case osRtxErrorClibSpace:
+ // Standard C/C++ library libspace not available: increase OS_THREAD_LIBSPACE_NUM
+ break;
+ case osRtxErrorClibMutex:
+ // Standard C/C++ library mutex initialization failed
+ break;
+ default:
+ break;
+ }
+ for (;;) {}
+//return 0U;
+}
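
The generic template above leaves the system-timer hooks empty for the port to fill in. As a rough illustration, here is a minimal sketch for a hypothetical memory-mapped down-counting tick timer; every TICKTIM_* name is a placeholder invented for this sketch (not part of CMSIS or of this commit), and the Renesas RZ_A port in the next file shows a complete real implementation, including tick accumulation in osRtxSysTimerGetCount. The sketch assumes the same includes as the template (RTE_Components.h, CMSIS_device_header, rtx_os.h).

// Sketch only: TICKTIM_* is a hypothetical down-counting timer.
#define TICKTIM_CLK   100000000UL             // assumed timer input clock (Hz)
#define TICKTIM_IRQn  ((IRQn_Type)29)         // assumed timer interrupt ID

typedef struct {
  volatile uint32_t LOAD;                     // reload value
  volatile uint32_t VALUE;                    // current count (counts down)
  volatile uint32_t CTRL;                     // bit 0: enable
  volatile uint32_t ISR;                      // write 1 to clear pending flag
} TickTim_t;

#define TICKTIM ((TickTim_t *)0x40001000UL)   // assumed base address

int32_t osRtxSysTimerSetup (void) {
  TICKTIM->CTRL = 0U;                                         // stop the timer
  TICKTIM->LOAD = (TICKTIM_CLK / osRtxConfig.tick_freq) - 1U; // one tick period
  return ((int32_t)TICKTIM_IRQn);             // RTX installs its tick handler here
}

void osRtxSysTimerEnable (void) {
  TICKTIM->CTRL |= 1U;                        // start counting
}

void osRtxSysTimerDisable (void) {
  TICKTIM->CTRL &= ~1U;                       // stop counting
}

void osRtxSysTimerAckIRQ (void) {
  TICKTIM->ISR = 1U;                          // clear the timer flag
  GIC_ClearPendingIRQ(TICKTIM_IRQn);          // clear the pending state in the GIC
}

uint32_t osRtxSysTimerGetCount (void) {
  return (TICKTIM->LOAD - TICKTIM->VALUE);    // counts elapsed in the current period
}

uint32_t osRtxSysTimerGetFreq (void) {
  return (TICKTIM_CLK);                       // rate at which the count advances
}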
diff --git a/CMSIS/RTOS2/RTX/Config/Renesas/RZ_A/RTX_Config.c b/CMSIS/RTOS2/RTX/Config/Renesas/RZ_A/RTX_Config.c
new file mode 100644
index 0000000..8058771
--- /dev/null
+++ b/CMSIS/RTOS2/RTX/Config/Renesas/RZ_A/RTX_Config.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * $Revision: V5.1.0
+ *
+ * Project: CMSIS-RTOS RTX
+ * Title: RTX Configuration
+ *
+ * -----------------------------------------------------------------------------
+ */
+
+#include "RTE_Components.h"
+#include CMSIS_device_header
+
+#include "rtx_os.h"
+
+
+// Define OS Timer channel and interrupt number
+#define OSTM OSTM0
+#define OSTM_IRQn OSTMI0TINT_IRQn
+
+static uint32_t ExtTim_Cnt; // Timer count used for overflow detection
+static uint32_t ExtTim_Freq; // Timer frequency
+
+
+// Get OS Timer current count value
+__STATIC_INLINE uint32_t OSTM_GetCount(void) {
+ ExtTim_Cnt = OSTM.OSTMnCNT;
+ return (OSTM.OSTMnCMP - ExtTim_Cnt);
+}
+
+// Check if OS Timer counter was reloaded
+__STATIC_INLINE uint32_t OSTM_GetOverflow(void) {
+ return ((OSTM.OSTMnCNT > ExtTim_Cnt) ? (1U) : (0U));
+}
+
+// Get OS Timer period
+__STATIC_INLINE uint32_t OSTM_GetPeriod(void) {
+ return (OSTM.OSTMnCMP + 1U);
+}
+
+
+// Setup System Timer.
+// \return system timer IRQ number.
+int32_t osRtxSysTimerSetup (void) {
+ uint32_t freq;
+
+ // Get CPG.FRQCR[IFC] bits
+ freq = (CPG.FRQCR >> 8) & 0x03;
+
+ // Determine Divider 2 output clock by using SystemCoreClock
+ if (freq == 0x03U) {
+ freq = (SystemCoreClock * 3U);
+ }
+ else if (freq == 0x01U) {
+ freq = (SystemCoreClock * 3U)/2U;
+ }
+ else {
+ freq = SystemCoreClock;
+ }
+  // Peripheral clock P0 = (Divider 2 clock * 1/12)
+ freq = freq / 12U;
+
+ // Determine tick frequency
+ freq = freq / osRtxConfig.tick_freq;
+
+ // Save frequency for later
+ ExtTim_Freq = freq;
+
+ // Enable OSTM clock
+ CPG.STBCR5 &= ~(CPG_STBCR5_BIT_MSTP51);
+
+ // Stop the OSTM counter
+ OSTM.OSTMnTT = 0x01U;
+
+ // Set interval timer mode and disable interrupts when counting starts
+ OSTM.OSTMnCTL = 0x00U;
+
+ // Set compare value
+ OSTM.OSTMnCMP = freq - 1U;
+
+ return (OSTM_IRQn);
+}
+
+// Enable System Timer.
+void osRtxSysTimerEnable (void) {
+  // Start the OSTM counter
+ OSTM.OSTMnTS = 0x01U;
+}
+
+// Disable System Timer.
+void osRtxSysTimerDisable (void) {
+ // Stop the OSTM counter
+ OSTM.OSTMnTT = 0x01U;
+}
+
+// Acknowledge System Timer IRQ.
+void osRtxSysTimerAckIRQ (void) {
+ // Acknowledge OSTM interrupt
+ GIC_ClearPendingIRQ (OSTM_IRQn);
+}
+
+// Get System Timer count.
+// \return system timer count.
+uint32_t osRtxSysTimerGetCount (void) {
+ uint32_t tick;
+ uint32_t val;
+
+ tick = (uint32_t)osRtxInfo.kernel.tick;
+ val = OSTM_GetCount();
+ if (OSTM_GetOverflow()) {
+ val = OSTM_GetCount();
+ tick++;
+ }
+ val += tick * OSTM_GetPeriod();
+
+ return val;
+}
+
+// Get System Timer frequency.
+// \return system timer frequency.
+uint32_t osRtxSysTimerGetFreq (void) {
+ return ExtTim_Freq;
+}
+
+
+// OS Idle Thread
+__WEAK __NO_RETURN void osRtxIdleThread (void *argument) {
+ (void)argument;
+
+ for (;;) {}
+}
+
+// OS Error Callback function
+__WEAK uint32_t osRtxErrorNotify (uint32_t code, void *object_id) {
+ (void)object_id;
+
+ switch (code) {
+ case osRtxErrorStackUnderflow:
+ // Stack underflow detected for thread (thread_id=object_id)
+ break;
+ case osRtxErrorISRQueueOverflow:
+ // ISR Queue overflow detected when inserting object (object_id)
+ break;
+ case osRtxErrorTimerQueueOverflow:
+ // User Timer Callback Queue overflow detected for timer (timer_id=object_id)
+ break;
+ case osRtxErrorClibSpace:
+ // Standard C/C++ library libspace not available: increase OS_THREAD_LIBSPACE_NUM
+ break;
+ case osRtxErrorClibMutex:
+ // Standard C/C++ library mutex initialization failed
+ break;
+ default:
+ break;
+ }
+ for (;;) {}
+//return 0U;
+}
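
Both configuration files declare osRtxIdleThread and osRtxErrorNotify as __WEAK, so an application can replace them without editing the template. Below is a minimal sketch of an application-side override of the error callback; ErrorLog_Put is a hypothetical application logging hook, and whether a given error is survivable is an application decision (the default template halts on all of them).

// Application code: overrides the __WEAK osRtxErrorNotify defined above.
#include "rtx_os.h"

extern void ErrorLog_Put (uint32_t code, void *object_id);  // hypothetical logger

uint32_t osRtxErrorNotify (uint32_t code, void *object_id) {
  ErrorLog_Put(code, object_id);              // record the error code and object
  if (code == osRtxErrorStackUnderflow) {
    for (;;) {}                               // stack corruption: do not continue
  }
  return (0U);                                // other errors: return to the kernel
}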
diff --git a/CMSIS/RTOS2/RTX/Config/handlers.c b/CMSIS/RTOS2/RTX/Config/handlers.c
new file mode 100644
index 0000000..6afdccc
--- /dev/null
+++ b/CMSIS/RTOS2/RTX/Config/handlers.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Project: CMSIS-RTOS RTX
+ * Title: Exception handlers (C functions)
+ *
+ * -----------------------------------------------------------------------------
+ */
+#include "RTE_Components.h"
+#include CMSIS_device_header
+
+
+//Fault Status Register (IFSR/DFSR) definitions
+#define FSR_ALIGNMENT_FAULT 0x01 //DFSR only. Fault on first lookup
+#define FSR_INSTRUCTION_CACHE_MAINTENANCE 0x04 //DFSR only - async/external
+#define FSR_SYNC_EXT_TTB_WALK_FIRST 0x0c //sync/external
+#define FSR_SYNC_EXT_TTB_WALK_SECOND 0x0e //sync/external
+#define FSR_SYNC_PARITY_TTB_WALK_FIRST 0x1c //sync/external
+#define FSR_SYNC_PARITY_TTB_WALK_SECOND 0x1e //sync/external
+#define FSR_TRANSLATION_FAULT_FIRST 0x05 //MMU Fault - internal
+#define FSR_TRANSLATION_FAULT_SECOND 0x07 //MMU Fault - internal
+#define FSR_ACCESS_FLAG_FAULT_FIRST 0x03 //MMU Fault - internal
+#define FSR_ACCESS_FLAG_FAULT_SECOND 0x06 //MMU Fault - internal
+#define FSR_DOMAIN_FAULT_FIRST 0x09 //MMU Fault - internal
+#define FSR_DOMAIN_FAULT_SECOND 0x0b //MMU Fault - internal
+#define FSR_PERMISSION_FAULT_FIRST 0x0f //MMU Fault - internal
+#define FSR_PERMISSION_FAULT_SECOND 0x0d //MMU Fault - internal
+#define FSR_DEBUG_EVENT 0x02 //internal
+#define FSR_SYNC_EXT_ABORT 0x08 //sync/external
+#define FSR_TLB_CONFLICT_ABORT 0x10 //sync/external
+#define FSR_LOCKDOWN 0x14 //internal
+#define FSR_COPROCESSOR_ABORT 0x1a //internal
+#define FSR_SYNC_PARITY_ERROR 0x19 //sync/external
+#define FSR_ASYNC_EXTERNAL_ABORT 0x16 //DFSR only - async/external
+#define FSR_ASYNC_PARITY_ERROR 0x18 //DFSR only - async/external
+
+void CDAbtHandler(uint32_t DFSR, uint32_t DFAR, uint32_t LR) {
+  uint32_t FS = (DFSR & (1 << 10)) >> 6 | (DFSR & 0x0f);   //Extract fault status: FS = {DFSR[10], DFSR[3:0]}
+
+ switch(FS) {
+ //Synchronous parity errors - retry
+ case FSR_SYNC_PARITY_ERROR:
+ case FSR_SYNC_PARITY_TTB_WALK_FIRST:
+ case FSR_SYNC_PARITY_TTB_WALK_SECOND:
+ return;
+
+ //Your code here. Value in DFAR is invalid for some fault statuses.
+ case FSR_ALIGNMENT_FAULT:
+ case FSR_INSTRUCTION_CACHE_MAINTENANCE:
+ case FSR_SYNC_EXT_TTB_WALK_FIRST:
+ case FSR_SYNC_EXT_TTB_WALK_SECOND:
+ case FSR_TRANSLATION_FAULT_FIRST:
+ case FSR_TRANSLATION_FAULT_SECOND:
+ case FSR_ACCESS_FLAG_FAULT_FIRST:
+ case FSR_ACCESS_FLAG_FAULT_SECOND:
+ case FSR_DOMAIN_FAULT_FIRST:
+ case FSR_DOMAIN_FAULT_SECOND:
+ case FSR_PERMISSION_FAULT_FIRST:
+ case FSR_PERMISSION_FAULT_SECOND:
+ case FSR_DEBUG_EVENT:
+ case FSR_SYNC_EXT_ABORT:
+ case FSR_TLB_CONFLICT_ABORT:
+ case FSR_LOCKDOWN:
+ case FSR_COPROCESSOR_ABORT:
+ case FSR_ASYNC_EXTERNAL_ABORT: //DFAR invalid
+ case FSR_ASYNC_PARITY_ERROR: //DFAR invalid
+ default:
+ while(1);
+ }
+}
+
+void CPAbtHandler(uint32_t IFSR, uint32_t IFAR, uint32_t LR) {
+  uint32_t FS = (IFSR & (1 << 10)) >> 6 | (IFSR & 0x0f);   //Extract fault status: FS = {IFSR[10], IFSR[3:0]}
+
+ switch(FS) {
+ //Synchronous parity errors - retry
+ case FSR_SYNC_PARITY_ERROR:
+ case FSR_SYNC_PARITY_TTB_WALK_FIRST:
+ case FSR_SYNC_PARITY_TTB_WALK_SECOND:
+ return;
+
+ //Your code here. Value in IFAR is invalid for some fault statuses.
+ case FSR_SYNC_EXT_TTB_WALK_FIRST:
+ case FSR_SYNC_EXT_TTB_WALK_SECOND:
+ case FSR_TRANSLATION_FAULT_FIRST:
+ case FSR_TRANSLATION_FAULT_SECOND:
+ case FSR_ACCESS_FLAG_FAULT_FIRST:
+ case FSR_ACCESS_FLAG_FAULT_SECOND:
+ case FSR_DOMAIN_FAULT_FIRST:
+ case FSR_DOMAIN_FAULT_SECOND:
+ case FSR_PERMISSION_FAULT_FIRST:
+ case FSR_PERMISSION_FAULT_SECOND:
+ case FSR_DEBUG_EVENT: //IFAR invalid
+ case FSR_SYNC_EXT_ABORT:
+ case FSR_TLB_CONFLICT_ABORT:
+ case FSR_LOCKDOWN:
+ case FSR_COPROCESSOR_ABORT:
+ default:
+ while(1);
+ }
+}
+
+
+//Returns the amount to decrement LR by:
+//  0 - the instruction was emulated; execute the next instruction
+//  2 - maintenance was performed; retry the instruction in Thumb state (state == 2)
+//  4 - maintenance was performed; retry the instruction in ARM state (state == 4)
+uint32_t CUndefHandler(uint32_t opcode, uint32_t state, uint32_t LR) {
+ const int THUMB = 2;
+ const int ARM = 4;
+ //Lazy VFP/NEON initialisation and switching
+
+ // (ARM ARM section A7.5) VFP data processing instruction?
+ // (ARM ARM section A7.6) VFP/NEON register load/store instruction?
+ // (ARM ARM section A7.8) VFP/NEON register data transfer instruction?
+ // (ARM ARM section A7.9) VFP/NEON 64-bit register data transfer instruction?
+ if ((state == ARM && ((opcode & 0x0C000000) >> 26 == 0x03)) ||
+ (state == THUMB && ((opcode & 0xEC000000) >> 26 == 0x3B))) {
+ if (((opcode & 0x00000E00) >> 9) == 5) {
+ __FPU_Enable();
+ return state;
+ }
+ }
+
+ // (ARM ARM section A7.4) NEON data processing instruction?
+ if ((state == ARM && ((opcode & 0xFE000000) >> 24 == 0xF2)) ||
+ (state == THUMB && ((opcode & 0xEF000000) >> 24 == 0xEF)) ||
+ // (ARM ARM section A7.7) NEON load/store instruction?
+ (state == ARM && ((opcode >> 24) == 0xF4)) ||
+ (state == THUMB && ((opcode >> 24) == 0xF9))) {
+ __FPU_Enable();
+ return state;
+ }
+
+ //Add code here for other Undef cases
+ while(1);
+}
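
In both abort handlers the 5-bit fault status FS is assembled by concatenating FS[4] (DFSR/IFSR bit 10) onto FS[3:0] (bits 3:0); that is what the (x & (1 << 10)) >> 6 | (x & 0x0f) expression computes. A small host-side check of the extraction, using values implied by the macro table above:

// Host-side sanity check of the fault-status extraction used in handlers.c.
#include <assert.h>
#include <stdint.h>

static uint32_t fsr_status (uint32_t fsr) {
  return ((fsr & (1U << 10)) >> 6) | (fsr & 0x0FU);  // FS = {bit 10, bits 3:0}
}

int main (void) {
  assert(fsr_status(0x005U) == 0x05U);  // FSR_TRANSLATION_FAULT_FIRST
  assert(fsr_status(0x409U) == 0x19U);  // FSR_SYNC_PARITY_ERROR (bit 10 -> FS bit 4)
  assert(fsr_status(0x408U) == 0x18U);  // FSR_ASYNC_PARITY_ERROR
  return 0;
}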
diff --git a/CMSIS/RTOS2/RTX/Source/ARM/irq_ca.s b/CMSIS/RTOS2/RTX/Source/ARM/irq_ca.s
new file mode 100644
index 0000000..f94e76e
--- /dev/null
+++ b/CMSIS/RTOS2/RTX/Source/ARM/irq_ca.s
@@ -0,0 +1,424 @@
+;/*
+; * Copyright (c) 2013-2017 ARM Limited. All rights reserved.
+; *
+; * SPDX-License-Identifier: Apache-2.0
+; *
+; * Licensed under the Apache License, Version 2.0 (the License); you may
+; * not use this file except in compliance with the License.
+; * You may obtain a copy of the License at
+; *
+; * www.apache.org/licenses/LICENSE-2.0
+; *
+; * Unless required by applicable law or agreed to in writing, software
+; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; * See the License for the specific language governing permissions and
+; * limitations under the License.
+; *
+; * -----------------------------------------------------------------------------
+; *
+; * Project: CMSIS-RTOS RTX
+; * Title: Cortex-A Exception handlers (using GIC)
+; *
+; * -----------------------------------------------------------------------------
+; */
+
+ICDABR0_OFFSET EQU 0x00000300 ; GICD: Active Bit Register 0 offset
+ICDIPR0_OFFSET EQU 0x00000400 ; GICD: Interrupt Priority Register 0 offset
+ICCIAR_OFFSET EQU 0x0000000C ; GICI: Interrupt Acknowledge Register offset
+ICCEOIR_OFFSET EQU 0x00000010 ; GICI: End of Interrupt Register offset
+ICCHPIR_OFFSET EQU 0x00000018 ; GICI: Highest Pending Interrupt Register offset
+
+MODE_FIQ EQU 0x11
+MODE_IRQ EQU 0x12
+MODE_SVC EQU 0x13
+MODE_ABT EQU 0x17
+MODE_UND EQU 0x1B
+
+CPSR_BIT_T EQU 0x20
+
+I_T_RUN_OFS EQU 28 ; osRtxInfo.thread.run offset
+TCB_SP_FRAME EQU 34 ; osRtxThread_t.stack_frame offset
+TCB_SP_OFS EQU 56 ; osRtxThread_t.sp offset
+
+
+ PRESERVE8
+ ARM
+
+
+ AREA |.constdata|, DATA, READONLY
+ EXPORT irqRtxLib
+irqRtxLib DCB 0 ; Non-weak library reference
+
+
+ AREA |.data|, DATA, READWRITE
+ID0_Active DCB 4 ; Flag used to work around GIC 390 errata 733075
+
+
+ AREA |.text|, CODE, READONLY
+
+
+Undef_Handler\
+ PROC
+ EXPORT Undef_Handler
+ IMPORT CUndefHandler
+
+ SRSFD SP!, #MODE_UND
+ PUSH {R0-R4, R12} ; Save APCS corruptible registers to UND mode stack
+
+ MRS R0, SPSR
+ TST R0, #CPSR_BIT_T ; Check mode
+ MOVEQ R1, #4 ; R1 = 4 ARM mode
+ MOVNE R1, #2 ; R1 = 2 Thumb mode
+ SUB R0, LR, R1
+ LDREQ R0, [R0] ; ARM mode - R0 points to offending instruction
+ BEQ Undef_Cont
+
+ ; Thumb instruction
+ ; Determine if it is a 32-bit Thumb instruction
+ LDRH R0, [R0]
+ MOV R2, #0x1C
+ CMP R2, R0, LSR #11
+ BHS Undef_Cont ; 16-bit Thumb instruction
+
+ ; 32-bit Thumb instruction. Unaligned - reconstruct the offending instruction
+ LDRH R2, [LR]
+ ORR R0, R2, R0, LSL #16
+Undef_Cont
+ MOV R2, LR ; Set LR to third argument
+
+ AND R12, SP, #4 ; Ensure stack is 8-byte aligned
+ SUB SP, SP, R12 ; Adjust stack
+ PUSH {R12, LR} ; Store stack adjustment and dummy LR
+
+ ; R0 = offending instruction, R1 = 2 (Thumb) or 4 (ARM)
+ BL CUndefHandler
+
+ POP {R12, LR} ; Get stack adjustment & discard dummy LR
+ ADD SP, SP, R12 ; Unadjust stack
+
+ LDR LR, [SP, #24] ; Restore stacked LR and possibly adjust for retry
+ SUB LR, LR, R0
+ LDR R0, [SP, #28] ; Restore stacked SPSR
+ MSR SPSR_CXSF, R0
+ POP {R0-R4, R12} ; Restore stacked APCS registers
+ ADD SP, SP, #8 ; Adjust SP for already-restored banked registers
+ MOVS PC, LR
+
+ ENDP
+
+
+PAbt_Handler\
+ PROC
+ EXPORT PAbt_Handler
+ IMPORT CPAbtHandler
+
+ SUB LR, LR, #4 ; Pre-adjust LR
+ SRSFD SP!, #MODE_ABT ; Save LR and SPSR to ABT mode stack
+ PUSH {R0-R4, R12} ; Save APCS corruptible registers to ABT mode stack
+ MRC p15, 0, R0, c5, c0, 1 ; IFSR
+ MRC p15, 0, R1, c6, c0, 2 ; IFAR
+
+ MOV R2, LR ; Set LR to third argument
+
+ AND R12, SP, #4 ; Ensure stack is 8-byte aligned
+ SUB SP, SP, R12 ; Adjust stack
+ PUSH {R12, LR} ; Store stack adjustment and dummy LR
+
+ BL CPAbtHandler
+
+ POP {R12, LR} ; Get stack adjustment & discard dummy LR
+ ADD SP, SP, R12 ; Unadjust stack
+
+ POP {R0-R4, R12} ; Restore stacked APCS registers
+ RFEFD SP! ; Return from exception
+
+ ENDP
+
+
+DAbt_Handler\
+ PROC
+ EXPORT DAbt_Handler
+ IMPORT CDAbtHandler
+
+ SUB LR, LR, #8 ; Pre-adjust LR
+ SRSFD SP!, #MODE_ABT ; Save LR and SPSR to ABT mode stack
+ PUSH {R0-R4, R12} ; Save APCS corruptible registers to ABT mode stack
+ CLREX ; State of exclusive monitors unknown after taken data abort
+ MRC p15, 0, R0, c5, c0, 0 ; DFSR
+ MRC p15, 0, R1, c6, c0, 0 ; DFAR
+
+ MOV R2, LR ; Set LR to third argument
+
+ AND R12, SP, #4 ; Ensure stack is 8-byte aligned
+ SUB SP, SP, R12 ; Adjust stack
+ PUSH {R12, LR} ; Store stack adjustment and dummy LR
+
+ BL CDAbtHandler
+
+ POP {R12, LR} ; Get stack adjustment & discard dummy LR
+ ADD SP, SP, R12 ; Unadjust stack
+
+ POP {R0-R4, R12} ; Restore stacked APCS registers
+ RFEFD SP! ; Return from exception
+
+ ENDP
+
+
+IRQ_Handler\
+ PROC
+ EXPORT IRQ_Handler
+ IMPORT IRQTable
+ IMPORT IRQCount
+ IMPORT osRtxIrqHandler
+ IMPORT irqRtxGicBase
+
+ SUB LR, LR, #4 ; Pre-adjust LR
+ SRSFD SP!, #MODE_IRQ ; Save LR_irq and SPSR_irq
+ PUSH {R0-R3, R12, LR} ; Save APCS corruptible registers
+
+ ; Identify and acknowledge interrupt
+ LDR R1, =irqRtxGicBase
+ LDR R1, [R1, #4]
+ LDR R0, [R1, #ICCHPIR_OFFSET] ; Dummy Read GICI ICCHPIR to avoid GIC 390 errata 801120
+ LDR R0, [R1, #ICCIAR_OFFSET] ; Read GICI ICCIAR
+ DSB ; Ensure that interrupt acknowledge completes before re-enabling interrupts
+
+ ; Workaround GIC 390 errata 733075 - see GIC-390_Errata_Notice_v6.pdf dated 09-Jul-2014
+ ; The following workaround code is for a single-core system. It would be different in a multi-core system.
+ ; If the ID is 0 or 0x3FE or 0x3FF, then the GIC CPU interface may be locked-up so unlock it, otherwise service the interrupt as normal
+ ; Special IDs 1020=0x3FC and 1021=0x3FD are reserved values in GICv1 and GICv2 so will not occur here
+ CMP R0, #0
+ BEQ IRQ_Unlock
+ MOV R2, #0x3FE
+ CMP R0, R2
+ BLT IRQ_Normal
+IRQ_Unlock
+ ; Unlock the CPU interface with a dummy write to ICDIPR0
+ LDR R2, =irqRtxGicBase
+ LDR R2, [R2]
+ LDR R3, [R2, #ICDIPR0_OFFSET]
+ STR R3, [R2, #ICDIPR0_OFFSET]
+ DSB ; Ensure the write completes before continuing
+
+ ; If the ID is 0 and it is active and has not been seen before, then service it as normal,
+ ; otherwise the interrupt should be treated as spurious and not serviced.
+ CMP R0, #0
+ BNE IRQ_Exit ; Not 0, so spurious
+ LDR R3, [R2, #ICDABR0_OFFSET] ; Get the interrupt state
+ TST R3, #1
+ BEQ IRQ_Exit ; Not active, so spurious
+ LDR R2, =ID0_Active
+ LDRB R3, [R2]
+ CMP R3, #1
+ BEQ IRQ_Exit ; Seen it before, so spurious
+
+ ; Record that ID0 has now been seen, then service it as normal
+ MOV R3, #1
+ STRB R3, [R2]
+ ; End of Workaround GIC 390 errata 733075
+
+IRQ_Normal
+ LDR R2, =IRQCount ; Read number of entries in IRQ handler table
+ LDR R2, [R2]
+ CMP R0, R2 ; Check if IRQ ID is within range
+ MOV R2, #0
+ BHS IRQ_End ; Out of range, return as normal
+ LDR R2, =IRQTable ; Read IRQ handler address from IRQ table
+ LDR R2, [R2, R0, LSL #2]
+ CMP R2, #0 ; Check if handler address is 0
+ BEQ IRQ_End ; If 0, end interrupt and return
+ PUSH {R0, R1} ; Store IRQ ID and GIC CPU Interface base address
+
+ CPS #MODE_SVC ; Change to SVC mode
+
+ MOV R3, SP ; Move SP into R3
+ AND R3, R3, #4 ; Get stack adjustment to ensure 8-byte alignment
+ SUB SP, SP, R3 ; Adjust stack
+ PUSH {R2, R3, R12, LR} ; Store handler address(R2), stack adjustment(R3) and user R12, LR
+
+ CPSIE i ; Re-enable interrupts
+ BLX R2 ; Call IRQ handler
+ CPSID i ; Disable interrupts
+
+ POP {R2, R3, R12, LR} ; Restore handler address(R2), stack adjustment(R3) and user R12, LR
+ ADD SP, SP, R3 ; Unadjust stack
+
+ CPS #MODE_IRQ ; Change to IRQ mode
+ POP {R0, R1} ; Restore IRQ ID and GIC CPU Interface base address
+ DSB ; Ensure that interrupt source is cleared before signalling End Of Interrupt
+IRQ_End
+ ; R0 = IRQ ID, R1 = GICI base address
+ ; EOI does not need to be written for IDs 1020 to 1023 (0x3FC to 0x3FF)
+ STR R0, [R1, #ICCEOIR_OFFSET] ; Normal end-of-interrupt write to EOIR (GIC CPU Interface register) to clear the active bit
+
+ ; If it was ID0, clear the seen flag, otherwise return as normal
+ CMP R0, #0
+ LDREQ R1, =ID0_Active
+ STRBEQ R0, [R1] ; Clear the seen flag, using R0 (which is 0), to save loading another register
+
+ LDR R3, =osRtxIrqHandler ; Load osRtxIrqHandler function address
+ CMP R2, R3 ; Check if it is the RTX kernel IRQ handler
+ BEQ osRtxContextSwitch ; Call context switcher
+
+IRQ_Exit
+ POP {R0-R3, R12, LR} ; Restore stacked APCS registers
+ RFEFD SP! ; Return from IRQ handler
+
+ ENDP
+
+
+SVC_Handler\
+ PROC
+ EXPORT SVC_Handler
+ IMPORT osRtxIrqLock
+ IMPORT osRtxIrqUnlock
+ IMPORT osRtxUserSVC
+ IMPORT osRtxInfo
+
+ SRSFD SP!, #MODE_SVC ; Store SPSR_svc and LR_svc onto SVC stack
+ PUSH {R12, LR}
+
+ MRS R12, SPSR ; Load SPSR
+ TST R12, #CPSR_BIT_T ; Thumb bit set?
+ LDRNEH R12, [LR,#-2] ; Thumb: load halfword
+ BICNE R12, R12, #0xFF00 ; extract SVC number
+ LDREQ R12, [LR,#-4] ; ARM: load word
+ BICEQ R12, R12, #0xFF000000 ; extract SVC number
+ CMP R12, #0 ; Compare SVC number
+ BNE SVC_User ; Branch if User SVC
+
+ PUSH {R0-R3}
+ BLX osRtxIrqLock ; Disable RTX interrupt (timer, PendSV)
+ POP {R0-R3}
+
+ LDR R12, [SP] ; Reload R12 from stack
+
+ CPSIE i ; Re-enable interrupts
+ BLX R12 ; Branch to SVC function
+ CPSID i ; Disable interrupts
+
+ SUB SP, SP, #4 ; Adjust SP
+ STM SP, {SP}^ ; Store SP_usr onto stack
+ POP {R12} ; Pop SP_usr into R12
+ SUB R12, R12, #16 ; Adjust pointer to SP_usr
+ LDMDB R12, {R2,R3} ; Load return values from SVC function
+ PUSH {R0-R3} ; Push return values to stack
+
+ BLX osRtxIrqUnlock ; Enable RTX interrupt (timer, PendSV)
+ B osRtxContextSwitch ; Continue in context switcher
+
+SVC_User
+ PUSH {R4, R5}
+ LDR R5,=osRtxUserSVC ; Load address of SVC table
+ LDR R4,[R5] ; Load SVC maximum number
+ CMP R12,R4 ; Check SVC number range
+ BHI SVC_Done ; Branch if out of range
+
+ LDR R12,[R5,R12,LSL #2] ; Load SVC Function Address
+ BLX R12 ; Call SVC Function
+
+SVC_Done
+ POP {R4, R5, R12, LR}
+ RFEFD SP! ; Return from exception
+
+ ENDP
+
+
+osRtxContextSwitch\
+ PROC
+ EXPORT osRtxContextSwitch
+
+ LDR R12, =osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.run
+ LDM R12, {R0, R1} ; Load osRtxInfo.thread.run: curr & next
+ CMP R0, R1 ; Check if context switch is required
+ BEQ osRtxContextExit ; Exit if curr and next are equal
+
+ CMP R0, #0 ; Is osRtxInfo.thread.run.curr == 0
+ ADDEQ SP, SP, #32 ; Equal, curr deleted, adjust current SP
+ BEQ osRtxContextRestore ; Restore context, run.curr = run.next;
+
+osRtxContextSave
+ SUB SP, SP, #4
+ STM SP, {SP}^ ; Save SP_usr to current stack
+ POP {R3} ; Pop SP_usr into R3
+
+ SUB R3, R3, #64 ; Adjust user sp to end of basic frame (R4)
+ STMIA R3!, {R4-R11} ; Save R4-R11 to user stack
+ POP {R4-R8} ; Pop stacked R0-R3, R12 into R4-R8
+ STMIA R3!, {R4-R8} ; Store them to user stack
+ STM R3, {LR}^ ; Store LR_usr directly
+ ADD R3, R3, #4 ; Adjust user sp to PC
+ POP {R4-R6} ; Pop current LR, PC, CPSR
+ STMIA R3!, {R5-R6} ; Store user PC and CPSR
+
+ SUB R3, R3, #64 ; Adjust user sp to R4
+
+ ; Check if VFP state needs to be saved
+ MRC p15, 0, R2, c1, c0, 2 ; VFP/NEON access enabled? (CPACR)
+ AND R2, R2, #0x00F00000
+ CMP R2, #0x00F00000
+ BNE osRtxContextSave1 ; Continue, no VFP
+
+ VMRS R2, FPSCR
+ STMDB R3!, {R2,R12} ; Push FPSCR, maintain 8-byte alignment
+ IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 16
+ VSTMDB R3!, {D0-D15}
+ LDRB R2, [R0, #TCB_SP_FRAME] ; Record in TCB that VFP/D16 state is stacked
+ ORR R2, R2, #2
+ STRB R2, [R0, #TCB_SP_FRAME]
+ ENDIF
+ IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
+ VSTMDB R3!, {D0-D15}
+ VSTMDB R3!, {D16-D31}
+ LDRB R2, [R0, #TCB_SP_FRAME] ; Record in TCB that NEON/D32 state is stacked
+ ORR R2, R2, #4
+ STRB R2, [R0, #TCB_SP_FRAME]
+ ENDIF
+
+osRtxContextSave1
+ STR R3, [R0, #TCB_SP_OFS] ; Store user sp to osRtxInfo.thread.run.curr
+
+osRtxContextRestore
+ STR R1, [R12] ; Store run.next to run.curr
+ LDR R3, [R1, #TCB_SP_OFS] ; Load next osRtxThread_t.sp
+ LDRB R2, [R1, #TCB_SP_FRAME] ; Load next osRtxThread_t.stack_frame
+
+ ANDS R2, R2, #0x6 ; Check stack frame for VFP context
+ MRC p15, 0, R2, c1, c0, 2 ; Read CPACR
+ ANDEQ R2, R2, #0xFF0FFFFF ; Disable VFP/NEON access if incoming task does not have stacked VFP/NEON state
+ ORRNE R2, R2, #0x00F00000 ; Enable VFP/NEON access if incoming task does have stacked VFP/NEON state
+ MCR p15, 0, R2, c1, c0, 2 ; Write CPACR
+ BEQ osRtxContextRestore1 ; No VFP
+ ISB ; Only sync if we enabled VFP, otherwise we will context switch before next VFP instruction anyway
+ IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
+ VLDMIA R3!, {D16-D31}
+ ENDIF
+ VLDMIA R3!, {D0-D15}
+ LDR R2, [R3]
+ VMSR FPSCR, R2
+ ADD R3, R3, #8
+
+osRtxContextRestore1
+ LDMIA R3!, {R4-R11} ; Restore R4-R11
+ MOV R12, R3 ; Move sp pointer to R12
+ ADD R3, R3, #32 ; Adjust sp
+ PUSH {R3} ; Push sp onto stack
+ LDMIA SP, {SP}^ ; Restore SP_usr
+ LDMIA R12!, {R0-R3} ; Restore User R0-R3
+ LDR LR, [R12, #12] ; Load saved CPSR into LR
+ MSR SPSR_CXSF, LR ; Restore SPSR
+ ADD R12, R12, #4 ; Adjust pointer to LR
+ LDM R12, {LR}^ ; Restore LR_usr directly into LR
+ LDR LR, [R12, #4] ; Load return address (saved PC) into LR
+ LDR R12, [R12, #-4] ; Restore R12
+
+ MOVS PC, LR ; Return from exception
+
+osRtxContextExit
+ POP {R0-R3, R12, LR} ; Restore stacked APCS registers
+ RFEFD SP! ; Return from exception
+
+ ENDP
+
+ END
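
IRQ_Handler imports IRQTable, IRQCount, osRtxIrqHandler and irqRtxGicBase; this commit supplies them in the new CMSIS/RTOS2/RTX/Source/rtx_gic.c, which the pdsc references but which is not part of this diff. The sketch below only illustrates the shape the assembly expects; the definitions are assumptions for illustration, not the actual rtx_gic.c contents.

// Illustrative only: the real definitions live in rtx_gic.c (not in this diff).
#include <stdint.h>

typedef void (*IRQHandler_t) (void);

// IRQ_Handler reads word 1 ([R1, #4]) for the GIC CPU Interface base; the
// errata 733075 workaround reads word 0 for the Distributor base.
uint32_t irqRtxGicBase[2];                    // { GICD base, GICC base } (assumed layout)

// Dispatch table indexed by the acknowledged interrupt ID; IRQ_Handler checks
// the ID against IRQCount and skips NULL entries.
#define IRQ_TABLE_SIZE 256U                   // assumed number of interrupt IDs
IRQHandler_t IRQTable[IRQ_TABLE_SIZE];
uint32_t     IRQCount = IRQ_TABLE_SIZE;

// RTX kernel tick handler: when the dispatched handler address matches this
// symbol, IRQ_Handler falls through into osRtxContextSwitch on the way out.
extern void osRtxIrqHandler (void);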
diff --git a/CMSIS/RTOS2/RTX/Source/GCC/irq_ca.S b/CMSIS/RTOS2/RTX/Source/GCC/irq_ca.S
new file mode 100644
index 0000000..b351bd8
--- /dev/null
+++ b/CMSIS/RTOS2/RTX/Source/GCC/irq_ca.S
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Project: CMSIS-RTOS RTX
+ * Title: Cortex-A Exception handlers (using GIC)
+ *
+ * -----------------------------------------------------------------------------
+ */
+
+ .file "irq_ca.S"
+ .syntax unified
+
+ .equ ICDABR0_OFFSET, 0x00000300 // GICD: Active Bit Register 0 offset
+ .equ ICDIPR0_OFFSET, 0x00000400 // GICD: Interrupt Priority Register 0 offset
+ .equ ICCIAR_OFFSET, 0x0000000C // GICI: Interrupt Acknowledge Register offset
+ .equ ICCEOIR_OFFSET, 0x00000010 // GICI: End of Interrupt Register offset
+ .equ ICCHPIR_OFFSET, 0x00000018 // GICI: Highest Pending Interrupt Register offset
+
+ .equ MODE_FIQ, 0x11
+ .equ MODE_IRQ, 0x12
+ .equ MODE_SVC, 0x13
+ .equ MODE_ABT, 0x17
+ .equ MODE_UND, 0x1B
+
+ .equ CPSR_BIT_T, 0x20
+
+ .equ I_T_RUN_OFS, 28 // osRtxInfo.thread.run offset
+ .equ TCB_SP_FRAME, 34 // osRtxThread_t.stack_frame offset
+ .equ TCB_SP_OFS, 56 // osRtxThread_t.sp offset
+
+
+ .section ".rodata"
+ .global irqRtxLib // Non-weak library reference
+irqRtxLib:
+ .byte 0
+
+ .section ".data"
+ID0_Active:
+ .byte 4 // Flag used to work around GIC 390 errata 733075
+
+ .arm
+ .section ".text"
+ .align 4
+
+
+ .type Undef_Handler, %function
+ .global Undef_Handler
+ .fnstart
+ .cantunwind
+Undef_Handler:
+
+ SRSFD SP!, #MODE_UND
+ PUSH {R0-R4, R12} // Save APCS corruptible registers to UND mode stack
+
+ MRS R0, SPSR
+ TST R0, #CPSR_BIT_T // Check mode
+ MOVEQ R1, #4 // R1 = 4 ARM mode
+ MOVNE R1, #2 // R1 = 2 Thumb mode
+ SUB R0, LR, R1
+ LDREQ R0, [R0] // ARM mode - R0 points to offending instruction
+ BEQ Undef_Cont
+
+ // Thumb instruction
+ // Determine if it is a 32-bit Thumb instruction
+ LDRH R0, [R0]
+ MOV R2, #0x1C
+ CMP R2, R0, LSR #11
+ BHS Undef_Cont // 16-bit Thumb instruction
+
+ // 32-bit Thumb instruction. Unaligned - reconstruct the offending instruction
+ LDRH R2, [LR]
+ ORR R0, R2, R0, LSL #16
+Undef_Cont:
+ MOV R2, LR // Set LR to third argument
+
+ AND R12, SP, #4 // Ensure stack is 8-byte aligned
+ SUB SP, SP, R12 // Adjust stack
+ PUSH {R12, LR} // Store stack adjustment and dummy LR
+
+ // R0 = offending instruction, R1 = 2 (Thumb) or 4 (ARM)
+ BL CUndefHandler
+
+ POP {R12, LR} // Get stack adjustment & discard dummy LR
+ ADD SP, SP, R12 // Unadjust stack
+
+ LDR LR, [SP, #24] // Restore stacked LR and possibly adjust for retry
+ SUB LR, LR, R0
+ LDR R0, [SP, #28] // Restore stacked SPSR
+ MSR SPSR_cxsf, R0
+ POP {R0-R4, R12} // Restore stacked APCS registers
+ ADD SP, SP, #8 // Adjust SP for already-restored banked registers
+ MOVS PC, LR
+
+ .fnend
+ .size Undef_Handler, .-Undef_Handler
+
+
+ .type PAbt_Handler, %function
+ .global PAbt_Handler
+ .fnstart
+ .cantunwind
+PAbt_Handler:
+
+ SUB LR, LR, #4 // Pre-adjust LR
+ SRSFD SP!, #MODE_ABT // Save LR and SPSR to ABT mode stack
+ PUSH {R0-R4, R12} // Save APCS corruptible registers to ABT mode stack
+ MRC p15, 0, R0, c5, c0, 1 // IFSR
+ MRC p15, 0, R1, c6, c0, 2 // IFAR
+
+ MOV R2, LR // Set LR to third argument
+
+ AND R12, SP, #4 // Ensure stack is 8-byte aligned
+ SUB SP, SP, R12 // Adjust stack
+ PUSH {R12, LR} // Store stack adjustment and dummy LR
+
+ BL CPAbtHandler
+
+ POP {R12, LR} // Get stack adjustment & discard dummy LR
+ ADD SP, SP, R12 // Unadjust stack
+
+ POP {R0-R4, R12} // Restore stacked APCS registers
+ RFEFD SP! // Return from exception
+
+ .fnend
+ .size PAbt_Handler, .-PAbt_Handler
+
+
+ .type DAbt_Handler, %function
+ .global DAbt_Handler
+ .fnstart
+ .cantunwind
+DAbt_Handler:
+ SUB LR, LR, #8 // Pre-adjust LR
+ SRSFD SP!, #MODE_ABT // Save LR and SPSR to ABT mode stack
+ PUSH {R0-R4, R12} // Save APCS corruptible registers to ABT mode stack
+ CLREX // State of exclusive monitors unknown after taken data abort
+ MRC p15, 0, R0, c5, c0, 0 // DFSR
+ MRC p15, 0, R1, c6, c0, 0 // DFAR
+
+ MOV R2, LR // Set LR to third argument
+
+ AND R12, SP, #4 // Ensure stack is 8-byte aligned
+ SUB SP, SP, R12 // Adjust stack
+ PUSH {R12, LR} // Store stack adjustment and dummy LR
+
+ BL CDAbtHandler
+
+ POP {R12, LR} // Get stack adjustment & discard dummy LR
+ ADD SP, SP, R12 // Unadjust stack
+
+ POP {R0-R4, R12} // Restore stacked APCS registers
+ RFEFD SP! // Return from exception
+
+ .fnend
+ .size DAbt_Handler, .-DAbt_Handler
+
+
+ .type IRQ_Handler, %function
+ .global IRQ_Handler
+ .fnstart
+ .cantunwind
+IRQ_Handler:
+
+ SUB LR, LR, #4 // Pre-adjust LR
+ SRSFD SP!, #MODE_IRQ // Save LR_irq and SPSR_irq
+ PUSH {R0-R3, R12, LR} // Save APCS corruptible registers
+
+ // Identify and acknowledge interrupt
+ LDR R1, =irqRtxGicBase
+ LDR R1, [R1, #4]
+ LDR R0, [R1, #ICCHPIR_OFFSET] // Dummy Read GICI ICCHPIR to avoid GIC 390 errata 801120
+ LDR R0, [R1, #ICCIAR_OFFSET] // Read GICI ICCIAR
+ DSB // Ensure that interrupt acknowledge completes before re-enabling interrupts
+
+ // Workaround GIC 390 errata 733075 - see GIC-390_Errata_Notice_v6.pdf dated 09-Jul-2014
+ // The following workaround code is for a single-core system. It would be different in a multi-core system.
+ // If the ID is 0 or 0x3FE or 0x3FF, then the GIC CPU interface may be locked-up so unlock it, otherwise service the interrupt as normal
+ // Special IDs 1020=0x3FC and 1021=0x3FD are reserved values in GICv1 and GICv2 so will not occur here
+ CMP R0, #0
+ BEQ IRQ_Unlock
+ MOVW R2, #0x3FE
+ CMP R0, R2
+ BLT IRQ_Normal
+IRQ_Unlock:
+ // Unlock the CPU interface with a dummy write to ICDIPR0
+ LDR R2, =irqRtxGicBase
+ LDR R2, [R2]
+ LDR R3, [R2, #ICDIPR0_OFFSET]
+ STR R3, [R2, #ICDIPR0_OFFSET]
+ DSB // Ensure the write completes before continuing
+
+ // If the ID is 0 and it is active and has not been seen before, then service it as normal,
+ // otherwise the interrupt should be treated as spurious and not serviced.
+ CMP R0, #0
+ BNE IRQ_Exit // Not 0, so spurious
+ LDR R3, [R2, #ICDABR0_OFFSET] // Get the interrupt state
+ TST R3, #1
+ BEQ IRQ_Exit // Not active, so spurious
+ LDR R2, =ID0_Active
+ LDRB R3, [R2]
+ CMP R3, #1
+ BEQ IRQ_Exit // Seen it before, so spurious
+
+ // Record that ID0 has now been seen, then service it as normal
+ MOV R3, #1
+ STRB R3, [R2]
+ // End of Workaround GIC 390 errata 733075
+
+IRQ_Normal:
+ LDR R2, =IRQCount // Read number of entries in IRQ handler table
+ LDR R2, [R2]
+ CMP R0, R2 // Check if IRQ ID is within range
+ MOV R2, #0
+ BHS IRQ_End // Out of range, return as normal
+ LDR R2, =IRQTable // Read IRQ handler address from IRQ table
+ LDR R2, [R2, R0, LSL #2]
+ CMP R2, #0 // Check if handler address is 0
+ BEQ IRQ_End // If 0, end interrupt and return
+ PUSH {R0, R1} // Store IRQ ID and GIC CPU Interface base address
+
+ CPS #MODE_SVC // Change to SVC mode
+
+ MOV R3, SP // Move SP into R3
+ AND R3, R3, #4 // Get stack adjustment to ensure 8-byte alignment
+ SUB SP, SP, R3 // Adjust stack
+ PUSH {R2, R3, R12, LR} // Store handler address(R2), stack adjustment(R3) and user R12, LR
+
+ CPSIE i // Re-enable interrupts
+ BLX R2 // Call IRQ handler
+ CPSID i // Disable interrupts
+
+ POP {R2, R3, R12, LR} // Restore handler address(R2), stack adjustment(R3) and user R12, LR
+ ADD SP, SP, R3 // Unadjust stack
+
+ CPS #MODE_IRQ // Change to IRQ mode
+ POP {R0, R1} // Restore IRQ ID and GIC CPU Interface base address
+ DSB // Ensure that interrupt source is cleared before signalling End Of Interrupt
+IRQ_End:
+ // R0 = IRQ ID, R1 = GICI base address
+ // EOI does not need to be written for IDs 1020 to 1023 (0x3FC to 0x3FF)
+ STR R0, [R1, #ICCEOIR_OFFSET] // Normal end-of-interrupt write to EOIR (GIC CPU Interface register) to clear the active bit
+
+ // If it was ID0, clear the seen flag, otherwise return as normal
+ CMP R0, #0
+ LDREQ R1, =ID0_Active
+ STRBEQ R0, [R1] // Clear the seen flag, using R0 (which is 0), to save loading another register
+
+ LDR R3, =osRtxIrqHandler // Load osRtxIrqHandler function address
+ CMP R2, R3 // Check if it is the RTX kernel IRQ handler
+ BEQ osRtxContextSwitch // Call context switcher
+
+IRQ_Exit:
+ POP {R0-R3, R12, LR} // Restore stacked APCS registers
+ RFEFD SP! // Return from IRQ handler
+
+ .fnend
+ .size IRQ_Handler, .-IRQ_Handler
+
+
+ .type SVC_Handler, %function
+ .global SVC_Handler
+ .fnstart
+ .cantunwind
+SVC_Handler:
+
+ SRSFD SP!, #MODE_SVC // Store SPSR_svc and LR_svc onto SVC stack
+ PUSH {R12, LR}
+
+ MRS R12, SPSR // Load SPSR
+ TST R12, #CPSR_BIT_T // Thumb bit set?
+ LDRHNE R12, [LR,#-2] // Thumb: load halfword (UAL mnemonic for .syntax unified)
+ BICNE R12, R12, #0xFF00 // extract SVC number
+ LDREQ R12, [LR,#-4] // ARM: load word
+ BICEQ R12, R12, #0xFF000000 // extract SVC number
+ CMP R12, #0 // Compare SVC number
+ BNE SVC_User // Branch if User SVC
+
+ PUSH {R0-R3}
+ BLX osRtxIrqLock // Disable RTX interrupt (timer, PendSV)
+ POP {R0-R3}
+
+ LDR R12, [SP] // Reload R12 from stack
+
+ CPSIE i // Re-enable interrupts
+ BLX R12 // Branch to SVC function
+ CPSID i // Disable interrupts
+
+ SUB SP, SP, #4 // Adjust SP
+ STM SP, {SP}^ // Store SP_usr onto stack
+ POP {R12} // Pop SP_usr into R12
+ SUB R12, R12, #16 // Adjust pointer to SP_usr
+ LDMDB R12, {R2,R3} // Load return values from SVC function
+ PUSH {R0-R3} // Push return values to stack
+
+ BLX osRtxIrqUnlock // Enable RTX interrupt (timer, PendSV)
+ B osRtxContextSwitch // Continue in context switcher
+
+SVC_User:
+ PUSH {R4, R5}
+ LDR R5,=osRtxUserSVC // Load address of SVC table
+ LDR R4,[R5] // Load SVC maximum number
+ CMP R12,R4 // Check SVC number range
+ BHI SVC_Done // Branch if out of range
+
+ LDR R12,[R5,R12,LSL #2] // Load SVC Function Address
+ BLX R12 // Call SVC Function
+
+SVC_Done:
+ POP {R4, R5, R12, LR}
+ RFEFD SP! // Return from exception
+
+ .fnend
+ .size SVC_Handler, .-SVC_Handler
+
+
+ .type osRtxContextSwitch, %function
+ .global osRtxContextSwitch
+ .fnstart
+ .cantunwind
+osRtxContextSwitch:
+
+ LDR R12,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run
+ LDM R12, {R0, R1} // Load osRtxInfo.thread.run: curr & next
+ CMP R0, R1 // Check if context switch is required
+ BEQ osRtxContextExit // Exit if curr and next are equal
+
+ CMP R0, #0 // Is osRtxInfo.thread.run.curr == 0
+ ADDEQ SP, SP, #32 // Equal, curr deleted, adjust current SP
+ BEQ osRtxContextRestore // Restore context, run.curr = run.next;
+
+osRtxContextSave:
+ SUB SP, SP, #4
+ STM SP, {SP}^ // Save SP_usr to current stack
+ POP {R3} // Pop SP_usr into R3
+
+ SUB R3, R3, #64 // Adjust user sp to end of basic frame (R4)
+ STMIA R3!, {R4-R11} // Save R4-R11 to user stack
+ POP {R4-R8} // Pop stacked R0-R3, R12 into R4-R8
+ STMIA R3!, {R4-R8} // Store them to user stack
+ STM R3, {LR}^ // Store LR_usr directly
+ ADD R3, R3, #4 // Adjust user sp to PC
+ POP {R4-R6} // Pop current LR, PC, CPSR
+ STMIA R3!, {R5-R6} // Store user PC and CPSR
+
+ SUB R3, R3, #64 // Adjust user sp to R4
+
+ // Check if VFP state needs to be saved
+ MRC p15, 0, R2, c1, c0, 2 // VFP/NEON access enabled? (CPACR)
+ AND R2, R2, #0x00F00000
+ CMP R2, #0x00F00000
+ BNE osRtxContextSave1 // Continue, no VFP
+
+ VMRS R2, FPSCR
+ STMDB R3!, {R2,R12} // Push FPSCR, maintain 8-byte alignment
+// The armasm IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} guards have no direct
+// GNU as equivalent, so this file assumes a device with 32 D registers (D0-D31).
+// For a D16-only device, keep only the D0-D15 store and set stack_frame bit 2 instead.
+ VSTMDB R3!, {D0-D15}
+ VSTMDB R3!, {D16-D31}
+ LDRB R2, [R0, #TCB_SP_FRAME] // Record in TCB that NEON/D32 state is stacked
+ ORR R2, R2, #4
+ STRB R2, [R0, #TCB_SP_FRAME]
+
+osRtxContextSave1:
+ STR R3, [R0, #TCB_SP_OFS] // Store user sp to osRtxInfo.thread.run.curr
+
+osRtxContextRestore:
+ STR R1, [R12] // Store run.next to run.curr
+ LDR R3, [R1, #TCB_SP_OFS] // Load next osRtxThread_t.sp
+ LDRB R2, [R1, #TCB_SP_FRAME] // Load next osRtxThread_t.stack_frame
+
+ ANDS R2, R2, #0x6 // Check stack frame for VFP context
+ MRC p15, 0, R2, c1, c0, 2 // Read CPACR
+ ANDEQ R2, R2, #0xFF0FFFFF // Disable VFP/NEON access if incoming task does not have stacked VFP/NEON state
+ ORRNE R2, R2, #0x00F00000 // Enable VFP/NEON access if incoming task does have stacked VFP/NEON state
+ MCR p15, 0, R2, c1, c0, 2 // Write CPACR
+ BEQ osRtxContextRestore1 // No VFP
+ ISB // Only sync if we enabled VFP, otherwise we will context switch before next VFP instruction anyway
+ VLDMIA R3!, {D16-D31} // Restore D16-D31 (assumes 32 D registers, matching the save path)
+ VLDMIA R3!, {D0-D15}
+ LDR R2, [R3]
+ VMSR FPSCR, R2
+ ADD R3, R3, #8
+
+osRtxContextRestore1:
+ LDMIA R3!, {R4-R11} // Restore R4-R11
+ MOV R12, R3 // Move sp pointer to R12
+ ADD R3, R3, #32 // Adjust sp
+ PUSH {R3} // Push sp onto stack
+ LDMIA SP, {SP}^ // Restore SP_usr
+ LDMIA R12!, {R0-R3} // Restore User R0-R3
+ LDR LR, [R12, #12] // Load saved CPSR into LR
+ MSR SPSR_cxsf, LR // Restore SPSR
+ ADD R12, R12, #4 // Adjust pointer to LR
+ LDM R12, {LR}^ // Restore LR_usr directly into LR
+ LDR LR, [R12, #4] // Load return address (saved PC) into LR
+ LDR R12, [R12, #-4] // Restore R12
+
+ MOVS PC, LR // Return from exception
+
+osRtxContextExit:
+ POP {R0-R3, R12, LR} // Restore stacked APCS registers
+ RFEFD SP! // Return from exception
+
+ .fnend
+ .size osRtxContextSwitch, .-osRtxContextSwitch
+
+ .end
diff --git a/CMSIS/RTOS2/RTX/Source/IAR/irq_ca.s b/CMSIS/RTOS2/RTX/Source/IAR/irq_ca.s
new file mode 100644
index 0000000..2c62bf3
--- /dev/null
+++ b/CMSIS/RTOS2/RTX/Source/IAR/irq_ca.s
@@ -0,0 +1,408 @@
+;/*
+; * Copyright (c) 2013-2017 ARM Limited. All rights reserved.
+; *
+; * SPDX-License-Identifier: Apache-2.0
+; *
+; * Licensed under the Apache License, Version 2.0 (the License); you may
+; * not use this file except in compliance with the License.
+; * You may obtain a copy of the License at
+; *
+; * www.apache.org/licenses/LICENSE-2.0
+; *
+; * Unless required by applicable law or agreed to in writing, software
+; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; * See the License for the specific language governing permissions and
+; * limitations under the License.
+; *
+; * -----------------------------------------------------------------------------
+; *
+; * Project: CMSIS-RTOS RTX
+; * Title: Cortex-A Exception handlers (using GIC)
+; *
+; * -----------------------------------------------------------------------------
+; */
+
+ NAME irq_ca.s
+
+ICDABR0_OFFSET EQU 0x00000300 ; GICD: Active Bit Register 0 offset
+ICDIPR0_OFFSET EQU 0x00000400 ; GICD: Interrupt Priority Register 0 offset
+ICCIAR_OFFSET EQU 0x0000000C ; GICI: Interrupt Acknowledge Register offset
+ICCEOIR_OFFSET EQU 0x00000010 ; GICI: End of Interrupt Register offset
+ICCHPIR_OFFSET EQU 0x00000018 ; GICI: Highest Pending Interrupt Register offset
+
+MODE_FIQ EQU 0x11
+MODE_IRQ EQU 0x12
+MODE_SVC EQU 0x13
+MODE_ABT EQU 0x17
+MODE_UND EQU 0x1B
+
+CPSR_BIT_T EQU 0x20
+
+I_T_RUN_OFS EQU 28 ; osRtxInfo.thread.run offset
+TCB_SP_FRAME EQU 34 ; osRtxThread_t.stack_frame offset
+TCB_SP_OFS EQU 56 ; osRtxThread_t.sp offset
+
+
+ PRESERVE8
+ ARM
+
+
+ SECTION .rodata:DATA:NOROOT(2)
+ EXPORT irqRtxLib
+irqRtxLib DCB 0 ; Non-weak library reference
+
+
+ SECTION .data:DATA:NOROOT(2)
+ID0_Active DCB 4 ; Flag used to work around GIC 390 errata 733075
+
+
+ SECTION .text:CODE:NOROOT(2)
+
+
+Undef_Handler
+ EXPORT Undef_Handler
+ IMPORT CUndefHandler
+
+ SRSFD SP!, #MODE_UND
+ PUSH {R0-R4, R12} ; Save APCS corruptible registers to UND mode stack
+
+ MRS R0, SPSR
+ TST R0, #CPSR_BIT_T ; Check mode
+ MOVEQ R1, #4 ; R1 = 4 ARM mode
+ MOVNE R1, #2 ; R1 = 2 Thumb mode
+ SUB R0, LR, R1
+ LDREQ R0, [R0] ; ARM mode - R0 points to offending instruction
+ BEQ Undef_Cont
+
+ ; Thumb instruction
+ ; Determine if it is a 32-bit Thumb instruction
+ LDRH R0, [R0]
+ MOV R2, #0x1C
+ CMP R2, R0, LSR #11
+ BHS Undef_Cont ; 16-bit Thumb instruction
+
+ ; 32-bit Thumb instruction. Unaligned - reconstruct the offending instruction
+ LDRH R2, [LR]
+ ORR R0, R2, R0, LSL #16
+Undef_Cont
+ MOV R2, LR ; Set LR to third argument
+
+ AND R12, SP, #4 ; Ensure stack is 8-byte aligned
+ SUB SP, SP, R12 ; Adjust stack
+ PUSH {R12, LR} ; Store stack adjustment and dummy LR
+
+ ; R0 = offending instruction, R1 = 2 (Thumb) or 4 (ARM)
+ BL CUndefHandler
+
+ POP {R12, LR} ; Get stack adjustment & discard dummy LR
+ ADD SP, SP, R12 ; Unadjust stack
+
+ LDR LR, [SP, #24] ; Restore stacked LR and possibly adjust for retry
+ SUB LR, LR, R0
+ LDR R0, [SP, #28] ; Restore stacked SPSR
+ MSR SPSR_CXSF, R0
+ POP {R0-R4, R12} ; Restore stacked APCS registers
+ ADD SP, SP, #8 ; Adjust SP for already-restored banked registers
+ MOVS PC, LR
+
+
+PAbt_Handler
+ EXPORT PAbt_Handler
+ IMPORT CPAbtHandler
+
+ SUB LR, LR, #4 ; Pre-adjust LR
+ SRSFD SP!, #MODE_ABT ; Save LR and SPSR to ABT mode stack
+ PUSH {R0-R4, R12} ; Save APCS corruptible registers to ABT mode stack
+ MRC p15, 0, R0, c5, c0, 1 ; IFSR
+ MRC p15, 0, R1, c6, c0, 2 ; IFAR
+
+ MOV R2, LR ; Set LR to third argument
+
+ AND R12, SP, #4 ; Ensure stack is 8-byte aligned
+ SUB SP, SP, R12 ; Adjust stack
+ PUSH {R12, LR} ; Store stack adjustment and dummy LR
+
+ BL CPAbtHandler
+
+ POP {R12, LR} ; Get stack adjustment & discard dummy LR
+ ADD SP, SP, R12 ; Unadjust stack
+
+ POP {R0-R4, R12} ; Restore stacked APCS registers
+ RFEFD SP! ; Return from exception
+
+
+DAbt_Handler
+ EXPORT DAbt_Handler
+ IMPORT CDAbtHandler
+
+ SUB LR, LR, #8 ; Pre-adjust LR
+ SRSFD SP!, #MODE_ABT ; Save LR and SPSR to ABT mode stack
+ PUSH {R0-R4, R12} ; Save APCS corruptible registers to ABT mode stack
+ CLREX ; State of exclusive monitors unknown after taken data abort
+ MRC p15, 0, R0, c5, c0, 0 ; DFSR
+ MRC p15, 0, R1, c6, c0, 0 ; DFAR
+
+ MOV R2, LR ; Set LR to third argument
+
+ AND R12, SP, #4 ; Ensure stack is 8-byte aligned
+ SUB SP, SP, R12 ; Adjust stack
+ PUSH {R12, LR} ; Store stack adjustment and dummy LR
+
+ BL CDAbtHandler
+
+ POP {R12, LR} ; Get stack adjustment & discard dummy LR
+ ADD SP, SP, R12 ; Unadjust stack
+
+ POP {R0-R4, R12} ; Restore stacked APCS registers
+ RFEFD SP! ; Return from exception
+
+
+IRQ_Handler
+ EXPORT IRQ_Handler
+ IMPORT IRQTable
+ IMPORT IRQCount
+ IMPORT osRtxIrqHandler
+ IMPORT irqRtxGicBase
+
+ SUB LR, LR, #4 ; Pre-adjust LR
+ SRSFD SP!, #MODE_IRQ ; Save LR_irq and SPSR_irq
+ PUSH {R0-R3, R12, LR} ; Save APCS corruptible registers
+
+ ; Identify and acknowledge interrupt
+ LDR R1, =irqRtxGicBase
+ LDR R1, [R1, #4]
+ LDR R0, [R1, #ICCHPIR_OFFSET] ; Dummy Read GICI ICCHPIR to avoid GIC 390 errata 801120
+ LDR R0, [R1, #ICCIAR_OFFSET] ; Read GICI ICCIAR
+ DSB ; Ensure that interrupt acknowledge completes before re-enabling interrupts
+
+ ; Workaround GIC 390 errata 733075 - see GIC-390_Errata_Notice_v6.pdf dated 09-Jul-2014
+ ; The following workaround code is for a single-core system. It would be different in a multi-core system.
+ ; If the ID is 0 or 0x3FE or 0x3FF, then the GIC CPU interface may be locked-up so unlock it, otherwise service the interrupt as normal
+ ; Special IDs 1020=0x3FC and 1021=0x3FD are reserved values in GICv1 and GICv2 so will not occur here
+ CMP R0, #0
+ BEQ IRQ_Unlock
+ MOV R2, #0x3FE
+ CMP R0, R2
+ BLT IRQ_Normal
+IRQ_Unlock
+ ; Unlock the CPU interface with a dummy write to ICDIPR0
+ LDR R2, =irqRtxGicBase
+ LDR R2, [R2]
+ LDR R3, [R2, #ICDIPR0_OFFSET]
+ STR R3, [R2, #ICDIPR0_OFFSET]
+ DSB ; Ensure the write completes before continuing
+
+ ; If the ID is 0 and it is active and has not been seen before, then service it as normal,
+ ; otherwise the interrupt should be treated as spurious and not serviced.
+ CMP R0, #0
+ BNE IRQ_Exit ; Not 0, so spurious
+ LDR R3, [R2, #ICDABR0_OFFSET] ; Get the interrupt state
+ TST R3, #1
+ BEQ IRQ_Exit ; Not active, so spurious
+ LDR R2, =ID0_Active
+ LDRB R3, [R2]
+ CMP R3, #1
+ BEQ IRQ_Exit ; Seen it before, so spurious
+
+ ; Record that ID0 has now been seen, then service it as normal
+ MOV R3, #1
+ STRB R3, [R2]
+ ; End of Workaround GIC 390 errata 733075
+
+IRQ_Normal
+ LDR R2, =IRQCount ; Read number of entries in IRQ handler table
+ LDR R2, [R2]
+ CMP R0, R2 ; Check if IRQ ID is within range
+ MOV R2, #0
+ BHS IRQ_End ; Out of range, return as normal
+ LDR R2, =IRQTable ; Read IRQ handler address from IRQ table
+ LDR R2, [R2, R0, LSL #2]
+ CMP R2, #0 ; Check if handler address is 0
+ BEQ IRQ_End ; If 0, end interrupt and return
+ PUSH {R0, R1} ; Store IRQ ID and GIC CPU Interface base address
+
+ CPS #MODE_SVC ; Change to SVC mode
+
+ MOV R3, SP ; Move SP into R3
+ AND R3, R3, #4 ; Get stack adjustment to ensure 8-byte alignment
+ SUB SP, SP, R3 ; Adjust stack
+ PUSH {R2, R3, R12, LR} ; Store handler address(R2), stack adjustment(R3) and user R12, LR
+
+ CPSIE i ; Re-enable interrupts
+ BLX R2 ; Call IRQ handler
+ CPSID i ; Disable interrupts
+
+ POP {R2, R3, R12, LR} ; Restore handler address(R2), stack adjustment(R3) and user R12, LR
+ ADD SP, SP, R3 ; Unadjust stack
+
+ CPS #MODE_IRQ ; Change to IRQ mode
+ POP {R0, R1} ; Restore IRQ ID and GIC CPU Interface base address
+ DSB ; Ensure that interrupt source is cleared before signalling End Of Interrupt
+IRQ_End
+ ; R0 = IRQ ID, R1 = GICI base address
+ ; EOI does not need to be written for IDs 1020 to 1023 (0x3FC to 0x3FF)
+ STR R0, [R1, #ICCEOIR_OFFSET] ; Normal end-of-interrupt write to EOIR (GIC CPU Interface register) to clear the active bit
+
+ ; If it was ID0, clear the seen flag, otherwise return as normal
+ CMP R0, #0
+ LDREQ R1, =ID0_Active
+ STRBEQ R0, [R1] ; Clear the seen flag, using R0 (which is 0), to save loading another register
+
+ LDR R3, =osRtxIrqHandler ; Load osRtxIrqHandler function address
+ CMP R2, R3 ; Check if it is the RTX kernel IRQ handler
+ BEQ osRtxContextSwitch ; Call context switcher
+
+IRQ_Exit
+ POP {R0-R3, R12, LR} ; Restore stacked APCS registers
+ RFEFD SP! ; Return from IRQ handler
+
+
+SVC_Handler
+ EXPORT SVC_Handler
+ IMPORT osRtxIrqLock
+ IMPORT osRtxIrqUnlock
+ IMPORT osRtxUserSVC
+ IMPORT osRtxInfo
+
+ SRSFD SP!, #MODE_SVC ; Store SPSR_svc and LR_svc onto SVC stack
+ PUSH {R12, LR}
+
+ MRS R12, SPSR ; Load SPSR
+ TST R12, #CPSR_BIT_T ; Thumb bit set?
+ LDRNEH R12, [LR,#-2] ; Thumb: load halfword
+ BICNE R12, R12, #0xFF00 ; extract SVC number
+ LDREQ R12, [LR,#-4] ; ARM: load word
+ BICEQ R12, R12, #0xFF000000 ; extract SVC number
+ CMP R12, #0 ; Compare SVC number
+ BNE SVC_User ; Branch if User SVC
+
+ PUSH {R0-R3}
+ BLX osRtxIrqLock ; Disable RTX interrupt (timer, PendSV)
+ POP {R0-R3}
+
+ LDR R12, [SP] ; Reload R12 from stack
+
+ CPSIE i ; Re-enable interrupts
+ BLX R12 ; Branch to SVC function
+ CPSID i ; Disable interrupts
+
+ SUB SP, SP, #4 ; Adjust SP
+ STM SP, {SP}^ ; Store SP_usr onto stack
+ POP {R12} ; Pop SP_usr into R12
+ SUB R12, R12, #16 ; Adjust pointer to SP_usr
+ LDMDB R12, {R2,R3} ; Load return values from SVC function
+ PUSH {R0-R3} ; Push return values to stack
+
+ BLX osRtxIrqUnlock ; Enable RTX interrupt (timer, PendSV)
+ B osRtxContextSwitch ; Continue in context switcher
+
+SVC_User
+ PUSH {R4, R5}
+ LDR R5,=osRtxUserSVC ; Load address of SVC table
+ LDR R4,[R5] ; Load SVC maximum number
+ CMP R12,R4 ; Check SVC number range
+ BHI SVC_Done ; Branch if out of range
+
+ LDR R12,[R5,R12,LSL #2] ; Load SVC Function Address
+ BLX R12 ; Call SVC Function
+
+SVC_Done
+ POP {R4, R5, R12, LR}
+ RFEFD SP! ; Return from exception
+
+
+osRtxContextSwitch
+ EXPORT osRtxContextSwitch
+
+ LDR R12, =osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.run
+ LDM R12, {R0, R1} ; Load osRtxInfo.thread.run: curr & next
+ CMP R0, R1 ; Check if context switch is required
+ BEQ osRtxContextExit ; Exit if curr and next are equal
+
+ CMP R0, #0 ; Is osRtxInfo.thread.run.curr == 0
+ ADDEQ SP, SP, #32 ; Equal, curr deleted, adjust current SP
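+ ; 32 bytes = R0-R3, R12, LR plus the RFE frame stacked on exception entry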
+ BEQ osRtxContextRestore ; Restore context, run.curr = run.next;
+
+osRtxContextSave
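+ ; Saved frame on the user stack (ascending): [VFP/NEON regs, FPSCR, pad,] R4-R11, R0-R3, R12, LR, PC, CPSR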
+ SUB SP, SP, #4
+ STM SP, {SP}^ ; Save SP_usr to current stack
+ POP {R3} ; Pop SP_usr into R3
+
+ SUB R3, R3, #64 ; Adjust user sp to end of basic frame (R4)
+ STMIA R3!, {R4-R11} ; Save R4-R11 to user
+ POP {R4-R8} ; Pop current R0-R12 into R4-R8
+ STMIA R3!, {R4-R8} ; Store them to user stack
+ STM R3, {LR}^ ; Store LR_usr directly
+ ADD R3, R3, #4 ; Adjust user sp to PC
+ POP {R4-R6} ; Pop current LR, PC, CPSR
+ STMIA R3!, {R5-R6} ; Store user PC and CPSR
+
+ SUB R3, R3, #64 ; Adjust user sp to R4
+
+ ; Check if VFP state need to be saved
+ MRC p15, 0, R2, c1, c0, 2 ; VFP/NEON access enabled? (CPACR)
+ AND R2, R2, #0x00F00000
+ CMP R2, #0x00F00000
+ BNE osRtxContextSave1 ; Continue, no VFP
+
+ VMRS R2, FPSCR
+ STMDB R3!, {R2,R12} ; Push FPSCR, maintain 8-byte alignment
+ IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 16
+ VSTMDB R3!, {D0-D15}
+ LDRB R2, [R0, #TCB_SP_FRAME] ; Record in TCB that VFP/D16 state is stacked
+ ORR R2, R2, #2
+ STRB R2, [R0, #TCB_SP_FRAME]
+ ENDIF
+ IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
+ VSTMDB R3!, {D0-D15}
+ VSTMDB R3!, {D16-D31}
+ LDRB R2, [R0, #TCB_SP_FRAME] ; Record in TCB that NEON/D32 state is stacked
+ ORR R2, R2, #4
+ STRB R2, [R0, #TCB_SP_FRAME]
+ ENDIF
+
+osRtxContextSave1
+ STR R3, [R0, #TCB_SP_OFS] ; Store user sp to osRtxInfo.thread.run.curr
+
+osRtxContextRestore
+ STR R1, [R12] ; Store run.next to run.curr
+ LDR R3, [R1, #TCB_SP_OFS] ; Load next osRtxThread_t.sp
+ LDRB R2, [R1, #TCB_SP_FRAME] ; Load next osRtxThread_t.stack_frame
+
+ ANDS R2, R2, #0x6 ; Check stack frame for VFP context
+ MRC p15, 0, R2, c1, c0, 2 ; Read CPACR
+ ANDEQ R2, R2, #0xFF0FFFFF ; Disable VFP/NEON access if incoming task does not have stacked VFP/NEON state
+ ORRNE R2, R2, #0x00F00000 ; Enable VFP/NEON access if incoming task does have stacked VFP/NEON state
+ MCR p15, 0, R2, c1, c0, 2 ; Write CPACR
+ BEQ osRtxContextRestore1 ; No VFP
+ ISB ; Only sync if we enabled VFP, otherwise we will context switch before next VFP instruction anyway
+ IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
+ VLDMIA R3!, {D16-D31}
+ ENDIF
+ VLDMIA R3!, {D0-D15}
+ LDR R2, [R3] ; Load stacked FPSCR
+ VMSR FPSCR, R2 ; Restore FPSCR
+ ADD R3, R3, #8 ; Skip FPSCR and alignment padding
+
+osRtxContextRestore1
+ LDMIA R3!, {R4-R11} ; Restore R4-R11
+ MOV R12, R3 ; Move sp pointer to R12
+ ADD R3, R3, #32 ; Adjust sp
+ PUSH {R3} ; Push sp onto stack
+ LDMIA SP, {SP}^ ; Restore SP_usr
+ LDMIA R12!, {R0-R3} ; Restore User R0-R3
+ LDR LR, [R12, #12] ; Load SPSR into LR
+ MSR SPSR_CXSF, LR ; Restore SPSR
+ ADD R12, R12, #4 ; Adjust pointer to LR
+ LDM R12, {LR}^ ; Restore LR_usr directly into LR
+ LDR LR, [R12, #4] ; Load return address (stacked PC) into LR
+ LDR R12, [R12, #-4] ; Restore R12
+
+ MOVS PC, LR ; Return from exception
+
+osRtxContextExit
+ POP {R0-R3, R12, LR} ; Restore stacked APCS registers
+ RFEFD SP! ; Return from exception
+
+ END
diff --git a/CMSIS/RTOS2/RTX/Source/rtx_core_c.h b/CMSIS/RTOS2/RTX/Source/rtx_core_c.h
new file mode 100644
index 0000000..3e0ddd3
--- /dev/null
+++ b/CMSIS/RTOS2/RTX/Source/rtx_core_c.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Project: CMSIS-RTOS RTX
+ * Title: Cortex Core definitions
+ *
+ * -----------------------------------------------------------------------------
+ */
+
+#ifndef RTX_CORE_C_H_
+#define RTX_CORE_C_H_
+
+#include "RTE_Components.h"
+#include CMSIS_device_header
+
+#ifndef __ARM_ARCH_6M__
+#define __ARM_ARCH_6M__ 0U
+#endif
+#ifndef __ARM_ARCH_7A__
+#define __ARM_ARCH_7A__ 0U
+#endif
+#ifndef __ARM_ARCH_7M__
+#define __ARM_ARCH_7M__ 0U
+#endif
+#ifndef __ARM_ARCH_7EM__
+#define __ARM_ARCH_7EM__ 0U
+#endif
+#ifndef __ARM_ARCH_8M_BASE__
+#define __ARM_ARCH_8M_BASE__ 0U
+#endif
+#ifndef __ARM_ARCH_8M_MAIN__
+#define __ARM_ARCH_8M_MAIN__ 0U
+#endif
+
+#if ((__ARM_ARCH_6M__ + \
+ __ARM_ARCH_7A__ + \
+ __ARM_ARCH_7M__ + \
+ __ARM_ARCH_7EM__ + \
+ __ARM_ARCH_8M_BASE__ + \
+ __ARM_ARCH_8M_MAIN__) != 1U)
+#error "Unknown ARM Architecture!"
+#endif
+
+#if (__ARM_ARCH_7A__ != 0U)
+#include "rtx_core_ca.h"
+#else
+#include "rtx_core_cm.h"
+#endif
+
+#endif // RTX_CORE_C_H_
diff --git a/CMSIS/RTOS2/RTX/Source/rtx_core_ca.h b/CMSIS/RTOS2/RTX/Source/rtx_core_ca.h
new file mode 100644
index 0000000..2e8cb1d
--- /dev/null
+++ b/CMSIS/RTOS2/RTX/Source/rtx_core_ca.h
@@ -0,0 +1,1123 @@
+/*
+ * Copyright (c) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Project: CMSIS-RTOS RTX
+ * Title: Cortex-A Core definitions
+ *
+ * -----------------------------------------------------------------------------
+ */
+
+#ifndef RTX_CORE_CA_H_
+#define RTX_CORE_CA_H_
+
+#include "RTE_Components.h"
+#include CMSIS_device_header
+
+#define __DOMAIN_NS 0U
+#define __EXCLUSIVE_ACCESS 1U
+
+/* CPSR initial state */
+#define CPSR_INIT_USER 0x00000010U
+#define CPSR_INIT_SYSTEM 0x0000001FU
+
+/* CPSR bit definitions */
+#define CPSR_T_BIT 0x20U
+#define CPSR_I_BIT 0x80U
+#define CPSR_F_BIT 0x40U
+
+/* CPSR mode bitmasks */
+#define CPSR_MODE_USER 0x10U
+#define CPSR_MODE_SYSTEM 0x1FU
+
+/* Determine privilege level */
+#define IS_PRIVILEGED() (__get_mode() != CPSR_MODE_USER)
+#define IS_IRQ_MODE() ((__get_mode() != CPSR_MODE_USER) && (__get_mode() != CPSR_MODE_SYSTEM))
+#define IS_IRQ_MASKED() (0U)
+
+#define STACK_FRAME_INIT 0x00U
+
+#define IS_VFP_D32_STACK_FRAME(n) (((n) & 0x04U) != 0U)
+#define IS_VFP_D16_STACK_FRAME(n) (((n) & 0x02U) != 0U)
+
+/* Emulate M profile get_PSP: SP_usr - (8*4) */
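+/* (The 8-word offset mirrors the Cortex-M basic exception frame, so shared code can
+   treat the result like an M-profile PSP; this is the apparent intent of the emulation.) */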
+#if defined(__CC_ARM)
+static __asm uint32_t __get_PSP (void) {
+ arm
+ sub sp, sp, #4
+ stm sp, {sp}^ ; Store SP_usr onto the stack
+ pop {r0} ; Pop SP_usr into R0
+ sub r0, r0, #32 ; Adjust to the start of the 8-word basic frame
+ bx lr
+}
+#else
+__STATIC_INLINE uint32_t __get_PSP (void) {
+ register uint32_t ret;
+
+ __asm volatile (
+ ".syntax unified\n\t"
+ ".arm\n\t"
+ "sub sp,sp,#4\n\t"
+ "stm sp,{sp}^\n\t"
+ "pop {%[ret]}\n\t"
+ "sub %[ret],%[ret],#32\n\t"
+ : [ret] "=&l" (ret)
+ :
+ : "memory"
+ );
+
+ return ret;
+}
+#endif
+
+
+// ==== Service Calls definitions ====
+
+#if defined(__CC_ARM)
+
+#define __SVC_INDIRECT(n) __svc_indirect(n)
+
+#define SVC0_0N(f,t) \
+__SVC_INDIRECT(0) t svc##f (t(*)()); \
+ t svcRtx##f (void); \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (void) { \
+ svc##f(svcRtx##f); \
+}
+
+#define SVC0_0(f,t) \
+__SVC_INDIRECT(0) t svc##f (t(*)()); \
+ t svcRtx##f (void); \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (void) { \
+ return svc##f(svcRtx##f); \
+}
+
+#define SVC0_0D SVC0_0
+
+#define SVC0_1N(f,t,t1) \
+__SVC_INDIRECT(0) t svc##f (t(*)(t1),t1); \
+ t svcRtx##f (t1 a1); \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1) { \
+ svc##f(svcRtx##f,a1); \
+}
+
+#define SVC0_1(f,t,t1) \
+__SVC_INDIRECT(0) t svc##f (t(*)(t1),t1); \
+ t svcRtx##f (t1 a1); \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1) { \
+ return svc##f(svcRtx##f,a1); \
+}
+
+#define SVC0_2(f,t,t1,t2) \
+__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2),t1,t2); \
+ t svcRtx##f (t1 a1, t2 a2); \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
+ return svc##f(svcRtx##f,a1,a2); \
+}
+
+#define SVC0_3(f,t,t1,t2,t3) \
+__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2,t3),t1,t2,t3); \
+ t svcRtx##f (t1 a1, t2 a2, t3 a3); \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
+ return svc##f(svcRtx##f,a1,a2,a3); \
+}
+
+#define SVC0_4(f,t,t1,t2,t3,t4) \
+__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2,t3,t4),t1,t2,t3,t4); \
+ t svcRtx##f (t1 a1, t2 a2, t3 a3, t4 a4); \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
+ return svc##f(svcRtx##f,a1,a2,a3,a4); \
+}
+
+#define SVC0_0M SVC0_0
+#define SVC0_1M SVC0_1
+#define SVC0_2M SVC0_2
+#define SVC0_3M SVC0_3
+#define SVC0_4M SVC0_4
+
+#elif defined(__ICCARM__)
+
+#define SVC_Setup(f) \
+ __asm( \
+ "mov r12,%0\n" \
+ :: "r"(&f): "r12" \
+ );
+
+#define STRINGIFY(a) #a
+#define __SVC_INDIRECT(n) _Pragma(STRINGIFY(swi_number = n)) __swi
+
+#define SVC0_0N(f,t) \
+__SVC_INDIRECT(0) t svc##f (); \
+ t svcRtx##f (void); \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (void) { \
+ SVC_Setup(svcRtx##f); \
+ svc##f(); \
+}
+
+#define SVC0_0(f,t) \
+__SVC_INDIRECT(0) t svc##f (); \
+ t svcRtx##f (void); \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (void) { \
+ SVC_Setup(svcRtx##f); \
+ return svc##f(); \
+}
+
+#define SVC0_0D SVC0_0
+
+#define SVC0_1N(f,t,t1) \
+__SVC_INDIRECT(0) t svc##f (t1 a1); \
+ t svcRtx##f (t1 a1); \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1) { \
+ SVC_Setup(svcRtx##f); \
+ svc##f(a1); \
+}
+
+#define SVC0_1(f,t,t1) \
+__SVC_INDIRECT(0) t svc##f (t1 a1); \
+ t svcRtx##f (t1 a1); \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1) { \
+ SVC_Setup(svcRtx##f); \
+ return svc##f(a1); \
+}
+
+#define SVC0_2(f,t,t1,t2) \
+__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2); \
+ t svcRtx##f (t1 a1, t2 a2); \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
+ SVC_Setup(svcRtx##f); \
+ return svc##f(a1,a2); \
+}
+
+#define SVC0_3(f,t,t1,t2,t3) \
+__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3); \
+ t svcRtx##f (t1 a1, t2 a2, t3 a3); \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
+ SVC_Setup(svcRtx##f); \
+ return svc##f(a1,a2,a3); \
+}
+
+#define SVC0_4(f,t,t1,t2,t3,t4) \
+__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3, t4 a4); \
+ t svcRtx##f (t1 a1, t2 a2, t3 a3, t4 a4); \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
+ SVC_Setup(svcRtx##f); \
+ return svc##f(a1,a2,a3,a4); \
+}
+
+#define SVC0_0M SVC0_0
+#define SVC0_1M SVC0_1
+#define SVC0_2M SVC0_2
+#define SVC0_3M SVC0_3
+#define SVC0_4M SVC0_4
+
+#else // !(defined(__CC_ARM) || defined(__ICCARM__))
+
+#define SVC_RegF "r12"
+
+#define SVC_ArgN(n) \
+register uint32_t __r##n __ASM("r"#n)
+
+#define SVC_ArgR(n,a) \
+register uint32_t __r##n __ASM("r"#n) = (uint32_t)a
+
+#define SVC_ArgF(f) \
+register uint32_t __rf __ASM(SVC_RegF) = (uint32_t)f
+
+#define SVC_In0 "r"(__rf)
+#define SVC_In1 "r"(__rf),"r"(__r0)
+#define SVC_In2 "r"(__rf),"r"(__r0),"r"(__r1)
+#define SVC_In3 "r"(__rf),"r"(__r0),"r"(__r1),"r"(__r2)
+#define SVC_In4 "r"(__rf),"r"(__r0),"r"(__r1),"r"(__r2),"r"(__r3)
+
+#define SVC_Out0
+#define SVC_Out1 "=r"(__r0)
+#define SVC_Out2 "=r"(__r0),"=r"(__r1)
+
+#define SVC_CL0
+#define SVC_CL1 "r1"
+#define SVC_CL2 "r0","r1"
+
+#define SVC_Call0(in, out, cl) \
+ __ASM volatile ("svc 0" : out : in : cl)
+
+#define SVC0_0N(f,t) \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (void) { \
+ SVC_ArgF(svcRtx##f); \
+ SVC_Call0(SVC_In0, SVC_Out0, SVC_CL2); \
+}
+
+#define SVC0_0(f,t) \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (void) { \
+ SVC_ArgN(0); \
+ SVC_ArgF(svcRtx##f); \
+ SVC_Call0(SVC_In0, SVC_Out1, SVC_CL1); \
+ return (t) __r0; \
+}
+
+#define SVC0_0D(f,t) \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (void) { \
+ SVC_ArgN(0); \
+ SVC_ArgN(1); \
+ SVC_ArgF(svcRtx##f); \
+ SVC_Call0(SVC_In0, SVC_Out2, SVC_CL0); \
+ return (((t) __r0) | (((t) __r1) << 32)); \
+}
+
+#define SVC0_1N(f,t,t1) \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1) { \
+ SVC_ArgR(0,a1); \
+ SVC_ArgF(svcRtx##f); \
+ SVC_Call0(SVC_In1, SVC_Out0, SVC_CL1); \
+}
+
+#define SVC0_1(f,t,t1) \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1) { \
+ SVC_ArgR(0,a1); \
+ SVC_ArgF(svcRtx##f); \
+ SVC_Call0(SVC_In1, SVC_Out1, SVC_CL1); \
+ return (t) __r0; \
+}
+
+#define SVC0_2(f,t,t1,t2) \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
+ SVC_ArgR(0,a1); \
+ SVC_ArgR(1,a2); \
+ SVC_ArgF(svcRtx##f); \
+ SVC_Call0(SVC_In2, SVC_Out1, SVC_CL0); \
+ return (t) __r0; \
+}
+
+#define SVC0_3(f,t,t1,t2,t3) \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
+ SVC_ArgR(0,a1); \
+ SVC_ArgR(1,a2); \
+ SVC_ArgR(2,a3); \
+ SVC_ArgF(svcRtx##f); \
+ SVC_Call0(SVC_In3, SVC_Out1, SVC_CL0); \
+ return (t) __r0; \
+}
+
+#define SVC0_4(f,t,t1,t2,t3,t4) \
+__attribute__((always_inline)) \
+__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
+ SVC_ArgR(0,a1); \
+ SVC_ArgR(1,a2); \
+ SVC_ArgR(2,a3); \
+ SVC_ArgR(3,a4); \
+ SVC_ArgF(svcRtx##f); \
+ SVC_Call0(SVC_In4, SVC_Out1, SVC_CL0); \
+ return (t) __r0; \
+}
+
+#define SVC0_0M SVC0_0
+#define SVC0_1M SVC0_1
+#define SVC0_2M SVC0_2
+#define SVC0_3M SVC0_3
+#define SVC0_4M SVC0_4
+
+#endif
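+// Illustrative expansion (hypothetical call, all compiler variants behave alike):
+// SVC0_1(Example, uint32_t, uint32_t) defines __svcExample(a1), which places the
+// address of svcRtxExample in R12 and issues SVC #0; SVC_Handler dispatches SVC
+// number 0 to the function held in R12 with the user arguments still in R0-R3.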
+
+
+// ==== Core Peripherals functions ====
+
+extern uint32_t SystemCoreClock; // System Clock Frequency (Core Clock)
+
+/// Initialize SVC and PendSV System Service Calls (not needed on Cortex-A)
+__STATIC_INLINE void SVC_Initialize (void) {
+}
+
+/// Setup External Tick Timer Interrupt
+/// \param[in] irqn Interrupt number
+extern void ExtTick_SetupIRQ (int32_t irqn);
+
+/// Enable External Tick Timer Interrupt
+/// \param[in] irqn Interrupt number
+extern void ExtTick_EnableIRQ (int32_t irqn);
+
+/// Disable External Tick Timer Interrupt
+/// \param[in] irqn Interrupt number
+extern void ExtTick_DisableIRQ (int32_t irqn);
+
+/// Get Pending SV (Service Call) and ST (SysTick) Flags
+/// \return Pending SV&ST Flags
+__STATIC_INLINE uint8_t GetPendSV_ST (void) {
+ return (0U);
+}
+
+/// Get Pending SV (Service Call) Flag
+/// \return Pending SV Flag
+extern uint8_t GetPendSV (void);
+
+/// Clear Pending SV (Service Call) and ST (SysTick) Flags
+__STATIC_INLINE void ClrPendSV_ST (void) {
+}
+
+/// Clear Pending SV (Service Call) Flag
+extern void ClrPendSV (void);
+
+/// Set Pending SV (Service Call) Flag
+extern void SetPendSV (void);
+
+/// Set Pending Flags
+/// \param[in] flags Flags to set
+extern void SetPendFlags (uint8_t flags);
+
+
+// ==== Exclusive Access Operation ====
+
+#if (__EXCLUSIVE_ACCESS == 1U)
+
+/// Atomic Access Operation: Write (8-bit)
+/// \param[in] mem Memory address
+/// \param[in] val Value to write
+/// \return Previous value
+#if defined(__CC_ARM)
+static __asm uint8_t atomic_wr8 (uint8_t *mem, uint8_t val) {
+ mov r2,r0
+1
+ ldrexb r0,[r2]
+ strexb r3,r1,[r2]
+ cmp r3,#0
+ bne %B1
+ bx lr
+}
+#else
+__STATIC_INLINE uint8_t atomic_wr8 (uint8_t *mem, uint8_t val) {
+#ifdef __ICCARM__
+#pragma diag_suppress=Pe550
+#endif
+ register uint32_t res;
+#ifdef __ICCARM__
+#pragma diag_default=Pe550
+#endif
+ register uint8_t ret;
+
+ __ASM volatile (
+#ifndef __ICCARM__
+ ".syntax unified\n\t"
+#endif
+ "1:\n\t"
+ "ldrexb %[ret],[%[mem]]\n\t"
+ "strexb %[res],%[val],[%[mem]]\n\t"
+ "cmp %[res],#0\n\t"
+ "bne 1b\n\t"
+ : [ret] "=&l" (ret),
+ [res] "=&l" (res)
+ : [mem] "l" (mem),
+ [val] "l" (val)
+ : "memory"
+ );
+
+ return ret;
+}
+#endif
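+// The atomic_* helpers below all follow the same LDREX/STREX pattern: load-exclusive,
+// compute the new value, store-exclusive, and retry if another agent wrote the
+// location in between (CLREX releases the exclusive monitor on early-out paths).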
+
+/// Atomic Access Operation: Set bits (32-bit)
+/// \param[in] mem Memory address
+/// \param[in] bits Bit mask
+/// \return New value
+#if defined(__CC_ARM)
+static __asm uint32_t atomic_set32 (uint32_t *mem, uint32_t bits) {
+ mov r2,r0
+1
+ ldrex r0,[r2]
+ orr r0,r0,r1
+ strex r3,r0,[r2]
+ cmp r3,#0
+ bne %B1
+ bx lr
+}
+#else
+__STATIC_INLINE uint32_t atomic_set32 (uint32_t *mem, uint32_t bits) {
+#ifdef __ICCARM__
+#pragma diag_suppress=Pe550
+#endif
+ register uint32_t val, res;
+#ifdef __ICCARM__
+#pragma diag_default=Pe550
+#endif
+ register uint32_t ret;
+
+ __ASM volatile (
+#ifndef __ICCARM__
+ ".syntax unified\n\t"
+#endif
+ "1:\n\t"
+ "ldrex %[val],[%[mem]]\n\t"
+ "orr %[ret],%[val],%[bits]\n\t"
+ "strex %[res],%[ret],[%[mem]]\n\t"
+ "cmp %[res],#0\n\t"
+ "bne 1b\n"
+ : [ret] "=&l" (ret),
+ [val] "=&l" (val),
+ [res] "=&l" (res)
+ : [mem] "l" (mem),
+ [bits] "l" (bits)
+ : "memory"
+ );
+
+ return ret;
+}
+#endif
+
+/// Atomic Access Operation: Clear bits (32-bit)
+/// \param[in] mem Memory address
+/// \param[in] bits Bit mask
+/// \return Previous value
+#if defined(__CC_ARM)
+static __asm uint32_t atomic_clr32 (uint32_t *mem, uint32_t bits) {
+ push {r4,lr}
+ mov r2,r0
+1
+ ldrex r0,[r2]
+ bic r4,r0,r1
+ strex r3,r4,[r2]
+ cmp r3,#0
+ bne %B1
+ pop {r4,pc}
+}
+#else
+__STATIC_INLINE uint32_t atomic_clr32 (uint32_t *mem, uint32_t bits) {
+#ifdef __ICCARM__
+#pragma diag_suppress=Pe550
+#endif
+ register uint32_t val, res;
+#ifdef __ICCARM__
+#pragma diag_default=Pe550
+#endif
+ register uint32_t ret;
+
+ __ASM volatile (
+#ifndef __ICCARM__
+ ".syntax unified\n\t"
+#endif
+ "1:\n\t"
+ "ldrex %[ret],[%[mem]]\n\t"
+ "bic %[val],%[ret],%[bits]\n\t"
+ "strex %[res],%[val],[%[mem]]\n\t"
+ "cmp %[res],#0\n\t"
+ "bne 1b\n"
+ : [ret] "=&l" (ret),
+ [val] "=&l" (val),
+ [res] "=&l" (res)
+ : [mem] "l" (mem),
+ [bits] "l" (bits)
+ : "memory"
+ );
+
+ return ret;
+}
+#endif
+
+/// Atomic Access Operation: Check if all specified bits (32-bit) are active and clear them
+/// \param[in] mem Memory address
+/// \param[in] bits Bit mask
+/// \return Active bits before clearing or 0 if not active
+#if defined(__CC_ARM)
+static __asm uint32_t atomic_chk32_all (uint32_t *mem, uint32_t bits) {
+ push {r4,lr}
+ mov r2,r0
+1
+ ldrex r0,[r2]
+ and r4,r0,r1
+ cmp r4,r1
+ beq %F2
+ clrex
+ movs r0,#0
+ pop {r4,pc}
+2
+ bic r4,r0,r1
+ strex r3,r4,[r2]
+ cmp r3,#0
+ bne %B1
+ pop {r4,pc}
+}
+#else
+__STATIC_INLINE uint32_t atomic_chk32_all (uint32_t *mem, uint32_t bits) {
+#ifdef __ICCARM__
+#pragma diag_suppress=Pe550
+#endif
+ register uint32_t val, res;
+#ifdef __ICCARM__
+#pragma diag_default=Pe550
+#endif
+ register uint32_t ret;
+
+ __ASM volatile (
+#ifndef __ICCARM__
+ ".syntax unified\n\t"
+#endif
+ "1:\n\t"
+ "ldrex %[ret],[%[mem]]\n\t"
+ "and %[val],%[ret],%[bits]\n\t"
+ "cmp %[val],%[bits]\n\t"
+ "beq 2f\n\t"
+ "clrex\n\t"
+ "movs %[ret],#0\n\t"
+ "b 3f\n"
+ "2:\n\t"
+ "bic %[val],%[ret],%[bits]\n\t"
+ "strex %[res],%[val],[%[mem]]\n\t"
+ "cmp %[res],#0\n\t"
+ "bne 1b\n"
+ "3:"
+ : [ret] "=&l" (ret),
+ [val] "=&l" (val),
+ [res] "=&l" (res)
+ : [mem] "l" (mem),
+ [bits] "l" (bits)
+ : "cc", "memory"
+ );
+
+ return ret;
+}
+#endif
+
+/// Atomic Access Operation: Check if any specified bits (32-bit) are active and clear them
+/// \param[in] mem Memory address
+/// \param[in] bits Bit mask
+/// \return Active bits before clearing or 0 if not active
+#if defined(__CC_ARM)
+static __asm uint32_t atomic_chk32_any (uint32_t *mem, uint32_t bits) {
+ push {r4,lr}
+ mov r2,r0
+1
+ ldrex r0,[r2]
+ tst r0,r1
+ bne %F2
+ clrex
+ movs r0,#0
+ pop {r4,pc}
+2
+ bic r4,r0,r1
+ strex r3,r4,[r2]
+ cmp r3,#0
+ bne %B1
+ pop {r4,pc}
+}
+#else
+__STATIC_INLINE uint32_t atomic_chk32_any (uint32_t *mem, uint32_t bits) {
+#ifdef __ICCARM__
+#pragma diag_suppress=Pe550
+#endif
+ register uint32_t val, res;
+#ifdef __ICCARM__
+#pragma diag_default=Pe550
+#endif
+ register uint32_t ret;
+
+ __ASM volatile (
+#ifndef __ICCARM__
+ ".syntax unified\n\t"
+#endif
+ "1:\n\t"
+ "ldrex %[ret],[%[mem]]\n\t"
+ "tst %[ret],%[bits]\n\t"
+ "bne 2f\n\t"
+ "clrex\n\t"
+ "movs %[ret],#0\n\t"
+ "b 3f\n"
+ "2:\n\t"
+ "bic %[val],%[ret],%[bits]\n\t"
+ "strex %[res],%[val],[%[mem]]\n\t"
+ "cmp %[res],#0\n\t"
+ "bne 1b\n"
+ "3:"
+ : [ret] "=&l" (ret),
+ [val] "=&l" (val),
+ [res] "=&l" (res)
+ : [mem] "l" (mem),
+ [bits] "l" (bits)
+ : "cc", "memory"
+ );
+
+ return ret;
+}
+#endif
+
+/// Atomic Access Operation: Increment (32-bit)
+/// \param[in] mem Memory address
+/// \return Previous value
+#if defined(__CC_ARM)
+static __asm uint32_t atomic_inc32 (uint32_t *mem) {
+ mov r2,r0
+1
+ ldrex r0,[r2]
+ adds r1,r0,#1
+ strex r3,r1,[r2]
+ cmp r3,#0
+ bne %B1
+ bx lr
+}
+#else
+__STATIC_INLINE uint32_t atomic_inc32 (uint32_t *mem) {
+#ifdef __ICCARM__
+#pragma diag_suppress=Pe550
+#endif
+ register uint32_t val, res;
+#ifdef __ICCARM__
+#pragma diag_default=Pe550
+#endif
+ register uint32_t ret;
+
+ __ASM volatile (
+#ifndef __ICCARM__
+ ".syntax unified\n\t"
+#endif
+ "1:\n\t"
+ "ldrex %[ret],[%[mem]]\n\t"
+ "adds %[val],%[ret],#1\n\t"
+ "strex %[res],%[val],[%[mem]]\n\t"
+ "cmp %[res],#0\n\t"
+ "bne 1b\n"
+ : [ret] "=&l" (ret),
+ [val] "=&l" (val),
+ [res] "=&l" (res)
+ : [mem] "l" (mem)
+ : "cc", "memory"
+ );
+
+ return ret;
+}
+#endif
+
+/// Atomic Access Operation: Increment (32-bit) if Less Than
+/// \param[in] mem Memory address
+/// \param[in] max Maximum value
+/// \return Previous value
+#if defined(__CC_ARM)
+static __asm uint32_t atomic_inc32_lt (uint32_t *mem, uint32_t max) {
+ push {r4,lr}
+ mov r2,r0
+1
+ ldrex r0,[r2]
+ cmp r1,r0
+ bhi %F2
+ clrex
+ pop {r4,pc}
+2
+ adds r4,r0,#1
+ strex r3,r4,[r2]
+ cmp r3,#0
+ bne %B1
+ pop {r4,pc}
+}
+#else
+__STATIC_INLINE uint32_t atomic_inc32_lt (uint32_t *mem, uint32_t max) {
+#ifdef __ICCARM__
+#pragma diag_suppress=Pe550
+#endif
+ register uint32_t val, res;
+#ifdef __ICCARM__
+#pragma diag_default=Pe550
+#endif
+ register uint32_t ret;
+
+ __ASM volatile (
+#ifndef __ICCARM__
+ ".syntax unified\n\t"
+#endif
+ "1:\n\t"
+ "ldrex %[ret],[%[mem]]\n\t"
+ "cmp %[max],%[ret]\n\t"
+ "bhi 2f\n\t"
+ "clrex\n\t"
+ "b 3f\n"
+ "2:\n\t"
+ "adds %[val],%[ret],#1\n\t"
+ "strex %[res],%[val],[%[mem]]\n\t"
+ "cmp %[res],#0\n\t"
+ "bne 1b\n"
+ "3:"
+ : [ret] "=&l" (ret),
+ [val] "=&l" (val),
+ [res] "=&l" (res)
+ : [mem] "l" (mem),
+ [max] "l" (max)
+ : "cc", "memory"
+ );
+
+ return ret;
+}
+#endif
+
+/// Atomic Access Operation: Increment (16-bit) if Less Than
+/// \param[in] mem Memory address
+/// \param[in] max Maximum value
+/// \return Previous value
+#if defined(__CC_ARM)
+static __asm uint16_t atomic_inc16_lt (uint16_t *mem, uint16_t max) {
+ push {r4,lr}
+ mov r2,r0
+1
+ ldrexh r0,[r2]
+ cmp r1,r0
+ bhi %F2
+ clrex
+ pop {r4,pc}
+2
+ adds r4,r0,#1
+ strexh r3,r4,[r2]
+ cmp r3,#0
+ bne %B1
+ pop {r4,pc}
+}
+#else
+__STATIC_INLINE uint16_t atomic_inc16_lt (uint16_t *mem, uint16_t max) {
+#ifdef __ICCARM__
+#pragma diag_suppress=Pe550
+#endif
+ register uint32_t val, res;
+#ifdef __ICCARM__
+#pragma diag_default=Pe550
+#endif
+ register uint16_t ret;
+
+ __ASM volatile (
+#ifndef __ICCARM__
+ ".syntax unified\n\t"
+#endif
+ "1:\n\t"
+ "ldrexh %[ret],[%[mem]]\n\t"
+ "cmp %[max],%[ret]\n\t"
+ "bhi 2f\n\t"
+ "clrex\n\t"
+ "b 3f\n"
+ "2:\n\t"
+ "adds %[val],%[ret],#1\n\t"
+ "strexh %[res],%[val],[%[mem]]\n\t"
+ "cmp %[res],#0\n\t"
+ "bne 1b\n"
+ "3:"
+ : [ret] "=&l" (ret),
+ [val] "=&l" (val),
+ [res] "=&l" (res)
+ : [mem] "l" (mem),
+ [max] "l" (max)
+ : "cc", "memory"
+ );
+
+ return ret;
+}
+#endif
+
+/// Atomic Access Operation: Increment (16-bit) and clear on Limit
+/// \param[in] mem Memory address
+/// \param[in] lim Limit value
+/// \return Previous value
+#if defined(__CC_ARM)
+static __asm uint16_t atomic_inc16_lim (uint16_t *mem, uint16_t lim) {
+ push {r4,lr}
+ mov r2,r0
+1
+ ldrexh r0,[r2]
+ adds r4,r0,#1
+ cmp r1,r4
+ bhi %F2
+ movs r4,#0
+2
+ strexh r3,r4,[r2]
+ cmp r3,#0
+ bne %B1
+ pop {r4,pc}
+}
+#else
+__STATIC_INLINE uint16_t atomic_inc16_lim (uint16_t *mem, uint16_t lim) {
+#ifdef __ICCARM__
+#pragma diag_suppress=Pe550
+#endif
+ register uint32_t val, res;
+#ifdef __ICCARM__
+#pragma diag_default=Pe550
+#endif
+ register uint16_t ret;
+
+ __ASM volatile (
+#ifndef __ICCARM__
+ ".syntax unified\n\t"
+#endif
+ "1:\n\t"
+ "ldrexh %[ret],[%[mem]]\n\t"
+ "adds %[val],%[ret],#1\n\t"
+ "cmp %[lim],%[val]\n\t"
+ "bhi 2f\n\t"
+ "movs %[val],#0\n"
+ "2:\n\t"
+ "strexh %[res],%[val],[%[mem]]\n\t"
+ "cmp %[res],#0\n\t"
+ "bne 1b\n"
+ : [ret] "=&l" (ret),
+ [val] "=&l" (val),
+ [res] "=&l" (res)
+ : [mem] "l" (mem),
+ [lim] "l" (lim)
+ : "cc", "memory"
+ );
+
+ return ret;
+}
+#endif
+
+/// Atomic Access Operation: Decrement (32-bit) if Not Zero
+/// \param[in] mem Memory address
+/// \return Previous value
+#if defined(__CC_ARM)
+static __asm uint32_t atomic_dec32_nz (uint32_t *mem) {
+ mov r2,r0
+1
+ ldrex r0,[r2]
+ cmp r0,#0
+ bne %F2
+ clrex
+ bx lr
+2
+ subs r1,r0,#1
+ strex r3,r1,[r2]
+ cmp r3,#0
+ bne %B1
+ bx lr
+}
+#else
+__STATIC_INLINE uint32_t atomic_dec32_nz (uint32_t *mem) {
+#ifdef __ICCARM__
+#pragma diag_suppress=Pe550
+#endif
+ register uint32_t val, res;
+#ifdef __ICCARM__
+#pragma diag_default=Pe550
+#endif
+ register uint32_t ret;
+
+ __ASM volatile (
+#ifndef __ICCARM__
+ ".syntax unified\n\t"
+#endif
+ "1:\n\t"
+ "ldrex %[ret],[%[mem]]\n\t"
+ "cmp %[ret],#0\n\t"
+ "bne 2f\n"
+ "clrex\n\t"
+ "b 3f\n"
+ "2:\n\t"
+ "subs %[val],%[ret],#1\n\t"
+ "strex %[res],%[val],[%[mem]]\n\t"
+ "cmp %[res],#0\n\t"
+ "bne 1b\n"
+ "3:"
+ : [ret] "=&l" (ret),
+ [val] "=&l" (val),
+ [res] "=&l" (res)
+ : [mem] "l" (mem)
+ : "cc", "memory"
+ );
+
+ return ret;
+}
+#endif
+
+/// Atomic Access Operation: Decrement (16-bit) if Not Zero
+/// \param[in] mem Memory address
+/// \return Previous value
+#if defined(__CC_ARM)
+static __asm uint16_t atomic_dec16_nz (uint16_t *mem) {
+ mov r2,r0
+1
+ ldrexh r0,[r2]
+ cmp r0,#0
+ bne %F2
+ clrex
+ bx lr
+2
+ subs r1,r0,#1
+ strexh r3,r1,[r2]
+ cmp r3,#0
+ bne %B1
+ bx lr
+}
+#else
+__STATIC_INLINE uint16_t atomic_dec16_nz (uint16_t *mem) {
+#ifdef __ICCARM__
+#pragma diag_suppress=Pe550
+#endif
+ register uint32_t val, res;
+#ifdef __ICCARM__
+#pragma diag_default=Pe550
+#endif
+ register uint16_t ret;
+
+ __ASM volatile (
+#ifndef __ICCARM__
+ ".syntax unified\n\t"
+#endif
+ "1:\n\t"
+ "ldrexh %[ret],[%[mem]]\n\t"
+ "cmp %[ret],#0\n\t"
+ "bne 2f\n\t"
+ "clrex\n\t"
+ "b 3f\n"
+ "2:\n\t"
+ "subs %[val],%[ret],#1\n\t"
+ "strexh %[res],%[val],[%[mem]]\n\t"
+ "cmp %[res],#0\n\t"
+ "bne 1b\n"
+ "3:"
+ : [ret] "=&l" (ret),
+ [val] "=&l" (val),
+ [res] "=&l" (res)
+ : [mem] "l" (mem)
+ : "cc", "memory"
+ );
+
+ return ret;
+}
+#endif
+
+/// Atomic Access Operation: Link Get
+/// \param[in] root Root address
+/// \return Link
+#if defined(__CC_ARM)
+static __asm void *atomic_link_get (void **root) {
+ mov r2,r0
+1
+ ldrex r0,[r2]
+ cmp r0,#0
+ bne %F2
+ clrex
+ bx lr
+2
+ ldr r1,[r0]
+ strex r3,r1,[r2]
+ cmp r3,#0
+ bne %B1
+ bx lr
+}
+#else
+__STATIC_INLINE void *atomic_link_get (void **root) {
+#ifdef __ICCARM__
+#pragma diag_suppress=Pe550
+#endif
+ register uint32_t val, res;
+#ifdef __ICCARM__
+#pragma diag_default=Pe550
+#endif
+ register void *ret;
+
+ __ASM volatile (
+#ifndef __ICCARM__
+ ".syntax unified\n\t"
+#endif
+ "1:\n\t"
+ "ldrex %[ret],[%[root]]\n\t"
+ "cmp %[ret],#0\n\t"
+ "bne 2f\n\t"
+ "clrex\n\t"
+ "b 3f\n"
+ "2:\n\t"
+ "ldr %[val],[%[ret]]\n\t"
+ "strex %[res],%[val],[%[root]]\n\t"
+ "cmp %[res],#0\n\t"
+ "bne 1b\n"
+ "3:"
+ : [ret] "=&l" (ret),
+ [val] "=&l" (val),
+ [res] "=&l" (res)
+ : [root] "l" (root)
+ : "cc", "memory"
+ );
+
+ return ret;
+}
+#endif
+
+/// Atomic Access Operation: Link Put
+/// \param[in] root Root address
+/// \param[in] link Link
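+/// Note: the current head is first copied into *link; after the DMB, the exclusive
+/// store succeeds only if the root still holds that head, so any concurrent update
+/// forces a retry with a consistent link.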
+#if defined(__CC_ARM)
+static __asm void atomic_link_put (void **root, void *link) {
+1
+ ldr r2,[r0]
+ str r2,[r1]
+ dmb
+ ldrex r2,[r0]
+ ldr r3,[r1]
+ cmp r3,r2
+ bne %B1
+ strex r3,r1,[r0]
+ cmp r3,#0
+ bne %B1
+ bx lr
+}
+#else
+__STATIC_INLINE void atomic_link_put (void **root, void *link) {
+#ifdef __ICCARM__
+#pragma diag_suppress=Pe550
+#endif
+ register uint32_t val1, val2, res;
+#ifdef __ICCARM__
+#pragma diag_default=Pe550
+#endif
+
+ __ASM volatile (
+#ifndef __ICCARM__
+ ".syntax unified\n\t"
+#endif
+ "1:\n\t"
+ "ldr %[val1],[%[root]]\n\t"
+ "str %[val1],[%[link]]\n\t"
+ "dmb\n\t"
+ "ldrex %[val1],[%[root]]\n\t"
+ "ldr %[val2],[%[link]]\n\t"
+ "cmp %[val2],%[val1]\n\t"
+ "bne 1b\n\t"
+ "strex %[res],%[link],[%[root]]\n\t"
+ "cmp %[res],#0\n\t"
+ "bne 1b\n"
+ : [val1] "=&l" (val1),
+ [val2] "=&l" (val2),
+ [res] "=&l" (res)
+ : [root] "l" (root),
+ [link] "l" (link)
+ : "cc", "memory"
+ );
+}
+#endif
+
+#endif // (__EXCLUSIVE_ACCESS == 1U)
+
+
+#endif // RTX_CORE_CA_H_
diff --git a/CMSIS/RTOS2/RTX/Source/core_cm.h b/CMSIS/RTOS2/RTX/Source/rtx_core_cm.h
similarity index 98%
rename from CMSIS/RTOS2/RTX/Source/core_cm.h
rename to CMSIS/RTOS2/RTX/Source/rtx_core_cm.h
index 93c77e1..5cab82a 100644
--- a/CMSIS/RTOS2/RTX/Source/core_cm.h
+++ b/CMSIS/RTOS2/RTX/Source/rtx_core_cm.h
@@ -23,36 +23,12 @@
* -----------------------------------------------------------------------------
*/
-#ifndef CORE_CM_H_
-#define CORE_CM_H_
+#ifndef RTX_CORE_CM_H_
+#define RTX_CORE_CM_H_
#include "RTE_Components.h"
#include CMSIS_device_header
-#ifndef __ARM_ARCH_6M__
-#define __ARM_ARCH_6M__ 0U
-#endif
-#ifndef __ARM_ARCH_7M__
-#define __ARM_ARCH_7M__ 0U
-#endif
-#ifndef __ARM_ARCH_7EM__
-#define __ARM_ARCH_7EM__ 0U
-#endif
-#ifndef __ARM_ARCH_8M_BASE__
-#define __ARM_ARCH_8M_BASE__ 0U
-#endif
-#ifndef __ARM_ARCH_8M_MAIN__
-#define __ARM_ARCH_8M_MAIN__ 0U
-#endif
-
-#if ((__ARM_ARCH_6M__ + \
- __ARM_ARCH_7M__ + \
- __ARM_ARCH_7EM__ + \
- __ARM_ARCH_8M_BASE__ + \
- __ARM_ARCH_8M_MAIN__) != 1U)
-#error "Unknown ARM Architecture!"
-#endif
-
#ifdef RTE_CMSIS_RTOS2_RTX5_ARMV8M_NS
#define __DOMAIN_NS 1U
#endif
@@ -1521,4 +1497,4 @@
#endif // (__EXCLUSIVE_ACCESS == 1U)
-#endif // CORE_CM_H_
+#endif // RTX_CORE_CM_H_
diff --git a/CMSIS/RTOS2/RTX/Source/rtx_gic.c b/CMSIS/RTOS2/RTX/Source/rtx_gic.c
new file mode 100644
index 0000000..7246892
--- /dev/null
+++ b/CMSIS/RTOS2/RTX/Source/rtx_gic.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2013-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Project: CMSIS-RTOS RTX
+ * Title: RTX GIC functions
+ *
+ * -----------------------------------------------------------------------------
+ */
+
+#include "RTE_Components.h"
+#include CMSIS_device_header
+
+#include "rtx_lib.h"
+
+#if ((__ARM_ARCH_7A__ == 1U) && (__GIC_PRESENT == 1U))
+
+extern const uint32_t irqRtxGicBase[];
+ const uint32_t irqRtxGicBase[2] = {
+ GIC_DISTRIBUTOR_BASE,
+ GIC_INTERFACE_BASE
+};
+
+
+static IRQn_Type PendSV_IRQn;
+static uint8_t PendSV_Flag = 0U;
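+// RTX multiplexes the kernel tick and PendSV onto a single GIC interrupt;
+// PendSV_Flag records whether the request came from SetPendSV() so that
+// osRtxIrqHandler can dispatch to the PendSV or tick handler accordingly.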
+
+
+// Pending supervisor call interface
+// =================================
+
+/// Get Pending SV (Service Call) Flag
+/// \return Pending SV Flag
+uint8_t GetPendSV (void) {
+ uint32_t pend;
+
+ pend = GIC_GetIRQStatus(PendSV_IRQn);
+
+ return ((uint8_t)(pend & 1U));
+}
+
+/// Clear Pending SV (Service Call) Flag
+void ClrPendSV (void) {
+ GIC_ClearPendingIRQ(PendSV_IRQn);
+ PendSV_Flag = 0U;
+}
+
+/// Set Pending SV (Service Call) Flag
+void SetPendSV (void) {
+ PendSV_Flag = 1U;
+ GIC_SetPendingIRQ(PendSV_IRQn);
+}
+
+/// Set Pending Flags
+/// \param[in] flags Flags to set
+void SetPendFlags (uint8_t flags) {
+ if ((flags & 1U) != 0U) {
+ PendSV_Flag = 1U;
+ GIC_SetPendingIRQ(PendSV_IRQn);
+ }
+}
+
+
+// External IRQ handling interface
+// =================================
+
+/// Enable RTX interrupts
+void osRtxIrqUnlock (void) {
+ GIC_EnableIRQ(PendSV_IRQn);
+}
+
+/// Disable RTX interrupts
+void osRtxIrqLock (void) {
+ GIC_DisableIRQ(PendSV_IRQn);
+}
+
+/// Timer/PendSV interrupt handler
+void osRtxIrqHandler (void) {
+
+ if (PendSV_Flag == 0U) {
+ osRtxTick_Handler();
+ } else {
+ ClrPendSV();
+ osRtxPendSV_Handler();
+ }
+}
+
+
+// External tick timer IRQ interface
+// =================================
+
+/// Setup External Tick Timer Interrupt
+/// \param[in] irqn Interrupt number
+void ExtTick_SetupIRQ (int32_t irqn) {
+ IRQn_Type irq = (IRQn_Type)irqn;
+ uint32_t prio;
+
+ PendSV_IRQn = irq;
+
+ // Disable corresponding IRQ first
+ GIC_DisableIRQ (irq);
+ GIC_ClearPendingIRQ(irq);
+
+ // Write 0xFF to determine priority level
+ GIC_SetPriority(irq, 0xFFU);
+
+ // Read back the lowest implemented priority level (the value actually stored)
+ prio = GIC_GetPriority(irq);
+
+ // Set lowest possible priority
+ GIC_SetPriority(irq, prio - 1);
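+ // Note: unimplemented low-order priority bits are RAZ/WI, so prio - 1
+ // selects the next implemented level above the minimum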
+
+ // Set edge-triggered and 1-N model bits
+ GIC_SetLevelModel(irq, 1, 1);
+
+ InterruptHandlerRegister(irq, osRtxIrqHandler);
+}
+
+/// Enable External Tick Timer Interrupt
+/// \param[in] irqn Interrupt number
+void ExtTick_EnableIRQ (int32_t irqn) {
+ GIC_EnableIRQ((IRQn_Type)irqn);
+}
+
+/// Disable External Tick Timer Interrupt
+/// \param[in] irqn Interrupt number
+void ExtTick_DisableIRQ (int32_t irqn) {
+ GIC_DisableIRQ((IRQn_Type)irqn);
+}
+
+#endif
diff --git a/CMSIS/RTOS2/RTX/Source/rtx_kernel.c b/CMSIS/RTOS2/RTX/Source/rtx_kernel.c
index 72207a7..d06f883 100644
--- a/CMSIS/RTOS2/RTX/Source/rtx_kernel.c
+++ b/CMSIS/RTOS2/RTX/Source/rtx_kernel.c
@@ -279,6 +279,7 @@
}
osRtxThreadSwitch(thread);
+#if (__ARM_ARCH_7A__ == 0U)
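+ // CONTROL exists only on M-profile; on Cortex-A a thread's privilege is set
+ // via the CPSR mode in its initial stack frame (see rtx_thread.c below)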
if ((osRtxConfig.flags & osRtxConfigPrivilegedMode) != 0U) {
// Privileged Thread mode & PSP
__set_CONTROL(0x02U);
@@ -286,6 +287,7 @@
// Unprivileged Thread mode & PSP
__set_CONTROL(0x03U);
}
+#endif
osRtxInfo.kernel.sys_freq = SystemCoreClock;
diff --git a/CMSIS/RTOS2/RTX/Source/rtx_lib.h b/CMSIS/RTOS2/RTX/Source/rtx_lib.h
index 75d6fca..a2835e9 100644
--- a/CMSIS/RTOS2/RTX/Source/rtx_lib.h
+++ b/CMSIS/RTOS2/RTX/Source/rtx_lib.h
@@ -28,8 +28,10 @@
#include <string.h>
#include <stdbool.h>
-#include "core_cm.h" // Cortex-M definitions
+#include "rtx_core_c.h" // Cortex core definitions
+#if ((__ARM_ARCH_8M_BASE__ != 0) || (__ARM_ARCH_8M_MAIN__ != 0))
#include "tz_context.h" // TrustZone Context API
+#endif
#include "cmsis_os2.h" // CMSIS RTOS API
#include "rtx_os.h" // RTX OS definitions
#include "rtx_evr.h" // RTX Event Recorder definitions
diff --git a/CMSIS/RTOS2/RTX/Source/rtx_system.c b/CMSIS/RTOS2/RTX/Source/rtx_system.c
index c8715cd..a4483fd 100644
--- a/CMSIS/RTOS2/RTX/Source/rtx_system.c
+++ b/CMSIS/RTOS2/RTX/Source/rtx_system.c
@@ -213,30 +213,40 @@
/// Setup System Timer.
__WEAK int32_t osRtxSysTimerSetup (void) {
-
+#ifdef SysTick
// Setup SysTick Timer
SysTick_Setup(osRtxInfo.kernel.sys_freq / osRtxConfig.tick_freq);
return SysTick_IRQn; // Return IRQ number of SysTick
+#else
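+ // No SysTick on this architecture (e.g. Cortex-A); a device-specific timer
+ // is expected to override these __WEAK functions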
+ return 0;
+#endif
}
/// Enable System Timer.
__WEAK void osRtxSysTimerEnable (void) {
+#ifdef SysTick
SysTick_Enable();
+#endif
}
/// Disable System Timer.
__WEAK void osRtxSysTimerDisable (void) {
+#ifdef SysTick
SysTick_Disable();
+#endif
}
/// Acknowledge System Timer IRQ.
__WEAK void osRtxSysTimerAckIRQ (void) {
+#ifdef SysTick
SysTick_GetOvf();
+#endif
}
/// Get System Timer count.
__WEAK uint32_t osRtxSysTimerGetCount (void) {
+#ifdef SysTick
uint32_t tick;
uint32_t val;
@@ -249,9 +259,16 @@
val += tick * SysTick_GetPeriod();
return val;
+#else
+ return 0U;
+#endif
}
/// Get System Timer frequency.
__WEAK uint32_t osRtxSysTimerGetFreq (void) {
+#ifdef SysTick
return osRtxInfo.kernel.sys_freq;
+#else
+ return 0U;
+#endif
}
diff --git a/CMSIS/RTOS2/RTX/Source/rtx_thread.c b/CMSIS/RTOS2/RTX/Source/rtx_thread.c
index b49abd3..264e404 100644
--- a/CMSIS/RTOS2/RTX/Source/rtx_thread.c
+++ b/CMSIS/RTOS2/RTX/Source/rtx_thread.c
@@ -377,7 +377,7 @@
/// \param[in] thread thread object.
/// \return pointer to registers R0-R3.
uint32_t *osRtxThreadRegPtr (os_thread_t *thread) {
-
+#if (__ARM_ARCH_7A__ == 0U) /* Cortex-M */
#if (__FPU_USED == 1U)
if (IS_EXTENDED_STACK_FRAME(thread->stack_frame)) {
// Extended Stack Frame: S16-S31, R4-R11, R0-R3, R12, LR, PC, xPSR, S0-S15, FPSCR
@@ -390,6 +390,20 @@
// Stack Frame: R4-R11, R0-R3, R12, LR, PC, xPSR
return ((uint32_t *)(thread->sp + 8U*4U));
#endif
+#else /* Cortex-A */
+ if (IS_VFP_D32_STACK_FRAME(thread->stack_frame)) {
+ /* VFP-D32 Stack Frame: D16-31, D0-D15, FPSCR, Reserved, R4-R11, R0-R3, R12, LR, PC, CPSR */
+ return (uint32_t *)(thread->sp + (8U*4U) + (2U*4U) + (32U*8U));
+ }
+ else if (IS_VFP_D16_STACK_FRAME(thread->stack_frame)) {
+ /* VFP-D16 Stack Frame: D0-D15, FPSCR, Reserved, R4-R11, R0-R3, R12, LR, PC, CPSR */
+ return (uint32_t *)(thread->sp + (8U*4U) + (2U*4U) + (16U*8U));
+ }
+ else {
+ /* Basic Stack Frame: R4-R11, R0-R3, R12, LR, PC, CPSR */
+ return (uint32_t *)(thread->sp + (8U*4U));
+ }
+#endif
}
/// Block running Thread execution and register it as Ready to Run.
@@ -761,7 +775,19 @@
}
*ptr++ = (uint32_t)osThreadExit; // LR
*ptr++ = (uint32_t)func; // PC
+#if (__ARM_ARCH_7A__ == 0U)
*ptr++ = XPSR_INITIAL_VALUE; // xPSR
+#else
+ if ((osRtxConfig.flags & osRtxConfigPrivilegedMode) != 0U) {
+ *ptr = CPSR_INIT_SYSTEM; // CPSR (Mode=System)
+ } else {
+ *ptr = CPSR_INIT_USER; // CPSR (Mode=User)
+ }
+ if (((uint32_t)func & 1U) != 0U) {
+ *ptr |= CPSR_T_BIT; // CPSR (Thumb=1)
+ }
+ ptr++;
+#endif
*(ptr-8) = (uint32_t)argument; // R0
// Register post ISR processing function