CMSIS CORE_A:
- Added Cortex-A core support, ARMCC specific:
  - Core specific register definitions
  - Generic Interrupt Controller functions
  - Generic Timer functions
  - L1 and L2 Cache functions
  - MMU functions
- Added ARMCA7 and ARMCA9 devices
- Added Startup, System and MMU configuration files
SDCMSIS-579
diff --git a/ARM.CMSIS.pdsc b/ARM.CMSIS.pdsc
index 6c8073f..f3db990 100644
--- a/ARM.CMSIS.pdsc
+++ b/ARM.CMSIS.pdsc
@@ -8,6 +8,17 @@
   <url>http://www.keil.com/pack/</url>
 
   <releases>
+    <release version="5.0.2-dev1">
+      CMSIS CORE_A: 
+      - Added Cortex-A core support, ARMCC specific:
+        - Core specific register definitions
+        - Generic Interrupt Controller functions
+        - Generic Timer functions
+        - L1 and L2 Cache functions
+        - MMU functions
+      - Added ARMCA7 and ARMCA9 devices
+      - Added Startup, System and MMU configuration files
+    </release>
     <release version="5.0.2-dev0">
       CMSIS-Core: 5.0.2 (see revision history for details)
       - Added macros __UNALIGNED_UINT16_READ, __UNALIGNED_UINT16_WRITE
@@ -481,6 +492,35 @@
       </device>
     </family>
 
+    <!-- ******************************  Cortex-A7  ****************************** -->
+    <family Dfamily="ARM Cortex A7" Dvendor="ARM:82">
+      <book name="http://infocenter.arm.com/help/topic/com.arm.doc.ddi0464f/index.html" title="Cortex-A7 MPCore Technical Reference Manual"/>
+      <description>
+The Cortex-A7 MPCore processor is a high-performance, low-power processor that implements the ARMv7-A architecture. 
+The Cortex-A7 MPCore processor has one to four processors in a single multiprocessor device with a L1 cache subsystem, 
+an optional integrated GIC, and an optional L2 cache controller.
+      </description>
+   
+      <device Dname="ARMCA7">
+        <processor Dcore="Cortex-A7" DcoreVersion="r0p5" Dfpu="DP_FPU" Dmpu="MPU" Dendian="Configurable"/>
+        <compile header="Device/ARM/ARMCA7/Include/ARMCA7.h" define="ARMCA7"/>
+      </device>
+    </family>
+
+    <!-- ******************************  Cortex-A9  ****************************** -->
+    <family Dfamily="ARM Cortex A9" Dvendor="ARM:82">
+      <book name="http://infocenter.arm.com/help/topic/com.arm.doc.100511_0401_10_en/index.html" title="Cortex-A9 Technical Reference Manual"/>
+      <description>
+The Cortex-A9 processor is a high-performance, low-power, ARM macrocell with an L1 cache subsystem that provides full virtual memory capabilities.
+The Cortex-A9 processor implements the ARMv7-A architecture and runs 32-bit ARM instructions, 16-bit and 32-bit Thumb instructions,
+and 8-bit Java bytecodes in Jazelle state.
+      </description>
+
+      <device Dname="ARMCA9">
+        <processor Dcore="Cortex-A9" DcoreVersion="r4p1" Dfpu="DP_FPU" Dmpu="MPU" Dendian="Configurable"/>
+        <compile header="Device/ARM/ARMCA9/Include/ARMCA9.h" define="ARMCA9"/>
+      </device>
+    </family>
   </devices>
 
 
@@ -652,6 +692,11 @@
       <accept condition="ARMv7-M Device"/>
       <accept condition="ARMv8-M Device"/>
     </condition>
+    <condition id="ARMv7-A Device">
+      <description>ARMv7-A architecture based device</description>
+      <accept Dcore="Cortex-A7"/>
+      <accept Dcore="Cortex-A9"/>
+    </condition>
 
     <!-- ARM core -->
     <condition id="CM0">
@@ -1664,6 +1709,18 @@
       <require condition="GCC"/>
     </condition>
 
+    <condition id="ARMCA7 CMSIS">
+      <description>Generic ARM Cortex-A7 device startup and depends on CMSIS Core</description>
+      <require Dvendor="ARM:82" Dname="ARMCA7"/>
+      <require Cclass="CMSIS" Cgroup="CORE"/>
+    </condition>
+
+    <condition id="ARMCA9 CMSIS">
+      <description>Generic ARM Cortex-A9 device startup and depends on CMSIS Core</description>
+      <require Dvendor="ARM:82" Dname="ARMCA9"/>
+      <require Cclass="CMSIS" Cgroup="CORE"/>
+    </condition>
+    
     <!-- CMSIS DSP -->
     <condition id="CMSIS DSP">
       <description>Components required for DSP</description>
@@ -1718,6 +1775,14 @@
       </files>
     </component>
 
+    <component Cclass="CMSIS" Cgroup="CORE" Cversion="1.0.0"  condition="ARMv7-A Device" >
+      <description>CMSIS-CORE for Cortex-A</description>
+      <files>
+        <!-- CPU independent -->
+        <file category="include" name="CMSIS/CORE_A/Include/"/>
+      </files>
+    </component>
+
     <!-- CMSIS-Startup components -->
     <!-- Cortex-M0 -->
     <component Cclass="Device" Cgroup="Startup"                      Cversion="1.0.1" condition="ARMCM0 CMSIS">
@@ -2017,6 +2082,33 @@
       </files>
     </component>
 
+    <!-- Cortex-A7 -->
+    <component Cclass="Device" Cgroup="Startup"                      Cversion="1.0.0" condition="ARMCA7 CMSIS">
+      <description>System and Startup for Generic ARM Cortex-A7 device</description>
+      <files>
+        <!-- include folder / device header file -->
+        <file category="include"  name="Device/ARM/ARMCA7/Include/"/>
+        <!-- startup / system / mmu files -->
+        <file category="sourceAsm"    name="Device/ARM/ARMCA7/Source/ARM/startup_ARMCA7.s" version="1.0.0" attr="config" condition="ARMCC"/>
+        <file category="linkerScript" name="Device/ARM/ARMCA7/Source/ARM/ARMCA7.sct"       version="1.0.0" attr="config"/>        
+        <file category="sourceC"      name="Device/ARM/ARMCA7/Source/system_ARMCA7.c"      version="1.0.0" attr="config"/>
+        <file category="sourceC"      name="Device/ARM/ARMCA7/Source/mmu_ARMCA7.c"         version="1.0.0" attr="config"/>
+      </files>
+    </component>
+
+    <!-- Cortex-A9 -->
+    <component Cclass="Device" Cgroup="Startup"                      Cversion="1.0.0" condition="ARMCA9 CMSIS">
+      <description>System and Startup for Generic ARM Cortex-A9 device</description>
+      <files>
+        <!-- include folder / device header file -->
+        <file category="include"  name="Device/ARM/ARMCA9/Include/"/>
+        <!-- startup / system / mmu files -->
+        <file category="sourceAsm"    name="Device/ARM/ARMCA9/Source/ARM/startup_ARMCA9.s" version="1.0.0" attr="config" condition="ARMCC"/>
+        <file category="linkerScript" name="Device/ARM/ARMCA9/Source/ARM/ARMCA9.sct"       version="1.0.0" attr="config"/>
+        <file category="sourceC"      name="Device/ARM/ARMCA9/Source/system_ARMCA9.c"      version="1.0.0" attr="config"/>
+        <file category="sourceC"      name="Device/ARM/ARMCA9/Source/mmu_ARMCA9.c"         version="1.0.0" attr="config"/>
+      </files>
+    </component>
 
     <!-- CMSIS-DSP component -->
     <component Cclass="CMSIS" Cgroup="DSP" Cversion="1.5.1" condition="CMSIS DSP">
diff --git a/CMSIS/CORE_A/Include/cmsis_armcc.h b/CMSIS/CORE_A/Include/cmsis_armcc.h
new file mode 100644
index 0000000..7593b4c
--- /dev/null
+++ b/CMSIS/CORE_A/Include/cmsis_armcc.h
@@ -0,0 +1,697 @@
+/**************************************************************************//**
+ * @file     cmsis_armcc.h
+ * @brief    CMSIS compiler specific macros, functions, instructions
+ * @version  V1.00
+ * @date     22. Feb 2017
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CMSIS_ARMCC_H
+#define __CMSIS_ARMCC_H
+
+#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 400677)
+  #error "Please use ARM Compiler Toolchain V4.0.677 or later!"
+#endif
+
+/* CMSIS compiler control architecture macros */
+#if (defined (__TARGET_ARCH_7_A ) && (__TARGET_ARCH_7_A  == 1))
+  #define __ARM_ARCH_7A__           1
+#endif
+
+/* CMSIS compiler specific defines */
+#ifndef   __ASM
+  #define __ASM                     __asm
+#endif
+#ifndef   __INLINE
+  #define __INLINE                  __inline
+#endif
+#ifndef   __STATIC_INLINE
+  #define __STATIC_INLINE           static __inline
+#endif
+#ifndef   __STATIC_ASM
+  #define __STATIC_ASM              static __asm
+#endif
+#ifndef   __NO_RETURN
+  #define __NO_RETURN               __declspec(noreturn)
+#endif
+#ifndef   __USED
+  #define __USED                    __attribute__((used))
+#endif
+#ifndef   __WEAK
+  #define __WEAK                    __attribute__((weak))
+#endif
+#ifndef   __UNALIGNED_UINT32
+  #define __UNALIGNED_UINT32(x)     (*((__packed uint32_t *)(x)))
+#endif
+#ifndef   __ALIGNED
+  #define __ALIGNED(x)              __attribute__((aligned(x)))
+#endif
+#ifndef   __PACKED
+  #define __PACKED                  __attribute__((packed))
+#endif
+
+
+/* ###########################  Core Function Access  ########################### */
+/** \ingroup  CMSIS_Core_FunctionInterface
+    \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
+  @{
+ */
+
+/**
+  \brief   Get FPSCR
+  \details Returns the current value of the Floating Point Status/Control register.
+  \return               Floating Point Status/Control register value
+ */
+__STATIC_INLINE uint32_t __get_FPSCR(void)
+{
+#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
+     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
+  register uint32_t __regfpscr         __ASM("fpscr");
+  return(__regfpscr);
+#else
+   return(0U);
+#endif
+}
+
+/**
+  \brief   Set FPSCR
+  \details Assigns the given value to the Floating Point Status/Control register.
+  \param [in]    fpscr  Floating Point Status/Control value to set
+ */
+__STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
+{
+#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
+     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
+  register uint32_t __regfpscr         __ASM("fpscr");
+  __regfpscr = (fpscr);
+#else
+  (void)fpscr;
+#endif
+}
+
+/*@} end of CMSIS_Core_RegAccFunctions */
+
+
+/* ##########################  Core Instruction Access  ######################### */
+/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
+  Access to dedicated instructions
+  @{
+*/
+
+/**
+  \brief   No Operation
+  \details No Operation does nothing. This instruction can be used for code alignment purposes.
+ */
+#define __NOP                             __nop
+
+/**
+  \brief   Wait For Interrupt
+  \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
+ */
+#define __WFI                             __wfi
+
+/**
+  \brief   Wait For Event
+  \details Wait For Event is a hint instruction that permits the processor to enter
+           a low-power state until one of a number of events occurs.
+ */
+#define __WFE                             __wfe
+
+/**
+  \brief   Send Event
+  \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
+ */
+#define __SEV                             __sev
+
+/**
+  \brief   Instruction Synchronization Barrier
+  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
+           so that all instructions following the ISB are fetched from cache or memory,
+           after the instruction has been completed.
+ */
+#define __ISB() do {\
+                   __schedule_barrier();\
+                   __isb(0xF);\
+                   __schedule_barrier();\
+                } while (0U)
+
+/**
+  \brief   Data Synchronization Barrier
+  \details Acts as a special kind of Data Memory Barrier.
+           It completes when all explicit memory accesses before this instruction complete.
+ */
+#define __DSB() do {\
+                   __schedule_barrier();\
+                   __dsb(0xF);\
+                   __schedule_barrier();\
+                } while (0U)
+
+/**
+  \brief   Data Memory Barrier
+  \details Ensures the apparent order of the explicit memory operations before
+           and after the instruction, without ensuring their completion.
+ */
+#define __DMB() do {\
+                   __schedule_barrier();\
+                   __dmb(0xF);\
+                   __schedule_barrier();\
+                } while (0U)
+
+/**
+  \brief   Reverse byte order (32 bit)
+  \details Reverses the byte order in integer value.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+#define __REV                             __rev
+
+/**
+  \brief   Reverse byte order (16 bit)
+  \details Reverses the byte order in two unsigned short values.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+#ifndef __NO_EMBEDDED_ASM
+__attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value)
+{
+  rev16 r0, r0
+  bx lr
+}
+#endif
+
+/**
+  \brief   Reverse byte order in signed short value
+  \details Reverses the byte order in a signed short value with sign extension to integer.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+#ifndef __NO_EMBEDDED_ASM
+__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int32_t __REVSH(int32_t value)
+{
+  revsh r0, r0
+  bx lr
+}
+#endif
+
+/**
+  \brief   Rotate Right in unsigned value (32 bit)
+  \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
+  \param [in]    op1  Value to rotate
+  \param [in]    op2  Number of Bits to rotate
+  \return               Rotated value
+ */
+#define __ROR                             __ror
+
+/**
+  \brief   Breakpoint
+  \details Causes the processor to enter Debug state.
+           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
+  \param [in]    value  is ignored by the processor.
+                 If required, a debugger can use it to store additional information about the breakpoint.
+ */
+#define __BKPT(value)                       __breakpoint(value)
+
+/**
+  \brief   Reverse bit order of value
+  \details Reverses the bit order of the given value.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
+{
+  uint32_t result;
+  int32_t s = (4 /*sizeof(v)*/ * 8) - 1; /* extra shift needed at end */
+
+  result = value;                      /* r will be reversed bits of v; first get LSB of v */
+  for (value >>= 1U; value; value >>= 1U)
+  {
+    result <<= 1U;
+    result |= value & 1U;
+    s--;
+  }
+  result <<= s;                        /* shift when v's highest bits are zero */
+  return(result);
+}
+
+/**
+  \brief   Count leading zeros
+  \details Counts the number of leading zeros of a data value.
+  \param [in]  value  Value to count the leading zeros
+  \return             number of leading zeros in value
+ */
+#define __CLZ                             __clz
+
+/** \brief  Get CPSR Register
+
+    This function returns the content of the CPSR Register.
+
+    \return               CPSR Register value
+ */
+__STATIC_INLINE uint32_t __get_CPSR(void)
+{
+  register uint32_t __regCPSR          __ASM("cpsr");
+  return(__regCPSR);
+}
+
+/** \brief  Set Stack Pointer
+
+    This function assigns the given value to the current stack pointer.
+
+    \param [in]    topOfStack  Stack Pointer value to set
+ */
+register uint32_t __regSP              __ASM("sp");
+__STATIC_INLINE void __set_SP(uint32_t topOfStack)
+{
+    __regSP = topOfStack;
+}
+
+
+/** \brief  Get link register
+
+    This function returns the value of the link register
+
+    \return    Value of link register
+ */
+register uint32_t __reglr         __ASM("lr");
+__STATIC_INLINE uint32_t __get_LR(void)
+{
+  return(__reglr);
+}
+
+/** \brief  Set link register
+
+    This function sets the value of the link register
+
+    \param [in]    lr  LR value to set
+ */
+__STATIC_INLINE void __set_LR(uint32_t lr)
+{
+  __reglr = lr;
+}
+
+/** \brief  Set Process Stack Pointer
+
+    This function assigns the given value to the USR/SYS Stack Pointer (PSP).
+
+    \param [in]    topOfProcStack  USR/SYS Stack Pointer value to set
+ */
+__STATIC_ASM void __set_PSP(uint32_t topOfProcStack)
+{
+  ARM
+  PRESERVE8
+
+  BIC     R0, R0, #7  ;ensure stack is 8-byte aligned
+  MRS     R1, CPSR
+  CPS     #0x1F       ;no effect in USR mode
+  MOV     SP, R0
+  MSR     CPSR_c, R1  ;no effect in USR mode
+  ISB
+  BX      LR
+}
+
+/** \brief  Set User Mode
+
+    This function changes the processor state to User Mode
+ */
+__STATIC_ASM void __set_CPS_USR(void)
+{
+  ARM
+
+  CPS  #0x10
+  BX   LR
+}
+
+/** \brief  Get FPEXC
+
+    This function returns the current value of the Floating Point Exception Control register.
+
+    \return               Floating Point Exception Control register value
+ */
+__STATIC_INLINE uint32_t __get_FPEXC(void)
+{
+#if (__FPU_PRESENT == 1)
+  register uint32_t __regfpexc         __ASM("fpexc");
+  return(__regfpexc);
+#else
+  return(0);
+#endif
+}
+
+/** \brief  Set FPEXC
+
+    This function assigns the given value to the Floating Point Exception Control register.
+
+    \param [in]    fpexc  Floating Point Exception Control value to set
+ */
+__STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
+{
+#if (__FPU_PRESENT == 1)
+  register uint32_t __regfpexc         __ASM("fpexc");
+  __regfpexc = (fpexc);
+#endif
+}
+
+/** \brief  Get CPACR
+
+    This function returns the current value of the Coprocessor Access Control register.
+
+    \return               Coprocessor Access Control register value
+ */
+__STATIC_INLINE uint32_t __get_CPACR(void)
+{
+  register uint32_t __regCPACR         __ASM("cp15:0:c1:c0:2");
+  return __regCPACR;
+}
+
+/** \brief  Set CPACR
+
+    This function assigns the given value to the Coprocessor Access Control register.
+
+    \param [in]    cpacr  Coprocessor Access Control value to set
+ */
+__STATIC_INLINE void __set_CPACR(uint32_t cpacr)
+{
+  register uint32_t __regCPACR         __ASM("cp15:0:c1:c0:2");
+  __regCPACR = cpacr;
+}
+
+/** \brief  Get CBAR
+
+    This function returns the value of the Configuration Base Address register.
+
+    \return               Configuration Base Address register value
+ */
+__STATIC_INLINE uint32_t __get_CBAR(void) {
+  register uint32_t __regCBAR         __ASM("cp15:4:c15:c0:0");
+  return(__regCBAR);
+}
+
+/** \brief  Get TTBR0
+
+    This function returns the value of the Translation Table Base Register 0.
+
+    \return               Translation Table Base Register 0 value
+ */
+__STATIC_INLINE uint32_t __get_TTBR0(void) {
+  register uint32_t __regTTBR0        __ASM("cp15:0:c2:c0:0");
+  return(__regTTBR0);
+}
+
+/** \brief  Set TTBR0
+
+    This function assigns the given value to the Translation Table Base Register 0.
+
+    \param [in]    ttbr0  Translation Table Base Register 0 value to set
+ */
+__STATIC_INLINE void __set_TTBR0(uint32_t ttbr0) {
+  register uint32_t __regTTBR0        __ASM("cp15:0:c2:c0:0");
+  __regTTBR0 = ttbr0;
+}
+
+/** \brief  Get DACR
+
+    This function returns the value of the Domain Access Control Register.
+
+    \return               Domain Access Control Register value
+ */
+__STATIC_INLINE uint32_t __get_DACR(void) {
+  register uint32_t __regDACR         __ASM("cp15:0:c3:c0:0");
+  return(__regDACR);
+}
+
+/** \brief  Set DACR
+
+    This function assigns the given value to the Domain Access Control Register.
+
+    \param [in]    dacr   Domain Access Control Register value to set
+ */
+__STATIC_INLINE void __set_DACR(uint32_t dacr) {
+  register uint32_t __regDACR         __ASM("cp15:0:c3:c0:0");
+  __regDACR = dacr;
+}
+
+/** \brief  Set SCTLR
+
+    This function assigns the given value to the System Control Register.
+
+    \param [in]    sctlr  System Control Register value to set
+ */
+__STATIC_INLINE void __set_SCTLR(uint32_t sctlr)
+{
+  register uint32_t __regSCTLR         __ASM("cp15:0:c1:c0:0");
+  __regSCTLR = sctlr;
+}
+
+/** \brief  Get SCTLR
+
+    This function returns the value of the System Control Register.
+
+    \return               System Control Register value
+ */
+__STATIC_INLINE uint32_t __get_SCTLR(void) {
+  register uint32_t __regSCTLR         __ASM("cp15:0:c1:c0:0");
+  return(__regSCTLR);
+}
+
+/** \brief  Set CNTP_TVAL
+
+  This function assigns the given value to PL1 Physical Timer Value Register (CNTP_TVAL).
+
+  \param [in]    value  CNTP_TVAL Register value to set
+*/
+__STATIC_INLINE void __set_CNTP_TVAL(uint32_t value) {
+  register uint32_t __regCNTP_TVAL         __ASM("cp15:0:c14:c2:0");
+  __regCNTP_TVAL = value;
+}
+
+/** \brief  Get CNTP_TVAL
+
+    This function returns the value of the PL1 Physical Timer Value Register (CNTP_TVAL).
+
+    \return               CNTP_TVAL Register value
+ */
+__STATIC_INLINE uint32_t __get_CNTP_TVAL(void) {
+  register uint32_t __regCNTP_TVAL         __ASM("cp15:0:c14:c2:0");
+  return(__regCNTP_TVAL);
+}
+
+/** \brief  Set CNTP_CTL
+
+  This function assigns the given value to PL1 Physical Timer Control Register (CNTP_CTL).
+
+  \param [in]    value  CNTP_CTL Register value to set
+*/
+__STATIC_INLINE void __set_CNTP_CTL(uint32_t value) {
+  register uint32_t __regCNTP_CTL          __ASM("cp15:0:c14:c2:1");
+  __regCNTP_CTL = value;
+}
+
+/** \brief  Set TLBIALL
+
+  TLB Invalidate All
+ */
+__STATIC_INLINE void __set_TLBIALL(uint32_t value) {
+  register uint32_t __TLBIALL              __ASM("cp15:0:c8:c7:0");
+  __TLBIALL = value;
+}
+
+/** \brief  Set BPIALL.
+
+  Branch Predictor Invalidate All
+ */
+__STATIC_INLINE void __set_BPIALL(uint32_t value) {
+  register uint32_t __BPIALL            __ASM("cp15:0:c7:c5:6");
+  __BPIALL = value;
+}
+
+/** \brief  Set ICIALLU
+
+  Instruction Cache Invalidate All
+ */
+__STATIC_INLINE void __set_ICIALLU(uint32_t value) {
+  register uint32_t __ICIALLU         __ASM("cp15:0:c7:c5:0");
+  __ICIALLU = value;
+}
+
+/** \brief  Set DCCMVAC
+
+  Data cache clean
+ */
+__STATIC_INLINE void __set_DCCMVAC(uint32_t value) {
+  register uint32_t __DCCMVAC         __ASM("cp15:0:c7:c10:1");
+  __DCCMVAC = value;
+}
+
+/** \brief  Set DCIMVAC
+
+  Data cache invalidate
+ */
+__STATIC_INLINE void __set_DCIMVAC(uint32_t value) {
+  register uint32_t __DCIMVAC         __ASM("cp15:0:c7:c6:1");
+  __DCIMVAC = value;
+}
+
+/** \brief  Set DCCIMVAC
+
+  Data cache clean and invalidate
+ */
+__STATIC_INLINE void __set_DCCIMVAC(uint32_t value) {
+  register uint32_t __DCCIMVAC        __ASM("cp15:0:c7:c14:1");
+  __DCCIMVAC = value;
+}
+
+/** \brief  Clean and Invalidate the entire data or unified cache
+
+  Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency
+ */
+#pragma push
+#pragma arm
+__STATIC_ASM void __L1C_CleanInvalidateCache(uint32_t op) {
+        ARM
+
+        PUSH    {R4-R11}
+
+        MRC     p15, 1, R6, c0, c0, 1      // Read CLIDR
+        ANDS    R3, R6, #0x07000000        // Extract coherency level
+        MOV     R3, R3, LSR #23            // Total cache levels << 1
+        BEQ     Finished                   // If 0, no need to clean
+
+        MOV     R10, #0                    // R10 holds current cache level << 1
+Loop1   ADD     R2, R10, R10, LSR #1       // R2 holds cache "Set" position
+        MOV     R1, R6, LSR R2             // Bottom 3 bits are the Cache-type for this level
+        AND     R1, R1, #7                 // Isolate those lower 3 bits
+        CMP     R1, #2
+        BLT     Skip                       // No cache or only instruction cache at this level
+
+        MCR     p15, 2, R10, c0, c0, 0     // Write the Cache Size selection register
+        ISB                                // ISB to sync the change to the CacheSizeID reg
+        MRC     p15, 1, R1, c0, c0, 0      // Reads current Cache Size ID register
+        AND     R2, R1, #7                 // Extract the line length field
+        ADD     R2, R2, #4                 // Add 4 for the line length offset (log2 16 bytes)
+        LDR     R4, =0x3FF
+        ANDS    R4, R4, R1, LSR #3         // R4 is the max number of the way size (right aligned)
+        CLZ     R5, R4                     // R5 is the bit position of the way size increment
+        LDR     R7, =0x7FFF
+        ANDS    R7, R7, R1, LSR #13        // R7 is the max number of the index size (right aligned)
+
+Loop2   MOV     R9, R4                     // R9 working copy of the max way size (right aligned)
+
+Loop3   ORR     R11, R10, R9, LSL R5       // Factor in the Way number and cache number into R11
+        ORR     R11, R11, R7, LSL R2       // Factor in the Set number
+        CMP     R0, #0
+        BNE     Dccsw
+        MCR     p15, 0, R11, c7, c6, 2     // DCISW. Invalidate by Set/Way
+        B       cont
+Dccsw   CMP     R0, #1
+        BNE     Dccisw
+        MCR     p15, 0, R11, c7, c10, 2    // DCCSW. Clean by Set/Way
+        B       cont
+Dccisw  MCR     p15, 0, R11, c7, c14, 2    // DCCISW. Clean and Invalidate by Set/Way
+cont    SUBS    R9, R9, #1                 // Decrement the Way number
+        BGE     Loop3
+        SUBS    R7, R7, #1                 // Decrement the Set number
+        BGE     Loop2
+Skip    ADD     R10, R10, #2               // Increment the cache number
+        CMP     R3, R10
+        BGT     Loop1
+
+Finished
+        DSB
+        POP    {R4-R11}
+        BX     lr
+}
+#pragma pop
+
+/** \brief  Enable Floating Point Unit
+
+  Critical section, called from undef handler, so systick is disabled
+ */
+#pragma push
+#pragma arm
+__STATIC_ASM void __FPU_Enable(void) {
+        ARM
+
+        //Permit access to VFP/NEON, registers by modifying CPACR
+        MRC     p15,0,R1,c1,c0,2
+        ORR     R1,R1,#0x00F00000
+        MCR     p15,0,R1,c1,c0,2
+
+        //Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
+        ISB
+
+        //Enable VFP/NEON
+        VMRS    R1,FPEXC
+        ORR     R1,R1,#0x40000000
+        VMSR    FPEXC,R1
+
+        //Initialise VFP/NEON registers to 0
+        MOV     R2,#0
+  IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} >= 16
+        //Initialise D16 registers to 0
+        VMOV    D0, R2,R2
+        VMOV    D1, R2,R2
+        VMOV    D2, R2,R2
+        VMOV    D3, R2,R2
+        VMOV    D4, R2,R2
+        VMOV    D5, R2,R2
+        VMOV    D6, R2,R2
+        VMOV    D7, R2,R2
+        VMOV    D8, R2,R2
+        VMOV    D9, R2,R2
+        VMOV    D10,R2,R2
+        VMOV    D11,R2,R2
+        VMOV    D12,R2,R2
+        VMOV    D13,R2,R2
+        VMOV    D14,R2,R2
+        VMOV    D15,R2,R2
+  ENDIF
+  IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
+        //Initialise D32 registers to 0
+        VMOV    D16,R2,R2
+        VMOV    D17,R2,R2
+        VMOV    D18,R2,R2
+        VMOV    D19,R2,R2
+        VMOV    D20,R2,R2
+        VMOV    D21,R2,R2
+        VMOV    D22,R2,R2
+        VMOV    D23,R2,R2
+        VMOV    D24,R2,R2
+        VMOV    D25,R2,R2
+        VMOV    D26,R2,R2
+        VMOV    D27,R2,R2
+        VMOV    D28,R2,R2
+        VMOV    D29,R2,R2
+        VMOV    D30,R2,R2
+        VMOV    D31,R2,R2
+  ENDIF
+
+        //Initialise FPSCR to a known state
+        VMRS    R2,FPSCR
+        LDR     R3,=0x00086060 //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
+        AND     R2,R2,R3
+        VMSR    FPSCR,R2
+
+        BX      LR
+}
+#pragma pop
+
+/*@}*/ /* end of group CMSIS_Core_InstructionInterface */
+
+
+#endif /* __CMSIS_ARMCC_H */
diff --git a/CMSIS/CORE_A/Include/cmsis_compiler.h b/CMSIS/CORE_A/Include/cmsis_compiler.h
new file mode 100644
index 0000000..9fa0e0e
--- /dev/null
+++ b/CMSIS/CORE_A/Include/cmsis_compiler.h
@@ -0,0 +1,211 @@
+/**************************************************************************//**
+ * @file     cmsis_compiler.h
+ * @brief    CMSIS compiler specific macros, functions, instructions
+ * @version  V1.00
+ * @date     22. Feb 2017
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CMSIS_COMPILER_H
+#define __CMSIS_COMPILER_H
+
+#include <stdint.h>
+
+/*
+ * ARM Compiler 4/5
+ */
+#if   defined ( __CC_ARM )
+  #include "cmsis_armcc.h"
+
+
+/*
+ * ARM Compiler 6 (armclang)
+ */
+#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+  #include "cmsis_armclang.h"
+
+
+/*
+ * GNU Compiler
+ */
+#elif defined ( __GNUC__ )
+  #include "cmsis_gcc.h"
+
+
+/*
+ * IAR Compiler
+ */
+#elif defined ( __ICCARM__ )
+
+  #ifndef   __ASM
+    #define __ASM                     __asm
+  #endif
+  #ifndef   __INLINE
+    #define __INLINE                  inline
+  #endif
+  #ifndef   __STATIC_INLINE
+    #define __STATIC_INLINE           static inline
+  #endif
+
+  #include <cmsis_iar.h>
+
+  #ifndef   __NO_RETURN
+    #define __NO_RETURN               __noreturn
+  #endif
+  #ifndef   __USED
+    #define __USED                    __root
+  #endif
+  #ifndef   __WEAK
+    #define __WEAK                    __weak
+  #endif
+  #ifndef   __UNALIGNED_UINT32
+    __packed struct T_UINT32 { uint32_t v; };
+      #define __UNALIGNED_UINT32(x)     (((struct T_UINT32 *)(x))->v)
+  #endif
+  #ifndef   __ALIGNED
+    #warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored.
+    #define __ALIGNED(x)
+  #endif
+  #ifndef   __PACKED
+    #define __PACKED                  __packed
+  #endif
+
+
+/*
+ * TI ARM Compiler
+ */
+#elif defined ( __TI_ARM__ )
+  #include <cmsis_ccs.h>
+
+  #ifndef   __ASM
+    #define __ASM                     __asm
+  #endif
+  #ifndef   __INLINE
+    #define __INLINE                  inline
+  #endif
+  #ifndef   __STATIC_INLINE
+    #define __STATIC_INLINE           static inline
+  #endif
+  #ifndef   __NO_RETURN
+    #define __NO_RETURN               __attribute__((noreturn))
+  #endif
+  #ifndef   __USED
+    #define __USED                    __attribute__((used))
+  #endif
+  #ifndef   __WEAK
+    #define __WEAK                    __attribute__((weak))
+  #endif
+  #ifndef   __UNALIGNED_UINT32
+    struct __attribute__((packed)) T_UINT32 { uint32_t v; };
+    #define __UNALIGNED_UINT32(x)     (((struct T_UINT32 *)(x))->v)
+  #endif
+  #ifndef   __ALIGNED
+    #define __ALIGNED(x)              __attribute__((aligned(x)))
+  #endif
+  #ifndef   __PACKED
+    #define __PACKED                  __attribute__((packed))
+  #endif
+
+
+/*
+ * TASKING Compiler
+ */
+#elif defined ( __TASKING__ )
+  /*
+   * The CMSIS functions have been implemented as intrinsics in the compiler.
+   * Please use "carm -?i" to get an up to date list of all intrinsics,
+   * Including the CMSIS ones.
+   */
+
+  #ifndef   __ASM
+    #define __ASM                     __asm
+  #endif
+  #ifndef   __INLINE
+    #define __INLINE                  inline
+  #endif
+  #ifndef   __STATIC_INLINE
+    #define __STATIC_INLINE           static inline
+  #endif
+  #ifndef   __NO_RETURN
+    #define __NO_RETURN               __attribute__((noreturn))
+  #endif
+  #ifndef   __USED
+    #define __USED                    __attribute__((used))
+  #endif
+  #ifndef   __WEAK
+    #define __WEAK                    __attribute__((weak))
+  #endif
+  #ifndef   __UNALIGNED_UINT32
+    struct __packed__ T_UINT32 { uint32_t v; };
+    #define __UNALIGNED_UINT32(x)     (((struct T_UINT32 *)(x))->v)
+  #endif
+  #ifndef   __ALIGNED
+    #define __ALIGNED(x)              __align(x)
+  #endif
+  #ifndef   __PACKED
+    #define __PACKED                  __packed__
+  #endif
+
+
+/*
+ * COSMIC Compiler
+ */
+#elif defined ( __CSMC__ )
+   #include <cmsis_csm.h>
+
+ #ifndef   __ASM
+    #define __ASM                     _asm
+  #endif
+  #ifndef   __INLINE
+    #define __INLINE                  inline
+  #endif
+  #ifndef   __STATIC_INLINE
+    #define __STATIC_INLINE           static inline
+  #endif
+  #ifndef   __NO_RETURN
+    // NO RETURN is automatically detected hence no warning here
+    #define __NO_RETURN
+  #endif
+  #ifndef   __USED
+    #warning No compiler specific solution for __USED. __USED is ignored.
+    #define __USED
+  #endif
+  #ifndef   __WEAK
+    #define __WEAK                    __weak
+  #endif
+  #ifndef   __UNALIGNED_UINT32
+    @packed struct T_UINT32 { uint32_t v; };
+    #define __UNALIGNED_UINT32(x)     (((struct T_UINT32 *)(x))->v)
+  #endif
+  #ifndef   __ALIGNED
+    #warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored.
+    #define __ALIGNED(x)
+  #endif
+  #ifndef   __PACKED
+    #define __PACKED                  @packed
+  #endif
+
+
+#else
+  #error Unknown compiler.
+#endif
+
+
+#endif /* __CMSIS_COMPILER_H */
+
diff --git a/CMSIS/CORE_A/Include/core_ca.h b/CMSIS/CORE_A/Include/core_ca.h
new file mode 100644
index 0000000..7d66bf6
--- /dev/null
+++ b/CMSIS/CORE_A/Include/core_ca.h
@@ -0,0 +1,2029 @@
+/**************************************************************************//**
+ * @file     core_ca.h
+ * @brief    CMSIS Cortex-A Core Peripheral Access Layer Header File
+ * @version  V1.00
+ * @date     22. Feb 2017
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined ( __ICCARM__ )
+ #pragma system_include  /* treat file as system include file for MISRA check */
+#endif
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#ifndef __CORE_CA_H_GENERIC
+#define __CORE_CA_H_GENERIC
+
+
+/** \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions
+  CMSIS violates the following MISRA-C:2004 rules:
+
+   \li Required Rule 8.5, object/function definition in header file.<br>
+     Function definitions in header files are used to allow 'inlining'.
+
+   \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
+     Unions are used for effective representation of core registers.
+
+   \li Advisory Rule 19.7, Function-like macro defined.<br>
+     Function-like macros are used to allow more efficient code.
+ */
+
+
+/*******************************************************************************
+ *                 CMSIS definitions
+ ******************************************************************************/
+/** \ingroup Cortex_A
+  @{
+ */
+
+/*  CMSIS CA definitions */
+#define __CA_CMSIS_VERSION_MAIN  (1U)                                      /*!< [31:16] CMSIS HAL main version   */
+#define __CA_CMSIS_VERSION_SUB   (0U)                                      /*!< [15:0]  CMSIS HAL sub version    */
+#define __CA_CMSIS_VERSION       ((__CA_CMSIS_VERSION_MAIN << 16U) | \
+                                   __CA_CMSIS_VERSION_SUB          )       /*!< CMSIS HAL version number         */
+
+/** __FPU_USED indicates whether an FPU is used or not. For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions.
+*/
+#if defined ( __CC_ARM )
+  #if defined __TARGET_FPU_VFP
+    #if (__FPU_PRESENT == 1)
+      #define __FPU_USED       1U
+    #else
+      #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+      #define __FPU_USED       0U
+    #endif
+  #else
+    #define __FPU_USED         0U
+  #endif
+
+#elif defined ( __ICCARM__ )
+  #if defined __ARMVFP__
+    #if (__FPU_PRESENT == 1)
+      #define __FPU_USED       1U
+    #else
+      #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+      #define __FPU_USED       0U
+    #endif
+  #else
+    #define __FPU_USED         0U
+  #endif
+
+#elif defined ( __TMS470__ )
+  #if defined __TI_VFP_SUPPORT__
+    #if (__FPU_PRESENT == 1)
+      #define __FPU_USED       1U
+    #else
+      #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+      #define __FPU_USED       0U
+    #endif
+  #else
+    #define __FPU_USED         0U
+  #endif
+
+#elif defined ( __GNUC__ )
+  #if defined (__VFP_FP__) && !defined(__SOFTFP__)
+    #if (__FPU_PRESENT == 1)
+      #define __FPU_USED       1U
+    #else
+      #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+      #define __FPU_USED       0U
+    #endif
+  #else
+    #define __FPU_USED         0U
+  #endif
+
+#elif defined ( __TASKING__ )
+  #if defined __FPU_VFP__
+    #if (__FPU_PRESENT == 1)
+      #define __FPU_USED       1U
+    #else
+      #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+      #define __FPU_USED       0U
+    #endif
+  #else
+    #define __FPU_USED         0U
+  #endif
+#endif
+
+#include "cmsis_compiler.h"               /* CMSIS compiler specific defines */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CA_H_GENERIC */
+
+#ifndef __CMSIS_GENERIC
+
+#ifndef __CORE_CA_H_DEPENDANT
+#define __CORE_CA_H_DEPENDANT
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+ /* check device defines and use defaults */
+#if defined __CHECK_DEVICE_DEFINES
+  #ifndef __CA_REV
+    #define __CA_REV              0x0000U
+    #warning "__CA_REV not defined in device header file; using default!"
+  #endif
+  
+  #ifndef __FPU_PRESENT
+    #define __FPU_PRESENT             0U
+    #warning "__FPU_PRESENT not defined in device header file; using default!"
+  #endif
+  
+  #ifndef __MPU_PRESENT
+    #define __MPU_PRESENT             0U
+    #warning "__MPU_PRESENT not defined in device header file; using default!"
+  #endif
+#endif
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CMSIS_glob_defs CMSIS Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+  #define   __I     volatile             /*!< Defines 'read only' permissions */
+#else
+  #define   __I     volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O     volatile             /*!< Defines 'write only' permissions */
+#define     __IO    volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const      /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile            /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile            /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group Cortex_A */
+
+
+ /*******************************************************************************
+  *                 Register Abstraction
+   Core Register contain:
+   - CPSR
+   - CP15 Registers
+   - L2C-310 Cache Controller
+   - Generic Interrupt Controller Distributor
+   - Generic Interrupt Controller Interface
+  ******************************************************************************/
+ /**
+   \defgroup CMSIS_core_register Defines and Type Definitions
+   \brief Type definitions and defines for Cortex-A processor based devices.
+ */
+
+/* Core Register CPSR */
+typedef union
+{
+  struct
+  {
+    uint32_t M:5;                        /*!< bit:  0.. 4  Mode field */
+    uint32_t T:1;                        /*!< bit:      5  Thumb execution state bit */
+    uint32_t F:1;                        /*!< bit:      6  FIQ mask bit */
+    uint32_t I:1;                        /*!< bit:      7  IRQ mask bit */
+    uint32_t A:1;                        /*!< bit:      8  Asynchronous abort mask bit */
+    uint32_t E:1;                        /*!< bit:      9  Endianness execution state bit */
+    uint32_t IT1:6;                      /*!< bit: 10..15  If-Then execution state bits 2-7 */
+    uint32_t GE:4;                       /*!< bit: 16..19  Greater than or Equal flags */
+    uint32_t _reserved0:4;               /*!< bit: 20..23  Reserved */
+    uint32_t J:1;                        /*!< bit:     24  Jazelle bit */
+    uint32_t IT0:2;                      /*!< bit: 25..26  If-Then execution state bits 0-1 */
+    uint32_t Q:1;                        /*!< bit:     27  Saturation condition flag */
+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag */
+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag */
+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag */
+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag */
+  } b;                                   /*!< Structure used for bit  access */
+  uint32_t w;                            /*!< Type      used for word access */
+} CPSR_Type;
+
+/* CPSR Register Definitions */
+#define CPSR_N_Pos                       31U                                    /*!< CPSR: N Position */
+#define CPSR_N_Msk                       (1UL << CPSR_N_Pos)                    /*!< CPSR: N Mask */
+
+#define CPSR_Z_Pos                       30U                                    /*!< CPSR: Z Position */
+#define CPSR_Z_Msk                       (1UL << CPSR_Z_Pos)                    /*!< CPSR: Z Mask */
+
+#define CPSR_C_Pos                       29U                                    /*!< CPSR: C Position */
+#define CPSR_C_Msk                       (1UL << CPSR_C_Pos)                    /*!< CPSR: C Mask */
+
+#define CPSR_V_Pos                       28U                                    /*!< CPSR: V Position */
+#define CPSR_V_Msk                       (1UL << CPSR_V_Pos)                    /*!< CPSR: V Mask */
+
+#define CPSR_Q_Pos                       27U                                    /*!< CPSR: Q Position */
+#define CPSR_Q_Msk                       (1UL << CPSR_Q_Pos)                    /*!< CPSR: Q Mask */
+
+#define CPSR_IT0_Pos                     25U                                    /*!< CPSR: IT0 Position */
+#define CPSR_IT0_Msk                     (3UL << CPSR_IT0_Pos)                  /*!< CPSR: IT0 Mask */
+
+#define CPSR_J_Pos                       24U                                    /*!< CPSR: J Position */
+#define CPSR_J_Msk                       (1UL << CPSR_J_Pos)                    /*!< CPSR: J Mask */
+
+#define CPSR_GE_Pos                      16U                                    /*!< CPSR: GE Position */
+#define CPSR_GE_Msk                      (0xFUL << CPSR_GE_Pos)                 /*!< CPSR: GE Mask */
+
+#define CPSR_IT1_Pos                     10U                                    /*!< CPSR: IT1 Position */
+#define CPSR_IT1_Msk                     (0x3FUL << CPSR_IT1_Pos)               /*!< CPSR: IT1 Mask */
+
+#define CPSR_E_Pos                       9U                                     /*!< CPSR: E Position */
+#define CPSR_E_Msk                       (1UL << CPSR_E_Pos)                    /*!< CPSR: E Mask */
+
+#define CPSR_A_Pos                       8U                                     /*!< CPSR: A Position */
+#define CPSR_A_Msk                       (1UL << CPSR_A_Pos)                    /*!< CPSR: A Mask */
+
+#define CPSR_I_Pos                       7U                                     /*!< CPSR: I Position */
+#define CPSR_I_Msk                       (1UL << CPSR_I_Pos)                    /*!< CPSR: I Mask */
+
+#define CPSR_F_Pos                       6U                                     /*!< CPSR: F Position */
+#define CPSR_F_Msk                       (1UL << CPSR_F_Pos)                    /*!< CPSR: F Mask */
+
+#define CPSR_T_Pos                       5U                                     /*!< CPSR: T Position */
+#define CPSR_T_Msk                       (1UL << CPSR_T_Pos)                    /*!< CPSR: T Mask */
+
+#define CPSR_M_Pos                       0U                                     /*!< CPSR: M Position */
+#define CPSR_M_Msk                       (0x1FUL << CPSR_M_Pos)                 /*!< CPSR: M Mask */
+
+/* CP15 Register SCTLR */
+typedef union
+{
+  struct
+  {
+    uint32_t M:1;                        /*!< bit:     0  MMU enable */
+    uint32_t A:1;                        /*!< bit:     1  Alignment check enable */
+    uint32_t C:1;                        /*!< bit:     2  Cache enable */
+    uint32_t _reserved0:2;               /*!< bit: 3.. 4  Reserved */
+    uint32_t CP15BEN:1;                  /*!< bit:     5  CP15 barrier enable */
+    uint32_t _reserved1:1;               /*!< bit:     6  Reserved */
+    uint32_t B:1;                        /*!< bit:     7  Endianness model */
+    uint32_t _reserved2:2;               /*!< bit: 8.. 9  Reserved */
+    uint32_t SW:1;                       /*!< bit:    10  SWP and SWPB enable */
+    uint32_t Z:1;                        /*!< bit:    11  Branch prediction enable */
+    uint32_t I:1;                        /*!< bit:    12  Instruction cache enable */
+    uint32_t V:1;                        /*!< bit:    13  Vectors bit */
+    uint32_t RR:1;                       /*!< bit:    14  Round Robin select */
+    uint32_t _reserved3:2;               /*!< bit:15..16  Reserved */
+    uint32_t HA:1;                       /*!< bit:    17  Hardware Access flag enable */
+    uint32_t _reserved4:1;               /*!< bit:    18  Reserved */
+    uint32_t WXN:1;                      /*!< bit:    19  Write permission implies XN */
+    uint32_t UWXN:1;                     /*!< bit:    20  Unprivileged write permission implies PL1 XN */
+    uint32_t FI:1;                       /*!< bit:    21  Fast interrupts configuration enable */
+    uint32_t U:1;                        /*!< bit:    22  Alignment model */
+    uint32_t _reserved5:1;               /*!< bit:    23  Reserved */
+    uint32_t VE:1;                       /*!< bit:    24  Interrupt Vectors Enable */
+    uint32_t EE:1;                       /*!< bit:    25  Exception Endianness */
+    uint32_t _reserved6:1;               /*!< bit:    26  Reserved */
+    uint32_t NMFI:1;                     /*!< bit:    27  Non-maskable FIQ (NMFI) support */
+    uint32_t TRE:1;                      /*!< bit:    28  TEX remap enable. */
+    uint32_t AFE:1;                      /*!< bit:    29  Access flag enable */
+    uint32_t TE:1;                       /*!< bit:    30  Thumb Exception enable */
+    uint32_t _reserved7:1;               /*!< bit:    31  Reserved */
+  } b;                                   /*!< Structure used for bit  access */
+  uint32_t w;                            /*!< Type      used for word access */
+} SCTLR_Type;
+
+#define SCTLR_TE_Pos                     30U                                    /*!< SCTLR: TE Position */
+#define SCTLR_TE_Msk                     (1UL << SCTLR_TE_Pos)                  /*!< SCTLR: TE Mask */
+
+#define SCTLR_AFE_Pos                    29U                                    /*!< SCTLR: AFE Position */
+#define SCTLR_AFE_Msk                    (1UL << SCTLR_AFE_Pos)                 /*!< SCTLR: AFE Mask */
+
+#define SCTLR_TRE_Pos                    28U                                    /*!< SCTLR: TRE Position */
+#define SCTLR_TRE_Msk                    (1UL << SCTLR_TRE_Pos)                 /*!< SCTLR: TRE Mask */
+
+#define SCTLR_NMFI_Pos                   27U                                    /*!< SCTLR: NMFI Position */
+#define SCTLR_NMFI_Msk                   (1UL << SCTLR_NMFI_Pos)                /*!< SCTLR: NMFI Mask */
+
+#define SCTLR_EE_Pos                     25U                                    /*!< SCTLR: EE Position */
+#define SCTLR_EE_Msk                     (1UL << SCTLR_EE_Pos)                  /*!< SCTLR: EE Mask */
+
+#define SCTLR_VE_Pos                     24U                                    /*!< SCTLR: VE Position */
+#define SCTLR_VE_Msk                     (1UL << SCTLR_VE_Pos)                  /*!< SCTLR: VE Mask */
+
+#define SCTLR_U_Pos                      22U                                    /*!< SCTLR: U Position */
+#define SCTLR_U_Msk                      (1UL << SCTLR_U_Pos)                   /*!< SCTLR: U Mask */
+
+#define SCTLR_FI_Pos                     21U                                    /*!< SCTLR: FI Position */
+#define SCTLR_FI_Msk                     (1UL << SCTLR_FI_Pos)                  /*!< SCTLR: FI Mask */
+
+#define SCTLR_UWXN_Pos                   20U                                    /*!< SCTLR: UWXN Position */
+#define SCTLR_UWXN_Msk                   (1UL << SCTLR_UWXN_Pos)                /*!< SCTLR: UWXN Mask */
+
+#define SCTLR_WXN_Pos                    19U                                    /*!< SCTLR: WXN Position */
+#define SCTLR_WXN_Msk                    (1UL << SCTLR_WXN_Pos)                 /*!< SCTLR: WXN Mask */
+
+#define SCTLR_HA_Pos                     17U                                    /*!< SCTLR: HA Position */
+#define SCTLR_HA_Msk                     (1UL << SCTLR_HA_Pos)                  /*!< SCTLR: HA Mask */
+
+#define SCTLR_RR_Pos                     14U                                    /*!< SCTLR: RR Position */
+#define SCTLR_RR_Msk                     (1UL << SCTLR_RR_Pos)                  /*!< SCTLR: RR Mask */
+
+#define SCTLR_V_Pos                      13U                                    /*!< SCTLR: V Position */
+#define SCTLR_V_Msk                      (1UL << SCTLR_V_Pos)                   /*!< SCTLR: V Mask */
+
+#define SCTLR_I_Pos                      12U                                    /*!< SCTLR: I Position */
+#define SCTLR_I_Msk                      (1UL << SCTLR_I_Pos)                   /*!< SCTLR: I Mask */
+
+#define SCTLR_Z_Pos                      11U                                    /*!< SCTLR: Z Position */
+#define SCTLR_Z_Msk                      (1UL << SCTLR_Z_Pos)                   /*!< SCTLR: Z Mask */
+
+#define SCTLR_SW_Pos                     10U                                    /*!< SCTLR: SW Position */
+#define SCTLR_SW_Msk                     (1UL << SCTLR_SW_Pos)                  /*!< SCTLR: SW Mask */
+
+#define SCTLR_B_Pos                      7U                                     /*!< SCTLR: B Position */
+#define SCTLR_B_Msk                      (1UL << SCTLR_B_Pos)                   /*!< SCTLR: B Mask */
+
+#define SCTLR_CP15BEN_Pos                5U                                     /*!< SCTLR: CP15BEN Position */
+#define SCTLR_CP15BEN_Msk                (1UL << SCTLR_CP15BEN_Pos)             /*!< SCTLR: CP15BEN Mask */
+
+#define SCTLR_C_Pos                      2U                                     /*!< SCTLR: C Position */
+#define SCTLR_C_Msk                      (1UL << SCTLR_C_Pos)                   /*!< SCTLR: C Mask */
+
+#define SCTLR_A_Pos                      1U                                     /*!< SCTLR: A Position */
+#define SCTLR_A_Msk                      (1UL << SCTLR_A_Pos)                   /*!< SCTLR: A Mask */
+
+#define SCTLR_M_Pos                      0U                                     /*!< SCTLR: M Position */
+#define SCTLR_M_Msk                      (1UL << SCTLR_M_Pos)                   /*!< SCTLR: M Mask */
+
+/* CP15 Register CPACR */
+typedef union
+{
+  struct
+  {
+    uint32_t _reserved0:20;              /*!< bit: 0..19  Reserved */
+    uint32_t cp10:2;                     /*!< bit:20..21  Access rights for coprocessor 10 */
+    uint32_t cp11:2;                     /*!< bit:22..23  Access rights for coprocessor 11 */
+    uint32_t _reserved1:6;               /*!< bit:24..29  Reserved */
+    uint32_t D32DIS:1;                   /*!< bit:    30  Disable use of registers D16-D31 of the VFP register file */
+    uint32_t ASEDIS:1;                   /*!< bit:    31  Disable Advanced SIMD Functionality */
+  } b;                                   /*!< Structure used for bit  access */
+  uint32_t w;                            /*!< Type      used for word access */
+} CPACR_Type;
+
+#define CPACR_ASEDIS_Pos                 31U                                    /*!< CPACR: ASEDIS Position */
+#define CPACR_ASEDIS_Msk                 (1UL << CPACR_ASEDIS_Pos)              /*!< CPACR: ASEDIS Mask */
+
+#define CPACR_D32DIS_Pos                 30U                                    /*!< CPACR: D32DIS Position */
+#define CPACR_D32DIS_Msk                 (1UL << CPACR_D32DIS_Pos)              /*!< CPACR: D32DIS Mask */
+
+#define CPACR_cp11_Pos                   22U                                    /*!< CPACR: cp11 Position */
+#define CPACR_cp11_Msk                   (3UL << CPACR_cp11_Pos)                /*!< CPACR: cp11 Mask */
+
+#define CPACR_cp10_Pos                   20U                                    /*!< CPACR: cp10 Position */
+#define CPACR_cp10_Msk                   (3UL << CPACR_cp10_Pos)                /*!< CPACR: cp10 Mask */
+
+/* CP15 Register DFSR */
+typedef union
+{
+  struct
+  {
+    uint32_t FS0:4;                      /*!< bit: 0.. 3  Fault Status bits bit 0-3 */
+    uint32_t Domain:4;                   /*!< bit: 4.. 7  Fault on which domain */
+    uint32_t _reserved0:2;               /*!< bit: 8.. 9  Reserved */
+    uint32_t FS1:1;                      /*!< bit:    10  Fault Status bits bit 4 */
+    uint32_t WnR:1;                      /*!< bit:    11  Write not Read bit */
+    uint32_t ExT:1;                      /*!< bit:    12  External abort type */
+    uint32_t CM:1;                       /*!< bit:    13  Cache maintenance fault */
+    uint32_t _reserved1:18;              /*!< bit:14..31  Reserved */
+  } b;                                   /*!< Structure used for bit  access */
+  uint32_t w;                            /*!< Type      used for word access */
+} DFSR_Type;
+
+#define DFSR_CM_Pos                      13U                                    /*!< DFSR: CM Position */
+#define DFSR_CM_Msk                      (1UL << DFSR_CM_Pos)                   /*!< DFSR: CM Mask */
+
+#define DFSR_Ext_Pos                     12U                                    /*!< DFSR: Ext Position */
+#define DFSR_Ext_Msk                     (1UL << DFSR_Ext_Pos)                  /*!< DFSR: Ext Mask */
+
+#define DFSR_WnR_Pos                     11U                                    /*!< DFSR: WnR Position */
+#define DFSR_WnR_Msk                     (1UL << DFSR_WnR_Pos)                  /*!< DFSR: WnR Mask */
+
+#define DFSR_FS1_Pos                     10U                                    /*!< DFSR: FS1 Position */
+#define DFSR_FS1_Msk                     (1UL << DFSR_FS1_Pos)                  /*!< DFSR: FS1 Mask */
+
+#define DFSR_Domain_Pos                  4U                                     /*!< DFSR: Domain Position */
+#define DFSR_Domain_Msk                  (0xFUL << DFSR_Domain_Pos)             /*!< DFSR: Domain Mask */
+
+#define DFSR_FS0_Pos                     0U                                     /*!< DFSR: FS0 Position */
+#define DFSR_FS0_Msk                     (0xFUL << DFSR_FS0_Pos)                /*!< DFSR: FS0 Mask */
+
+/* CP15 Register IFSR */
+typedef union
+{
+  struct
+  {
+    uint32_t FS0:4;                      /*!< bit: 0.. 3  Fault Status bits bit 0-3 */
+    uint32_t _reserved0:6;               /*!< bit: 4.. 9  Reserved */
+    uint32_t FS1:1;                      /*!< bit:    10  Fault Status bits bit 4 */
+    uint32_t _reserved1:1;               /*!< bit:    11  Reserved */
+    uint32_t ExT:1;                      /*!< bit:    12  External abort type */
+    uint32_t _reserved2:19;              /*!< bit:13..31  Reserved */
+  } b;                                   /*!< Structure used for bit  access */
+  uint32_t w;                            /*!< Type      used for word access */
+} IFSR_Type;
+
+#define IFSR_ExT_Pos                     12U                                    /*!< IFSR: ExT Position */
+#define IFSR_ExT_Msk                     (1UL << IFSR_ExT_Pos)                  /*!< IFSR: ExT Mask */
+
+#define IFSR_FS1_Pos                     10U                                    /*!< IFSR: FS1 Position */
+#define IFSR_FS1_Msk                     (1UL << IFSR_FS1_Pos)                  /*!< IFSR: FS1 Mask */
+
+#define IFSR_FS0_Pos                     0U                                     /*!< IFSR: FS0 Position */
+#define IFSR_FS0_Msk                     (0xFUL << IFSR_FS0_Pos)                /*!< IFSR: FS0 Mask */
+
+/* CP15 Register ISR (Interrupt Status Register, CP15 c12)
+   Read-only pending status of the A (external abort), I (IRQ) and F (FIQ) inputs. */
+typedef union
+{
+  struct
+  {
+    uint32_t _reserved0:6;               /*!< bit: 0.. 5  Reserved */
+    uint32_t F:1;                        /*!< bit:     6  FIQ pending bit */
+    uint32_t I:1;                        /*!< bit:     7  IRQ pending bit */
+    uint32_t A:1;                        /*!< bit:     8  External abort pending bit */
+    uint32_t _reserved1:23;              /*!< bit: 9..31  Reserved */
+  } b;                                   /*!< Structure used for bit  access */
+  uint32_t w;                            /*!< Type      used for word access */
+} ISR_Type;
+
+/* Positions corrected to match the bit-field layout above (previously 13/12/11,
+   copy-pasted from the DFSR defines): A is bit 8, I is bit 7, F is bit 6. */
+#define ISR_A_Pos                        8U                                     /*!< ISR: A Position */
+#define ISR_A_Msk                        (1UL << ISR_A_Pos)                     /*!< ISR: A Mask */
+
+#define ISR_I_Pos                        7U                                     /*!< ISR: I Position */
+#define ISR_I_Msk                        (1UL << ISR_I_Pos)                     /*!< ISR: I Mask */
+
+#define ISR_F_Pos                        6U                                     /*!< ISR: F Position */
+#define ISR_F_Msk                        (1UL << ISR_F_Pos)                     /*!< ISR: F Mask */
+
+
+/**
+ \brief  Structure type to access the L2C-310 Level 2 Cache Controller registers.
+*/
+typedef struct
+{
+  __I  uint32_t CACHE_ID;                   /*!< Offset: 0x0000   Cache ID Register               */
+  __I  uint32_t CACHE_TYPE;                 /*!< Offset: 0x0004   Cache Type Register             */
+       uint32_t RESERVED0[0x3e];
+  __IO uint32_t CONTROL;                    /*!< Offset: 0x0100   Control Register                */
+  __IO uint32_t AUX_CNT;                    /*!< Offset: 0x0104   Auxiliary Control               */
+       uint32_t RESERVED1[0x3e];
+  __IO uint32_t EVENT_CONTROL;              /*!< Offset: 0x0200   Event Counter Control           */
+  __IO uint32_t EVENT_COUNTER1_CONF;        /*!< Offset: 0x0204   Event Counter 1 Configuration   */
+  __IO uint32_t EVENT_COUNTER0_CONF;        /*!< Offset: 0x0208   Event Counter 0 Configuration   */
+       uint32_t RESERVED2[0x2];
+  __IO uint32_t INTERRUPT_MASK;             /*!< Offset: 0x0214   Interrupt Mask                  */
+  __I  uint32_t MASKED_INT_STATUS;          /*!< Offset: 0x0218   Masked Interrupt Status         */
+  __I  uint32_t RAW_INT_STATUS;             /*!< Offset: 0x021c   Raw Interrupt Status            */
+  __O  uint32_t INTERRUPT_CLEAR;            /*!< Offset: 0x0220   Interrupt Clear                 */
+       uint32_t RESERVED3[0x143];
+  __IO uint32_t CACHE_SYNC;                 /*!< Offset: 0x0730   Cache Sync                      */
+       uint32_t RESERVED4[0xf];
+  __IO uint32_t INV_LINE_PA;                /*!< Offset: 0x0770   Invalidate Line By PA           */
+       uint32_t RESERVED6[2];               /* NOTE(review): RESERVED6/RESERVED5 numbering is swapped vs. declaration order; only the padding sizes affect the layout */
+  __IO uint32_t INV_WAY;                    /*!< Offset: 0x077c   Invalidate by Way               */
+       uint32_t RESERVED5[0xc];
+  __IO uint32_t CLEAN_LINE_PA;              /*!< Offset: 0x07b0   Clean Line by PA                */
+       uint32_t RESERVED7[1];
+  __IO uint32_t CLEAN_LINE_INDEX_WAY;       /*!< Offset: 0x07b8   Clean Line by Index/Way         */
+  __IO uint32_t CLEAN_WAY;                  /*!< Offset: 0x07bc   Clean by Way                    */
+       uint32_t RESERVED8[0xc];
+  __IO uint32_t CLEAN_INV_LINE_PA;          /*!< Offset: 0x07f0   Clean and Invalidate Line by PA  */
+       uint32_t RESERVED9[1];
+  __IO uint32_t CLEAN_INV_LINE_INDEX_WAY;   /*!< Offset: 0x07f8   Clean and Invalidate Line by Index/Way  */
+  __IO uint32_t CLEAN_INV_WAY;              /*!< Offset: 0x07fc   Clean and Invalidate by Way     */
+       uint32_t RESERVED10[0x40];
+  __IO uint32_t DATA_LOCK_0_WAY;            /*!< Offset: 0x0900   Data Lockdown 0 by Way          */
+  __IO uint32_t INST_LOCK_0_WAY;            /*!< Offset: 0x0904   Instruction Lockdown 0 by Way   */
+  __IO uint32_t DATA_LOCK_1_WAY;            /*!< Offset: 0x0908   Data Lockdown 1 by Way          */
+  __IO uint32_t INST_LOCK_1_WAY;            /*!< Offset: 0x090c   Instruction Lockdown 1 by Way   */
+  __IO uint32_t DATA_LOCK_2_WAY;            /*!< Offset: 0x0910   Data Lockdown 2 by Way          */
+  __IO uint32_t INST_LOCK_2_WAY;            /*!< Offset: 0x0914   Instruction Lockdown 2 by Way   */
+  __IO uint32_t DATA_LOCK_3_WAY;            /*!< Offset: 0x0918   Data Lockdown 3 by Way          */
+  __IO uint32_t INST_LOCK_3_WAY;            /*!< Offset: 0x091c   Instruction Lockdown 3 by Way   */
+  __IO uint32_t DATA_LOCK_4_WAY;            /*!< Offset: 0x0920   Data Lockdown 4 by Way          */
+  __IO uint32_t INST_LOCK_4_WAY;            /*!< Offset: 0x0924   Instruction Lockdown 4 by Way   */
+  __IO uint32_t DATA_LOCK_5_WAY;            /*!< Offset: 0x0928   Data Lockdown 5 by Way          */
+  __IO uint32_t INST_LOCK_5_WAY;            /*!< Offset: 0x092c   Instruction Lockdown 5 by Way   */
+  __IO uint32_t DATA_LOCK_6_WAY;            /*!< Offset: 0x0930   Data Lockdown 6 by Way          */
+  __IO uint32_t INST_LOCK_6_WAY;            /*!< Offset: 0x0934   Instruction Lockdown 6 by Way   */
+  __IO uint32_t DATA_LOCK_7_WAY;            /*!< Offset: 0x0938   Data Lockdown 7 by Way          */
+  __IO uint32_t INST_LOCK_7_WAY;            /*!< Offset: 0x093c   Instruction Lockdown 7 by Way   */
+       uint32_t RESERVED11[0x4];
+  __IO uint32_t LOCK_LINE_EN;               /*!< Offset: 0x0950   Lockdown by Line Enable         */
+  __IO uint32_t UNLOCK_ALL_BY_WAY;          /*!< Offset: 0x0954   Unlock All Lines by Way         */
+       uint32_t RESERVED12[0xaa];
+  __IO uint32_t ADDRESS_FILTER_START;       /*!< Offset: 0x0c00   Address Filtering Start         */
+  __IO uint32_t ADDRESS_FILTER_END;         /*!< Offset: 0x0c04   Address Filtering End           */
+       uint32_t RESERVED13[0xce];
+  __IO uint32_t DEBUG_CONTROL;              /*!< Offset: 0x0f40   Debug Control Register          */
+} L2C_310_TypeDef;
+
+#define L2C_310           ((L2C_310_TypeDef *)L2C_310_BASE) /*!< L2C_310 Declaration */
+
+/** \brief  Structure type to access the Generic Interrupt Controller Distributor (GICD)
+*/
+typedef struct
+{
+ __IO uint32_t ICDDCR;
+ __I  uint32_t ICDICTR;
+ __I  uint32_t ICDIIDR;
+      uint32_t RESERVED0[29];
+ __IO uint32_t ICDISR[32];
+ __IO uint32_t ICDISER[32];
+ __IO uint32_t ICDICER[32];
+ __IO uint32_t ICDISPR[32];
+ __IO uint32_t ICDICPR[32];
+ __I  uint32_t ICDABR[32];
+      uint32_t RESERVED1[32];
+ __IO uint32_t ICDIPR[256];
+ __IO uint32_t ICDIPTR[256];
+ __IO uint32_t ICDICFR[64];
+      uint32_t RESERVED2[128];
+ __IO uint32_t ICDSGIR;
+}  GICDistributor_Type;
+
+#define GICDistributor      ((GICDistributor_Type      *)     GIC_DISTRIBUTOR_BASE ) /*!< GIC Distributor configuration struct */
+
+/** \brief  Structure type to access the Generic Interrupt Controller Interface (GICC)
+*/
+typedef struct
+{
+  __IO uint32_t ICCICR;          // +0x000 - RW - CPU Interface Control Register
+  __IO uint32_t ICCPMR;          // +0x004 - RW - Interrupt Priority Mask Register
+  __IO uint32_t ICCBPR;          // +0x008 - RW - Binary Point Register
+  __I  uint32_t ICCIAR;          // +0x00C - RO - Interrupt Acknowledge Register
+  __IO uint32_t ICCEOIR;         // +0x010 - WO - End of Interrupt Register
+  __I  uint32_t ICCRPR;          // +0x014 - RO - Running Priority Register
+  __I  uint32_t ICCHPIR;         // +0x018 - RO - Highest Pending Interrupt Register
+  __IO uint32_t ICCABPR;         // +0x01C - RW - Aliased Binary Point Register
+  uint32_t RESERVED[55];
+  __I  uint32_t ICCIIDR;         // +0x0FC - RO - CPU Interface Identification Register
+}  GICInterface_Type;
+
+#define GICInterface        ((GICInterface_Type        *)     GIC_INTERFACE_BASE )   /*!< GIC Interface configuration struct */
+
+
+ /*******************************************************************************
+  *                Hardware Abstraction Layer
+   Core Function Interface contains:
+   - L1 Cache Functions
+   - L2C-310 Cache Controller Functions 
+   - PL1 Timer Functions
+   - GIC Functions
+   - MMU Functions
+  ******************************************************************************/
+ /**
+   \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference
+ */
+
+
+/* ##########################  L1 Cache functions  ################################# */
+
+/** \brief  Enable Caches
+
+  Enables the L1 instruction and data caches by setting the
+  I (bit 12) and C (bit 2) bits in the SCTLR.
+ */
+__STATIC_INLINE void L1C_EnableCaches(void) {
+  // Set I bit 12 to enable I Cache
+  // Set C bit  2 to enable D Cache
+  __set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
+}
+
+/** \brief  Disable Caches
+
+  Disables the L1 instruction and data caches by clearing the
+  I (bit 12) and C (bit 2) bits in the SCTLR.
+ */
+__STATIC_INLINE void L1C_DisableCaches(void) {
+  // Clear I bit 12 to disable I Cache
+  // Clear C bit  2 to disable D Cache
+  __set_SCTLR( __get_SCTLR() & ~(1 << 12) & ~(1 << 2));
+  __ISB();     // ensure subsequent instruction fetches see the new SCTLR value
+}
+
+/** \brief  Enable BTAC
+
+  Enables branch prediction by setting the Z bit (bit 11) in the SCTLR.
+ */
+__STATIC_INLINE void L1C_EnableBTAC(void) {
+  // Set Z bit 11 to enable branch prediction
+  __set_SCTLR( __get_SCTLR() | (1 << 11));
+  __ISB();     // ensure subsequent instructions see branch prediction enabled
+}
+
+/** \brief  Disable BTAC
+
+  Disables branch prediction by clearing the Z bit (bit 11) in the SCTLR.
+  NOTE(review): unlike L1C_EnableBTAC there is no trailing __ISB here - confirm intended.
+ */
+__STATIC_INLINE void L1C_DisableBTAC(void) {
+  // Clear Z bit 11 to disable branch prediction
+  __set_SCTLR( __get_SCTLR() & ~(1 << 11));
+}
+
+/** \brief  Invalidate entire branch predictor array
+
+  BPIALL. Branch Predictor Invalidate All.
+ */
+
+__STATIC_INLINE void L1C_InvalidateBTAC(void) {
+  __set_BPIALL(0);   // CP15 BPIALL: value written is ignored
+  __DSB();     //ensure completion of the invalidation
+  __ISB();     //ensure instruction fetch path sees new state
+}
+
+/** \brief  Invalidate the whole I$
+
+  ICIALLU. Instruction Cache Invalidate All to PoU (Point of Unification).
+*/
+__STATIC_INLINE void L1C_InvalidateICacheAll(void) {
+  __set_ICIALLU(0);  // CP15 ICIALLU: value written is ignored
+  __DSB();     //ensure completion of the invalidation
+  __ISB();     //ensure instruction fetch path sees new I cache state
+}
+
+/** \brief  Clean D$ by MVA
+
+  DCCMVAC. Data cache clean by MVA to PoC (Point of Coherency).
+
+  \param [in] va  Virtual address of the cache line to clean.
+*/
+__STATIC_INLINE void L1C_CleanDCacheMVA(void *va) {
+  __set_DCCMVAC((uint32_t)va);
+  __DMB();     //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief  Invalidate D$ by MVA
+
+  DCIMVAC. Data cache invalidate by MVA to PoC (Point of Coherency).
+
+  \param [in] va  Virtual address of the cache line to invalidate.
+*/
+__STATIC_INLINE void L1C_InvalidateDCacheMVA(void *va) {
+  __set_DCIMVAC((uint32_t)va);
+  __DMB();     //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief  Clean and Invalidate D$ by MVA
+
+  DCCIMVAC. Data cache clean and invalidate by MVA to PoC (Point of Coherency).
+
+  \param [in] va  Virtual address of the cache line to clean and invalidate.
+*/
+__STATIC_INLINE void L1C_CleanInvalidateDCacheMVA(void *va) {
+  __set_DCCIMVAC((uint32_t)va);
+  __DMB();     //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief  Clean and Invalidate the entire data or unified cache
+
+  Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency.
+
+  \param [in] op  0 - invalidate, 1 - clean, 2 - clean and invalidate
+                  (see the L1C_*DCacheAll wrappers below).
+*/
+__STATIC_INLINE void L1C_CleanInvalidateCache(uint32_t op) {
+  __L1C_CleanInvalidateCache(op);  // compiler specific call
+}
+
+
+/** \brief  Invalidate the whole D$
+
+  DCISW. Invalidate by Set/Way (op code 0).
+*/
+
+__STATIC_INLINE void L1C_InvalidateDCacheAll(void) {
+  L1C_CleanInvalidateCache(0);
+}
+
+/** \brief  Clean the whole D$
+
+    DCCSW. Clean by Set/Way (op code 1).
+ */
+
+__STATIC_INLINE void L1C_CleanDCacheAll(void) {
+  L1C_CleanInvalidateCache(1);
+}
+
+/** \brief  Clean and invalidate the whole D$
+
+    DCCISW. Clean and Invalidate by Set/Way (op code 2).
+ */
+
+__STATIC_INLINE void L1C_CleanInvalidateDCacheAll(void) {
+  L1C_CleanInvalidateCache(2);
+}
+
+
+/* ##########################  L2 Cache functions  ################################# */
+
+//Cache Sync operation: a write to CACHE_SYNC drains the L2C-310 buffers,
+//completing all outstanding cache maintenance operations.
+__STATIC_INLINE void L2C_Sync(void)
+{
+  L2C_310->CACHE_SYNC = 0x0;
+}
+
+//return Cache controller cache ID (CACHE_ID register)
+__STATIC_INLINE int L2C_GetID (void)
+{
+  return L2C_310->CACHE_ID;
+}
+
+//return Cache controller cache Type (CACHE_TYPE register)
+__STATIC_INLINE int L2C_GetType (void)
+{
+  return L2C_310->CACHE_TYPE;
+}
+
+//Invalidate all cache by way
+__STATIC_INLINE void L2C_InvAllByWay (void)
+{
+  //Bit 16 of the auxiliary control register selects 16-way (set) or 8-way
+  //associativity; build the corresponding all-ways bit mask.
+  unsigned int way_mask = (L2C_310->AUX_CNT & (1 << 16)) ? 0xFFFFU : 0x00FFU;
+
+  L2C_310->INV_WAY = way_mask;                  //start the invalidation on every way
+  while (L2C_310->INV_WAY & way_mask) { }       //poll until all ways report complete
+
+  L2C_Sync();                                   //drain the controller buffers
+}
+
+//Clean and Invalidate all cache by way
+__STATIC_INLINE void L2C_CleanInvAllByWay (void)
+{
+  //Bit 16 of the auxiliary control register selects 16-way (set) or 8-way
+  //associativity; build the corresponding all-ways bit mask.
+  unsigned int way_mask = (L2C_310->AUX_CNT & (1 << 16)) ? 0xFFFFU : 0x00FFU;
+
+  L2C_310->CLEAN_INV_WAY = way_mask;                //start clean+invalidate on every way
+  while (L2C_310->CLEAN_INV_WAY & way_mask) { }     //poll until all ways report complete
+
+  L2C_Sync();                                       //drain the controller buffers
+}
+
+//Enable Cache
+//The controller is programmed while disabled, then enabled with a single
+//write to CONTROL (L2C-310 programming sequence).
+__STATIC_INLINE void L2C_Enable(void)
+{
+  L2C_310->CONTROL = 0;                     //disable controller before programming it
+  L2C_310->INTERRUPT_CLEAR = 0x000001FFuL;  //clear all nine latched interrupt sources
+  L2C_310->DEBUG_CONTROL = 0;               //normal (non-debug) operation
+  L2C_310->DATA_LOCK_0_WAY = 0;             //no data ways locked
+  L2C_310->CACHE_SYNC = 0;                  //drain buffers
+  L2C_310->CONTROL = 0x01;                  //enable the L2 cache controller
+  L2C_Sync();
+}
+//Disable Cache: clear the enable bit in CONTROL, then drain the buffers
+__STATIC_INLINE void L2C_Disable(void)
+{
+  L2C_310->CONTROL = 0x00;
+  L2C_Sync();
+}
+
+//Invalidate cache by physical address (one line, INV_LINE_PA register)
+__STATIC_INLINE void L2C_InvPa (void *pa)
+{
+  L2C_310->INV_LINE_PA = (unsigned int)pa;
+  L2C_Sync();
+}
+
+//Clean cache by physical address (one line, CLEAN_LINE_PA register)
+__STATIC_INLINE void L2C_CleanPa (void *pa)
+{
+  L2C_310->CLEAN_LINE_PA = (unsigned int)pa;
+  L2C_Sync();
+}
+
+//Clean and invalidate cache by physical address (one line, CLEAN_INV_LINE_PA register)
+__STATIC_INLINE void L2C_CleanInvPa (void *pa)
+{
+  L2C_310->CLEAN_INV_LINE_PA = (unsigned int)pa;
+  L2C_Sync();
+}
+
+/* ##########################  GIC functions  ###################################### */
+
+/** \brief  Enable the GIC Distributor (sets the enable bit in ICDDCR). */
+__STATIC_INLINE void GIC_EnableDistributor(void)
+{
+  GICDistributor->ICDDCR |= 1; //enable distributor
+}
+
+/** \brief  Disable the GIC Distributor (clears the enable bit in ICDDCR). */
+__STATIC_INLINE void GIC_DisableDistributor(void)
+{
+  GICDistributor->ICDDCR &=~1; //disable distributor
+}
+
+/** \brief  Read the Interrupt Controller Type Register (ICDICTR).
+    Bits [4:0] encode the number of supported interrupts as 32*(N+1). */
+__STATIC_INLINE uint32_t GIC_DistributorInfo(void)
+{
+  return (uint32_t)(GICDistributor->ICDICTR);
+}
+
+/** \brief  Read the Distributor Implementer Identification Register (ICDIIDR). */
+__STATIC_INLINE uint32_t GIC_DistributorImplementer(void)
+{
+  return (uint32_t)(GICDistributor->ICDIIDR);
+}
+
+/** \brief  Set the CPU target list of an interrupt.
+
+  ICDIPTR holds one byte-wide target field per interrupt; the field is
+  addressed as a byte so neighbouring entries are not disturbed.
+
+  \param [in] IRQn        Interrupt number.
+  \param [in] cpu_target  CPU target list bit mask.
+                          NOTE(review): only the low 4 bits are kept although
+                          the field is 8 bits wide - confirm for >4 CPU systems.
+*/
+__STATIC_INLINE void GIC_SetTarget(IRQn_Type IRQn, uint32_t cpu_target)
+{
+  char* field = (char*)&(GICDistributor->ICDIPTR[IRQn / 4]);
+  field += IRQn % 4;
+  *field = (char)cpu_target & 0xf;
+}
+
+/** \brief  Program the interrupt configuration registers (ICDICFR)
+
+  Copies the caller-supplied configuration words into the Distributor
+  ICDICFR registers (2 configuration bits per interrupt, i.e. 16
+  interrupts per 32-bit register - hence the num_irq/16 loop bound).
+
+  \param [in] ICDICFRn  Array of at least (num_irq / 16) configuration words.
+*/
+__STATIC_INLINE void GIC_SetICDICFR (const uint32_t *ICDICFRn)
+{
+  uint32_t i, num_irq;
+
+  //Get the maximum number of interrupts that the GIC supports
+  num_irq = 32 * ((GIC_DistributorInfo() & 0x1f) + 1);
+
+  for (i = 0; i < (num_irq/16); i++)
+  {
+    //Bug fix: write the configuration registers (ICDICFR) as the function
+    //name and parameter imply, not the set-pending registers (ICDISPR).
+    GICDistributor->ICDICFR[i] = *ICDICFRn++;
+  }
+}
+
+/** \brief  Get the CPU target list of an interrupt (low 4 bits of its ICDIPTR byte). */
+__STATIC_INLINE uint32_t GIC_GetTarget(IRQn_Type IRQn)
+{
+  char* field = (char*)&(GICDistributor->ICDIPTR[IRQn / 4]);
+  field += IRQn % 4;
+  return ((uint32_t)*field & 0xf);
+}
+
+/** \brief  Enable the GIC CPU interface (sets the enable bit in ICCICR). */
+__STATIC_INLINE void GIC_EnableInterface(void)
+{
+  GICInterface->ICCICR |= 1; //enable interface
+}
+
+/** \brief  Disable the GIC CPU interface (clears the enable bit in ICCICR). */
+__STATIC_INLINE void GIC_DisableInterface(void)
+{
+  GICInterface->ICCICR &=~1; //disable CPU interface
+}
+
+/** \brief  Acknowledge the highest-priority pending interrupt.
+    Reading ICCIAR returns the interrupt ID and marks it active. */
+__STATIC_INLINE IRQn_Type GIC_AcknowledgePending(void)
+{
+  return (IRQn_Type)(GICInterface->ICCIAR);
+}
+
+/** \brief  Signal completion of an interrupt by writing its ID to ICCEOIR.
+    Must be the same ID previously returned by GIC_AcknowledgePending. */
+__STATIC_INLINE void GIC_EndInterrupt(IRQn_Type IRQn)
+{
+  GICInterface->ICCEOIR = IRQn;
+}
+
+/** \brief  Enable an interrupt (write-one-to-set, ICDISER). */
+__STATIC_INLINE void GIC_EnableIRQ(IRQn_Type IRQn)
+{
+  // 1U: a signed "1 << 31" is undefined behavior in C
+  GICDistributor->ICDISER[IRQn / 32] = 1U << (IRQn % 32);
+}
+
+/** \brief  Disable an interrupt (write-one-to-clear, ICDICER). */
+__STATIC_INLINE void GIC_DisableIRQ(IRQn_Type IRQn)
+{
+  // 1U: a signed "1 << 31" is undefined behavior in C
+  GICDistributor->ICDICER[IRQn / 32] = 1U << (IRQn % 32);
+}
+
+/** \brief  Set an interrupt pending (write-one-to-set, ICDISPR). */
+__STATIC_INLINE void GIC_SetPendingIRQ(IRQn_Type IRQn)
+{
+  // 1U: a signed "1 << 31" is undefined behavior in C
+  GICDistributor->ICDISPR[IRQn / 32] = 1U << (IRQn % 32);
+}
+
+/** \brief  Clear an interrupt's pending state (write-one-to-clear, ICDICPR). */
+__STATIC_INLINE void GIC_ClearPendingIRQ(IRQn_Type IRQn)
+{
+  // 1U: a signed "1 << 31" is undefined behavior in C
+  GICDistributor->ICDICPR[IRQn / 32] = 1U << (IRQn % 32);
+}
+
+/** \brief  Set the trigger and handling model configuration of an interrupt
+
+  Each interrupt owns a 2-bit field in ICDICFR (16 interrupts per register).
+
+  \param [in] IRQn        Interrupt number.
+  \param [in] edge_level  Trigger configuration bit.
+  \param [in] model       Handling model bit.
+*/
+__STATIC_INLINE void GIC_SetLevelModel(IRQn_Type IRQn, int8_t edge_level, int8_t model)
+{
+  // Word-size read/writes must be used to access this register
+  volatile uint32_t * field = &(GICDistributor->ICDICFR[IRQn / 16]);
+  unsigned bit_shift = (IRQn % 16)<<1;
+  uint32_t save_word;
+
+  save_word = *field;
+  // 3U: bit_shift can be 30, and a signed "3 << 30" overflows (undefined behavior)
+  save_word &= (~(3U << bit_shift));
+
+  *field = (save_word | (((uint32_t)((edge_level<<1) | model)) << bit_shift));
+}
+
+/** \brief  Set the priority of an interrupt
+
+  ICDIPR holds one 8-bit priority field per interrupt; the field is written
+  as a byte so neighbouring priorities are not disturbed.
+
+  \param [in] IRQn      Interrupt number.
+  \param [in] priority  8-bit priority value (higher value = lower priority).
+*/
+__STATIC_INLINE void GIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
+{
+  // volatile uint8_t: keep the MMIO access volatile and avoid the
+  // implementation-defined narrowing of values > 127 to a plain (signed) char
+  volatile uint8_t* field = (volatile uint8_t*)&(GICDistributor->ICDIPR[IRQn / 4]);
+  field += IRQn % 4;
+  *field = (uint8_t)priority;
+}
+
+/** \brief  Get the priority of an interrupt
+
+  \param [in] IRQn  Interrupt number.
+  \return           8-bit priority value read from ICDIPR.
+*/
+__STATIC_INLINE uint32_t GIC_GetPriority(IRQn_Type IRQn)
+{
+  // volatile uint8_t: with a plain (signed) char, priorities >= 0x80 would
+  // sign-extend (e.g. 0xFF -> 0xFFFFFFFF), breaking the priority-width probe
+  // in GIC_DistInit/GIC_CPUInterfaceInit
+  volatile uint8_t* field = (volatile uint8_t*)&(GICDistributor->ICDIPR[IRQn / 4]);
+  field += IRQn % 4;
+  return (uint32_t)*field;
+}
+
+/** \brief  Set the CPU interface priority mask (ICCPMR).
+    Only interrupts with higher priority than the mask are forwarded. */
+__STATIC_INLINE void GIC_InterfacePriorityMask(uint32_t priority)
+{
+  GICInterface->ICCPMR = priority & 0xff; //set priority mask
+}
+
+/** \brief  Set the binary point (ICCBPR), the split between the preemption
+    group and subpriority fields. Only the low 3 bits are used. */
+__STATIC_INLINE void GIC_SetBinaryPoint(uint32_t binary_point)
+{
+  GICInterface->ICCBPR = binary_point & 0x07; //set binary point
+}
+
+/** \brief  Read the binary point register (ICCBPR). */
+__STATIC_INLINE uint32_t GIC_GetBinaryPoint(uint32_t binary_point)
+{
+  //NOTE(review): binary_point parameter is unused; kept for API compatibility
+  return (uint32_t)GICInterface->ICCBPR;
+}
+
+/** \brief  Get the active/pending status of an interrupt.
+    \return Bit 1: active (from ICDABR), Bit 0: pending (from ICDISPR). */
+__STATIC_INLINE uint32_t GIC_GetIRQStatus(IRQn_Type IRQn)
+{
+  uint32_t reg_index = IRQn / 32;
+  uint32_t bit_pos   = IRQn % 32;
+
+  uint32_t active  = (GICDistributor->ICDABR[reg_index]  >> bit_pos) & 0x1;
+  uint32_t pending = (GICDistributor->ICDISPR[reg_index] >> bit_pos) & 0x1;
+
+  return (active << 1) | pending;
+}
+
+/** \brief  Generate a software interrupt (write to ICDSGIR).
+    \param [in] IRQn         SGI number (only the low 4 bits are used; SGIs are IDs 0-15).
+    \param [in] target_list  CPU target list (bits [23:16]).
+    \param [in] filter_list  Target filter (bits [25:24]). */
+__STATIC_INLINE void GIC_SendSGI(IRQn_Type IRQn, uint32_t target_list, uint32_t filter_list)
+{
+  GICDistributor->ICDSGIR = ((filter_list & 0x3) << 24) | ((target_list & 0xff) << 16) | (IRQn & 0xf);
+}
+
+/** \brief  Initialize the GIC Distributor
+
+  Disables forwarding, probes the number of supported interrupts and the
+  implemented priority bits, then configures every SPI (IRQ >= 32) as
+  disabled, level-sensitive, 1-N model, mid-range priority, targeting CPU0,
+  and finally re-enables the Distributor.
+*/
+__STATIC_INLINE void GIC_DistInit(void)
+{
+  IRQn_Type i;
+  uint32_t num_irq = 0;
+  uint32_t priority_field;
+
+  //A reset sets all bits in the ICDISRs corresponding to the SPIs to 0,
+  //configuring all of the interrupts as Secure.
+
+  //Disable interrupt forwarding
+  GIC_DisableDistributor();
+  //Get the maximum number of interrupts that the GIC supports
+  num_irq = 32 * ((GIC_DistributorInfo() & 0x1f) + 1);
+
+  /* Priority level is implementation defined.
+   To determine the number of priority bits implemented write 0xFF to an ICDIPR
+   priority field and read back the value stored.*/
+  GIC_SetPriority((IRQn_Type)0, 0xff);
+  priority_field = GIC_GetPriority((IRQn_Type)0);
+
+  for (i = (IRQn_Type)32; i < num_irq; i++)
+  {
+      //Disable the SPI interrupt
+      GIC_DisableIRQ(i);
+      //Set level-sensitive and 1-N model
+      GIC_SetLevelModel(i, 0, 1);
+      //Set priority (mid-range of the implemented priority values)
+      GIC_SetPriority(i, priority_field/2);
+      //Set target list to CPU0
+      GIC_SetTarget(i, 1);
+  }
+  //Enable distributor
+  GIC_EnableDistributor();
+}
+
+/** \brief  Initialize the GIC CPU interface (must be run on each CPU)
+
+  Disables the interface, probes the implemented priority bits, disables
+  all SGIs/PPIs (IRQ 0-31) and sets them to a mid-range priority (PPIs
+  additionally get level-sensitive, 1-N model), then enables the interface
+  with binary point 0 and a fully-open priority mask (0xff).
+*/
+__STATIC_INLINE void GIC_CPUInterfaceInit(void)
+{
+  IRQn_Type i;
+  uint32_t priority_field;
+
+  //A reset sets all bits in the ICDISRs corresponding to the SPIs to 0,
+  //configuring all of the interrupts as Secure.
+
+  //Disable interrupt forwarding
+  GIC_DisableInterface();
+
+  /* Priority level is implementation defined.
+   To determine the number of priority bits implemented write 0xFF to an ICDIPR
+   priority field and read back the value stored.*/
+  GIC_SetPriority((IRQn_Type)0, 0xff);
+  priority_field = GIC_GetPriority((IRQn_Type)0);
+
+  //SGI and PPI
+  for (i = (IRQn_Type)0; i < 32; i++)
+  {
+      //Set level-sensitive and 1-N model for PPI (IDs 16-31; SGIs 0-15 are fixed)
+    if(i > 15)
+          GIC_SetLevelModel(i, 0, 1);
+      //Disable SGI and PPI interrupts
+      GIC_DisableIRQ(i);
+      //Set priority
+      GIC_SetPriority(i, priority_field/2);
+  }
+  //Enable interface
+  GIC_EnableInterface();
+  //Set binary point to 0
+  GIC_SetBinaryPoint(0);
+  //Set priority mask
+  GIC_InterfacePriorityMask(0xff);
+}
+
+/** \brief  Initialize and enable the GIC: Distributor once, then this
+    CPU's interface. GIC_CPUInterfaceInit must also run on every other CPU. */
+__STATIC_INLINE void GIC_Enable(void)
+{
+  GIC_DistInit();
+  GIC_CPUInterfaceInit(); //per CPU
+}
+
+/* ##########################  Generic Timer functions  ############################ */
+
+/** \brief  Set the PL1 physical timer value register (CNTP_TVAL). */
+__STATIC_INLINE void PL1_SetTimerValue(uint32_t value) {
+  __set_CNTP_TVAL(value);
+  __ISB();     // ensure the new timer value takes effect before continuing
+}
+
+/** \brief  Read the PL1 physical timer value register (CNTP_TVAL). */
+__STATIC_INLINE uint32_t PL1_GetTimerValue(void) {
+  // (void): an empty () parameter list is an obsolescent non-prototype declaration
+  return(__get_CNTP_TVAL());
+}
+
+/** \brief  Set the PL1 physical timer control register (CNTP_CTL). */
+__STATIC_INLINE void PL1_SetTimerCtrl(uint32_t value) {
+  __set_CNTP_CTL(value);
+  __ISB();     // ensure the new control value takes effect before continuing
+}
+
+
+/* ##########################  MMU functions  ###################################### */
+
+#define SECTION_DESCRIPTOR      (0x2)
+#define SECTION_MASK            (0xFFFFFFFC)
+
+#define SECTION_TEXCB_MASK      (0xFFFF8FF3)
+#define SECTION_B_SHIFT         (2)
+#define SECTION_C_SHIFT         (3)
+#define SECTION_TEX0_SHIFT      (12)
+#define SECTION_TEX1_SHIFT      (13)
+#define SECTION_TEX2_SHIFT      (14)
+
+#define SECTION_XN_MASK         (0xFFFFFFEF)
+#define SECTION_XN_SHIFT        (4)
+
+#define SECTION_DOMAIN_MASK     (0xFFFFFE1F)
+#define SECTION_DOMAIN_SHIFT    (5)
+
+#define SECTION_P_MASK          (0xFFFFFDFF)
+#define SECTION_P_SHIFT         (9)
+
+#define SECTION_AP_MASK         (0xFFFF73FF)
+#define SECTION_AP_SHIFT        (10)
+#define SECTION_AP2_SHIFT       (15)
+
+#define SECTION_S_MASK          (0xFFFEFFFF)
+#define SECTION_S_SHIFT         (16)
+
+#define SECTION_NG_MASK         (0xFFFDFFFF)
+#define SECTION_NG_SHIFT        (17)
+
+#define SECTION_NS_MASK         (0xFFF7FFFF)
+#define SECTION_NS_SHIFT        (19)
+
+#define PAGE_L1_DESCRIPTOR      (0x1)
+#define PAGE_L1_MASK            (0xFFFFFFFC)
+
+#define PAGE_L2_4K_DESC         (0x2)
+#define PAGE_L2_4K_MASK         (0xFFFFFFFD)
+
+#define PAGE_L2_64K_DESC        (0x1)
+#define PAGE_L2_64K_MASK        (0xFFFFFFFC)
+
+#define PAGE_4K_TEXCB_MASK      (0xFFFFFE33)
+#define PAGE_4K_B_SHIFT         (2)
+#define PAGE_4K_C_SHIFT         (3)
+#define PAGE_4K_TEX0_SHIFT      (6)
+#define PAGE_4K_TEX1_SHIFT      (7)
+#define PAGE_4K_TEX2_SHIFT      (8)
+
+#define PAGE_64K_TEXCB_MASK     (0xFFFF8FF3)
+#define PAGE_64K_B_SHIFT        (2)
+#define PAGE_64K_C_SHIFT        (3)
+#define PAGE_64K_TEX0_SHIFT     (12)
+#define PAGE_64K_TEX1_SHIFT     (13)
+#define PAGE_64K_TEX2_SHIFT     (14)
+
+#define PAGE_TEXCB_MASK         (0xFFFF8FF3)
+#define PAGE_B_SHIFT            (2)
+#define PAGE_C_SHIFT            (3)
+#define PAGE_TEX_SHIFT          (12)
+
+#define PAGE_XN_4K_MASK         (0xFFFFFFFE)
+#define PAGE_XN_4K_SHIFT        (0)
+#define PAGE_XN_64K_MASK        (0xFFFF7FFF)
+#define PAGE_XN_64K_SHIFT       (15)
+
+#define PAGE_DOMAIN_MASK        (0xFFFFFE1F)
+#define PAGE_DOMAIN_SHIFT       (5)
+
+#define PAGE_P_MASK             (0xFFFFFDFF)
+#define PAGE_P_SHIFT            (9)
+
+#define PAGE_AP_MASK            (0xFFFFFDCF)
+#define PAGE_AP_SHIFT           (4)
+#define PAGE_AP2_SHIFT          (9)
+
+#define PAGE_S_MASK             (0xFFFFFBFF)
+#define PAGE_S_SHIFT            (10)
+
+#define PAGE_NG_MASK            (0xFFFFF7FF)
+#define PAGE_NG_SHIFT           (11)
+
+#define PAGE_NS_MASK            (0xFFFFFFF7)
+#define PAGE_NS_SHIFT           (3)
+
+#define OFFSET_1M               (0x00100000)
+#define OFFSET_64K              (0x00010000)
+#define OFFSET_4K               (0x00001000)
+
+#define DESCRIPTOR_FAULT        (0x00000000)
+
+/** \ingroup  MMU_FunctionInterface
+    \defgroup MMU_Functions MMU Functions Interface
+  @{
+ */
+
+/* Attributes enumerations */
+
+/* Region size attributes */
+typedef enum
+{
+   SECTION,
+   PAGE_4k,
+   PAGE_64k,
+} mmu_region_size_Type;
+
+/* Region type attributes */
+typedef enum
+{
+   NORMAL,
+   DEVICE,
+   SHARED_DEVICE,
+   NON_SHARED_DEVICE,
+   STRONGLY_ORDERED
+} mmu_memory_Type;
+
+/* Region cacheability attributes */
+typedef enum
+{
+   NON_CACHEABLE,
+   WB_WA,
+   WT,
+   WB_NO_WA,
+} mmu_cacheability_Type;
+
+/* Region parity check attributes */
+typedef enum
+{
+   ECC_DISABLED,
+   ECC_ENABLED,
+} mmu_ecc_check_Type;
+
+/* Region execution attributes */
+typedef enum
+{
+   EXECUTE,
+   NON_EXECUTE,
+} mmu_execute_Type;
+
+/* Region global attributes */
+typedef enum
+{
+   GLOBAL,
+   NON_GLOBAL,
+} mmu_global_Type;
+
+/* Region shareability attributes */
+typedef enum
+{
+   NON_SHARED,
+   SHARED,
+} mmu_shared_Type;
+
+/* Region security attributes */
+typedef enum
+{
+   SECURE,
+   NON_SECURE,
+} mmu_secure_Type;
+
+/* Region access attributes */
+typedef enum
+{
+   NO_ACCESS,
+   RW,
+   READ,
+} mmu_access_Type;
+
+/* Memory Region definition: collects all attributes consumed by the
+   MMU_Get*Descriptor helpers / section_* and page*_* macros. */
+typedef struct RegionStruct {
+    mmu_region_size_Type rg_t;            // region size: SECTION, PAGE_4k, PAGE_64k
+    mmu_memory_Type mem_t;                // memory type (normal/device/strongly-ordered)
+    uint8_t domain;                       // domain number (low 4 bits used)
+    mmu_cacheability_Type inner_norm_t;   // inner cacheability (normal memory)
+    mmu_cacheability_Type outer_norm_t;   // outer cacheability (normal memory)
+    mmu_ecc_check_Type e_t;               // parity/ECC check enable
+    mmu_execute_Type xn_t;                // execute-never attribute
+    mmu_global_Type g_t;                  // global / non-global attribute
+    mmu_secure_Type sec_t;                // secure / non-secure attribute
+    mmu_access_Type priv_t;               // privileged access rights
+    mmu_access_Type user_t;               // user access rights
+    mmu_shared_Type sh_t;                 // shareability attribute
+
+} mmu_region_attributes_Type;
+
+//Following macros define the descriptors and attributes
+//Sect_Normal. Outer & inner wb/wa, non-shareable, executable, rw, domain 0
+#define section_normal(descriptor_l1, region)     region.rg_t = SECTION; \
+                                   region.domain = 0x0; \
+                                   region.e_t = ECC_DISABLED; \
+                                   region.g_t = GLOBAL; \
+                                   region.inner_norm_t = WB_WA; \
+                                   region.outer_norm_t = WB_WA; \
+                                   region.mem_t = NORMAL; \
+                                   region.sec_t = SECURE; \
+                                   region.xn_t = EXECUTE; \
+                                   region.priv_t = RW; \
+                                   region.user_t = RW; \
+                                   region.sh_t = NON_SHARED; \
+                                   MMU_GetSectionDescriptor(&descriptor_l1, region);
+
+//Sect_Normal_Cod. Outer & inner wb/wa, non-shareable, executable, ro, domain 0
+#define section_normal_cod(descriptor_l1, region) region.rg_t = SECTION; \
+                                   region.domain = 0x0; \
+                                   region.e_t = ECC_DISABLED; \
+                                   region.g_t = GLOBAL; \
+                                   region.inner_norm_t = WB_WA; \
+                                   region.outer_norm_t = WB_WA; \
+                                   region.mem_t = NORMAL; \
+                                   region.sec_t = SECURE; \
+                                   region.xn_t = EXECUTE; \
+                                   region.priv_t = READ; \
+                                   region.user_t = READ; \
+                                   region.sh_t = NON_SHARED; \
+                                   MMU_GetSectionDescriptor(&descriptor_l1, region);
+
+//Sect_Normal_RO. Sect_Normal_Cod, but not executable
+#define section_normal_ro(descriptor_l1, region)  region.rg_t = SECTION; \
+                                   region.domain = 0x0; \
+                                   region.e_t = ECC_DISABLED; \
+                                   region.g_t = GLOBAL; \
+                                   region.inner_norm_t = WB_WA; \
+                                   region.outer_norm_t = WB_WA; \
+                                   region.mem_t = NORMAL; \
+                                   region.sec_t = SECURE; \
+                                   region.xn_t = NON_EXECUTE; \
+                                   region.priv_t = READ; \
+                                   region.user_t = READ; \
+                                   region.sh_t = NON_SHARED; \
+                                   MMU_GetSectionDescriptor(&descriptor_l1, region);
+
+//Sect_Normal_RW. Sect_Normal_Cod, but writeable and not executable
+#define section_normal_rw(descriptor_l1, region) region.rg_t = SECTION; \
+                                   region.domain = 0x0; \
+                                   region.e_t = ECC_DISABLED; \
+                                   region.g_t = GLOBAL; \
+                                   region.inner_norm_t = WB_WA; \
+                                   region.outer_norm_t = WB_WA; \
+                                   region.mem_t = NORMAL; \
+                                   region.sec_t = SECURE; \
+                                   region.xn_t = NON_EXECUTE; \
+                                   region.priv_t = RW; \
+                                   region.user_t = RW; \
+                                   region.sh_t = NON_SHARED; \
+                                   MMU_GetSectionDescriptor(&descriptor_l1, region);
+//Sect_SO. Strongly-ordered (therefore shareable), not executable, rw, domain 0, base addr 0
+#define section_so(descriptor_l1, region) region.rg_t = SECTION; \
+                                   region.domain = 0x0; \
+                                   region.e_t = ECC_DISABLED; \
+                                   region.g_t = GLOBAL; \
+                                   region.inner_norm_t = NON_CACHEABLE; \
+                                   region.outer_norm_t = NON_CACHEABLE; \
+                                   region.mem_t = STRONGLY_ORDERED; \
+                                   region.sec_t = SECURE; \
+                                   region.xn_t = NON_EXECUTE; \
+                                   region.priv_t = RW; \
+                                   region.user_t = RW; \
+                                   region.sh_t = NON_SHARED; \
+                                   MMU_GetSectionDescriptor(&descriptor_l1, region);
+
+//Sect_Device_RO. Device, non-shareable, non-executable, ro, domain 0, base addr 0
+#define section_device_ro(descriptor_l1, region) region.rg_t = SECTION; \
+                                   region.domain = 0x0; \
+                                   region.e_t = ECC_DISABLED; \
+                                   region.g_t = GLOBAL; \
+                                   region.inner_norm_t = NON_CACHEABLE; \
+                                   region.outer_norm_t = NON_CACHEABLE; \
+                                   region.mem_t = STRONGLY_ORDERED; \
+                                   region.sec_t = SECURE; \
+                                   region.xn_t = NON_EXECUTE; \
+                                   region.priv_t = READ; \
+                                   region.user_t = READ; \
+                                   region.sh_t = NON_SHARED; \
+                                   MMU_GetSectionDescriptor(&descriptor_l1, region);
+
+//Sect_Device_RW. Sect_Device_RO, but writeable
+#define section_device_rw(descriptor_l1, region) region.rg_t = SECTION; \
+                                   region.domain = 0x0; \
+                                   region.e_t = ECC_DISABLED; \
+                                   region.g_t = GLOBAL; \
+                                   region.inner_norm_t = NON_CACHEABLE; \
+                                   region.outer_norm_t = NON_CACHEABLE; \
+                                   region.mem_t = STRONGLY_ORDERED; \
+                                   region.sec_t = SECURE; \
+                                   region.xn_t = NON_EXECUTE; \
+                                   region.priv_t = RW; \
+                                   region.user_t = RW; \
+                                   region.sh_t = NON_SHARED; \
+                                   MMU_GetSectionDescriptor(&descriptor_l1, region);
+//Page_4k_Device_RW.  Shared device, not executable, rw, domain 0
+#define page4k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_4k; \
+                                   region.domain = 0x0; \
+                                   region.e_t = ECC_DISABLED; \
+                                   region.g_t = GLOBAL; \
+                                   region.inner_norm_t = NON_CACHEABLE; \
+                                   region.outer_norm_t = NON_CACHEABLE; \
+                                   region.mem_t = SHARED_DEVICE; \
+                                   region.sec_t = SECURE; \
+                                   region.xn_t = NON_EXECUTE; \
+                                   region.priv_t = RW; \
+                                   region.user_t = RW; \
+                                   region.sh_t = NON_SHARED; \
+                                   MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region);
+
+//Page_64k_Device_RW.  Shared device, not executable, rw, domain 0
+#define page64k_device_rw(descriptor_l1, descriptor_l2, region)  region.rg_t = PAGE_64k; \
+                                   region.domain = 0x0; \
+                                   region.e_t = ECC_DISABLED; \
+                                   region.g_t = GLOBAL; \
+                                   region.inner_norm_t = NON_CACHEABLE; \
+                                   region.outer_norm_t = NON_CACHEABLE; \
+                                   region.mem_t = SHARED_DEVICE; \
+                                   region.sec_t = SECURE; \
+                                   region.xn_t = NON_EXECUTE; \
+                                   region.priv_t = RW; \
+                                   region.user_t = RW; \
+                                   region.sh_t = NON_SHARED; \
+                                   MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region);
+
+/** \brief  Set section execution-never attribute
+
+  Writes the XN bit of a section L1 descriptor.
+
+  \param [out]    descriptor_l1  L1 descriptor.
+  \param [in]                xn  Section execution-never attribute : EXECUTE , NON_EXECUTE.
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_XNSection(uint32_t *descriptor_l1, mmu_execute_Type xn)
+{
+  const uint32_t xn_bit = (uint32_t)(xn & 0x1) << SECTION_XN_SHIFT;
+  *descriptor_l1 = (*descriptor_l1 & SECTION_XN_MASK) | xn_bit;
+  return 0;
+}
+
+/** \brief  Set section domain
+
+  Writes the 4-bit domain field of a section L1 descriptor.
+
+  \param [out]    descriptor_l1  L1 descriptor.
+  \param [in]            domain  Section domain
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_DomainSection(uint32_t *descriptor_l1, uint8_t domain)
+{
+  const uint32_t domain_bits = (uint32_t)(domain & 0xF) << SECTION_DOMAIN_SHIFT;
+  *descriptor_l1 = (*descriptor_l1 & SECTION_DOMAIN_MASK) | domain_bits;
+  return 0;
+}
+
+/** \brief  Set section parity check
+
+  Writes the P bit of a section L1 descriptor.
+
+  \param [out]    descriptor_l1  L1 descriptor.
+  \param [in]              p_bit Parity check: ECC_DISABLED, ECC_ENABLED
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_PSection(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit)
+{
+  const uint32_t parity_bit = (uint32_t)(p_bit & 0x1) << SECTION_P_SHIFT;
+  *descriptor_l1 = (*descriptor_l1 & SECTION_P_MASK) | parity_bit;
+  return 0;
+}
+
+/** \brief  Set section access privileges
+
+  The function sets section access privileges
+
+  \param [out]    descriptor_l1  L1 descriptor.
+  \param [in]              user  User Level Access: NO_ACCESS, RW, READ
+  \param [in]              priv  Privilege Level Access: NO_ACCESS, RW, READ
+  \param [in]               afe  Access flag enable
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_APSection(uint32_t *descriptor_l1, mmu_access_Type user, mmu_access_Type priv, uint32_t afe)
+{
+  uint32_t ap = 0;   // AP[2:0] encoding; unsupported combinations fall through as 0 (no access)
+
+  if (afe == 0) { //full access
+    if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; }
+    else if ((priv == RW) && (user == NO_ACCESS))   { ap = 0x1; }
+    else if ((priv == RW) && (user == READ))        { ap = 0x2; }
+    else if ((priv == RW) && (user == RW))          { ap = 0x3; }
+    else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
+    //NOTE(review): MMU_APPage uses 0x6 for the same READ/READ case - confirm
+    //which RO/RO encoding is intended here.
+    else if ((priv == READ) && (user == READ))      { ap = 0x7; }
+  }
+
+  else { //Simplified access
+    if ((priv == RW) && (user == NO_ACCESS))        { ap = 0x1; }
+    else if ((priv == RW) && (user == RW))          { ap = 0x3; }
+    else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
+    else if ((priv == READ) && (user == READ))      { ap = 0x7; }
+  }
+
+  // AP[1:0] and AP[2] live at different bit positions in the descriptor
+  *descriptor_l1 &= SECTION_AP_MASK;
+  *descriptor_l1 |= (ap & 0x3) << SECTION_AP_SHIFT;
+  *descriptor_l1 |= ((ap & 0x4)>>2) << SECTION_AP2_SHIFT;
+
+  return 0;
+}
+
+/** \brief  Set section shareability
+
+  Writes the S bit of a section L1 descriptor.
+
+  \param [out]    descriptor_l1  L1 descriptor.
+  \param [in]             s_bit  Section shareability: NON_SHARED, SHARED
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_SharedSection(uint32_t *descriptor_l1, mmu_shared_Type s_bit)
+{
+  const uint32_t shared_bit = (uint32_t)(s_bit & 0x1) << SECTION_S_SHIFT;
+  *descriptor_l1 = (*descriptor_l1 & SECTION_S_MASK) | shared_bit;
+  return 0;
+}
+
+/** \brief  Set section Global attribute
+
+  Writes the nG (not-global) bit of a section L1 descriptor.
+
+  \param [out]    descriptor_l1  L1 descriptor.
+  \param [in]             g_bit  Section attribute: GLOBAL, NON_GLOBAL
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_GlobalSection(uint32_t *descriptor_l1, mmu_global_Type g_bit)
+{
+  const uint32_t ng_bit = (uint32_t)(g_bit & 0x1) << SECTION_NG_SHIFT;
+  *descriptor_l1 = (*descriptor_l1 & SECTION_NG_MASK) | ng_bit;
+  return 0;
+}
+
+/** \brief  Set section Security attribute
+
+  Writes the NS (non-secure) bit of a section L1 descriptor.
+
+  \param [out]    descriptor_l1  L1 descriptor.
+  \param [in]             s_bit  Section Security attribute: SECURE, NON_SECURE
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_SecureSection(uint32_t *descriptor_l1, mmu_secure_Type s_bit)
+{
+  const uint32_t ns_bit = (uint32_t)(s_bit & 0x1) << SECTION_NS_SHIFT;
+  *descriptor_l1 = (*descriptor_l1 & SECTION_NS_MASK) | ns_bit;
+  return 0;
+}
+
+/* Page 4k or 64k */
+/** \brief  Set 4k/64k page execution-never attribute
+
+  The XN bit sits at different positions in 4k (small page) and 64k
+  (large page) L2 descriptors, so the page size selects the mask/shift.
+
+  \param [out]    descriptor_l2  L2 descriptor.
+  \param [in]                xn  Page execution-never attribute : EXECUTE , NON_EXECUTE.
+  \param [in]              page  Page size: PAGE_4k, PAGE_64k,
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_XNPage(uint32_t *descriptor_l2, mmu_execute_Type xn, mmu_region_size_Type page)
+{
+  const uint32_t mask  = (page == PAGE_4k) ? PAGE_XN_4K_MASK  : PAGE_XN_64K_MASK;
+  const uint32_t shift = (page == PAGE_4k) ? PAGE_XN_4K_SHIFT : PAGE_XN_64K_SHIFT;
+
+  *descriptor_l2 = (*descriptor_l2 & mask) | ((uint32_t)(xn & 0x1) << shift);
+  return 0;
+}
+
+/** \brief  Set 4k/64k page domain
+
+  Writes the 4-bit domain field of a page-table L1 descriptor.
+
+  \param [out]    descriptor_l1  L1 descriptor.
+  \param [in]            domain  Page domain
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_DomainPage(uint32_t *descriptor_l1, uint8_t domain)
+{
+  const uint32_t domain_bits = (uint32_t)(domain & 0xf) << PAGE_DOMAIN_SHIFT;
+  *descriptor_l1 = (*descriptor_l1 & PAGE_DOMAIN_MASK) | domain_bits;
+  return 0;
+}
+
+/** \brief  Set 4k/64k page parity check
+
+  The function sets 4k/64k page parity check
+
+  \param [out]    descriptor_l1  L1 descriptor.
+  \param [in]              p_bit Parity check: ECC_DISABLED, ECC_ENABLED
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_PPage(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit)
+{
+  // Consistency: use the PAGE_* macros (numerically identical to the
+  // SECTION_* ones used before) since this operates on a page L1 descriptor.
+  *descriptor_l1 &= PAGE_P_MASK;
+  *descriptor_l1 |= ((p_bit & 0x1) << PAGE_P_SHIFT);
+  return 0;
+}
+
+/** \brief  Set 4k/64k page access privileges
+
+  The function sets 4k/64k page access privileges
+
+  \param [out]    descriptor_l2  L2 descriptor.
+  \param [in]              user  User Level Access: NO_ACCESS, RW, READ
+  \param [in]              priv  Privilege Level Access: NO_ACCESS, RW, READ
+  \param [in]               afe  Access flag enable
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_APPage(uint32_t *descriptor_l2, mmu_access_Type user, mmu_access_Type priv, uint32_t afe)
+{
+  uint32_t ap = 0;   // AP[2:0] encoding; unsupported combinations fall through as 0 (no access)
+
+  if (afe == 0) { //full access
+    if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; }
+    else if ((priv == RW) && (user == NO_ACCESS))   { ap = 0x1; }
+    else if ((priv == RW) && (user == READ))        { ap = 0x2; }
+    else if ((priv == RW) && (user == RW))          { ap = 0x3; }
+    else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
+    //NOTE(review): MMU_APSection uses 0x7 for the same READ/READ case - confirm
+    //which RO/RO encoding is intended here.
+    else if ((priv == READ) && (user == READ))      { ap = 0x6; }
+  }
+
+  else { //Simplified access
+    if ((priv == RW) && (user == NO_ACCESS))        { ap = 0x1; }
+    else if ((priv == RW) && (user == RW))          { ap = 0x3; }
+    else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
+    else if ((priv == READ) && (user == READ))      { ap = 0x7; }
+  }
+
+  // AP[1:0] and AP[2] live at different bit positions in the descriptor
+  *descriptor_l2 &= PAGE_AP_MASK;
+  *descriptor_l2 |= (ap & 0x3) << PAGE_AP_SHIFT;
+  *descriptor_l2 |= ((ap & 0x4)>>2) << PAGE_AP2_SHIFT;
+
+  return 0;
+}
+
+/** \brief  Set 4k/64k page shareability
+
+  Writes the S bit of a 4k/64k page L2 descriptor.
+
+  \param [out]    descriptor_l2  L2 descriptor.
+  \param [in]             s_bit  4k/64k page shareability: NON_SHARED, SHARED
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_SharedPage(uint32_t *descriptor_l2, mmu_shared_Type s_bit)
+{
+  const uint32_t shared_bit = (uint32_t)(s_bit & 0x1) << PAGE_S_SHIFT;
+  *descriptor_l2 = (*descriptor_l2 & PAGE_S_MASK) | shared_bit;
+  return 0;
+}
+
+/** \brief  Set 4k/64k page Global attribute
+
+  Programs the not-global (nG) bit of a 4k/64k L2 page descriptor.
+
+  \param [out]    descriptor_l2  L2 descriptor.
+  \param [in]             g_bit  4k/64k page attribute: GLOBAL, NON_GLOBAL
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_GlobalPage(uint32_t *descriptor_l2, mmu_global_Type g_bit)
+{
+  uint32_t desc = *descriptor_l2 & PAGE_NG_MASK;   // clear the nG field first
+  *descriptor_l2 = desc | ((g_bit & 0x1) << PAGE_NG_SHIFT);
+  return 0;
+}
+
+/** \brief  Set 4k/64k page Security attribute
+
+  The function sets the 4k/64k page Security attribute
+  (writes the NS bit of the L1 page-table descriptor).
+
+  \param [out]    descriptor_l1  L1 descriptor.
+  \param [in]             s_bit  4k/64k page Security attribute: SECURE, NON_SECURE
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_SecurePage(uint32_t *descriptor_l1, mmu_secure_Type s_bit)
+{
+  *descriptor_l1 &= PAGE_NS_MASK;
+  *descriptor_l1 |= ((s_bit & 0x1) << PAGE_NS_SHIFT);
+  return 0;
+}
+
+/** \brief  Set Section memory attributes
+
+  The function sets section memory attributes via the TEX/C/B descriptor bits
+  (valid only with TEX remap disabled).
+
+  \param [out]    descriptor_l1  L1 descriptor.
+  \param [in]               mem  Section memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
+  \param [in]             outer  Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA (used only when mem is NORMAL)
+  \param [in]             inner  Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA (used only when mem is NORMAL)
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_MemorySection(uint32_t *descriptor_l1, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner)
+{
+  *descriptor_l1 &= SECTION_TEXCB_MASK;
+
+  if (STRONGLY_ORDERED == mem)
+  {
+    return 0;  // TEX=000, C=0, B=0: the cleared state already encodes strongly-ordered
+  }
+  else if (SHARED_DEVICE == mem)
+  {
+    *descriptor_l1 |= (1 << SECTION_B_SHIFT);     // TEX=000, C=0, B=1
+  }
+  else if (NON_SHARED_DEVICE == mem)
+  {
+    *descriptor_l1 |= (1 << SECTION_TEX1_SHIFT);  // TEX=010, C=0, B=0
+  }
+  else if (NORMAL == mem)
+  {
+   // Cacheable normal memory: TEX[2]=1, C/B encode the inner policy,
+   // TEX[1:0] encode the outer policy.
+   *descriptor_l1 |= 1 << SECTION_TEX2_SHIFT;
+   switch(inner)
+   {
+      case NON_CACHEABLE:
+        break;
+      case WB_WA:
+        *descriptor_l1 |= (1 << SECTION_B_SHIFT);
+        break;
+      case WT:
+        *descriptor_l1 |= 1 << SECTION_C_SHIFT;
+        break;
+      case WB_NO_WA:
+        *descriptor_l1 |= (1 << SECTION_B_SHIFT) | (1 << SECTION_C_SHIFT);
+        break;
+    }
+    switch(outer)
+    {
+      case NON_CACHEABLE:
+        break;
+      case WB_WA:
+        *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT);
+        break;
+      case WT:
+        *descriptor_l1 |= 1 << SECTION_TEX1_SHIFT;
+        break;
+      case WB_NO_WA:
+        // FIX: was TEX0|TEX0; outer write-back/no-write-allocate is TEX[1:0] = 0b11
+        *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT) | (1 << SECTION_TEX1_SHIFT);
+        break;
+    }
+  }
+  // NOTE(review): a plain DEVICE argument is not matched above and is left
+  // encoded as strongly-ordered.
+  return 0;
+}
+
+/** \brief  Set 4k/64k page memory attributes
+
+  The function sets 4k/64k page memory attributes via the TEX/C/B descriptor
+  bits (valid only with TEX remap disabled).
+
+  \param [out]    descriptor_l2  L2 descriptor.
+  \param [in]               mem  4k/64k page memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
+  \param [in]             outer  Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA (used only when mem is NORMAL)
+  \param [in]             inner  Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA (used only when mem is NORMAL)
+  \param [in]              page  Page size: PAGE_4k or PAGE_64k
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_MemoryPage(uint32_t *descriptor_l2, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner, mmu_region_size_Type page)
+{
+  *descriptor_l2 &= PAGE_4K_TEXCB_MASK;
+
+  if (page == PAGE_64k)
+  {
+    //same as section: the 64k (large page) TEX/C/B fields share the section bit positions
+    MMU_MemorySection(descriptor_l2, mem, outer, inner);
+  }
+  else
+  {
+    if (STRONGLY_ORDERED == mem)
+    {
+      return 0;  // TEX=000, C=0, B=0: the cleared state already encodes strongly-ordered
+    }
+    else if (SHARED_DEVICE == mem)
+    {
+      *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);     // TEX=000, C=0, B=1
+    }
+    else if (NON_SHARED_DEVICE == mem)
+    {
+      *descriptor_l2 |= (1 << PAGE_4K_TEX1_SHIFT);  // TEX=010, C=0, B=0
+    }
+    else if (NORMAL == mem)
+    {
+      // Cacheable normal memory: TEX[2]=1, C/B encode the inner policy,
+      // TEX[1:0] encode the outer policy.
+      *descriptor_l2 |= 1 << PAGE_4K_TEX2_SHIFT;
+      switch(inner)
+      {
+        case NON_CACHEABLE:
+          break;
+        case WB_WA:
+          *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);
+          break;
+        case WT:
+          *descriptor_l2 |= 1 << PAGE_4K_C_SHIFT;
+          break;
+        case WB_NO_WA:
+          *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT) | (1 << PAGE_4K_C_SHIFT);
+          break;
+      }
+      switch(outer)
+      {
+        case NON_CACHEABLE:
+          break;
+        case WB_WA:
+          *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT);
+          break;
+        case WT:
+          *descriptor_l2 |= 1 << PAGE_4K_TEX1_SHIFT;
+          break;
+        case WB_NO_WA:
+          // FIX: was TEX0|TEX0; outer write-back/no-write-allocate is TEX[1:0] = 0b11
+          *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT) | (1 << PAGE_4K_TEX1_SHIFT);
+          break;
+      }
+    }
+    // NOTE(review): a plain DEVICE argument is not matched above and is left
+    // encoded as strongly-ordered.
+  }
+
+  return 0;
+}
+
+/** \brief  Create a L1 section descriptor
+
+  The function creates a section descriptor.
+
+  Assumptions:
+  - 16MB super sections not supported
+  - TEX remap disabled, so memory type and attributes are described directly by bits in the descriptor
+  - Functions always return 0
+
+  \param [out]       descriptor  L1 descriptor
+  (Note: unlike MMU_GetPageDescriptor, no L2 descriptor is produced.)
+  \param [in]               reg  Section attributes
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_GetSectionDescriptor(uint32_t *descriptor, mmu_region_attributes_Type reg)
+{
+  *descriptor  = 0;
+
+  // Fold each attribute group into the descriptor, then stamp the
+  // section descriptor type bits last.
+  MMU_MemorySection(descriptor, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t);
+  MMU_XNSection(descriptor,reg.xn_t);
+  MMU_DomainSection(descriptor, reg.domain);
+  MMU_PSection(descriptor, reg.e_t);
+  MMU_APSection(descriptor, reg.priv_t, reg.user_t, 1);
+  MMU_SharedSection(descriptor,reg.sh_t);
+  MMU_GlobalSection(descriptor,reg.g_t);
+  MMU_SecureSection(descriptor,reg.sec_t);
+  *descriptor &= SECTION_MASK;
+  *descriptor |= SECTION_DESCRIPTOR;
+ 
+  return 0;
+}
+
+
+/** \brief  Create a L1 and L2 4k/64k page descriptor
+
+  The function creates a 4k/64k page descriptor.
+  Assumptions:
+  - TEX remap disabled, so memory type and attributes are described directly by bits in the descriptor
+  - Functions always return 0
+
+  \param [out]       descriptor  L1 descriptor
+  \param [out]      descriptor2  L2 descriptor
+  \param [in]               reg  4k/64k page attributes
+
+  \return          0
+*/
+__STATIC_INLINE int MMU_GetPageDescriptor(uint32_t *descriptor, uint32_t *descriptor2, mmu_region_attributes_Type reg)
+{
+  *descriptor  = 0;
+  *descriptor2 = 0;
+
+  switch (reg.rg_t)
+  {
+    case PAGE_4k:
+      // Memory type / XN / AP / S / nG live in the L2 descriptor;
+      // domain / P / NS live in the L1 descriptor.
+      MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_4k);
+      MMU_XNPage(descriptor2, reg.xn_t, PAGE_4k);
+      MMU_DomainPage(descriptor, reg.domain);
+      MMU_PPage(descriptor, reg.e_t);
+      MMU_APPage(descriptor2, reg.priv_t, reg.user_t, 1);
+      MMU_SharedPage(descriptor2,reg.sh_t);
+      MMU_GlobalPage(descriptor2,reg.g_t);
+      MMU_SecurePage(descriptor,reg.sec_t);
+      *descriptor &= PAGE_L1_MASK;
+      *descriptor |= PAGE_L1_DESCRIPTOR;
+      *descriptor2 &= PAGE_L2_4K_MASK;
+      *descriptor2 |= PAGE_L2_4K_DESC;
+      break;
+
+    case PAGE_64k:
+      // Same attribute split as PAGE_4k, but with 64k (large page) L2 type bits.
+      MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_64k);
+      MMU_XNPage(descriptor2, reg.xn_t, PAGE_64k);
+      MMU_DomainPage(descriptor, reg.domain);
+      MMU_PPage(descriptor, reg.e_t);
+      MMU_APPage(descriptor2, reg.priv_t, reg.user_t, 1);
+      MMU_SharedPage(descriptor2,reg.sh_t);
+      MMU_GlobalPage(descriptor2,reg.g_t);
+      MMU_SecurePage(descriptor,reg.sec_t);
+      *descriptor &= PAGE_L1_MASK;
+      *descriptor |= PAGE_L1_DESCRIPTOR;
+      *descriptor2 &= PAGE_L2_64K_MASK;
+      *descriptor2 |= PAGE_L2_64K_DESC;
+      break;
+
+    case SECTION:
+      //error: SECTION is not a page mapping; both descriptors stay zero (translation fault)
+      break;
+  }
+  
+  return 0;
+}
+
+/** \brief  Create a 1MB Section
+
+  Writes 'count' consecutive L1 section entries starting at the table slot
+  selected by base_address.
+
+  \param [in]               ttb  Translation table base address
+  \param [in]      base_address  Section base address
+  \param [in]             count  Number of sections to create
+  \param [in]     descriptor_l1  L1 descriptor (region attributes)
+
+*/
+__STATIC_INLINE void MMU_TTSection(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1)
+{
+  uint32_t n;
+  uint32_t l1_entry = (base_address & 0xFFF00000) | descriptor_l1;
+  uint32_t *l1_slot = ttb + (base_address >> 20);  // one 4-byte entry per 1MB section
+
+  for (n = 0; n < count; n++)
+  {
+    *l1_slot++ = l1_entry;
+    l1_entry += OFFSET_1M;
+  }
+}
+
+/** \brief  Create a 4k page entry
+
+  Writes one L1 page-table pointer entry and 'count' L2 small-page entries.
+  NOTE(review): only a single L1 entry is written, so all pages are assumed
+  to fall inside the 1MB region selected by base_address — confirm at call sites.
+
+  \param [in]               ttb  L1 table base address
+  \param [in]      base_address  4k base address
+  \param [in]             count  Number of 4k pages to create
+  \param [in]     descriptor_l1  L1 descriptor (region attributes)
+  \param [in]            ttb_l2  L2 table base address (1KB aligned)
+  \param [in]     descriptor_l2  L2 descriptor (region attributes)
+
+*/
+__STATIC_INLINE void MMU_TTPage4k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 )
+{
+
+  uint32_t offset, offset2;
+  uint32_t entry, entry2;
+  uint32_t i;
+
+  offset = base_address >> 20;
+  // FIX: cast through uint32_t, not int — table addresses with bit 31 set
+  // (e.g. DRAM at 0x80000000) must not go through a signed type.
+  entry  = ((uint32_t)ttb_l2 & 0xFFFFFC00) | descriptor_l1;
+
+  //4 bytes aligned
+  ttb += offset;
+  //create l1_entry pointing at the L2 table
+  *ttb = entry;
+
+  offset2 = (base_address & 0xff000) >> 12;  // index of the first 4k entry within the L2 table
+  ttb_l2 += offset2;
+  entry2 = (base_address & 0xFFFFF000) | descriptor_l2;
+  for (i = 0; i < count; i++ )
+  {
+    //4 bytes aligned
+    *ttb_l2++ = entry2;
+    entry2 += OFFSET_4K;
+  }
+}
+
+/** \brief  Create a 64k page entry
+
+  Writes one L1 page-table pointer entry and, for each 64k page, the 16
+  replicated L2 large-page entries the short-descriptor format requires.
+  NOTE(review): only a single L1 entry is written, so all pages are assumed
+  to fall inside the 1MB region selected by base_address — confirm at call sites.
+
+  \param [in]               ttb  L1 table base address
+  \param [in]      base_address  64k base address
+  \param [in]             count  Number of 64k pages to create
+  \param [in]     descriptor_l1  L1 descriptor (region attributes)
+  \param [in]            ttb_l2  L2 table base address (1KB aligned)
+  \param [in]     descriptor_l2  L2 descriptor (region attributes)
+
+*/
+__STATIC_INLINE void MMU_TTPage64k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 )
+{
+  uint32_t offset, offset2;
+  uint32_t entry, entry2;
+  uint32_t i,j;
+
+
+  offset = base_address >> 20;
+  // FIX: cast through uint32_t, not int — table addresses with bit 31 set
+  // (e.g. DRAM at 0x80000000) must not go through a signed type.
+  entry  = ((uint32_t)ttb_l2 & 0xFFFFFC00) | descriptor_l1;
+
+  //4 bytes aligned
+  ttb += offset;
+  //create l1_entry pointing at the L2 table
+  *ttb = entry;
+
+  offset2 = (base_address & 0xff000) >> 12;  // index of the first L2 entry (16-aligned for a 64k page)
+  ttb_l2 += offset2;
+  entry2 = (base_address & 0xFFFF0000) | descriptor_l2;
+  for (i = 0; i < count; i++ )
+  {
+    //create 16 identical entries per 64k (large) page
+    for (j = 0; j < 16; j++)
+    {
+      //4 bytes aligned
+      *ttb_l2++ = entry2;
+    }
+    entry2 += OFFSET_64K;
+  }
+}
+
+/** \brief  Enable MMU
+
+  Enable MMU
+*/
+__STATIC_INLINE void MMU_Enable(void) {
+  uint32_t sctlr = __get_SCTLR();
+  sctlr &= ~(1 << 28);   // TRE = 0: disable TEX remap
+  sctlr &= ~(1 <<  1);   // A   = 0: disable strict alignment fault checking
+  sctlr |=  (1 << 29);   // AFE = 1: enable simplified access permissions model
+  sctlr |=  1;           // M   = 1: enable the MMU
+  __set_SCTLR(sctlr);
+  __ISB();
+}
+
+/** \brief  Disable MMU
+
+  Disable MMU
+*/
+__STATIC_INLINE void MMU_Disable(void) {
+  uint32_t sctlr = __get_SCTLR();
+  __set_SCTLR(sctlr & ~1);   // M = 0: disable the MMU
+  __ISB();
+}
+
+/** \brief  Invalidate entire unified TLB
+
+  TLBIALL. Invalidate entire unified TLB
+*/
+
+__STATIC_INLINE void MMU_InvalidateTLB(void) {
+  __set_TLBIALL(0);   // TLBIALL
+  __DSB();            // wait for the invalidation to complete
+  __ISB();            // flush the pipeline so instruction fetches see the new state
+}
+
+/*@} end of MMU_Functions */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CA_H_DEPENDANT */
+
+#endif /* __CMSIS_GENERIC */
diff --git a/CMSIS/Utilities/gen_pack.bat b/CMSIS/Utilities/gen_pack.bat
index fb142ea..773385c 100644
--- a/CMSIS/Utilities/gen_pack.bat
+++ b/CMSIS/Utilities/gen_pack.bat
@@ -49,6 +49,7 @@
 :: -- Core files 
 XCOPY /Q /S /Y ..\..\CMSIS\Core\Include\*.* %RELEASE_PATH%\CMSIS\Include\*.*
 XCOPY /Q /S /Y ..\..\CMSIS\Core\Template\ARMv8-M\*.* %RELEASE_PATH%\CMSIS\Core\Template\ARMv8-M\*.*
+XCOPY /Q /S /Y ..\..\CMSIS\CORE_A\Include\*.* %RELEASE_PATH%\CMSIS\CORE_A\Include\*.*
 
 :: -- DAP files 
 XCOPY /Q /S /Y ..\..\CMSIS\DAP\*.* %RELEASE_PATH%\CMSIS\DAP\*.*
diff --git a/Device/ARM/ARMCA7/Include/ARMCA7.h b/Device/ARM/ARMCA7/Include/ARMCA7.h
new file mode 100644
index 0000000..abfd243
--- /dev/null
+++ b/Device/ARM/ARMCA7/Include/ARMCA7.h
@@ -0,0 +1,130 @@
+/******************************************************************************
+ * @file     ARMCA7.h
+ * @brief    CMSIS Cortex-A7 Core Peripheral Access Layer Header File 
+ * @version  V1.00
+ * @date     22 Feb 2017
+ *
+ * @note
+ *
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ARMCA7_H__
+#define __ARMCA7_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* -------------------------  Interrupt Number Definition  ------------------------ */
+
+typedef enum IRQn
+{
+/******  SGI Interrupts Numbers                 ****************************************/
+  SGI0_IRQn            =  0,
+  SGI1_IRQn            =  1,
+  SGI2_IRQn            =  2,
+  SGI3_IRQn            =  3,
+  SGI4_IRQn            =  4,
+  SGI5_IRQn            =  5,
+  SGI6_IRQn            =  6,
+  SGI7_IRQn            =  7,
+  SGI8_IRQn            =  8,
+  SGI9_IRQn            =  9,
+  SGI10_IRQn           = 10,
+  SGI11_IRQn           = 11,
+  SGI12_IRQn           = 12,
+  SGI13_IRQn           = 13,
+  SGI14_IRQn           = 14,
+  SGI15_IRQn           = 15,
+
+/******  Cortex-A7 Processor Exceptions Numbers ****************************************/
+  GlobalTimer_IRQn     = 27,        /*!< Global Timer Interrupt                        */
+  PrivTimer_IRQn       = 29,        /*!< Private Timer Interrupt                       */
+  PrivWatchdog_IRQn    = 30,        /*!< Private Watchdog Interrupt                    */
+
+/******  Platform Exceptions Numbers ***************************************************/
+  Watchdog_IRQn        = 32,        /*!< SP805 Interrupt        */
+  Timer0_IRQn          = 34,        /*!< SP804 Interrupt        */
+  Timer1_IRQn          = 35,        /*!< SP804 Interrupt        */
+  RTClock_IRQn         = 36,        /*!< PL031 Interrupt        */
+  UART0_IRQn           = 37,        /*!< PL011 Interrupt        */
+  UART1_IRQn           = 38,        /*!< PL011 Interrupt        */
+  UART2_IRQn           = 39,        /*!< PL011 Interrupt        */
+  UART3_IRQn           = 40,        /*!< PL011 Interrupt        */
+  MCI0_IRQn            = 41,        /*!< PL180 Interrupt (1st)  */
+  MCI1_IRQn            = 42,        /*!< PL180 Interrupt (2nd)  */
+  AACI_IRQn            = 43,        /*!< PL041 Interrupt        */
+  Keyboard_IRQn        = 44,        /*!< PL050 Interrupt        */
+  Mouse_IRQn           = 45,        /*!< PL050 Interrupt        */
+  CLCD_IRQn            = 46,        /*!< PL111 Interrupt        */
+  Ethernet_IRQn        = 47,        /*!< SMSC_91C111 Interrupt  */
+  VFS2_IRQn            = 73,        /*!< VFS2 Interrupt         */
+} IRQn_Type;
+
+/******************************************************************************/
+/*                         Peripheral memory map                              */
+/******************************************************************************/
+
+/* Peripheral and RAM base address */
+#define VE_A7_MP_FLASH_BASE0                  (0x00000000UL)                        /*!< (FLASH0    ) Base Address */
+#define VE_A7_MP_FLASH_BASE1                  (0x08000000UL)                        /*!< (FLASH1    ) Base Address */
+#define VE_A7_MP_PERIPH_BASE                  (0x18000000UL)                        /*!< (Peripheral) Base Address */
+#define VE_A7_MP_SRAM_BASE                    (0x2E000000UL)                        /*!< (SRAM      ) Base Address */
+#define VE_A7_MP_DRAM_BASE                    (0x80000000UL)                        /*!< (DRAM      ) Base Address */
+#define VE_A7_MP_VRAM_BASE                    (0x18000000UL)                        /*!< (VRAM      ) Base Address */
+#define VE_A7_MP_ETHERNET_BASE                (0x02000000UL + VE_A7_MP_PERIPH_BASE) /*!< (ETHERNET  ) Base Address */
+#define VE_A7_MP_USB_BASE                     (0x03000000UL + VE_A7_MP_PERIPH_BASE) /*!< (USB       ) Base Address */
+#define VE_A7_MP_DAP_BASE                     (0x1C000000UL)                        /*!< (DAP       ) Base Address */
+#define VE_A7_MP_SYSTEM_REG_BASE              (0x00010000UL + 0x1C000000UL)         /*!< (SYSTEM REG) Base Address */
+#define VE_A7_MP_SERIAL_BASE                  (0x00030000UL + 0x1C000000UL)         /*!< (SERIAL    ) Base Address */
+#define VE_A7_MP_AACI_BASE                    (0x00040000UL + 0x1C000000UL)         /*!< (AACI      ) Base Address */
+#define VE_A7_MP_MMCI_BASE                    (0x00050000UL + 0x1C000000UL)         /*!< (MMCI      ) Base Address */
+#define VE_A7_MP_KMI0_BASE                    (0x00060000UL + 0x1C000000UL)         /*!< (KMI0      ) Base Address */
+#define VE_A7_MP_UART_BASE                    (0x00090000UL + 0x1C000000UL)         /*!< (UART      ) Base Address */
+#define VE_A7_MP_WDT_BASE                     (0x000F0000UL + 0x1C000000UL)         /*!< (WDT       ) Base Address */
+#define VE_A7_MP_TIMER_BASE                   (0x00110000UL + 0x1C000000UL)         /*!< (TIMER     ) Base Address */
+#define VE_A7_MP_DVI_BASE                     (0x00160000UL + 0x1C000000UL)         /*!< (DVI       ) Base Address */
+#define VE_A7_MP_RTC_BASE                     (0x00170000UL + 0x1C000000UL)         /*!< (RTC       ) Base Address */
+#define VE_A7_MP_UART4_BASE                   (0x001B0000UL + 0x1C000000UL)         /*!< (UART4     ) Base Address */
+#define VE_A7_MP_CLCD_BASE                    (0x001F0000UL + 0x1C000000UL)         /*!< (CLCD      ) Base Address */
+#define VE_A7_MP_GIC_DISTRIBUTOR_BASE         (0x00001000UL + 0x2C000000UL)         /*!< (GIC DIST  ) Base Address */
+#define VE_A7_MP_GIC_INTERFACE_BASE           (0x00002000UL + 0x2C000000UL)         /*!< (GIC CPU IF) Base Address */
+#define GIC_DISTRIBUTOR_BASE                  VE_A7_MP_GIC_DISTRIBUTOR_BASE
+#define GIC_INTERFACE_BASE                    VE_A7_MP_GIC_INTERFACE_BASE
+
+//The VE-A7 model implements L1 cache as architecturally defined, but does not implement L2 cache.
+//Do not enable the L2 cache if you are running RTX on a VE-A7 model as it may cause a data abort.
+#define VE_A7_MP_PL310_BASE                   (0x2C0F0000UL)                        /*!< (L2C-310   ) Base Address */
+#define L2C_310_BASE                          VE_A7_MP_PL310_BASE
+
+/* --------  Configuration of the Cortex-A7 Processor and Core Peripherals  ------- */
+#define __CA_REV        0x0000U    /* Core revision r0p0                            */
+#define __CORTEX_A           7U    /* Cortex-A7 Core                                */
+#define __FPU_PRESENT        1U    /* FPU present                                   */
+
+#include "core_ca.h"
+#include <system_ARMCA7.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // __ARMCA7_H__
diff --git a/Device/ARM/ARMCA7/Include/system_ARMCA7.h b/Device/ARM/ARMCA7/Include/system_ARMCA7.h
new file mode 100644
index 0000000..e357c6a
--- /dev/null
+++ b/Device/ARM/ARMCA7/Include/system_ARMCA7.h
@@ -0,0 +1,54 @@
+/******************************************************************************
+ * @file     system_ARMCA7.h
+ * @brief    CMSIS Device System Header File for ARM Cortex-A Device Series
+ * @version  V1.00
+ * @date     22 Feb 2017
+ *
+ * @note
+ *
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SYSTEM_ARMCA7_H
+#define __SYSTEM_ARMCA7_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef void(*IRQHandler)();
+uint32_t InterruptHandlerRegister(IRQn_Type, IRQHandler);
+uint32_t InterruptHandlerUnregister(IRQn_Type);
+
+/**
+ * Initialize the system
+ *
+ * @param  none
+ * @return none
+ *
+ * @brief  Setup the microcontroller system.
+ *         Initialize the System and update the SystemCoreClock variable.
+ */
+extern void SystemInit (void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __SYSTEM_ARMCA7_H */
diff --git a/Device/ARM/ARMCA7/Source/ARM/ARMCA7.sct b/Device/ARM/ARMCA7/Source/ARM/ARMCA7.sct
new file mode 100644
index 0000000..66cf773
--- /dev/null
+++ b/Device/ARM/ARMCA7/Source/ARM/ARMCA7.sct
@@ -0,0 +1,37 @@
+;**************************************************
+; Copyright (c) 2017 ARM Ltd.  All rights reserved.
+;**************************************************
+
+; Scatter-file for RTX Example on Versatile Express
+
+; This scatter-file places application code, data, stack and heap at suitable addresses in the memory map.
+
+; This platform has 2GB SDRAM starting at 0x80000000.
+
+
+SDRAM 0x80000000 0x40000000
+{
+    VECTORS +0 0x200000
+    {
+        * (RESET, +FIRST)         ; Vector table and other (assembler) startup code
+        * (InRoot$$Sections)      ; All (library) code that must be in a root region
+        * (+RO-CODE)              ; Application RO code (.text)
+        * (+RO-DATA)              ; Application RO data (.constdata)
+    }
+
+    RW_DATA 0x80200000 0x100000
+    { * (+RW) }                   ; Application RW data (.data)
+
+    ZI_DATA 0x80300000 0x0F0000
+    { * (+ZI) }                   ; Application ZI data (.bss)
+
+    ARM_LIB_STACK 0x80400000 EMPTY -0x8000 ; Stack region growing down
+    { }
+    
+    ARM_LIB_HEAP  0x803F0000 EMPTY  0x8000 ; Heap region growing up
+    { }
+
+    TTB     0x80500000 EMPTY 0x4000
+    { }                           ; Level-1 Translation Table for MMU
+
+}
diff --git a/Device/ARM/ARMCA7/Source/ARM/startup_ARMCA7.s b/Device/ARM/ARMCA7/Source/ARM/startup_ARMCA7.s
new file mode 100644
index 0000000..c564d12
--- /dev/null
+++ b/Device/ARM/ARMCA7/Source/ARM/startup_ARMCA7.s
@@ -0,0 +1,447 @@
+;/**************************************************************************//**
+; * @file     startup_ARMCA7.s
+; * @brief    CMSIS Core Device Startup File for ARM Cortex-A7 Device Series
+; * @version  V1.00
+; * @date     22 Feb 2017
+; *
+; * @note
+; *
+; ******************************************************************************/
+;/*
+; * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+; *
+; * SPDX-License-Identifier: Apache-2.0
+; *
+; * Licensed under the Apache License, Version 2.0 (the License); you may
+; * not use this file except in compliance with the License.
+; * You may obtain a copy of the License at
+;   *
+; * www.apache.org/licenses/LICENSE-2.0
+; *
+; * Unless required by applicable law or agreed to in writing, software
+; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; * See the License for the specific language governing permissions and
+; * limitations under the License.
+; */
+;/*
+;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
+;*/
+
+GICC_OFFSET     EQU     0x2000
+ICCIAR_OFFSET   EQU     0x000C
+ICCEOIR_OFFSET  EQU     0x0010
+
+Mode_USR        EQU     0x10
+Mode_FIQ        EQU     0x11
+Mode_IRQ        EQU     0x12
+Mode_SVC        EQU     0x13
+Mode_ABT        EQU     0x17
+Mode_UND        EQU     0x1B
+Mode_SYS        EQU     0x1F
+
+I_Bit           EQU     0x80            ; when I bit is set, IRQ is disabled
+F_Bit           EQU     0x40            ; when F bit is set, FIQ is disabled
+T_Bit           EQU     0x20            ; when T bit is set, core is in Thumb state
+
+Sect_Normal     EQU     0x00005c06 ;outer & inner wb/wa, non-shareable, executable, rw, domain 0, base addr 0
+Sect_Normal_Cod EQU     0x0000dc06 ;outer & inner wb/wa, non-shareable, executable, ro, domain 0, base addr 0
+Sect_Normal_RO  EQU     0x0000dc16 ;as Sect_Normal_Cod, but not executable
+Sect_Normal_RW  EQU     0x00005c16 ;as Sect_Normal_Cod, but writeable and not executable
+Sect_SO         EQU     0x00000c12 ;strongly-ordered (therefore shareable), not executable, rw, domain 0, base addr 0
+Sect_Device_RO  EQU     0x00008c12 ;device, non-shareable, non-executable, ro, domain 0, base addr 0
+Sect_Device_RW  EQU     0x00000c12 ;as Sect_Device_RO, but writeable
+Sect_Fault      EQU     0x00000000 ;this translation will fault (the bottom 2 bits are important, the rest are ignored)
+
+RAM_BASE        EQU     0x80000000
+VRAM_BASE       EQU     0x18000000
+SRAM_BASE       EQU     0x2e000000
+ETHERNET        EQU     0x1a000000
+CS3_PERIPHERAL_BASE EQU 0x1c000000
+
+; <h> Stack Configuration
+;   <o> Stack Size (in Bytes, per mode) <0x0-0xFFFFFFFF:8>
+; </h>
+
+UND_Stack_Size  EQU     0x00000100
+SVC_Stack_Size  EQU     0x00000100
+ABT_Stack_Size  EQU     0x00000100
+FIQ_Stack_Size  EQU     0x00000000
+IRQ_Stack_Size  EQU     0x00000100
+USR_Stack_Size  EQU     0x00000100
+
+ISR_Stack_Size  EQU     (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
+                         FIQ_Stack_Size + IRQ_Stack_Size)
+
+                AREA    STACK, NOINIT, READWRITE, ALIGN=3
+Stack_Mem       SPACE   USR_Stack_Size
+__initial_sp    SPACE   ISR_Stack_Size
+
+Stack_Top
+
+
+; <h> Heap Configuration
+;   <o>  Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
+; </h>
+
+Heap_Size       EQU     0x00000000
+
+                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
+__heap_base
+Heap_Mem        SPACE   Heap_Size
+__heap_limit
+
+
+                PRESERVE8
+                ARM
+
+
+; Vector Table Mapped to Address 0 at Reset
+
+                AREA    RESET, CODE, READONLY
+                EXPORT  __Vectors
+                EXPORT  __Vectors_End
+                EXPORT  __Vectors_Size
+
+__Vectors       LDR     PC, Reset_Addr            ; Address of Reset Handler
+                LDR     PC, Undef_Addr            ; Address of Undef Handler
+                LDR     PC, SVC_Addr              ; Address of SVC Handler
+                LDR     PC, PAbt_Addr             ; Address of Prefetch Abort Handler
+                LDR     PC, DAbt_Addr             ; Address of Data Abort Handler
+                NOP                               ; Reserved Vector
+                LDR     PC, IRQ_Addr              ; Address of IRQ Handler
+                LDR     PC, FIQ_Addr              ; Address of FIQ Handler
+__Vectors_End
+
+__Vectors_Size  EQU     __Vectors_End - __Vectors
+
+Reset_Addr      DCD     Reset_Handler
+Undef_Addr      DCD     Undef_Handler
+SVC_Addr        DCD     SVC_Handler
+PAbt_Addr       DCD     PAbt_Handler
+DAbt_Addr       DCD     DAbt_Handler
+IRQ_Addr        DCD     IRQ_Handler
+FIQ_Addr        DCD     FIQ_Handler
+
+                AREA    |.text|, CODE, READONLY
+
+Reset_Handler   PROC
+                EXPORT  Reset_Handler             [WEAK]
+                IMPORT  SystemInit
+                IMPORT  __main
+
+                ; Put any cores other than 0 to sleep
+                MRC     p15, 0, R0, c0, c0, 5     ; Read MPIDR
+                ANDS    R0, R0, #3
+goToSleep
+                WFINE
+                BNE     goToSleep
+
+                MRC     p15, 0, R0, c1, c0, 0       ; Read CP15 System Control register
+                BIC     R0, R0, #(0x1 << 12)        ; Clear I bit 12 to disable I Cache
+                BIC     R0, R0, #(0x1 <<  2)        ; Clear C bit  2 to disable D Cache
+                BIC     R0, R0, #0x1                ; Clear M bit  0 to disable MMU
+                BIC     R0, R0, #(0x1 << 11)        ; Clear Z bit 11 to disable branch prediction
+                BIC     R0, R0, #(0x1 << 13)        ; Clear V bit 13 to disable hivecs
+                MCR     p15, 0, R0, c1, c0, 0       ; Write value back to CP15 System Control register
+                ISB
+
+; ACTLR.SMP bit must be set before the caches and MMU are enabled,
+; or any cache and TLB maintenance operations are performed, even for "AMP" CPUs
+                MRC     p15, 0, r0, c1, c0, 1       ; Read CP15 Auxiliary Control Register
+                ORR     r0, r0, #(1 <<  6)          ; Set ACTLR.SMP bit
+                ORR     r0, r0, #(1 << 13)          ; Set L1PCTL L1 Data prefetch control to 0b11
+                ORR     r0, r0, #(1 << 14)          ;  3 outstanding pre-fetches permitted, this is the reset value
+                MCR     p15, 0, r0, c1, c0, 1       ; Write CP15 Auxiliary Control Register
+
+; Set Vector Base Address Register (VBAR) to point to this application's vector table
+                LDR     R0, =__Vectors
+                MCR     p15, 0, R0, c12, c0, 0
+
+;  Setup Stack for each exceptional mode
+                LDR     R0, =Stack_Top
+
+;  Enter Undefined Instruction Mode and set its Stack Pointer
+                MSR     CPSR_C, #Mode_UND:OR:I_Bit:OR:F_Bit
+                MOV     SP, R0
+                SUB     R0, R0, #UND_Stack_Size
+
+;  Enter Abort Mode and set its Stack Pointer
+                MSR     CPSR_C, #Mode_ABT:OR:I_Bit:OR:F_Bit
+                MOV     SP, R0
+                SUB     R0, R0, #ABT_Stack_Size
+
+;  Enter FIQ Mode and set its Stack Pointer
+                MSR     CPSR_C, #Mode_FIQ:OR:I_Bit:OR:F_Bit
+                MOV     SP, R0
+                SUB     R0, R0, #FIQ_Stack_Size
+
+;  Enter IRQ Mode and set its Stack Pointer
+                MSR     CPSR_C, #Mode_IRQ:OR:I_Bit:OR:F_Bit
+                MOV     SP, R0
+                SUB     R0, R0, #IRQ_Stack_Size
+
+;  Enter Supervisor Mode and set its Stack Pointer
+                MSR     CPSR_C, #Mode_SVC:OR:I_Bit:OR:F_Bit
+                MOV     SP, R0
+
+;  Enter System Mode to complete initialization and enter kernel
+                MSR     CPSR_C, #Mode_SYS:OR:I_Bit:OR:F_Bit
+                MOV     SP, R0
+
+                IMPORT  MMU_CreateTranslationTable
+                BL      MMU_CreateTranslationTable
+
+                MOV     r0, #0x0
+                MCR     p15, 0, r0, c8, c7, 0     ; TLBIALL - Invalidate entire Unified TLB
+                MCR     p15, 0, r0, c7, c5, 6     ; BPIALL  - Invalidate entire branch predictor array
+                DSB
+                ISB
+                MCR     p15, 0, r0, c7, c5, 0     ; ICIALLU - Invalidate instruction cache and flush branch target cache
+                DSB
+                ISB
+
+;  Invalidate data cache
+                MOV     r0, #0x0                  ; 0 = invalidate data cache, 1 = clean data cache.
+
+                MRC     p15, 1, R6, c0, c0, 1     ; Read CLIDR
+                ANDS    R3, R6, #0x07000000       ; Extract coherency level
+                MOV     R3, R3, LSR #23           ; Total cache levels << 1
+                BEQ     Finished                  ; If 0, no need to clean
+
+                MOV     R10, #0                   ; R10 holds current cache level << 1
+Loop1           ADD     R2, R10, R10, LSR #1      ; R2 holds cache "Set" position
+                MOV     R1, R6, LSR R2            ; Bottom 3 bits are the Cache-type for this level
+                AND     R1, R1, #7                ; Isolate those lower 3 bits
+                CMP     R1, #2
+                BLT     Skip                      ; No cache or only instruction cache at this level
+
+                MCR     p15, 2, R10, c0, c0, 0    ; Write the Cache Size selection register
+                ISB                               ; ISB to sync the change to the CacheSizeID reg
+                MRC     p15, 1, R1, c0, c0, 0     ; Reads current Cache Size ID register
+                AND     R2, R1, #7                ; Extract the line length field
+                ADD     R2, R2, #4                ; Add 4 for the line length offset (log2 16 bytes)
+                LDR     R4, =0x3FF
+                ANDS    R4, R4, R1, LSR #3        ; R4 is the max number on the way size (right aligned)
+                CLZ     R5, R4                    ; R5 is the bit position of the way size increment
+                LDR     R7, =0x7FFF
+                ANDS    R7, R7, R1, LSR #13       ; R7 is the max number of the index size (right aligned)
+
+Loop2           MOV     R9, R4                    ; R9 working copy of the max way size (right aligned)
+
+Loop3           ORR     R11, R10, R9, LSL R5      ; Factor in the Way number and cache number into R11
+                ORR     R11, R11, R7, LSL R2      ; Factor in the Set number
+                CMP     R0, #0
+                BNE     Dccsw
+                MCR     p15, 0, R11, c7, c6, 2    ; DCISW. Invalidate by Set/Way
+                B       cont
+Dccsw           CMP     R0, #1
+                BNE     Dccisw
+                MCR     p15, 0, R11, c7, c10, 2   ; DCCSW. Clean by Set/Way
+                B       cont
+Dccisw          MCR     p15, 0, R11, c7, c14, 2   ; DCCISW. Clean and Invalidate by Set/Way
+cont            SUBS    R9, R9, #1                ; Decrement the Way number
+                BGE     Loop3
+                SUBS    R7, R7, #1                ; Decrement the Set number
+                BGE     Loop2
+Skip            ADD     R10, R10, #2              ; Increment the cache number
+                CMP     R3, R10
+                BGT     Loop1
+Finished
+                DSB
+
+;  Enable MMU, but leave caches disabled (they will be enabled later)
+                MRC     p15, 0, r0, c1, c0, 0     ; Read CP15 System Control register
+                ORR     r0, r0, #(0x1 << 29)      ; Set AFE bit 29 to enable simplified access permissions model
+                BIC     r0, r0, #(0x1 << 28)      ; Clear TRE bit 28 to disable TEX remap
+                BIC     r0, r0, #(0x1 << 12)      ; Clear I bit 12 to disable I Cache
+                BIC     r0, r0, #(0x1 <<  2)      ; Clear C bit  2 to disable D Cache
+                BIC     r0, r0, #(0x1 <<  1)      ; Clear A bit  1 to disable strict alignment fault checking
+                ORR     r0, r0, #0x1              ; Set M bit 0 to enable MMU
+                MCR     p15, 0, r0, c1, c0, 0     ; Write CP15 System Control register
+
+;  USR/SYS stack pointer will be set during kernel init
+
+                LDR     R0, =SystemInit
+                BLX     R0
+                LDR     R0, =__main
+                BLX     R0
+
+                ENDP
+
+;  Undefined Instruction exception handler.
+;  Determines the width of the faulting instruction (ARM / 16-bit Thumb /
+;  32-bit Thumb), reads it, and passes it to CUndefHandler together with the
+;  instruction size and return address. The value CUndefHandler returns in R0
+;  is subtracted from the return address, allowing the instruction to be
+;  retried or skipped.
Undef_Handler\
                PROC
                EXPORT  Undef_Handler             [WEAK]
                IMPORT  CUndefHandler
                SRSFD   SP!, #Mode_UND            ; Save return state (LR_und, SPSR_und) to UND mode stack
                PUSH    {R0-R4, R12}              ; Save APCS corruptible registers to UND mode stack
+
+                MRS     R0, SPSR
+                TST     R0, #T_Bit                ; Check mode: Z set means ARM state at the point of the fault
+                MOVEQ   R1, #4                    ; R1 = 4 ARM mode
+                MOVNE   R1, #2                    ; R1 = 2 Thumb mode
+                SUB     R0, LR, R1                ; R0 = address of the offending instruction
+                LDREQ   R0, [R0]                  ; ARM mode - R0 points to offending instruction
+                BEQ     undef_cont
+
+                ;Thumb instruction
+                ;Determine if it is a 32-bit Thumb instruction
+                LDRH    R0, [R0]
+                MOV     R2, #0x1c
+                CMP     R2, R0, LSR #11           ; Top 5 bits <= 0b11100 means a 16-bit encoding
+                BHS     undef_cont                ;16-bit Thumb instruction
+
+                ;32-bit Thumb instruction. Unaligned - we need to reconstruct the offending instruction
+                LDRH    R2, [LR]                  ; Second halfword is at LR (offending address + 2)
+                ORR     R0, R2, R0, LSL #16
+undef_cont
+                MOV     R2, LR                    ; Pass return address as third argument
+
+                AND     R12, SP, #4               ; Ensure stack is 8-byte aligned
+                SUB     SP, SP, R12               ; Adjust stack
+                PUSH    {R12, LR}                 ; Store stack adjustment and dummy LR
+
+                ;R0 Offending instruction
+                ;R1 =2 (Thumb) or =4 (ARM)
+                BL      CUndefHandler             ; R0 on return = bytes to step the return address back by
+
+                POP     {R12, LR}                 ; Get stack adjustment & discard dummy LR
+                ADD     SP, SP, R12               ; Unadjust stack
+
+                LDR     LR, [SP, #24]             ; [SP,#24] = LR saved by SRSFD (above the 6 pushed registers)
+                SUB     LR, LR, R0                ; Adjust return address by amount requested by CUndefHandler
+                LDR     R0, [SP, #28]             ; [SP,#28] = SPSR saved by SRSFD
+                MSR     SPSR_CXSF, R0
+                POP     {R0-R4, R12}              ; Restore stacked APCS registers
+                ADD     SP, SP, #8                ; Discard the SRS frame (LR/SPSR already consumed above)
+                MOVS    PC, LR                    ; Exception return: also restores CPSR from SPSR
+                ENDP
+
+;  Prefetch Abort exception handler.
+;  Collects the instruction fault status/address from CP15 and hands them,
+;  together with the return address, to the C-level handler CPAbtHandler.
+PAbt_Handler\
+                PROC
+                EXPORT  PAbt_Handler              [WEAK]
+                IMPORT  CPAbtHandler
+                SUB     LR, LR, #4                ; Pre-adjust LR to address of the aborting instruction
+                SRSFD   SP!, #Mode_ABT            ; Save LR and SPSR to ABT mode stack
+                PUSH    {R0-R4, R12}              ; Save APCS corruptible registers to ABT mode stack
+                MRC     p15, 0, R0, c5, c0, 1     ; IFSR - Instruction Fault Status Register (1st argument)
+                MRC     p15, 0, R1, c6, c0, 2     ; IFAR - Instruction Fault Address Register (2nd argument)
+
+                MOV     R2, LR                    ; Pass return address as third argument
+
+                AND     R12, SP, #4               ; Ensure stack is 8-byte aligned
+                SUB     SP, SP, R12               ; Adjust stack
+                PUSH    {R12, LR}                 ; Store stack adjustment and dummy LR
+
+                BL      CPAbtHandler
+
+                POP     {R12, LR}                 ; Get stack adjustment & discard dummy LR
+                ADD     SP, SP, R12               ; Unadjust stack
+
+                POP     {R0-R4, R12}              ; Restore stacked APCS registers
+                RFEFD   SP!                       ; Return from exception (restores PC and CPSR from SRS frame)
+                ENDP
+
+
+;  Data Abort exception handler.
+;  Collects the data fault status/address from CP15 and hands them,
+;  together with the return address, to the C-level handler CDAbtHandler.
+DAbt_Handler\
+                PROC
+                EXPORT  DAbt_Handler              [WEAK]
+                IMPORT  CDAbtHandler
+                SUB     LR, LR, #8                ; Pre-adjust LR so that an exception return retries the aborting instruction
+                SRSFD   SP!, #Mode_ABT            ; Save LR and SPSR to ABT mode stack
+                PUSH    {R0-R4, R12}              ; Save APCS corruptible registers to ABT mode stack
+                CLREX                             ; State of exclusive monitors unknown after taken data abort
+                MRC     p15, 0, R0, c5, c0, 0     ; DFSR - Data Fault Status Register (1st argument)
+                MRC     p15, 0, R1, c6, c0, 0     ; DFAR - Data Fault Address Register (2nd argument)
+
+                MOV     R2, LR                    ; Pass return address as third argument
+
+                AND     R12, SP, #4               ; Ensure stack is 8-byte aligned
+                SUB     SP, SP, R12               ; Adjust stack
+                PUSH    {R12, LR}                 ; Store stack adjustment and dummy LR
+
+                BL      CDAbtHandler
+
+                POP     {R12, LR}                 ; Get stack adjustment & discard dummy LR
+                ADD     SP, SP, R12               ; Unadjust stack
+
+                POP     {R0-R4, R12}              ; Restore stacked APCS registers
+                RFEFD   SP!                       ; Return from exception (restores PC and CPSR from SRS frame)
+                ENDP
+
+;  Default FIQ handler: spins forever. WEAK, so an application can
+;  provide its own FIQ_Handler to override it.
+FIQ_Handler\
+                PROC
+                EXPORT  FIQ_Handler               [WEAK]
+                B       .                         ; No default FIQ handling - loop here
+                ENDP
+
+;  Default SVC handler: spins forever. WEAK, so an application or RTOS
+;  can provide its own SVC_Handler to override it.
+SVC_Handler\
+                PROC
+                EXPORT  SVC_Handler               [WEAK]
+                B       .                         ; No default SVC handling - loop here
+                ENDP
+
+;  IRQ exception handler.
+;  Acknowledges the interrupt at the GIC CPU interface, dispatches to the
+;  matching entry in IRQTable (re-enabling interrupts around the call so IRQs
+;  can nest), then performs the End Of Interrupt write and returns.
+IRQ_Handler\
+                PROC
+                EXPORT  IRQ_Handler                [WEAK]
+                IMPORT  IRQCount
+                IMPORT  IRQTable
+                IMPORT  IRQNestLevel                ; Flag indicates whether inside an ISR, and the depth of nesting.  0 = not in ISR.
+
+
+                ;prologue
+                SUB     LR, LR, #4                  ; Pre-adjust LR to the interrupted instruction's resume address
+                SRSFD   SP!, #Mode_SVC              ; Save LR_IRQ and SPSR_IRQ to SVC mode stack
+                CPS     #Mode_SVC                   ; Switch to SVC mode, to avoid a nested interrupt corrupting LR on a BL
+                PUSH    {R0-R3, R12}                ; Save remaining APCS corruptible registers to SVC stack
+
+                AND     R1, SP, #4                  ; Ensure stack is 8-byte aligned
+                SUB     SP, SP, R1                  ; Adjust stack
+                PUSH    {R1, LR}                    ; Store stack adjustment and LR_SVC to SVC stack
+
+                LDR     R0, =IRQNestLevel           ; Get address of nesting counter
+                LDR     R1, [R0]
+                ADD     R1, R1, #1                  ; Increment nesting counter
+                STR     R1, [R0]
+
+                ;identify and acknowledge interrupt
+                MRC     p15, 4, R1, c15, c0, 0      ; Read CBAR (Configuration Base Address Register)
+                ADD     R1, R1, #GICC_OFFSET        ; R1 = GIC CPU interface base address
+                LDR     R0, [R1, #ICCIAR_OFFSET]    ; Read ICCIAR - acknowledges the interrupt, R0 = interrupt ID
+                DSB                                 ; Ensure that interrupt acknowledge completes before re-enabling interrupts
+
+                LDR     R2, =IRQCount               ; Read number of entries in IRQ handler table
+                LDR     R2, [R2]
+                CMP     R0, R2                      ; Is there a handler for this IRQ?
+                BHS     end_int                     ; No handler, so return as normal
+                LDR     R2, =IRQTable               ; Get address of handler
+                LDR     R2, [R2, R0, LSL #2]
+                CMP     R2, #0                      ; Clean up and return if handler address is 0
+                BEQ     end_int
+                PUSH    {R0,R1}                     ; Preserve interrupt ID and GICC base across the handler call
+
+                CPSIE   i                           ; Now safe to re-enable interrupts
+                BLX     R2                          ; Call handler. R0 = IRQ number. Beware calls to PendSV_Handler and OS_Tick_Handler do not return this way
+                CPSID   i                           ; Disable interrupts again
+
+                POP     {R0,R1}
+                DSB                                 ; Ensure that interrupt source is cleared before signalling End Of Interrupt
+end_int
+                ; R0 still contains the interrupt ID
+                ; R1 still contains the GIC CPU interface base address
+                STR     R0, [R1, #ICCEOIR_OFFSET]   ; Normal end-of-interrupt write to EOIR (GIC CPU Interface register) to clear the active bit
+ret_irq
+                ;epilogue
+                ;NOTE(review): ret_irq appears to be the re-entry point for handlers that
+                ;return via a context-switch path (see BLX note above) - confirm with the RTOS port.
+                LDR     R0, =IRQNestLevel           ; Get address of nesting counter
+                LDR     R1, [R0]
+                SUB     R1, R1, #1                  ; Decrement nesting counter
+                STR     R1, [R0]
+
+                POP     {R1, LR}                    ; Get stack adjustment and restore LR_SVC
+                ADD     SP, SP, R1                  ; Unadjust stack
+
+                POP     {R0-R3,R12}                 ; Restore stacked APCS registers
+                RFEFD   SP!                         ; Return from exception (restores PC and CPSR from SRS frame)
+                ENDP
+
+                END
diff --git a/Device/ARM/ARMCA7/Source/mmu_ARMCA7.c b/Device/ARM/ARMCA7/Source/mmu_ARMCA7.c
new file mode 100644
index 0000000..fb7e38e
--- /dev/null
+++ b/Device/ARM/ARMCA7/Source/mmu_ARMCA7.c
@@ -0,0 +1,236 @@
+/**************************************************************************//**
+ * @file     mmu_ARMCA7.c
+ * @brief    MMU Configuration for ARM Cortex-A7 Device Series
+ * @version  V1.00
+ * @date     22 Feb 2017
+ *
+ * @note
+ *
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Memory map description from: DUI0447G_v2m_p1_trm.pdf 4.2.2 ARM Cortex-A Series memory map
+
+                                                     Memory Type
+0xffffffff |--------------------------|             ------------
+           |       FLAG SYNC          |             Device Memory
+0xfffff000 |--------------------------|             ------------
+           |         Fault            |                Fault
+0xfff00000 |--------------------------|             ------------
+           |                          |                Normal
+           |                          |
+           |      Daughterboard       |
+           |         memory           |
+           |                          |
+0x80505000 |--------------------------|             ------------
+           |TTB (L2 Sync Flags   ) 4k |                Normal
+0x80504C00 |--------------------------|             ------------
+           |TTB (L2 Peripherals-B) 16k|                Normal
+0x80504800 |--------------------------|             ------------
+           |TTB (L2 Peripherals-A) 16k|                Normal
+0x80504400 |--------------------------|             ------------
+           |TTB (L2 Priv Periphs)  4k |                Normal
+0x80504000 |--------------------------|             ------------
+           |    TTB (L1 Descriptors)  |                Normal
+0x80500000 |--------------------------|             ------------
+           |           Heap           |                Normal
+           |--------------------------|             ------------
+           |          Stack           |                Normal
+0x80400000 |--------------------------|             ------------
+           |         ZI Data          |                Normal
+0x80300000 |--------------------------|             ------------
+           |         RW Data          |                Normal
+0x80200000 |--------------------------|             ------------
+           |         RO Data          |                Normal
+           |--------------------------|             ------------
+           |         RO Code          |              USH Normal
+0x80000000 |--------------------------|             ------------
+           |      Daughterboard       |                Fault
+           |      HSB AXI buses       |
+0x40000000 |--------------------------|             ------------
+           |      Daughterboard       |                Fault
+           |  test chips peripherals  |
+0x2c002000 |--------------------------|             ------------
+           |     Private Address      |            Device Memory
+0x2c000000 |--------------------------|             ------------
+           |      Daughterboard       |                Fault
+           |  test chips peripherals  |
+0x20000000 |--------------------------|             ------------
+           |       Peripherals        |           Device Memory RW/RO
+           |                          |              & Fault
+0x00000000 |--------------------------|
+*/
+
+// L1 Cache info and restrictions about architecture of the caches (CCSIR register):
+// Write-Through support *not* available
+// Write-Back support available.
+// Read allocation support available.
+// Write allocation support available.
+
+//Note: You should use the Shareable attribute carefully.
+//For cores without coherency logic (such as SCU) marking a region as shareable forces the processor to not cache that region regardless of the inner cache settings.
+//Cortex-A versions of RTX use LDREX/STREX instructions relying on Local monitors. Local monitors will be used only when the region gets cached, regions that are not cached will use the Global Monitor.
+//Some Cortex-A implementations do not include Global Monitors, so wrongly setting the attribute Shareable may cause STREX to fail.
+
+//Recall: When the Shareable attribute is applied to a memory region that is not Write-Back, Normal memory, data held in this region is treated as Non-cacheable.
+//When SMP bit = 0, Inner WB/WA Cacheable Shareable attributes are treated as Non-cacheable.
+//When SMP bit = 1, Inner WB/WA Cacheable Shareable attributes are treated as Cacheable.
+
+
+//Following MMU configuration is expected
+//SCTLR.AFE == 1 (Simplified access permissions model - AP[2:1] define access permissions, AP[0] is an access flag)
+//SCTLR.TRE == 0 (TEX remap disabled, so memory type and attributes are described directly by bits in the descriptor)
+//Domain 0 is always the Client domain
+//Descriptors should place all memory in domain 0
+
+#include <stdint.h>
+#include "ARMCA7.h"
+
+
+// L2 table pointers
+//----------------------------------------
+#define PRIVATE_TABLE_L2_BASE_4k       (0x80504000) //Map 4k Private Address space
+#define SYNC_FLAGS_TABLE_L2_BASE_4k    (0x80504C00) //Map 4k Flag synchronization
+#define PERIPHERAL_A_TABLE_L2_BASE_64k (0x80504400) //Map 1M of Peripheral #1 using 64k pages: 0x1C000000 - 0x1C0FFFFF
+#define PERIPHERAL_B_TABLE_L2_BASE_64k (0x80504800) //Map 1M of Peripheral #2 using 64k pages: 0x1C100000 - 0x1C1FFFFF
+
+//--------------------- PERIPHERALS -------------------
+#define PERIPHERAL_A_FAULT (0x00000000 + 0x1c000000) //0x1C000000-0x1C0FFFFF (1M)
+#define PERIPHERAL_B_FAULT (0x00100000 + 0x1c000000) //0x1C100000-0x1C1FFFFF (1M)
+
+//--------------------- SYNC FLAGS --------------------
+#define FLAG_SYNC     0xFFFFF000
+#define F_SYNC_BASE   0xFFF00000  //1M aligned
+
+//Import symbols from linker
+extern uint32_t Image$$VECTORS$$Base;
+extern uint32_t Image$$RW_DATA$$Base;
+extern uint32_t Image$$ZI_DATA$$Base;
+extern uint32_t Image$$TTB$$ZI$$Base;
+
+static uint32_t Sect_Normal;     //outer & inner wb/wa, non-shareable, executable, rw, domain 0, base addr 0
+static uint32_t Sect_Normal_Cod; //outer & inner wb/wa, non-shareable, executable, ro, domain 0, base addr 0
+static uint32_t Sect_Normal_RO;  //as Sect_Normal_Cod, but not executable
+static uint32_t Sect_Normal_RW;  //as Sect_Normal_Cod, but writeable and not executable
+static uint32_t Sect_Device_RO;  //device, non-shareable, non-executable, ro, domain 0, base addr 0
+static uint32_t Sect_Device_RW;  //as Sect_Device_RO, but writeable
+
+/* Define global descriptors */
+static uint32_t Page_L1_4k  = 0x0;  //generic
+static uint32_t Page_L1_64k = 0x0;  //generic
+static uint32_t Page_4k_Device_RW;  //Shared device, not executable, rw, domain 0
+static uint32_t Page_64k_Device_RW; //Shared device, not executable, rw, domain 0
+
+/**
+ * Create the flat-mapped L1/L2 translation tables for this platform.
+ *
+ * The whole 4GB space is first marked faulting; the image sections, DRAM,
+ * peripherals, the private (CBAR) region and the synchronization flags are
+ * then mapped with the attribute descriptors built below. Finally TTBR0 and
+ * DACR are programmed. Expects SCTLR.AFE == 1 and SCTLR.TRE == 0 (see the
+ * configuration notes at the top of this file).
+ */
+void MMU_CreateTranslationTable(void)
+{
+    mmu_region_attributes_Type region;
+
+    //Create 4GB of faulting entries
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, 0, 4096, DESCRIPTOR_FAULT);
+
+    /*
+     * Generate descriptors. Refer to core_ca.h to get information about attributes
+     *
+     */
+    //Create descriptors for Vectors, RO, RW, ZI sections
+    section_normal(Sect_Normal, region);
+    section_normal_cod(Sect_Normal_Cod, region);
+    section_normal_ro(Sect_Normal_RO, region);
+    section_normal_rw(Sect_Normal_RW, region);
+    //Create descriptors for peripherals
+    section_device_ro(Sect_Device_RO, region);
+    section_device_rw(Sect_Device_RW, region);
+    //Create descriptors for 64k pages
+    page64k_device_rw(Page_L1_64k, Page_64k_Device_RW, region);
+    //Create descriptors for 4k pages
+    page4k_device_rw(Page_L1_4k, Page_4k_Device_RW, region);
+
+
+    /*
+     *  Define MMU flat-map regions and attributes
+     *
+     */
+
+    //Define Image: one 1MB section each for vectors/code, RW data and ZI data
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$VECTORS$$Base, 1, Sect_Normal_Cod);
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RW_DATA$$Base, 1, Sect_Normal_RW);
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$ZI_DATA$$Base, 1, Sect_Normal_RW);
+
+    //all DRAM executable, rw, cacheable - applications may choose to divide memory into ro executable
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$TTB$$ZI$$Base, 2043, Sect_Normal);
+
+    //--------------------- PERIPHERALS -------------------
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A7_MP_FLASH_BASE0    , 64, Sect_Device_RO);
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A7_MP_FLASH_BASE1    , 64, Sect_Device_RO);
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A7_MP_SRAM_BASE      , 64, Sect_Device_RW);
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A7_MP_VRAM_BASE      , 32, Sect_Device_RW);
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A7_MP_ETHERNET_BASE  , 16, Sect_Device_RW);
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A7_MP_USB_BASE       , 16, Sect_Device_RW);
+
+    // Create (16 * 64k)=1MB faulting entries to cover peripheral range 0x1C000000-0x1C0FFFFF
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, PERIPHERAL_A_FAULT      , 16, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, DESCRIPTOR_FAULT);
+    // Define individual peripherals within range 0x1C000000-0x1C0FFFFF
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A7_MP_DAP_BASE       ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A7_MP_SYSTEM_REG_BASE,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A7_MP_SERIAL_BASE    ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A7_MP_AACI_BASE      ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A7_MP_MMCI_BASE      ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A7_MP_KMI0_BASE      ,  2, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A7_MP_UART_BASE      ,  4, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A7_MP_WDT_BASE       ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+
+    // Create (16 * 64k)=1MB faulting entries to cover peripheral range 0x1C100000-0x1C1FFFFF
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, PERIPHERAL_B_FAULT      , 16, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, DESCRIPTOR_FAULT);
+    // Define individual peripherals within range 0x1C100000-0x1C1FFFFF
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A7_MP_TIMER_BASE     ,  2, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A7_MP_DVI_BASE       ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A7_MP_RTC_BASE       ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A7_MP_UART4_BASE     ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A7_MP_CLCD_BASE      ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+
+    // Create (256 * 4k)=1MB faulting entries to cover private address space. Needs to be marked as Device memory
+    MMU_TTPage4k (&Image$$TTB$$ZI$$Base, __get_CBAR()            ,256,  Page_L1_4k, (uint32_t *)PRIVATE_TABLE_L2_BASE_4k, DESCRIPTOR_FAULT);
+    // Define private address space entry (GIC/timer region located via CBAR).
+    MMU_TTPage4k (&Image$$TTB$$ZI$$Base, __get_CBAR()            ,  3,  Page_L1_4k, (uint32_t *)PRIVATE_TABLE_L2_BASE_4k, Page_4k_Device_RW);
+    // Define L2CC entry.  Uncomment if PL310 is present
+    //    MMU_TTPage4k (&Image$$TTB$$ZI$$Base, VE_A7_MP_PL310_BASE     ,  1,  Page_L1_4k, (uint32_t *)PRIVATE_TABLE_L2_BASE_4k, Page_4k_Device_RW);
+
+    // Create (256 * 4k)=1MB faulting entries to synchronization space (Useful if some non-cacheable DMA agent is present in the SoC)
+    MMU_TTPage4k (&Image$$TTB$$ZI$$Base, F_SYNC_BASE , 256, Page_L1_4k, (uint32_t *)SYNC_FLAGS_TABLE_L2_BASE_4k, DESCRIPTOR_FAULT);
+    // Define synchronization space entry.
+    MMU_TTPage4k (&Image$$TTB$$ZI$$Base, FLAG_SYNC   ,   1, Page_L1_4k, (uint32_t *)SYNC_FLAGS_TABLE_L2_BASE_4k, Page_4k_Device_RW);
+
+    /* Set location of level 1 page table
+    ; 31:14 - Translation table base addr (31:14-TTBCR.N, TTBCR.N is 0 out of reset)
+    ; 13:7  - 0x0
+    ; 6     - IRGN[0] 0x0 (Inner WB WA)
+    ; 5     - NOS     0x0 (Non-shared)
+    ; 4:3   - RGN     0x1 (Outer WB WA)
+    ; 2     - IMP     0x0 (Implementation Defined)
+    ; 1     - S       0x0 (Non-shared)
+    ; 0     - IRGN[1] 0x1 (Inner WB WA) */
+    __set_TTBR0(((uint32_t)&Image$$TTB$$ZI$$Base) | 9);
+    __ISB();
+
+    /* Set up domain access control register
+    ; We set domain 0 to Client and all other domains to No Access.
+    ; All translation table entries specify domain 0 */
+    __set_DACR(1);
+    __ISB();
+}
diff --git a/Device/ARM/ARMCA7/Source/system_ARMCA7.c b/Device/ARM/ARMCA7/Source/system_ARMCA7.c
new file mode 100644
index 0000000..7a71b28
--- /dev/null
+++ b/Device/ARM/ARMCA7/Source/system_ARMCA7.c
@@ -0,0 +1,289 @@
+/******************************************************************************
+ * @file     system_ARMCA7.c
+ * @brief    CMSIS Device System Source File for ARM Cortex-A7 Device Series
+ * @version  V1.00
+ * @date     22 Feb 2017
+ *
+ * @note
+ *
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ARMCA7.h>
+#include <stdint.h>
+
+extern void $Super$$main(void);
+__asm void __FPU_Enable(void);
+
+// Flag indicates whether inside an ISR, and the depth of nesting.  0 = not in ISR.
+uint32_t IRQNestLevel = 0;
+
+/**
+ * Initialize the memory subsystem.
+ *
+ * @param  none
+ * @return none
+ *
+ * @brief Initialize the memory subsystem, including enabling the cache and BTAC.
+ *        Requires PL1, so implemented as an SVC (SVC #1) in case threads are USR mode.
+ *        Callers invoke EnableCaches(); the __svc veneer traps into __SVC_1 below.
+ */
+#pragma push
+#pragma arm
+void __svc(1) EnableCaches(void);
+void __SVC_1(void) {
+
+/* Before enabling the caches, the instruction cache, the data cache, TLB, and BTAC must have been invalidated.
+ * You are not required to invalidate the main TLB, even though it is recommended for safety
+ * reasons. This ensures compatibility with future revisions of the processor. */
+
+//  unsigned int l2_id;
+
+  /* After MMU is enabled and data has been invalidated, enable caches and BTAC */
+  L1C_EnableCaches();
+  L1C_EnableBTAC();
+
+  /* If L2C-310 is present, Invalidate and Enable L2 cache here */
+//  l2_id = L2C_GetID();
+//  if (l2_id)
+//  {
+//     L2C_InvAllByWay();
+//     L2C_Enable();
+//  }
+}
+#pragma pop
+
+/* Table of IRQ handlers, indexed by the interrupt ID read from the GIC.
+   All entries start empty; they are populated at run time via
+   InterruptHandlerRegister() / InterruptHandlerUnregister(). */
+IRQHandler IRQTable[] = {
+    0, //IRQ 0
+    0, //IRQ 1
+    0, //IRQ 2
+    0, //IRQ 3
+    0, //IRQ 4
+    0, //IRQ 5
+    0, //IRQ 6
+    0, //IRQ 7
+    0, //IRQ 8
+    0, //IRQ 9
+    0, //IRQ 10
+    0, //IRQ 11
+    0, //IRQ 12
+    0, //IRQ 13
+    0, //IRQ 14
+    0, //IRQ 15
+    0, //IRQ 16
+    0, //IRQ 17
+    0, //IRQ 18
+    0, //IRQ 19
+    0, //IRQ 20
+    0, //IRQ 21
+    0, //IRQ 22
+    0, //IRQ 23
+    0, //IRQ 24
+    0, //IRQ 25
+    0, //IRQ 26
+    0, //IRQ 27
+    0, //IRQ 28
+    0, //IRQ 29
+    0, //IRQ 30
+    0, //IRQ 31
+    0, //IRQ 32
+    0, //IRQ 33
+    0, //IRQ 34
+    0, //IRQ 35
+    0, //IRQ 36
+    0, //IRQ 37
+    0, //IRQ 38
+    0, //IRQ 39
+    0  //IRQ 40
+};
+/* Number of entries in IRQTable. Divide by the element size rather than a
+   hard-coded 4 so the count stays correct regardless of pointer width. */
+uint32_t IRQCount = sizeof IRQTable / sizeof IRQTable[0];
+
+/* Install 'handler' for interrupt 'irq'. Returns 0 on success,
+   1 if 'irq' is outside the handler table. */
+uint32_t InterruptHandlerRegister (IRQn_Type irq, IRQHandler handler)
+{
+    if (irq >= IRQCount) {
+        return 1;       /* no table slot for this interrupt */
+    }
+    IRQTable[irq] = handler;
+    return 0;
+}
+
+/* Remove any handler installed for interrupt 'irq'. Returns 0 on success,
+   1 if 'irq' is outside the handler table. */
+uint32_t InterruptHandlerUnregister (IRQn_Type irq)
+{
+    if (irq >= IRQCount) {
+        return 1;       /* no table slot for this interrupt */
+    }
+    IRQTable[irq] = 0;
+    return 0;
+}
+
+/**
+ * Initialize the system
+ *
+ * @param  none
+ * @return none
+ *
+ * @brief  Setup the microcontroller system.
+ *         Initialize the System. Called from the reset handler before the
+ *         C library initializes static data.
+ */
+void SystemInit (void)
+{
+/*       do not use global variables because this function is called before
+         reaching pre-main. RW section may be overwritten afterwards.          */
+    GIC_Enable();   /* bring up the Generic Interrupt Controller */
+}
+
+/* $Sub$$main is invoked by the C library in place of main() (ARM linker
+   $Sub$$/$Super$$ patching mechanism). When built for CMSIS-RTOS it installs
+   the RTOS interrupt handlers and enables the caches (via the SVC veneer,
+   since main may run unprivileged), then chains to the real main(). */
+void $Sub$$main(void)
+{
+#ifdef __CMSIS_RTOS
+  extern void PendSV_Handler(uint32_t);
+  extern void OS_Tick_Handler(uint32_t);
+  InterruptHandlerRegister(SGI0_IRQn     , PendSV_Handler);
+  InterruptHandlerRegister(PrivTimer_IRQn, OS_Tick_Handler);
+  EnableCaches();
+#endif
+  
+  $Super$$main(); //Call the original main
+}
+
+//Fault Status Register (IFSR/DFSR) definitions
+#define FSR_ALIGNMENT_FAULT                  0x01   //DFSR only. Fault on first lookup
+#define FSR_INSTRUCTION_CACHE_MAINTENANCE    0x04   //DFSR only - async/external
+#define FSR_SYNC_EXT_TTB_WALK_FIRST          0x0c   //sync/external
+#define FSR_SYNC_EXT_TTB_WALK_SECOND         0x0e   //sync/external
+#define FSR_SYNC_PARITY_TTB_WALK_FIRST       0x1c   //sync/external
+#define FSR_SYNC_PARITY_TTB_WALK_SECOND      0x1e   //sync/external
+#define FSR_TRANSLATION_FAULT_FIRST          0x05   //MMU Fault - internal
+#define FSR_TRANSLATION_FAULT_SECOND         0x07   //MMU Fault - internal
+#define FSR_ACCESS_FLAG_FAULT_FIRST          0x03   //MMU Fault - internal
+#define FSR_ACCESS_FLAG_FAULT_SECOND         0x06   //MMU Fault - internal
+#define FSR_DOMAIN_FAULT_FIRST               0x09   //MMU Fault - internal
+#define FSR_DOMAIN_FAULT_SECOND              0x0b   //MMU Fault - internal
+#define FSR_PERMISSION_FAULT_FIRST           0x0f   //MMU Fault - internal
+#define FSR_PERMISSION_FAULT_SECOND          0x0d   //MMU Fault - internal
+#define FSR_DEBUG_EVENT                      0x02   //internal
+#define FSR_SYNC_EXT_ABORT                   0x08   //sync/external
+#define FSR_TLB_CONFLICT_ABORT               0x10   //sync/external
+#define FSR_LOCKDOWN                         0x14   //internal
+#define FSR_COPROCESSOR_ABORT                0x1a   //internal
+#define FSR_SYNC_PARITY_ERROR                0x19   //sync/external
+#define FSR_ASYNC_EXTERNAL_ABORT             0x16   //DFSR only - async/external
+#define FSR_ASYNC_PARITY_ERROR               0x18   //DFSR only - async/external
+
+/* C-level data abort handler, called from the DAbt_Handler assembly veneer.
+ *
+ * @param DFSR  Data Fault Status Register value (CP15 c5,c0,0)
+ * @param DFAR  Data Fault Address Register value (CP15 c6,c0,0);
+ *              invalid for some fault statuses (see cases below)
+ * @param LR    abort-mode link register (address context of the abort)
+ *
+ * Synchronous parity errors return immediately so the aborted access is
+ * retried; every other fault status spins forever until application-specific
+ * handling is added. */
+void CDAbtHandler(uint32_t DFSR, uint32_t DFAR, uint32_t LR) {
+  uint32_t FS = (DFSR & (1 << 10)) >> 6 | (DFSR & 0x0f); //Store Fault Status: DFSR[10] moved down to bit 4, combined with DFSR[3:0]
+
+  switch(FS) {
+    //Synchronous parity errors - retry
+    case FSR_SYNC_PARITY_ERROR:
+    case FSR_SYNC_PARITY_TTB_WALK_FIRST:
+    case FSR_SYNC_PARITY_TTB_WALK_SECOND:
+        return;
+
+    //Your code here. Value in DFAR is invalid for some fault statuses.
+    case FSR_ALIGNMENT_FAULT:
+    case FSR_INSTRUCTION_CACHE_MAINTENANCE:
+    case FSR_SYNC_EXT_TTB_WALK_FIRST:
+    case FSR_SYNC_EXT_TTB_WALK_SECOND:
+    case FSR_TRANSLATION_FAULT_FIRST:
+    case FSR_TRANSLATION_FAULT_SECOND:
+    case FSR_ACCESS_FLAG_FAULT_FIRST:
+    case FSR_ACCESS_FLAG_FAULT_SECOND:
+    case FSR_DOMAIN_FAULT_FIRST:
+    case FSR_DOMAIN_FAULT_SECOND:
+    case FSR_PERMISSION_FAULT_FIRST:
+    case FSR_PERMISSION_FAULT_SECOND:
+    case FSR_DEBUG_EVENT:
+    case FSR_SYNC_EXT_ABORT:
+    case FSR_TLB_CONFLICT_ABORT:
+    case FSR_LOCKDOWN:
+    case FSR_COPROCESSOR_ABORT:
+    case FSR_ASYNC_EXTERNAL_ABORT: //DFAR invalid
+    case FSR_ASYNC_PARITY_ERROR:   //DFAR invalid
+    default:
+      while(1);
+  }
+}
+
+/* C-level prefetch abort handler, called from the PAbt_Handler assembly veneer.
+ *
+ * @param IFSR  Instruction Fault Status Register value (CP15 c5,c0,1)
+ * @param IFAR  Instruction Fault Address Register value (CP15 c6,c0,2);
+ *              invalid for some fault statuses (see cases below)
+ * @param LR    abort-mode link register (address context of the abort)
+ *
+ * Synchronous parity errors return immediately so the aborted fetch is
+ * retried; every other fault status spins forever until application-specific
+ * handling is added. */
+void CPAbtHandler(uint32_t IFSR, uint32_t IFAR, uint32_t LR) {
+  uint32_t FS = (IFSR & (1 << 10)) >> 6 | (IFSR & 0x0f); //Store Fault Status: IFSR[10] moved down to bit 4, combined with IFSR[3:0]
+
+  switch(FS) {
+    //Synchronous parity errors - retry
+    case FSR_SYNC_PARITY_ERROR:
+    case FSR_SYNC_PARITY_TTB_WALK_FIRST:
+    case FSR_SYNC_PARITY_TTB_WALK_SECOND:
+      return;
+
+    //Your code here. Value in IFAR is invalid for some fault statuses.
+    case FSR_SYNC_EXT_TTB_WALK_FIRST:
+    case FSR_SYNC_EXT_TTB_WALK_SECOND:
+    case FSR_TRANSLATION_FAULT_FIRST:
+    case FSR_TRANSLATION_FAULT_SECOND:
+    case FSR_ACCESS_FLAG_FAULT_FIRST:
+    case FSR_ACCESS_FLAG_FAULT_SECOND:
+    case FSR_DOMAIN_FAULT_FIRST:
+    case FSR_DOMAIN_FAULT_SECOND:
+    case FSR_PERMISSION_FAULT_FIRST:
+    case FSR_PERMISSION_FAULT_SECOND:
+    case FSR_DEBUG_EVENT: //IFAR invalid
+    case FSR_SYNC_EXT_ABORT:
+    case FSR_TLB_CONFLICT_ABORT:
+    case FSR_LOCKDOWN:
+    case FSR_COPROCESSOR_ABORT:
+    default:
+      while(1);
+  }
+}
+
+//returns amount to decrement lr by
+//this will be 0 when we have emulated the instruction and want to execute the next instruction
+//this will be 2 when we have performed some maintenance and want to retry the instruction in Thumb (state == 2)
+//this will be 4 when we have performed some maintenance and want to retry the instruction in ARM   (state == 4)
+/* C-level undefined-instruction handler, called from the Undef_Handler
+ * assembly veneer.
+ *
+ * @param opcode  the offending (reconstructed) instruction word
+ * @param state   2 if taken from Thumb state, 4 if taken from ARM state
+ * @param LR      undef-mode link register (address after the instruction)
+ * @return amount to subtract from the stacked LR: equal to 'state' here, so
+ *         the instruction is retried after the FPU/NEON unit is enabled.
+ *
+ * Implements lazy VFP/NEON enabling: the first FP/NEON instruction traps,
+ * __FPU_Enable() is called, and the instruction is re-executed. Any other
+ * undefined instruction spins forever until application handling is added. */
+uint32_t CUndefHandler(uint32_t opcode, uint32_t state, uint32_t LR) {
+  const int THUMB = 2;
+  const int ARM = 4;
+  //Lazy VFP/NEON initialisation and switching
+
+  // (ARM ARM section A7.5) VFP data processing instruction?
+  // (ARM ARM section A7.6) VFP/NEON register load/store instruction?
+  // (ARM ARM section A7.8) VFP/NEON register data transfer instruction?
+  // (ARM ARM section A7.9) VFP/NEON 64-bit register data transfer instruction?
+  if ((state == ARM   && ((opcode & 0x0C000000) >> 26 == 0x03)) ||
+      (state == THUMB && ((opcode & 0xEC000000) >> 26 == 0x3B))) {
+    if (((opcode & 0x00000E00) >> 9) == 5) {  // coprocessor field selects CP10/CP11 (VFP/NEON)
+      __FPU_Enable();
+      return state;                           // retry the trapped instruction
+    }
+  }
+
+  // (ARM ARM section A7.4) NEON data processing instruction?
+  if ((state == ARM   && ((opcode & 0xFE000000) >> 24 == 0xF2)) ||
+      (state == THUMB && ((opcode & 0xEF000000) >> 24 == 0xEF)) ||
+      // (ARM ARM section A7.7) NEON load/store instruction?
+      (state == ARM   && ((opcode >> 24) == 0xF4)) ||
+      (state == THUMB && ((opcode >> 24) == 0xF9))) {
+    __FPU_Enable(); 
+    return state;                             // retry the trapped instruction
+  }
+
+  //Add code here for other Undef cases
+  while(1);
+}
diff --git a/Device/ARM/ARMCA9/Include/ARMCA9.h b/Device/ARM/ARMCA9/Include/ARMCA9.h
new file mode 100644
index 0000000..825341d
--- /dev/null
+++ b/Device/ARM/ARMCA9/Include/ARMCA9.h
@@ -0,0 +1,130 @@
+/******************************************************************************
+ * @file     ARMCA9.h
+ * @brief    CMSIS Cortex-A9 Core Peripheral Access Layer Header File 
+ * @version  V1.00
+ * @date     22 Feb 2017
+ *
+ * @note
+ *
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ARMCA9_H__
+#define __ARMCA9_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* -------------------------  Interrupt Number Definition  ------------------------ */
+
+typedef enum IRQn
+{
+/******  SGI Interrupts Numbers                 ****************************************/
+  SGI0_IRQn            =  0,
+  SGI1_IRQn            =  1,
+  SGI2_IRQn            =  2,
+  SGI3_IRQn            =  3,
+  SGI4_IRQn            =  4,
+  SGI5_IRQn            =  5,
+  SGI6_IRQn            =  6,
+  SGI7_IRQn            =  7,
+  SGI8_IRQn            =  8,
+  SGI9_IRQn            =  9,
+  SGI10_IRQn           = 10,
+  SGI11_IRQn           = 11,
+  SGI12_IRQn           = 12,
+  SGI13_IRQn           = 13,
+  SGI14_IRQn           = 14,
+  SGI15_IRQn           = 15,
+
+/******  Cortex-A9 Processor Exceptions Numbers ****************************************/
+  GlobalTimer_IRQn     = 27,        /*!< Global Timer Interrupt                        */
+  PrivTimer_IRQn       = 29,        /*!< Private Timer Interrupt                       */
+  PrivWatchdog_IRQn    = 30,        /*!< Private Watchdog Interrupt                    */
+
+/******  Platform Exceptions Numbers ***************************************************/
+  Watchdog_IRQn        = 32,        /*!< SP805 Interrupt        */
+  Timer0_IRQn          = 34,        /*!< SP804 Interrupt        */
+  Timer1_IRQn          = 35,        /*!< SP804 Interrupt        */
+  RTClock_IRQn         = 36,        /*!< PL031 Interrupt        */
+  UART0_IRQn           = 37,        /*!< PL011 Interrupt        */
+  UART1_IRQn           = 38,        /*!< PL011 Interrupt        */
+  UART2_IRQn           = 39,        /*!< PL011 Interrupt        */
+  UART3_IRQn           = 40,        /*!< PL011 Interrupt        */
+  MCI0_IRQn            = 41,        /*!< PL180 Interrupt (1st)  */
+  MCI1_IRQn            = 42,        /*!< PL180 Interrupt (2nd)  */
+  AACI_IRQn            = 43,        /*!< PL041 Interrupt        */
+  Keyboard_IRQn        = 44,        /*!< PL050 Interrupt        */
+  Mouse_IRQn           = 45,        /*!< PL050 Interrupt        */
+  CLCD_IRQn            = 46,        /*!< PL111 Interrupt        */
+  Ethernet_IRQn        = 47,        /*!< SMSC_91C111 Interrupt  */
+  VFS2_IRQn            = 73,        /*!< VFS2 Interrupt         */
+} IRQn_Type;
+
+/******************************************************************************/
+/*                         Peripheral memory map                              */
+/******************************************************************************/
+
+/* Peripheral and RAM base address */
+#define VE_A9_MP_FLASH_BASE0                  (0x00000000UL)                        /*!< (FLASH0    ) Base Address */
+#define VE_A9_MP_FLASH_BASE1                  (0x08000000UL)                        /*!< (FLASH1    ) Base Address */
+#define VE_A9_MP_PERIPH_BASE                  (0x18000000UL)                        /*!< (Peripheral) Base Address */
+#define VE_A9_MP_SRAM_BASE                    (0x2E000000UL)                        /*!< (SRAM      ) Base Address */
+#define VE_A9_MP_DRAM_BASE                    (0x80000000UL)                        /*!< (DRAM      ) Base Address */
+#define VE_A9_MP_VRAM_BASE                    (0x18000000UL)                        /*!< (VRAM      ) Base Address */
+#define VE_A9_MP_ETHERNET_BASE                (0x02000000UL + VE_A9_MP_PERIPH_BASE) /*!< (ETHERNET  ) Base Address */
+#define VE_A9_MP_USB_BASE                     (0x03000000UL + VE_A9_MP_PERIPH_BASE) /*!< (USB       ) Base Address */
+#define VE_A9_MP_DAP_BASE                     (0x1C000000UL)                        /*!< (DAP       ) Base Address */
+#define VE_A9_MP_SYSTEM_REG_BASE              (0x00010000UL + 0x1C000000UL)         /*!< (SYSTEM REG) Base Address */
+#define VE_A9_MP_SERIAL_BASE                  (0x00030000UL + 0x1C000000UL)         /*!< (SERIAL    ) Base Address */
+#define VE_A9_MP_AACI_BASE                    (0x00040000UL + 0x1C000000UL)         /*!< (AACI      ) Base Address */
+#define VE_A9_MP_MMCI_BASE                    (0x00050000UL + 0x1C000000UL)         /*!< (MMCI      ) Base Address */
+#define VE_A9_MP_KMI0_BASE                    (0x00060000UL + 0x1C000000UL)         /*!< (KMI0      ) Base Address */
+#define VE_A9_MP_UART_BASE                    (0x00090000UL + 0x1C000000UL)         /*!< (UART      ) Base Address */
+#define VE_A9_MP_WDT_BASE                     (0x000F0000UL + 0x1C000000UL)         /*!< (WDT       ) Base Address */
+#define VE_A9_MP_TIMER_BASE                   (0x00110000UL + 0x1C000000UL)         /*!< (TIMER     ) Base Address */
+#define VE_A9_MP_DVI_BASE                     (0x00160000UL + 0x1C000000UL)         /*!< (DVI       ) Base Address */
+#define VE_A9_MP_RTC_BASE                     (0x00170000UL + 0x1C000000UL)         /*!< (RTC       ) Base Address */
+#define VE_A9_MP_UART4_BASE                   (0x001B0000UL + 0x1C000000UL)         /*!< (UART4     ) Base Address */
+#define VE_A9_MP_CLCD_BASE                    (0x001F0000UL + 0x1C000000UL)         /*!< (CLCD      ) Base Address */
+#define VE_A9_MP_GIC_DISTRIBUTOR_BASE         (0x00001000UL + 0x2C000000UL)         /*!< (GIC DIST  ) Base Address */
+#define VE_A9_MP_GIC_INTERFACE_BASE           (0x00000100UL + 0x2C000000UL)         /*!< (GIC CPU IF) Base Address */
+#define GIC_DISTRIBUTOR_BASE                  VE_A9_MP_GIC_DISTRIBUTOR_BASE
+#define GIC_INTERFACE_BASE                    VE_A9_MP_GIC_INTERFACE_BASE
+
+//The VE-A9 model implements L1 cache as architecturally defined, but does not implement L2 cache.
+//Do not enable the L2 cache if you are running RTX on a VE-A9 model as it may cause a data abort.
+#define VE_A9_MP_PL310_BASE                   (0x1E00A000UL)                        /*!< (L2C-310   ) Base Address */
+#define L2C_310_BASE                          VE_A9_MP_PL310_BASE
+
+/* --------  Configuration of the Cortex-A9 Processor and Core Peripherals  ------- */
+#define __CA_REV        0x0000U    /*!< Core revision r0p0                          */
+#define __CORTEX_A           9U    /*!< Cortex-A9 Core                              */
+#define __FPU_PRESENT        1U    /* FPU present                                   */
+
+#include "core_ca.h"
+#include <system_ARMCA9.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // __ARMCA9_H__
diff --git a/Device/ARM/ARMCA9/Include/system_ARMCA9.h b/Device/ARM/ARMCA9/Include/system_ARMCA9.h
new file mode 100644
index 0000000..9b1e9f2
--- /dev/null
+++ b/Device/ARM/ARMCA9/Include/system_ARMCA9.h
@@ -0,0 +1,54 @@
+/******************************************************************************
+ * @file     system_ARMCA9.h
+ * @brief    CMSIS Device System Header File for ARM Cortex-A Device Series
+ * @version  V1.00
+ * @date     22 Feb 2017
+ *
+ * @note
+ *
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SYSTEM_ARMCA9_H
+#define __SYSTEM_ARMCA9_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef void(*IRQHandler)();
+uint32_t InterruptHandlerRegister(IRQn_Type, IRQHandler);
+uint32_t InterruptHandlerUnregister(IRQn_Type);
+
+/**
+ * Initialize the system
+ *
+ * @param  none
+ * @return none
+ *
+ * @brief  Setup the microcontroller system.
+ *         Initialize the System and update the SystemCoreClock variable.
+ */
+extern void SystemInit (void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __SYSTEM_ARMCA9_H */
diff --git a/Device/ARM/ARMCA9/Source/ARM/ARMCA9.sct b/Device/ARM/ARMCA9/Source/ARM/ARMCA9.sct
new file mode 100644
index 0000000..66cf773
--- /dev/null
+++ b/Device/ARM/ARMCA9/Source/ARM/ARMCA9.sct
@@ -0,0 +1,37 @@
+;**************************************************
+; Copyright (c) 2017 ARM Ltd.  All rights reserved.
+;**************************************************
+
+; Scatter-file for RTX Example on Versatile Express
+
+; This scatter-file places application code, data, stack and heap at suitable addresses in the memory map.
+
+; This platform has 2GB SDRAM starting at 0x80000000.
+
+
+SDRAM 0x80000000 0x40000000
+{
+    VECTORS +0 0x200000
+    {
+        * (RESET, +FIRST)         ; Vector table and other (assembler) startup code
+        * (InRoot$$Sections)      ; All (library) code that must be in a root region
+        * (+RO-CODE)              ; Application RO code (.text)
+        * (+RO-DATA)              ; Application RO data (.constdata)
+    }
+
+    RW_DATA 0x80200000 0x100000
+    { * (+RW) }                   ; Application RW data (.data)
+
+    ZI_DATA 0x80300000 0x0F0000
+    { * (+ZI) }                   ; Application ZI data (.bss)
+
+    ARM_LIB_STACK 0x80400000 EMPTY -0x8000 ; Stack region growing down
+    { }
+    
+    ARM_LIB_HEAP  0x803F0000 EMPTY  0x8000 ; Heap region growing up
+    { }
+
+    TTB     0x80500000 EMPTY 0x4000
+    { }                           ; Level-1 Translation Table for MMU
+
+}
diff --git a/Device/ARM/ARMCA9/Source/ARM/startup_ARMCA9.s b/Device/ARM/ARMCA9/Source/ARM/startup_ARMCA9.s
new file mode 100644
index 0000000..d489175
--- /dev/null
+++ b/Device/ARM/ARMCA9/Source/ARM/startup_ARMCA9.s
@@ -0,0 +1,492 @@
+;/******************************************************************************
+; * @file     startup_ARMCA9.s
+; * @brief    CMSIS Core Device Startup File for ARM Cortex-A9 Device Series
+; * @version  V1.00
+; * @date     22 Feb 2017
+; *
+; * @note
+; *
+; ******************************************************************************/
+;/*
+; * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+; *
+; * SPDX-License-Identifier: Apache-2.0
+; *
+; * Licensed under the Apache License, Version 2.0 (the License); you may
+; * not use this file except in compliance with the License.
+; * You may obtain a copy of the License at
+; *
+; * www.apache.org/licenses/LICENSE-2.0
+; *
+; * Unless required by applicable law or agreed to in writing, software
+; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; * See the License for the specific language governing permissions and
+; * limitations under the License.
+; */
+;/*
+;//-------- <<< Use Configuration Wizard in Context Menu >>> ------------------
+;*/
+
+GICI_BASE       EQU     0x2C000100
+ICCIAR_OFFSET   EQU     0x0000000C
+ICCEOIR_OFFSET  EQU     0x00000010
+ICCHPIR_OFFSET  EQU     0x00000018
+
+GICD_BASE       EQU     0x2C001000
+ICDABR0_OFFSET  EQU     0x00000300
+ICDIPR0_OFFSET  EQU     0x00000400
+
+Mode_USR        EQU     0x10
+Mode_FIQ        EQU     0x11
+Mode_IRQ        EQU     0x12
+Mode_SVC        EQU     0x13
+Mode_ABT        EQU     0x17
+Mode_UND        EQU     0x1B
+Mode_SYS        EQU     0x1F
+
+I_Bit           EQU     0x80            ; when I bit is set, IRQ is disabled
+F_Bit           EQU     0x40            ; when F bit is set, FIQ is disabled
+T_Bit           EQU     0x20            ; when T bit is set, core is in Thumb state
+
+Sect_Normal     EQU     0x00005c06 ;outer & inner wb/wa, non-shareable, executable, rw, domain 0, base addr 0
+Sect_Normal_Cod EQU     0x0000dc06 ;outer & inner wb/wa, non-shareable, executable, ro, domain 0, base addr 0
+Sect_Normal_RO  EQU     0x0000dc16 ;as Sect_Normal_Cod, but not executable
+Sect_Normal_RW  EQU     0x00005c16 ;as Sect_Normal_Cod, but writeable and not executable
+Sect_SO         EQU     0x00000c12 ;strongly-ordered (therefore shareable), not executable, rw, domain 0, base addr 0
+Sect_Device_RO  EQU     0x00008c12 ;device, non-shareable, non-executable, ro, domain 0, base addr 0
+Sect_Device_RW  EQU     0x00000c12 ;as Sect_Device_RO, but writeable
+Sect_Fault      EQU     0x00000000 ;this translation will fault (the bottom 2 bits are important, the rest are ignored)
+
+RAM_BASE        EQU     0x80000000
+VRAM_BASE       EQU     0x18000000
+SRAM_BASE       EQU     0x2e000000
+ETHERNET        EQU     0x1a000000
+CS3_PERIPHERAL_BASE EQU 0x1c000000
+
+; <h> Stack Configuration
+;   <o> Stack Size (in Bytes, per mode) <0x0-0xFFFFFFFF:8>
+; </h>
+
+UND_Stack_Size  EQU     0x00000100
+SVC_Stack_Size  EQU     0x00000100
+ABT_Stack_Size  EQU     0x00000100
+FIQ_Stack_Size  EQU     0x00000000
+IRQ_Stack_Size  EQU     0x00000100
+USR_Stack_Size  EQU     0x00000100
+
+ISR_Stack_Size  EQU     (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
+                         FIQ_Stack_Size + IRQ_Stack_Size)
+
+                AREA    STACK, NOINIT, READWRITE, ALIGN=3
+Stack_Mem       SPACE   USR_Stack_Size
+__initial_sp    SPACE   ISR_Stack_Size
+
+Stack_Top
+
+
+; <h> Heap Configuration
+;   <o>  Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
+; </h>
+
+Heap_Size       EQU     0x00000000
+
+                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
+__heap_base
+Heap_Mem        SPACE   Heap_Size
+__heap_limit
+
+
+                PRESERVE8
+                ARM
+
+
+; Vector Table Mapped to Address 0 at Reset
+
+                AREA    RESET, CODE, READONLY
+                EXPORT  __Vectors
+                EXPORT  __Vectors_End
+                EXPORT  __Vectors_Size
+
+__Vectors       LDR     PC, Reset_Addr            ; Address of Reset Handler
+                LDR     PC, Undef_Addr            ; Address of Undef Handler
+                LDR     PC, SVC_Addr              ; Address of SVC Handler
+                LDR     PC, PAbt_Addr             ; Address of Prefetch Abort Handler
+                LDR     PC, DAbt_Addr             ; Address of Data Abort Handler
+                NOP                               ; Reserved Vector
+                LDR     PC, IRQ_Addr              ; Address of IRQ Handler
+                LDR     PC, FIQ_Addr              ; Address of FIQ Handler
+__Vectors_End
+
+__Vectors_Size  EQU     __Vectors_End - __Vectors
+
+Reset_Addr      DCD     Reset_Handler
+Undef_Addr      DCD     Undef_Handler
+SVC_Addr        DCD     SVC_Handler
+PAbt_Addr       DCD     PAbt_Handler
+DAbt_Addr       DCD     DAbt_Handler
+IRQ_Addr        DCD     IRQ_Handler
+FIQ_Addr        DCD     FIQ_Handler
+
+                AREA    |.text|, CODE, READONLY
+
+Reset_Handler   PROC
+                EXPORT  Reset_Handler             [WEAK]
+                IMPORT  SystemInit
+                IMPORT  __main
+
+                ; Put any cores other than 0 to sleep
+                MRC     p15, 0, R0, c0, c0, 5     ; Read MPIDR
+                ANDS    R0, R0, #3
+goToSleep
+                WFINE
+                BNE     goToSleep
+
+                MRC     p15, 0, R0, c1, c0, 0       ; Read CP15 System Control register
+                BIC     R0, R0, #(0x1 << 12)        ; Clear I bit 12 to disable I Cache
+                BIC     R0, R0, #(0x1 <<  2)        ; Clear C bit  2 to disable D Cache
+                BIC     R0, R0, #0x1                ; Clear M bit  0 to disable MMU
+                BIC     R0, R0, #(0x1 << 11)        ; Clear Z bit 11 to disable branch prediction
+                BIC     R0, R0, #(0x1 << 13)        ; Clear V bit 13 to disable hivecs
+                MCR     p15, 0, R0, c1, c0, 0       ; Write value back to CP15 System Control register
+                ISB
+
+; Configure ACTLR
+                MRC     p15, 0, r0, c1, c0, 1       ; Read CP15 Auxiliary Control Register
+                ORR     r0, r0, #(1 <<  1)          ; Enable L2 prefetch hint (UNK/WI since r4p1)
+                MCR     p15, 0, r0, c1, c0, 1       ; Write CP15 Auxiliary Control Register
+
+; Set Vector Base Address Register (VBAR) to point to this application's vector table
+                LDR     R0, =__Vectors
+                MCR     p15, 0, R0, c12, c0, 0
+
+;  Setup Stack for each exceptional mode
+                LDR     R0, =Stack_Top
+
+;  Enter Undefined Instruction Mode and set its Stack Pointer
+                MSR     CPSR_C, #Mode_UND:OR:I_Bit:OR:F_Bit
+                MOV     SP, R0
+                SUB     R0, R0, #UND_Stack_Size
+
+;  Enter Abort Mode and set its Stack Pointer
+                MSR     CPSR_C, #Mode_ABT:OR:I_Bit:OR:F_Bit
+                MOV     SP, R0
+                SUB     R0, R0, #ABT_Stack_Size
+
+;  Enter FIQ Mode and set its Stack Pointer
+                MSR     CPSR_C, #Mode_FIQ:OR:I_Bit:OR:F_Bit
+                MOV     SP, R0
+                SUB     R0, R0, #FIQ_Stack_Size
+
+;  Enter IRQ Mode and set its Stack Pointer
+                MSR     CPSR_C, #Mode_IRQ:OR:I_Bit:OR:F_Bit
+                MOV     SP, R0
+                SUB     R0, R0, #IRQ_Stack_Size
+
+;  Enter Supervisor Mode and set its Stack Pointer
+                MSR     CPSR_C, #Mode_SVC:OR:I_Bit:OR:F_Bit
+                MOV     SP, R0
+
+;  Enter System Mode to complete initialization and enter kernel
+                MSR     CPSR_C, #Mode_SYS:OR:I_Bit:OR:F_Bit
+                MOV     SP, R0
+
+                IMPORT  MMU_CreateTranslationTable
+                BL      MMU_CreateTranslationTable
+
+                MOV     r0, #0x0
+                MCR     p15, 0, r0, c8, c7, 0     ; TLBIALL - Invalidate entire Unified TLB
+                MCR     p15, 0, r0, c7, c5, 6     ; BPIALL  - Invalidate entire branch predictor array
+                DSB
+                ISB
+                MCR     p15, 0, r0, c7, c5, 0     ; ICIALLU - Invalidate instruction cache and flush branch target cache
+                DSB
+                ISB
+
+;  Invalidate data cache
+                MOV     r0, #0x0                  ; 0 = invalidate data cache, 1 = clean data cache.
+
+                MRC     p15, 1, R6, c0, c0, 1     ; Read CLIDR
+                ANDS    R3, R6, #0x07000000       ; Extract coherency level
+                MOV     R3, R3, LSR #23           ; Total cache levels << 1
+                BEQ     Finished                  ; If 0, no need to clean
+
+                MOV     R10, #0                   ; R10 holds current cache level << 1
+Loop1           ADD     R2, R10, R10, LSR #1      ; R2 holds cache "Set" position
+                MOV     R1, R6, LSR R2            ; Bottom 3 bits are the Cache-type for this level
+                AND     R1, R1, #7                ; Isolate those lower 3 bits
+                CMP     R1, #2
+                BLT     Skip                      ; No cache or only instruction cache at this level
+
+                MCR     p15, 2, R10, c0, c0, 0    ; Write the Cache Size selection register
+                ISB                               ; ISB to sync the change to the CacheSizeID reg
+                MRC     p15, 1, R1, c0, c0, 0     ; Reads current Cache Size ID register
+                AND     R2, R1, #7                ; Extract the line length field
+                ADD     R2, R2, #4                ; Add 4 for the line length offset (log2 16 bytes)
+                LDR     R4, =0x3FF
+                ANDS    R4, R4, R1, LSR #3        ; R4 is the max number on the way size (right aligned)
+                CLZ     R5, R4                    ; R5 is the bit position of the way size increment
+                LDR     R7, =0x7FFF
+                ANDS    R7, R7, R1, LSR #13       ; R7 is the max number of the index size (right aligned)
+
+Loop2           MOV     R9, R4                    ; R9 working copy of the max way size (right aligned)
+
+Loop3           ORR     R11, R10, R9, LSL R5      ; Factor in the Way number and cache number into R11
+                ORR     R11, R11, R7, LSL R2      ; Factor in the Set number
+                CMP     R0, #0
+                BNE     Dccsw
+                MCR     p15, 0, R11, c7, c6, 2    ; DCISW. Invalidate by Set/Way
+                B       cont
+Dccsw           CMP     R0, #1
+                BNE     Dccisw
+                MCR     p15, 0, R11, c7, c10, 2   ; DCCSW. Clean by Set/Way
+                B       cont
+Dccisw          MCR     p15, 0, R11, c7, c14, 2   ; DCCISW. Clean and Invalidate by Set/Way
+cont            SUBS    R9, R9, #1                ; Decrement the Way number
+                BGE     Loop3
+                SUBS    R7, R7, #1                ; Decrement the Set number
+                BGE     Loop2
+Skip            ADD     R10, R10, #2              ; Increment the cache number
+                CMP     R3, R10
+                BGT     Loop1
+Finished
+                DSB
+
+;  Enable MMU, but leave caches disabled (they will be enabled later)
+                MRC     p15, 0, r0, c1, c0, 0     ; Read CP15 System Control register
+                ORR     r0, r0, #(0x1 << 29)      ; Set AFE bit 29 to enable simplified access permissions model
+                BIC     r0, r0, #(0x1 << 28)      ; Clear TRE bit 28 to disable TEX remap
+                BIC     r0, r0, #(0x1 << 12)      ; Clear I bit 12 to disable I Cache
+                BIC     r0, r0, #(0x1 <<  2)      ; Clear C bit  2 to disable D Cache
+                BIC     r0, r0, #(0x1 <<  1)      ; Clear A bit  1 to disable strict alignment fault checking
+                ORR     r0, r0, #0x1              ; Set M bit 0 to enable MMU
+                MCR     p15, 0, r0, c1, c0, 0     ; Write CP15 System Control register
+
+;  USR/SYS stack pointer will be set during kernel init
+                LDR     R0, =SystemInit
+                BLX     R0
+                LDR     R0, =__main
+                BLX     R0
+
+                ENDP
+
+Undef_Handler\
+                PROC
+                EXPORT  Undef_Handler             [WEAK]
+                IMPORT  CUndefHandler
+                SRSFD   SP!, #Mode_UND
+                PUSH    {R0-R4, R12}              ; Save APCS corruptible registers to UND mode stack
+
+                MRS     R0, SPSR
+                TST     R0, #T_Bit                ; Check mode
+                MOVEQ   R1, #4                    ; R1 = 4 ARM mode
+                MOVNE   R1, #2                    ; R1 = 2 Thumb mode
+                SUB     R0, LR, R1
+                LDREQ   R0, [R0]                  ; ARM mode - R0 points to offending instruction
+                BEQ     undef_cont
+
+                ;Thumb instruction
+                ;Determine if it is a 32-bit Thumb instruction
+                LDRH    R0, [R0]
+                MOV     R2, #0x1c
+                CMP     R2, R0, LSR #11
+                BHS     undef_cont                ;16-bit Thumb instruction
+
+                ;32-bit Thumb instruction. Unaligned - we need to reconstruct the offending instruction
+                LDRH    R2, [LR]
+                ORR     R0, R2, R0, LSL #16
+undef_cont
+                MOV     R2, LR                    ; Set LR to third argument
+
+                AND     R12, SP, #4               ; Ensure stack is 8-byte aligned
+                SUB     SP, SP, R12               ; Adjust stack
+                PUSH    {R12, LR}                 ; Store stack adjustment and dummy LR
+
+                ;R0 Offending instruction
+                ;R1 =2 (Thumb) or =4 (ARM)
+                BL      CUndefHandler
+
+                POP     {R12, LR}                 ; Get stack adjustment & discard dummy LR
+                ADD     SP, SP, R12               ; Unadjust stack
+
+                LDR     LR, [SP, #24]             ; Restore stacked LR and possibly adjust for retry
+                SUB     LR, LR, R0
+                LDR     R0, [SP, #28]             ; Restore stacked SPSR
+                MSR     SPSR_CXSF, R0
+                POP     {R0-R4, R12}              ; Restore stacked APCS registers
+                ADD     SP, SP, #8                ; Adjust SP for already-restored banked registers
+                MOVS    PC, LR
+                ENDP
+
+PAbt_Handler\
+                PROC
+                EXPORT  PAbt_Handler              [WEAK]
+                IMPORT  CPAbtHandler
+                SUB     LR, LR, #4                ; Pre-adjust LR
+                SRSFD   SP!, #Mode_ABT            ; Save LR and SPSR to ABT mode stack
+                PUSH    {R0-R4, R12}              ; Save APCS corruptible registers to ABT mode stack
+                MRC     p15, 0, R0, c5, c0, 1     ; IFSR
+                MRC     p15, 0, R1, c6, c0, 2     ; IFAR
+
+                MOV     R2, LR                    ; Set LR to third argument
+
+                AND     R12, SP, #4               ; Ensure stack is 8-byte aligned
+                SUB     SP, SP, R12               ; Adjust stack
+                PUSH    {R12, LR}                 ; Store stack adjustment and dummy LR
+
+                BL      CPAbtHandler
+
+                POP     {R12, LR}                 ; Get stack adjustment & discard dummy LR
+                ADD     SP, SP, R12               ; Unadjust stack
+
+                POP     {R0-R4, R12}              ; Restore stack APCS registers
+                RFEFD   SP!                       ; Return from exception
+                ENDP
+
+
+DAbt_Handler\
+                PROC
+                EXPORT  DAbt_Handler              [WEAK]
+                IMPORT  CDAbtHandler
+                SUB     LR, LR, #8                ; Pre-adjust LR
+                SRSFD   SP!, #Mode_ABT            ; Save LR and SPSR to ABT mode stack
+                PUSH    {R0-R4, R12}              ; Save APCS corruptible registers to ABT mode stack
+                CLREX                             ; State of exclusive monitors unknown after taken data abort
+                MRC     p15, 0, R0, c5, c0, 0     ; DFSR
+                MRC     p15, 0, R1, c6, c0, 0     ; DFAR
+
+                MOV     R2, LR                    ; Set LR to third argument
+
+                AND     R12, SP, #4               ; Ensure stack is 8-byte aligned
+                SUB     SP, SP, R12               ; Adjust stack
+                PUSH    {R12, LR}                 ; Store stack adjustment and dummy LR
+
+                BL      CDAbtHandler
+
+                POP     {R12, LR}                 ; Get stack adjustment & discard dummy LR
+                ADD     SP, SP, R12               ; Unadjust stack
+
+                POP     {R0-R4, R12}              ; Restore stacked APCS registers
+                RFEFD   SP!                       ; Return from exception
+                ENDP
+
+FIQ_Handler\
+                PROC
+                EXPORT  FIQ_Handler               [WEAK]
+                ;; An FIQ might occur between the dummy read and the real read of the GIC in IRQ_Handler,
+                ;; so if a real FIQ Handler is implemented, this will be needed before returning:
+                ;; LDR     R1, =GICI_BASE
+                ;; LDR     R0, [R1, #ICCHPIR_OFFSET]   ; Dummy Read ICCHPIR (GIC CPU Interface register) to avoid GIC 390 errata 801120
+                B       .                         ; Default weak handler: loop forever
+                ENDP
+
+SVC_Handler\
+                PROC
+                EXPORT  SVC_Handler               [WEAK]
+                B       .                         ; Default weak handler: loop forever
+                ENDP
+
+IRQ_Handler\
+                PROC
+                EXPORT  IRQ_Handler                [WEAK]
+                IMPORT  IRQCount
+                IMPORT  IRQTable
+                IMPORT  IRQNestLevel                ; Flag indicates whether inside an ISR, and the depth of nesting.  0 = not in ISR.
+                IMPORT  seen_id0_active             ; Flag used to workaround GIC 390 errata 733075
+
+                ;prologue
+                SUB     LR, LR, #4                  ; Pre-adjust LR
+                SRSFD   SP!, #Mode_SVC              ; Save LR_IRQ and SPSR_IRQ to SVC mode stack
+                CPS     #Mode_SVC                   ; Switch to SVC mode, to avoid a nested interrupt corrupting LR on a BL
+                PUSH    {R0-R3, R12}                ; Save remaining APCS corruptible registers to SVC stack
+
+                AND     R1, SP, #4                  ; Ensure stack is 8-byte aligned
+                SUB     SP, SP, R1                  ; Adjust stack
+                PUSH    {R1, LR}                    ; Store stack adjustment and LR_SVC to SVC stack
+
+                LDR     R0, =IRQNestLevel           ; Get address of nesting counter
+                LDR     R1, [R0]
+                ADD     R1, R1, #1                  ; Increment nesting counter
+                STR     R1, [R0]
+
+                ;identify and acknowledge interrupt
+                LDR     R1, =GICI_BASE
+                LDR     R0, [R1, #ICCHPIR_OFFSET]   ; Dummy Read ICCHPIR (GIC CPU Interface register) to avoid GIC 390 errata 801120
+                LDR     R0, [R1, #ICCIAR_OFFSET]    ; Read ICCIAR (GIC CPU Interface register)
+                DSB                                 ; Ensure that interrupt acknowledge completes before re-enabling interrupts
+
+                ; Workaround GIC 390 errata 733075 - see GIC-390_Errata_Notice_v6.pdf dated 09-Jul-2014
+                ; The following workaround code is for a single-core system.  It would be different in a multi-core system.
+                ; If the ID is 0 or 0x3FE or 0x3FF, then the GIC CPU interface may be locked-up so unlock it, otherwise service the interrupt as normal
+                ; Special IDs 1020=0x3FC and 1021=0x3FD are reserved values in GICv1 and GICv2 so will not occur here
+                CMP     R0, #0
+                BEQ     unlock
+                MOV     R2, #0x3FE
+                CMP     R0, R2
+                BLT     normal
+unlock
+                ; Unlock the CPU interface with a dummy write to ICDIPR0
+                LDR     R2, =GICD_BASE
+                LDR     R3, [R2, #ICDIPR0_OFFSET]
+                STR     R3, [R2, #ICDIPR0_OFFSET]
+                DSB                                 ; Ensure the write completes before continuing
+
+                ; If the ID is 0 and it is active and has not been seen before, then service it as normal,
+                ; otherwise the interrupt should be treated as spurious and not serviced.
+                CMP     R0, #0
+                BNE     ret_irq                     ; Not 0, so spurious
+                LDR     R3, [R2, #ICDABR0_OFFSET]   ; Get the interrupt state
+                TST     R3, #1
+                BEQ     ret_irq                     ; Not active, so spurious
+                LDR     R2, =seen_id0_active
+                LDRB    R3, [R2]
+                CMP     R3, #1
+                BEQ     ret_irq                     ; Seen it before, so spurious
+
+                ; Record that ID0 has now been seen, then service it as normal
+                MOV     R3, #1
+                STRB    R3, [R2]
+                ; End of Workaround GIC 390 errata 733075
+
+normal
+                LDR     R2, =IRQCount               ; Read number of entries in IRQ handler table
+                LDR     R2, [R2]
+                CMP     R0, R2                      ; Is there a handler for this IRQ?
+                BHS     end_int                     ; No handler, so return as normal
+                LDR     R2, =IRQTable               ; Get address of handler
+                LDR     R2, [R2, R0, LSL #2]
+                CMP     R2, #0                      ; Clean up and return if handler address is 0
+                BEQ     end_int
+                PUSH    {R0,R1}
+
+                CPSIE   i                           ; Now safe to re-enable interrupts
+                BLX     R2                          ; Call handler. R0 = IRQ number. Beware calls to PendSV_Handler and OS_Tick_Handler do not return this way
+                CPSID   i                           ; Disable interrupts again
+
+                POP     {R0,R1}
+                DSB                                 ; Ensure that interrupt source is cleared before signalling End Of Interrupt
+end_int
+                ; R0 still contains the interrupt ID
+                ; R1 still contains GICI_BASE
+                ; EOI does not need to be written for IDs 1020 to 1023 (0x3FC to 0x3FF)
+                STR     R0, [R1, #ICCEOIR_OFFSET]   ; Normal end-of-interrupt write to EOIR (GIC CPU Interface register) to clear the active bit
+
+                ; If it was ID0, clear the seen flag, otherwise return as normal
+                CMP     R0, #0
+                LDREQ   R1, =seen_id0_active
+                STRBEQ  R0, [R1]                    ; Clear the seen flag, using R0 (which is 0), to save loading another register
+ret_irq
+                ;epilogue
+                LDR     R0, =IRQNestLevel           ; Get address of nesting counter
+                LDR     R1, [R0]
+                SUB     R1, R1, #1                  ; Decrement nesting counter
+                STR     R1, [R0]
+
+                POP     {R1, LR}                    ; Get stack adjustment and restore LR_SVC
+                ADD     SP, SP, R1                  ; Unadjust stack
+
+                POP     {R0-R3,R12}                 ; Restore stacked APCS registers
+                RFEFD   SP!                         ; Return from exception
+                ENDP
+
+                END
diff --git a/Device/ARM/ARMCA9/Source/mmu_ARMCA9.c b/Device/ARM/ARMCA9/Source/mmu_ARMCA9.c
new file mode 100644
index 0000000..735c170
--- /dev/null
+++ b/Device/ARM/ARMCA9/Source/mmu_ARMCA9.c
@@ -0,0 +1,236 @@
+/**************************************************************************//**
+ * @file     mmu_ARMCA9.c
+ * @brief    MMU Configuration for ARM Cortex-A9 Device Series
+ * @version  V1.00
+ * @date     22 Feb 2017
+ *
+ * @note
+ *
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Memory map description from: DUI0447G_v2m_p1_trm.pdf 4.2.2 ARM Cortex-A Series memory map
+
+                                                     Memory Type
+0xffffffff |--------------------------|             ------------
+           |       FLAG SYNC          |             Device Memory
+0xfffff000 |--------------------------|             ------------
+           |         Fault            |                Fault
+0xfff00000 |--------------------------|             ------------
+           |                          |                Normal
+           |                          |
+           |      Daughterboard       |
+           |         memory           |
+           |                          |
+0x80505000 |--------------------------|             ------------
+           |TTB (L2 Sync Flags   ) 4k |                Normal
+0x80504C00 |--------------------------|             ------------
+           |TTB (L2 Peripherals-B) 16k|                Normal
+0x80504800 |--------------------------|             ------------
+           |TTB (L2 Peripherals-A) 16k|                Normal
+0x80504400 |--------------------------|             ------------
+           |TTB (L2 Priv Periphs)  4k |                Normal
+0x80504000 |--------------------------|             ------------
+           |    TTB (L1 Descriptors)  |                Normal
+0x80500000 |--------------------------|             ------------
+           |           Heap           |                Normal
+           |--------------------------|             ------------
+           |          Stack           |                Normal
+0x80400000 |--------------------------|             ------------
+           |         ZI Data          |                Normal
+0x80300000 |--------------------------|             ------------
+           |         RW Data          |                Normal
+0x80200000 |--------------------------|             ------------
+           |         RO Data          |                Normal
+           |--------------------------|             ------------
+           |         RO Code          |              USH Normal
+0x80000000 |--------------------------|             ------------
+           |      Daughterboard       |                Fault
+           |      HSB AXI buses       |
+0x40000000 |--------------------------|             ------------
+           |      Daughterboard       |                Fault
+           |  test chips peripherals  |
+0x2c002000 |--------------------------|             ------------
+           |     Private Address      |            Device Memory
+0x2c000000 |--------------------------|             ------------
+           |      Daughterboard       |                Fault
+           |  test chips peripherals  |
+0x20000000 |--------------------------|             ------------
+           |       Peripherals        |           Device Memory RW/RO
+           |                          |              & Fault
+0x00000000 |--------------------------|
+*/
+
+// L1 Cache info and restrictions about architecture of the caches (CCSIR register):
+// Write-Through support *not* available
+// Write-Back support available.
+// Read allocation support available.
+// Write allocation support available.
+
+//Note: You should use the Shareable attribute carefully.
+//For cores without coherency logic (such as SCU) marking a region as shareable forces the processor to not cache that region regardless of the inner cache settings.
+//Cortex-A versions of RTX use LDREX/STREX instructions relying on Local monitors. Local monitors will be used only when the region gets cached, regions that are not cached will use the Global Monitor.
+//Some Cortex-A implementations do not include Global Monitors, so wrongly setting the attribute Shareable may cause STREX to fail.
+
+//Recall: When the Shareable attribute is applied to a memory region that is not Write-Back, Normal memory, data held in this region is treated as Non-cacheable.
+//When SMP bit = 0, Inner WB/WA Cacheable Shareable attributes are treated as Non-cacheable.
+//When SMP bit = 1, Inner WB/WA Cacheable Shareable attributes are treated as Cacheable.
+
+
+//Following MMU configuration is expected
+//SCTLR.AFE == 1 (Simplified access permissions model - AP[2:1] define access permissions, AP[0] is an access flag)
+//SCTLR.TRE == 0 (TEX remap disabled, so memory type and attributes are described directly by bits in the descriptor)
+//Domain 0 is always the Client domain
+//Descriptors should place all memory in domain 0
+
+#include <stdint.h>
+#include "ARMCA9.h"
+
+
+// L2 table pointers
+//----------------------------------------
+#define PRIVATE_TABLE_L2_BASE_4k       (0x80504000) //Map 4k Private Address space
+#define SYNC_FLAGS_TABLE_L2_BASE_4k    (0x80504C00) //Map 4k Flag synchronization
+#define PERIPHERAL_A_TABLE_L2_BASE_64k (0x80504400) //Map 64k Peripheral #1 0x1C000000 - 0x1C0FFFFF
+#define PERIPHERAL_B_TABLE_L2_BASE_64k (0x80504800) //Map 64k Peripheral #2 0x1C100000 - 0x1C1FFFFF
+
+//--------------------- PERIPHERALS -------------------
+#define PERIPHERAL_A_FAULT (0x00000000 + 0x1c000000) //0x1C000000-0x1C0FFFFF (1M)
+#define PERIPHERAL_B_FAULT (0x00100000 + 0x1c000000) //0x1C100000-0x1C1FFFFF (1M)
+
+//--------------------- SYNC FLAGS --------------------
+#define FLAG_SYNC     0xFFFFF000
+#define F_SYNC_BASE   0xFFF00000  //1M aligned
+
+//Import symbols from linker
+extern uint32_t Image$$VECTORS$$Base;
+extern uint32_t Image$$RW_DATA$$Base;
+extern uint32_t Image$$ZI_DATA$$Base;
+extern uint32_t Image$$TTB$$ZI$$Base;
+
+static uint32_t Sect_Normal;     //outer & inner wb/wa, non-shareable, executable, rw, domain 0, base addr 0
+static uint32_t Sect_Normal_Cod; //outer & inner wb/wa, non-shareable, executable, ro, domain 0, base addr 0
+static uint32_t Sect_Normal_RO;  //as Sect_Normal_Cod, but not executable
+static uint32_t Sect_Normal_RW;  //as Sect_Normal_Cod, but writeable and not executable
+static uint32_t Sect_Device_RO;  //device, non-shareable, non-executable, ro, domain 0, base addr 0
+static uint32_t Sect_Device_RW;  //as Sect_Device_RO, but writeable
+
+/* Define global descriptors */
+static uint32_t Page_L1_4k  = 0x0;  //generic
+static uint32_t Page_L1_64k = 0x0;  //generic
+static uint32_t Page_4k_Device_RW;  //Shared device, not executable, rw, domain 0
+static uint32_t Page_64k_Device_RW; //Shared device, not executable, rw, domain 0
+
+void MMU_CreateTranslationTable(void)
+{
+    mmu_region_attributes_Type region;
+
+    //Create 4GB of faulting entries
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, 0, 4096, DESCRIPTOR_FAULT);
+
+    /*
+     * Generate descriptors. Refer to core_ca.h to get information about attributes
+     *
+     */
+    //Create descriptors for Vectors, RO, RW, ZI sections
+    section_normal(Sect_Normal, region);
+    section_normal_cod(Sect_Normal_Cod, region);
+    section_normal_ro(Sect_Normal_RO, region);
+    section_normal_rw(Sect_Normal_RW, region);
+    //Create descriptors for peripherals
+    section_device_ro(Sect_Device_RO, region);
+    section_device_rw(Sect_Device_RW, region);
+    //Create descriptors for 64k pages
+    page64k_device_rw(Page_L1_64k, Page_64k_Device_RW, region);
+    //Create descriptors for 4k pages
+    page4k_device_rw(Page_L1_4k, Page_4k_Device_RW, region);
+
+
+    /*
+     *  Define MMU flat-map regions and attributes
+     *
+     */
+
+    //Define Image
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$VECTORS$$Base, 1, Sect_Normal_Cod);
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RW_DATA$$Base, 1, Sect_Normal_RW);
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$ZI_DATA$$Base, 1, Sect_Normal_RW);
+
+    //all DRAM executable, rw, cacheable - applications may choose to divide memory into ro executable
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$TTB$$ZI$$Base, 2043, Sect_Normal);
+
+    //--------------------- PERIPHERALS -------------------
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A9_MP_FLASH_BASE0    , 64, Sect_Device_RO);
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A9_MP_FLASH_BASE1    , 64, Sect_Device_RO);
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A9_MP_SRAM_BASE      , 64, Sect_Device_RW);
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A9_MP_VRAM_BASE      , 32, Sect_Device_RW);
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A9_MP_ETHERNET_BASE  , 16, Sect_Device_RW);
+    MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A9_MP_USB_BASE       , 16, Sect_Device_RW);
+
+    // Create (16 * 64k)=1MB faulting entries to cover peripheral range 0x1C000000-0x1C0FFFFF
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, PERIPHERAL_A_FAULT      , 16, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, DESCRIPTOR_FAULT);
+    // Define peripheral range 0x1C000000-0x1C0FFFFF
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_DAP_BASE       ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_SYSTEM_REG_BASE,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_SERIAL_BASE    ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_AACI_BASE      ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_MMCI_BASE      ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_KMI0_BASE      ,  2, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_UART_BASE      ,  4, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_WDT_BASE       ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+
+    // Create (16 * 64k)=1MB faulting entries to cover peripheral range 0x1C100000-0x1C1FFFFF
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, PERIPHERAL_B_FAULT      , 16, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, DESCRIPTOR_FAULT);
+    // Define peripheral range 0x1C100000-0x1C1FFFFF
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_TIMER_BASE     ,  2, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_DVI_BASE       ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_RTC_BASE       ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_UART4_BASE     ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+    MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_CLCD_BASE      ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);
+
+    // Create (256 * 4k)=1MB faulting entries to cover private address space. Needs to be marked as Device memory
+    MMU_TTPage4k (&Image$$TTB$$ZI$$Base, __get_CBAR()            ,256,  Page_L1_4k, (uint32_t *)PRIVATE_TABLE_L2_BASE_4k, DESCRIPTOR_FAULT);
+    // Define private address space entry.
+    MMU_TTPage4k (&Image$$TTB$$ZI$$Base, __get_CBAR()            ,  2,  Page_L1_4k, (uint32_t *)PRIVATE_TABLE_L2_BASE_4k, Page_4k_Device_RW);
+    // Define L2CC entry.  Uncomment if PL310 is present
+    //    MMU_TTPage4k (&Image$$TTB$$ZI$$Base, VE_A9_MP_PL310_BASE     ,  1,  Page_L1_4k, (uint32_t *)PRIVATE_TABLE_L2_BASE_4k, Page_4k_Device_RW);
+
+    // Create (256 * 4k)=1MB faulting entries to synchronization space (Useful if some non-cacheable DMA agent is present in the SoC)
+    MMU_TTPage4k (&Image$$TTB$$ZI$$Base, F_SYNC_BASE , 256, Page_L1_4k, (uint32_t *)SYNC_FLAGS_TABLE_L2_BASE_4k, DESCRIPTOR_FAULT);
+    // Define synchronization space entry.
+    MMU_TTPage4k (&Image$$TTB$$ZI$$Base, FLAG_SYNC   ,   1, Page_L1_4k, (uint32_t *)SYNC_FLAGS_TABLE_L2_BASE_4k, Page_4k_Device_RW);
+
+    /* Set location of level 1 page table
+    ; 31:14 - Translation table base addr (31:14-TTBCR.N, TTBCR.N is 0 out of reset)
+    ; 13:7  - 0x0
+    ; 6     - IRGN[0] 0x0 (Inner WB WA)
+    ; 5     - NOS     0x0 (Non-shared)
+    ; 4:3   - RGN     0x1 (Outer WB WA)
+    ; 2     - IMP     0x0 (Implementation Defined)
+    ; 1     - S       0x0 (Non-shared)
+    ; 0     - IRGN[1] 0x1 (Inner WB WA) */
+    __set_TTBR0(((uint32_t)&Image$$TTB$$ZI$$Base) | 9);
+    __ISB();
+
+    /* Set up domain access control register
+    ; We set domain 0 to Client and all other domains to No Access.
+    ; All translation table entries specify domain 0 */
+    __set_DACR(1);
+    __ISB();
+}
diff --git a/Device/ARM/ARMCA9/Source/system_ARMCA9.c b/Device/ARM/ARMCA9/Source/system_ARMCA9.c
new file mode 100644
index 0000000..e67cee2
--- /dev/null
+++ b/Device/ARM/ARMCA9/Source/system_ARMCA9.c
@@ -0,0 +1,292 @@
+/******************************************************************************
+ * @file     system_ARMCA9.c
+ * @brief    CMSIS Device System Source File for ARM Cortex-A9 Device Series
+ * @version  V1.00
+ * @date     22 Feb 2017
+ *
+ * @note
+ *
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ARMCA9.h>
+#include <stdint.h>
+
+extern void $Super$$main(void);   // Original main(); invoked from $Sub$$main below after interrupt setup
+__asm void __FPU_Enable(void);    // Enables VFP/NEON access; called lazily from CUndefHandler (assembler implementation elsewhere)
+
+// Flag indicates whether inside an ISR, and the depth of nesting.  0 = not in ISR.
+uint32_t IRQNestLevel = 0;
+// Flag used to workaround GIC 390 errata 733075
+uint32_t seen_id0_active = 0;
+
+
+/**
+ * Initialize the memory subsystem.
+ *
+ * @param  none
+ * @return none
+ *
+ * @brief Initialize the memory subsystem, including enabling the cache and BTAC. Requires PL1, so implemented as an SVC in case threads are USR mode.
+ */
+#pragma push
+#pragma arm
+void __svc(1) EnableCaches(void);     // SVC #1: lets USR-mode code request cache enable via a supervisor call
+void __SVC_1(void) {                  // Implementation bound to SVC #1 (EnableCaches)
+
+/* Before enabling the caches, the instruction cache, the data cache, TLB, and BTAC must have been invalidated.
+ * You are not required to invalidate the main TLB, even though it is recommended for safety
+ * reasons. This ensures compatibility with future revisions of the processor. */
+
+//  unsigned int l2_id;
+
+  /* After MMU is enabled and data has been invalidated, enable caches and BTAC */
+  L1C_EnableCaches();
+  L1C_EnableBTAC();
+
+  /* If L2C-310 is present, Invalidate and Enable L2 cache here */
+//  l2_id = L2C_GetID();
+//  if (l2_id)
+//  {
+//     L2C_InvAllByWay();
+//     L2C_Enable();
+//  }
+}
+#pragma pop
+
+IRQHandler IRQTable[] = {  // One handler slot per interrupt ID; 0 = no handler installed
+    0, //IRQ 0
+    0, //IRQ 1
+    0, //IRQ 2
+    0, //IRQ 3
+    0, //IRQ 4
+    0, //IRQ 5
+    0, //IRQ 6
+    0, //IRQ 7
+    0, //IRQ 8
+    0, //IRQ 9
+    0, //IRQ 10
+    0, //IRQ 11
+    0, //IRQ 12
+    0, //IRQ 13
+    0, //IRQ 14
+    0, //IRQ 15
+    0, //IRQ 16
+    0, //IRQ 17
+    0, //IRQ 18
+    0, //IRQ 19
+    0, //IRQ 20
+    0, //IRQ 21
+    0, //IRQ 22
+    0, //IRQ 23
+    0, //IRQ 24
+    0, //IRQ 25
+    0, //IRQ 26
+    0, //IRQ 27
+    0, //IRQ 28
+    0, //IRQ 29
+    0, //IRQ 30
+    0, //IRQ 31
+    0, //IRQ 32
+    0, //IRQ 33
+    0, //IRQ 34
+    0, //IRQ 35
+    0, //IRQ 36
+    0, //IRQ 37
+    0, //IRQ 38
+    0, //IRQ 39
+    0  //IRQ 40
+};
+uint32_t IRQCount = sizeof IRQTable / sizeof IRQTable[0];  // Entry count; element size instead of hard-coded pointer width
+
+uint32_t InterruptHandlerRegister (IRQn_Type irq, IRQHandler handler)  // Install 'handler' for interrupt 'irq'; returns 0 on success, 1 if out of range
+{
+    if (irq < IRQCount) {       // Reject IDs beyond the handler table
+        IRQTable[irq] = handler;
+        return 0;
+    }
+    else {
+        return 1;
+    }
+}
+
+uint32_t InterruptHandlerUnregister (IRQn_Type irq)  // Remove any handler for interrupt 'irq'; returns 0 on success, 1 if out of range
+{
+    if (irq < IRQCount) {       // Reject IDs beyond the handler table
+        IRQTable[irq] = 0;      // 0 marks the slot as unhandled (see IRQ_Handler)
+        return 0;
+    }
+    else {
+        return 1;
+    }
+}
+
+/**
+ * Initialize the system
+ *
+ * @param  none
+ * @return none
+ *
+ * @brief  Setup the microcontroller system.
+ *         Initialize the System.
+ */
+void SystemInit (void)
+{
+/*       do not use global variables because this function is called before
+         reaching pre-main. RW section may be overwritten afterwards.          */
+    GIC_Enable();   // Initialise and enable the Generic Interrupt Controller
+}
+
+void $Sub$$main(void)
+{
+#ifdef __CMSIS_RTOS
+  extern void PendSV_Handler(uint32_t);
+  extern void OS_Tick_Handler(uint32_t);
+  InterruptHandlerRegister(SGI0_IRQn     , PendSV_Handler);   // PendSV is raised via software-generated interrupt SGI0
+  InterruptHandlerRegister(PrivTimer_IRQn, OS_Tick_Handler);  // RTOS tick driven by the private timer interrupt
+  EnableCaches();                                             // Via SVC #1 (see __SVC_1 above)
+#endif
+  
+  $Super$$main(); //Call main
+}
+
+//Fault Status Register (IFSR/DFSR) fault status (FS) encodings, as decoded in CDAbtHandler/CPAbtHandler
+#define FSR_ALIGNMENT_FAULT                  0x01   //DFSR only. Fault on first lookup
+#define FSR_INSTRUCTION_CACHE_MAINTENANCE    0x04   //DFSR only - async/external
+#define FSR_SYNC_EXT_TTB_WALK_FIRST          0x0c   //sync/external
+#define FSR_SYNC_EXT_TTB_WALK_SECOND         0x0e   //sync/external
+#define FSR_SYNC_PARITY_TTB_WALK_FIRST       0x1c   //sync/external
+#define FSR_SYNC_PARITY_TTB_WALK_SECOND      0x1e   //sync/external
+#define FSR_TRANSLATION_FAULT_FIRST          0x05   //MMU Fault - internal
+#define FSR_TRANSLATION_FAULT_SECOND         0x07   //MMU Fault - internal
+#define FSR_ACCESS_FLAG_FAULT_FIRST          0x03   //MMU Fault - internal
+#define FSR_ACCESS_FLAG_FAULT_SECOND         0x06   //MMU Fault - internal
+#define FSR_DOMAIN_FAULT_FIRST               0x09   //MMU Fault - internal
+#define FSR_DOMAIN_FAULT_SECOND              0x0b   //MMU Fault - internal
+#define FSR_PERMISSION_FAULT_FIRST           0x0f   //MMU Fault - internal
+#define FSR_PERMISSION_FAULT_SECOND          0x0d   //MMU Fault - internal
+#define FSR_DEBUG_EVENT                      0x02   //internal
+#define FSR_SYNC_EXT_ABORT                   0x08   //sync/external
+#define FSR_TLB_CONFLICT_ABORT               0x10   //sync/external
+#define FSR_LOCKDOWN                         0x14   //internal
+#define FSR_COPROCESSOR_ABORT                0x1a   //internal
+#define FSR_SYNC_PARITY_ERROR                0x19   //sync/external
+#define FSR_ASYNC_EXTERNAL_ABORT             0x16   //DFSR only - async/external
+#define FSR_ASYNC_PARITY_ERROR               0x18   //DFSR only - async/external
+
+void CDAbtHandler(uint32_t DFSR, uint32_t DFAR, uint32_t LR) {   // Data Abort C handler, called from the DAbt assembly wrapper
+  uint32_t FS = (DFSR & (1 << 10)) >> 6 | (DFSR & 0x0f); //Fault Status: FS[4] = DFSR[10], FS[3:0] = DFSR[3:0]
+
+  switch(FS) {
+    //Synchronous parity errors - retry
+    case FSR_SYNC_PARITY_ERROR:
+    case FSR_SYNC_PARITY_TTB_WALK_FIRST:
+    case FSR_SYNC_PARITY_TTB_WALK_SECOND:
+        return;
+
+    //Your code here. Value in DFAR is invalid for some fault statuses.
+    case FSR_ALIGNMENT_FAULT:
+    case FSR_INSTRUCTION_CACHE_MAINTENANCE:
+    case FSR_SYNC_EXT_TTB_WALK_FIRST:
+    case FSR_SYNC_EXT_TTB_WALK_SECOND:
+    case FSR_TRANSLATION_FAULT_FIRST:
+    case FSR_TRANSLATION_FAULT_SECOND:
+    case FSR_ACCESS_FLAG_FAULT_FIRST:
+    case FSR_ACCESS_FLAG_FAULT_SECOND:
+    case FSR_DOMAIN_FAULT_FIRST:
+    case FSR_DOMAIN_FAULT_SECOND:
+    case FSR_PERMISSION_FAULT_FIRST:
+    case FSR_PERMISSION_FAULT_SECOND:
+    case FSR_DEBUG_EVENT:
+    case FSR_SYNC_EXT_ABORT:
+    case FSR_TLB_CONFLICT_ABORT:
+    case FSR_LOCKDOWN:
+    case FSR_COPROCESSOR_ABORT:
+    case FSR_ASYNC_EXTERNAL_ABORT: //DFAR invalid
+    case FSR_ASYNC_PARITY_ERROR:   //DFAR invalid
+    default:
+      while(1);   // Unhandled data abort: spin forever
+  }
+}
+
+void CPAbtHandler(uint32_t IFSR, uint32_t IFAR, uint32_t LR) {   // Prefetch Abort C handler, called from the PAbt assembly wrapper
+  uint32_t FS = (IFSR & (1 << 10)) >> 6 | (IFSR & 0x0f); //Fault Status: FS[4] = IFSR[10], FS[3:0] = IFSR[3:0]
+
+  switch(FS) {
+    //Synchronous parity errors - retry
+    case FSR_SYNC_PARITY_ERROR:
+    case FSR_SYNC_PARITY_TTB_WALK_FIRST:
+    case FSR_SYNC_PARITY_TTB_WALK_SECOND:
+      return;
+
+    //Your code here. Value in IFAR is invalid for some fault statuses.
+    case FSR_SYNC_EXT_TTB_WALK_FIRST:
+    case FSR_SYNC_EXT_TTB_WALK_SECOND:
+    case FSR_TRANSLATION_FAULT_FIRST:
+    case FSR_TRANSLATION_FAULT_SECOND:
+    case FSR_ACCESS_FLAG_FAULT_FIRST:
+    case FSR_ACCESS_FLAG_FAULT_SECOND:
+    case FSR_DOMAIN_FAULT_FIRST:
+    case FSR_DOMAIN_FAULT_SECOND:
+    case FSR_PERMISSION_FAULT_FIRST:
+    case FSR_PERMISSION_FAULT_SECOND:
+    case FSR_DEBUG_EVENT: //IFAR invalid
+    case FSR_SYNC_EXT_ABORT:
+    case FSR_TLB_CONFLICT_ABORT:
+    case FSR_LOCKDOWN:
+    case FSR_COPROCESSOR_ABORT:
+    default:
+      while(1);   // Unhandled prefetch abort: spin forever
+  }
+}
+
+//returns amount to decrement lr by
+//this will be 0 when we have emulated the instruction and want to execute the next instruction
+//this will be 2 when we have performed some maintenance and want to retry the instruction in Thumb (state == 2)
+//this will be 4 when we have performed some maintenance and want to retry the instruction in ARM   (state == 4)
+uint32_t CUndefHandler(uint32_t opcode, uint32_t state, uint32_t LR) {
+  const int THUMB = 2;
+  const int ARM = 4;
+  //Lazy VFP/NEON initialisation and switching
+
+  // (ARM ARM section A7.5) VFP data processing instruction?
+  // (ARM ARM section A7.6) VFP/NEON register load/store instruction?
+  // (ARM ARM section A7.8) VFP/NEON register data transfer instruction?
+  // (ARM ARM section A7.9) VFP/NEON 64-bit register data transfer instruction?
+  if ((state == ARM   && ((opcode & 0x0C000000) >> 26 == 0x03)) ||
+      (state == THUMB && ((opcode & 0xEC000000) >> 26 == 0x3B))) {
+    if (((opcode & 0x00000E00) >> 9) == 5) {   // Coprocessor field 101x = CP10/CP11 (VFP/NEON)
+      __FPU_Enable();                          // Lazily enable the FPU
+      return state;                            // Retry the faulting instruction (2 = Thumb, 4 = ARM)
+    }
+  }
+
+  // (ARM ARM section A7.4) NEON data processing instruction?
+  if ((state == ARM   && ((opcode & 0xFE000000) >> 24 == 0xF2)) ||
+      (state == THUMB && ((opcode & 0xEF000000) >> 24 == 0xEF)) ||
+      // (ARM ARM section A7.7) NEON load/store instruction?
+      (state == ARM   && ((opcode >> 24) == 0xF4)) ||
+      (state == THUMB && ((opcode >> 24) == 0xF9))) {
+    __FPU_Enable();   // Lazily enable the FPU for NEON use
+    return state;     // Retry the faulting instruction
+  }
+
+  //Add code here for other Undef cases
+  while(1);   // Unhandled undefined instruction: spin forever
+}