Merge changes from topic "revert-14286-uart_segregation-VURJFOWMTM" into integration
* changes:
Revert "feat(sgi): deviate from arm css common uart related defi..."
Revert "feat(sgi): route TF-A logs via secure uart"
Revert "feat(sgi): add page table translation entry for secure uart"
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index 9401811..b7d1168 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -29,6 +29,10 @@
platform contains at least 1 CPU that requires dynamic mitigation.
Defaults to 0.
+- ``WORKAROUND_CVE_2022_23960``: Enables mitigation for `CVE-2022-23960`_.
+ This build option should be set to 1 if the target platform contains at
+ least 1 CPU that requires this mitigation. Defaults to 1.
+
.. _arm_cpu_macros_errata_workarounds:
CPU Errata Workarounds
@@ -585,6 +589,7 @@
.. _CVE-2017-5715: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5715
.. _CVE-2018-3639: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-3639
+.. _CVE-2022-23960: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-23960
.. _Cortex-A53 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm048406/index.html
.. _Cortex-A57 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm049219/index.html
.. _Cortex-A72 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm012079/index.html
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
index 009eb90..10b0a0b 100644
--- a/include/arch/aarch64/arch_helpers.h
+++ b/include/arch/aarch64/arch_helpers.h
@@ -224,6 +224,7 @@
DEFINE_SYSOP_PARAM_FUNC(xpaci)
void flush_dcache_range(uintptr_t addr, size_t size);
+void flush_dcache_to_popa_range(uintptr_t addr, size_t size);
void clean_dcache_range(uintptr_t addr, size_t size);
void inv_dcache_range(uintptr_t addr, size_t size);
bool is_dcache_enabled(void);
@@ -274,8 +275,10 @@
DEFINE_SYSOP_TYPE_FUNC(dmb, st)
DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, osh)
DEFINE_SYSOP_TYPE_FUNC(dsb, nsh)
DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dsb, oshst)
DEFINE_SYSOP_TYPE_FUNC(dmb, oshld)
DEFINE_SYSOP_TYPE_FUNC(dmb, oshst)
DEFINE_SYSOP_TYPE_FUNC(dmb, osh)
@@ -610,14 +613,13 @@
}
/*
- * Invalidate cached copies of GPT entries
- * from TLBs by physical address
+ * Invalidate TLBs of GPT entries by physical address, last level.
*
* @pa: the starting address for the range
* of invalidation
* @size: size of the range of invalidation
*/
-void gpt_tlbi_by_pa(uint64_t pa, size_t size);
+void gpt_tlbi_by_pa_ll(uint64_t pa, size_t size);
/* Previously defined accessor functions with incomplete register names */
diff --git a/include/lib/cpus/aarch64/cortex_a710.h b/include/lib/cpus/aarch64/cortex_a710.h
index ec62421..09614ee 100644
--- a/include/lib/cpus/aarch64/cortex_a710.h
+++ b/include/lib/cpus/aarch64/cortex_a710.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,6 +9,9 @@
#define CORTEX_A710_MIDR U(0x410FD470)
+/* Cortex-A710 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A710_BHB_LOOP_COUNT U(32)
+
/*******************************************************************************
* CPU Extended Control register specific definitions
******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_a77.h b/include/lib/cpus/aarch64/cortex_a77.h
index 5753e90..4a87168 100644
--- a/include/lib/cpus/aarch64/cortex_a77.h
+++ b/include/lib/cpus/aarch64/cortex_a77.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -12,6 +12,9 @@
/* Cortex-A77 MIDR */
#define CORTEX_A77_MIDR U(0x410FD0D0)
+/* Cortex-A77 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A77_BHB_LOOP_COUNT U(24)
+
/*******************************************************************************
* CPU Extended Control register specific definitions.
******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_a78.h b/include/lib/cpus/aarch64/cortex_a78.h
index 42b0833..f3cb39f 100644
--- a/include/lib/cpus/aarch64/cortex_a78.h
+++ b/include/lib/cpus/aarch64/cortex_a78.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,6 +11,9 @@
#define CORTEX_A78_MIDR U(0x410FD410)
+/* Cortex-A78 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A78_BHB_LOOP_COUNT U(32)
+
/*******************************************************************************
* CPU Extended Control register specific definitions.
******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_x2.h b/include/lib/cpus/aarch64/cortex_x2.h
index e3d0fa9..62530e2 100644
--- a/include/lib/cpus/aarch64/cortex_x2.h
+++ b/include/lib/cpus/aarch64/cortex_x2.h
@@ -9,6 +9,9 @@
#define CORTEX_X2_MIDR U(0x410FD480)
+/* Cortex-X2 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_X2_BHB_LOOP_COUNT U(32)
+
/*******************************************************************************
* CPU Extended Control register specific definitions
******************************************************************************/
diff --git a/include/lib/cpus/aarch64/neoverse_n1.h b/include/lib/cpus/aarch64/neoverse_n1.h
index b50befa..b6b8d8d 100644
--- a/include/lib/cpus/aarch64/neoverse_n1.h
+++ b/include/lib/cpus/aarch64/neoverse_n1.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,58 +10,61 @@
#include <lib/utils_def.h>
/* Neoverse N1 MIDR for revision 0 */
-#define NEOVERSE_N1_MIDR U(0x410fd0c0)
+#define NEOVERSE_N1_MIDR U(0x410fd0c0)
+
+/* Neoverse N1 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_N1_BHB_LOOP_COUNT U(24)
/* Exception Syndrome register EC code for IC Trap */
-#define NEOVERSE_N1_EC_IC_TRAP U(0x1f)
+#define NEOVERSE_N1_EC_IC_TRAP U(0x1f)
/*******************************************************************************
* CPU Power Control register specific definitions.
******************************************************************************/
-#define NEOVERSE_N1_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define NEOVERSE_N1_CPUPWRCTLR_EL1 S3_0_C15_C2_7
/* Definitions of register field mask in NEOVERSE_N1_CPUPWRCTLR_EL1 */
-#define NEOVERSE_N1_CORE_PWRDN_EN_MASK U(0x1)
+#define NEOVERSE_N1_CORE_PWRDN_EN_MASK U(0x1)
-#define NEOVERSE_N1_ACTLR_AMEN_BIT (U(1) << 4)
+#define NEOVERSE_N1_ACTLR_AMEN_BIT (U(1) << 4)
-#define NEOVERSE_N1_AMU_NR_COUNTERS U(5)
-#define NEOVERSE_N1_AMU_GROUP0_MASK U(0x1f)
+#define NEOVERSE_N1_AMU_NR_COUNTERS U(5)
+#define NEOVERSE_N1_AMU_GROUP0_MASK U(0x1f)
/*******************************************************************************
* CPU Extended Control register specific definitions.
******************************************************************************/
-#define NEOVERSE_N1_CPUECTLR_EL1 S3_0_C15_C1_4
+#define NEOVERSE_N1_CPUECTLR_EL1 S3_0_C15_C1_4
-#define NEOVERSE_N1_WS_THR_L2_MASK (ULL(3) << 24)
+#define NEOVERSE_N1_WS_THR_L2_MASK (ULL(3) << 24)
#define NEOVERSE_N1_CPUECTLR_EL1_MM_TLBPF_DIS_BIT (ULL(1) << 51)
#define NEOVERSE_N1_CPUECTLR_EL1_EXTLLC_BIT (ULL(1) << 0)
/*******************************************************************************
* CPU Auxiliary Control register specific definitions.
******************************************************************************/
-#define NEOVERSE_N1_CPUACTLR_EL1 S3_0_C15_C1_0
+#define NEOVERSE_N1_CPUACTLR_EL1 S3_0_C15_C1_0
-#define NEOVERSE_N1_CPUACTLR_EL1_BIT_6 (ULL(1) << 6)
-#define NEOVERSE_N1_CPUACTLR_EL1_BIT_13 (ULL(1) << 13)
+#define NEOVERSE_N1_CPUACTLR_EL1_BIT_6 (ULL(1) << 6)
+#define NEOVERSE_N1_CPUACTLR_EL1_BIT_13 (ULL(1) << 13)
-#define NEOVERSE_N1_CPUACTLR2_EL1 S3_0_C15_C1_1
+#define NEOVERSE_N1_CPUACTLR2_EL1 S3_0_C15_C1_1
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_0 (ULL(1) << 0)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_2 (ULL(1) << 2)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_11 (ULL(1) << 11)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_15 (ULL(1) << 15)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_16 (ULL(1) << 16)
-#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_59 (ULL(1) << 59)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_0 (ULL(1) << 0)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_2 (ULL(1) << 2)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_11 (ULL(1) << 11)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_15 (ULL(1) << 15)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_16 (ULL(1) << 16)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_59 (ULL(1) << 59)
-#define NEOVERSE_N1_CPUACTLR3_EL1 S3_0_C15_C1_2
+#define NEOVERSE_N1_CPUACTLR3_EL1 S3_0_C15_C1_2
-#define NEOVERSE_N1_CPUACTLR3_EL1_BIT_10 (ULL(1) << 10)
+#define NEOVERSE_N1_CPUACTLR3_EL1_BIT_10 (ULL(1) << 10)
/* Instruction patching registers */
-#define CPUPSELR_EL3 S3_6_C15_C8_0
-#define CPUPCR_EL3 S3_6_C15_C8_1
-#define CPUPOR_EL3 S3_6_C15_C8_2
-#define CPUPMR_EL3 S3_6_C15_C8_3
+#define CPUPSELR_EL3 S3_6_C15_C8_0
+#define CPUPCR_EL3 S3_6_C15_C8_1
+#define CPUPOR_EL3 S3_6_C15_C8_2
+#define CPUPMR_EL3 S3_6_C15_C8_3
#endif /* NEOVERSE_N1_H */
diff --git a/include/lib/cpus/aarch64/neoverse_n2.h b/include/lib/cpus/aarch64/neoverse_n2.h
index a1e676e..0452b39 100644
--- a/include/lib/cpus/aarch64/neoverse_n2.h
+++ b/include/lib/cpus/aarch64/neoverse_n2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,6 +10,9 @@
/* Neoverse N2 ID register for revision r0p0 */
#define NEOVERSE_N2_MIDR U(0x410FD490)
+/* Neoverse N2 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_N2_BHB_LOOP_COUNT U(32)
+
/*******************************************************************************
* CPU Power control register
******************************************************************************/
diff --git a/include/lib/cpus/aarch64/neoverse_v1.h b/include/lib/cpus/aarch64/neoverse_v1.h
index e43c907..a904c04 100644
--- a/include/lib/cpus/aarch64/neoverse_v1.h
+++ b/include/lib/cpus/aarch64/neoverse_v1.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,6 +9,9 @@
#define NEOVERSE_V1_MIDR U(0x410FD400)
+/* Neoverse V1 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_V1_BHB_LOOP_COUNT U(32)
+
/*******************************************************************************
* CPU Extended Control register specific definitions.
******************************************************************************/
diff --git a/include/lib/gpt_rme/gpt_rme.h b/include/lib/gpt_rme/gpt_rme.h
index 379b915..94a88b0 100644
--- a/include/lib/gpt_rme/gpt_rme.h
+++ b/include/lib/gpt_rme/gpt_rme.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -35,6 +35,13 @@
#define GPT_GPI_ANY U(0xF)
#define GPT_GPI_VAL_MASK UL(0xF)
+#define GPT_NSE_SECURE U(0b00)
+#define GPT_NSE_ROOT U(0b01)
+#define GPT_NSE_NS U(0b10)
+#define GPT_NSE_REALM U(0b11)
+
+#define GPT_NSE_SHIFT U(62)
+
/* PAS attribute GPI definitions. */
#define GPT_PAS_ATTR_GPI_SHIFT U(0)
#define GPT_PAS_ATTR_GPI_MASK U(0xF)
@@ -262,15 +269,12 @@
* base: Base address of the region to transition, must be aligned to granule
* size.
* size: Size of region to transition, must be aligned to granule size.
- * src_sec_state: Security state of the caller.
- * target_pas: Target PAS of the specified memory region.
+ * src_sec_state: Security state of the originating SMC invoking the API.
*
* Return
* Negative Linux error code in the event of a failure, 0 for success.
*/
-int gpt_transition_pas(uint64_t base,
- size_t size,
- unsigned int src_sec_state,
- unsigned int target_pas);
+int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state);
+int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state);
#endif /* GPT_RME_H */
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
index d1f3847..6faf545 100644
--- a/lib/aarch64/cache_helpers.S
+++ b/lib/aarch64/cache_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,6 +8,7 @@
#include <asm_macros.S>
.globl flush_dcache_range
+ .globl flush_dcache_to_popa_range
.globl clean_dcache_range
.globl inv_dcache_range
.globl dcsw_op_louis
@@ -63,6 +64,35 @@
endfunc inv_dcache_range
+ /*
+ * On implementations with FEAT_MTE2, Root firmware must issue
+ * DC_CIGDPAPA instead of DC_CIPAPA, in order to additionally
+ * clean and invalidate Allocation Tags associated with the
+ * affected locations.
+ *
+ * ------------------------------------------
+ * Clean+Invalidate by PA to PoPA
+ * over the range [addr, addr + size).
+ * 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func flush_dcache_to_popa_range
+ /* Exit early if size is zero */
+ cbz x1, exit_loop_dc_cipapa
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x0, x0, x3
+ add x1, x1, x0
+loop_dc_cipapa:
+ sys #6, c7, c14, #1, x0 /* DC CIPAPA, <Xt> */
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo loop_dc_cipapa
+ dsb osh
+exit_loop_dc_cipapa:
+ ret
+endfunc flush_dcache_to_popa_range
+
/* ---------------------------------------------------------------
* Data cache operations by set/way to the level specified
*
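
For readers less fluent in AArch64 assembly, the new routine above reduces to
the following C sketch. Here dc_cipapa() is a hypothetical stand-in for the
"sys #6, c7, c14, #1" instruction, and the fixed 64-byte line size stands in
for the value that the dcache_line_size macro derives from CTR_EL0:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical model of "sys #6, c7, c14, #1, x0" (DC CIPAPA). */
    static void dc_cipapa(uintptr_t pa)
    {
        (void)pa; /* no-op in this model */
    }

    static void flush_dcache_to_popa_range_model(uintptr_t addr, size_t size)
    {
        const size_t line = 64U;  /* from CTR_EL0 in the real code */
        uintptr_t end;

        if (size == 0U) {         /* matches the "cbz x1" early exit */
            return;
        }

        addr &= ~(uintptr_t)(line - 1U);  /* "bic x0, x0, x3" */
        end = addr + size;                /* "add x1, x1, x0" */
        for (; addr < end; addr += line) { /* "cmp"/"b.lo" loop */
            dc_cipapa(addr);
        }
        /* "dsb osh" completes the maintenance before returning */
    }
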
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index 01531ca..e8110b0 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -15,7 +15,7 @@
.globl zero_normalmem
.globl zeromem
.globl memcpy16
- .globl gpt_tlbi_by_pa
+ .globl gpt_tlbi_by_pa_ll
.globl disable_mmu_el1
.globl disable_mmu_el3
@@ -599,7 +599,7 @@
* TODO: Currently only supports size of 4KB,
* support other sizes as well.
*/
-func gpt_tlbi_by_pa
+func gpt_tlbi_by_pa_ll
#if ENABLE_ASSERTIONS
cmp x1, #PAGE_SIZE_4KB
ASM_ASSERT(eq)
@@ -607,7 +607,7 @@
ASM_ASSERT(eq)
#endif
lsr x0, x0, #FOUR_KB_SHIFT /* 4KB size encoding is zero */
- sys #6, c8, c4, #3, x0 /* TLBI RPAOS, <Xt> */
+ sys #6, c8, c4, #7, x0 /* TLBI RPALOS, <Xt> (last level only) */
dsb sy
ret
-endfunc gpt_tlbi_by_pa
+endfunc gpt_tlbi_by_pa_ll
diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S
index 4d5d949..aea62ae 100644
--- a/lib/cpus/aarch64/cortex_a710.S
+++ b/lib/cpus/aarch64/cortex_a710.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,6 +10,7 @@
#include <cortex_a710.h>
#include <cpu_macros.S>
#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
#error "Cortex A710 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A710_BHB_LOOP_COUNT, cortex_a710
+#endif /* WORKAROUND_CVE_2022_23960 */
+
/* --------------------------------------------------
* Errata Workaround for Cortex-A710 Erratum 1987031.
* This applies to revision r0p0, r1p0 and r2p0 of Cortex-A710. It is still
@@ -305,6 +310,15 @@
b cpu_rev_var_ls
endfunc check_errata_2282622
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
/* ----------------------------------------------------
* HW will do the cache maintenance while powering down
* ----------------------------------------------------
@@ -344,6 +358,7 @@
report_errata ERRATA_A710_2267065, cortex_a710, 2267065
report_errata ERRATA_A710_2136059, cortex_a710, 2136059
report_errata ERRATA_A710_2282622, cortex_a710, 2282622
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a710, cve_2022_23960
ldp x8, x30, [sp], #16
ret
@@ -404,6 +419,15 @@
bl errata_a710_2282622_wa
#endif
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-A710 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_a710
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
isb
ret x19
endfunc cortex_a710_reset_func
diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S
index 8c8f4d3..e7365e2 100644
--- a/lib/cpus/aarch64/cortex_a77.S
+++ b/lib/cpus/aarch64/cortex_a77.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,6 +10,7 @@
#include <cortex_a77.h>
#include <cpu_macros.S>
#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
#error "Cortex-A77 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A77_BHB_LOOP_COUNT, cortex_a77
+#endif /* WORKAROUND_CVE_2022_23960 */
+
/* --------------------------------------------------
* Errata Workaround for Cortex A77 Errata #1508412.
* This applies only to revision <= r1p0 of Cortex A77.
@@ -194,6 +199,15 @@
b cpu_rev_var_ls
endfunc check_errata_1791578
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A77.
* Shall clobber: x0-x19
@@ -224,6 +238,16 @@
bl errata_a77_1791578_wa
#endif
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-A77 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_a77
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
ret x19
endfunc cortex_a77_reset_func
@@ -261,6 +285,7 @@
report_errata ERRATA_A77_1925769, cortex_a77, 1925769
report_errata ERRATA_A77_1946167, cortex_a77, 1946167
report_errata ERRATA_A77_1791578, cortex_a77, 1791578
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a77, cve_2022_23960
ldp x8, x30, [sp], #16
ret
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
index a1288ba..1a6f848 100644
--- a/lib/cpus/aarch64/cortex_a78.S
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, ARM Limited. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,12 +10,16 @@
#include <cortex_a78.h>
#include <cpu_macros.S>
#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
#error "cortex_a78 must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A78_BHB_LOOP_COUNT, cortex_a78
+#endif /* WORKAROUND_CVE_2022_23960 */
/* --------------------------------------------------
* Errata Workaround for A78 Erratum 1688305.
@@ -263,6 +267,15 @@
b cpu_rev_var_range
endfunc check_errata_2242635
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A78
* -------------------------------------------------
@@ -327,6 +340,15 @@
msr CPUAMCNTENSET1_EL0, x0
#endif
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-A78 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_a78
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
isb
ret x19
endfunc cortex_a78_reset_func
@@ -368,6 +390,7 @@
report_errata ERRATA_A78_1952683, cortex_a78, 1952683
report_errata ERRATA_A78_2132060, cortex_a78, 2132060
report_errata ERRATA_A78_2242635, cortex_a78, 2242635
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a78, cve_2022_23960
ldp x8, x30, [sp], #16
ret
diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S
index 2ecfbbb..9586a5b 100644
--- a/lib/cpus/aarch64/cortex_x2.S
+++ b/lib/cpus/aarch64/cortex_x2.S
@@ -10,6 +10,7 @@
#include <cortex_x2.h>
#include <cpu_macros.S>
#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
#error "Cortex X2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_X2_BHB_LOOP_COUNT, cortex_x2
+#endif /* WORKAROUND_CVE_2022_23960 */
+
/* --------------------------------------------------
* Errata Workaround for Cortex X2 Errata #2002765.
* This applies to revisions r0p0, r1p0, and r2p0 and
@@ -222,6 +227,16 @@
mov x1, #0x20
b cpu_rev_var_ls
endfunc check_errata_2216384
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
/* ----------------------------------------------------
* HW will do the cache maintenance while powering down
* ----------------------------------------------------
@@ -258,6 +273,7 @@
report_errata ERRATA_X2_2017096, cortex_x2, 2017096
report_errata ERRATA_X2_2081180, cortex_x2, 2081180
report_errata ERRATA_X2_2216384, cortex_x2, 2216384
+ report_errata WORKAROUND_CVE_2022_23960, cortex_x2, cve_2022_23960
ldp x8, x30, [sp], #16
ret
@@ -305,6 +321,16 @@
bl errata_x2_2216384_wa
#endif
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-X2 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_x2
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
ret x19
endfunc cortex_x2_reset_func
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
index 9c97cf6..b75b0c1 100644
--- a/lib/cpus/aarch64/neoverse_n1.S
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,8 +8,8 @@
#include <asm_macros.S>
#include <cpuamu.h>
#include <cpu_macros.S>
-#include <context.h>
#include <neoverse_n1.h>
+#include "wa_cve_2022_23960_bhb_vector.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@@ -23,6 +23,10 @@
.global neoverse_n1_errata_ic_trap_handler
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table NEOVERSE_N1_BHB_LOOP_COUNT, neoverse_n1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
/* --------------------------------------------------
* Errata Workaround for Neoverse N1 Erratum 1043202.
* This applies to revision r0p0 and r1p0 of Neoverse N1.
@@ -464,6 +468,15 @@
b cpu_rev_var_range
endfunc check_errata_1946160
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
func neoverse_n1_reset_func
mov x19, x30
@@ -575,6 +588,15 @@
bl errata_dsu_936184_wa
#endif
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Neoverse-N1 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_neoverse_n1
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
isb
ret x19
endfunc neoverse_n1_reset_func
@@ -624,6 +646,7 @@
report_errata ERRATA_N1_1868343, neoverse_n1, 1868343
report_errata ERRATA_N1_1946160, neoverse_n1, 1946160
report_errata ERRATA_DSU_936184, neoverse_n1, dsu_936184
+ report_errata WORKAROUND_CVE_2022_23960, neoverse_n1, cve_2022_23960
ldp x8, x30, [sp], #16
ret
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
index 621aded..b93f2a6 100644
--- a/lib/cpus/aarch64/neoverse_n2.S
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,6 +8,7 @@
#include <asm_macros.S>
#include <cpu_macros.S>
#include <neoverse_n2.h>
+#include "wa_cve_2022_23960_bhb_vector.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@@ -19,6 +20,10 @@
#error "Neoverse-N2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table NEOVERSE_N2_BHB_LOOP_COUNT, neoverse_n2
+#endif /* WORKAROUND_CVE_2022_23960 */
+
/* --------------------------------------------------
* Errata Workaround for Neoverse N2 Erratum 2002655.
* This applies to revision r0p0 of Neoverse N2. It is still open.
@@ -333,6 +338,15 @@
b cpu_rev_var_ls
endfunc check_errata_2280757
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
/* -------------------------------------------
* The CPU Ops reset function for Neoverse N2.
* -------------------------------------------
@@ -428,6 +442,15 @@
bl errata_n2_2002655_wa
#endif
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Neoverse-N2 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_neoverse_n2
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
isb
ret x19
endfunc neoverse_n2_reset_func
@@ -469,6 +492,7 @@
report_errata ERRATA_N2_2138958, neoverse_n2, 2138958
report_errata ERRATA_N2_2242400, neoverse_n2, 2242400
report_errata ERRATA_N2_2280757, neoverse_n2, 2280757
+ report_errata WORKAROUND_CVE_2022_23960, neoverse_n2, cve_2022_23960
ldp x8, x30, [sp], #16
ret
diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S
index 62a7a30..6adb3a8 100644
--- a/lib/cpus/aarch64/neoverse_v1.S
+++ b/lib/cpus/aarch64/neoverse_v1.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,6 +10,7 @@
#include <neoverse_v1.h>
#include <cpu_macros.S>
#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
#error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table NEOVERSE_V1_BHB_LOOP_COUNT, neoverse_v1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
/* --------------------------------------------------
* Errata Workaround for Neoverse V1 Errata #1774420.
* This applies to revisions r0p0 and r1p0, fixed in r1p1.
@@ -325,6 +330,15 @@
b cpu_rev_var_range
endfunc check_errata_2216392
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
/* ---------------------------------------------
* HW will do the cache maintenance while powering down
* ---------------------------------------------
@@ -364,6 +378,7 @@
report_errata ERRATA_V1_2139242, neoverse_v1, 2139242
report_errata ERRATA_V1_2108267, neoverse_v1, 2108267
report_errata ERRATA_V1_2216392, neoverse_v1, 2216392
+ report_errata WORKAROUND_CVE_2022_23960, neoverse_v1, cve_2022_23960
ldp x8, x30, [sp], #16
ret
@@ -422,6 +437,16 @@
bl errata_neoverse_v1_2216392_wa
#endif
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Neoverse-V1 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_neoverse_v1
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
ret x19
endfunc neoverse_v1_reset_func
diff --git a/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S b/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S
new file mode 100644
index 0000000..e0e41cc
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <context.h>
+
+#if WORKAROUND_CVE_2022_23960
+ /*
+ * This macro applies the mitigation for CVE-2022-23960. It
+ * saves x2-x3 to the CPU context and restores them afterwards;
+ * SP must point to the CPU context.
+ */
+ .macro apply_cve_2022_23960_bhb_wa _bhb_loop_count
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+
+ /* CVE-BHB-NUM loop count */
+ mov x2, \_bhb_loop_count
+
+1:
+ /* "b pc+4": the taken branch at the core of the workaround */
+ b 2f
+2:
+ subs x2, x2, #1
+ bne 1b
+ dsb sy
+ isb
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ .endm
+#endif /* WORKAROUND_CVE_2022_23960 */
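
The loop body above is deliberately trivial: each iteration executes the
taken "b 2f" branch, and _bhb_loop_count iterations displace any
attacker-trained entries from the branch history buffer. A C-shaped model of
the control flow, purely illustrative since the mitigation must run as this
exact assembly sequence on exception entry:

    /* Illustrative model of apply_cve_2022_23960_bhb_wa. */
    static void bhb_clearing_loop_model(volatile unsigned int count)
    {
        /* x2/x3 are preserved via the CPU context in the real macro. */
        while (count-- != 0U) {
            /* "b 2f; 2:" - one taken branch per iteration. */
        }
        /* "dsb sy; isb" - synchronize before the vector body runs. */
    }
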
diff --git a/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S b/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S
new file mode 100644
index 0000000..220fa11
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <services/arm_arch_svc.h>
+#include "wa_cve_2022_23960_bhb.S"
+
+ /*
+ * This macro instantiates a per-CPU copy of the vector table used
+ * in the mitigation for CVE-2022-23960.
+ */
+ .macro wa_cve_2022_23960_bhb_vector_table _bhb_loop_count, _cpu
+
+ .globl wa_cve_vbar_\_cpu
+
+vector_base wa_cve_vbar_\_cpu
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry bhb_sync_exception_sp_el0_\_cpu
+ b sync_exception_sp_el0
+end_vector_entry bhb_sync_exception_sp_el0_\_cpu
+
+vector_entry bhb_irq_sp_el0_\_cpu
+ b irq_sp_el0
+end_vector_entry bhb_irq_sp_el0_\_cpu
+
+vector_entry bhb_fiq_sp_el0_\_cpu
+ b fiq_sp_el0
+end_vector_entry bhb_fiq_sp_el0_\_cpu
+
+vector_entry bhb_serror_sp_el0_\_cpu
+ b serror_sp_el0
+end_vector_entry bhb_serror_sp_el0_\_cpu
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry bhb_sync_exception_sp_elx_\_cpu
+ b sync_exception_sp_elx
+end_vector_entry bhb_sync_exception_sp_elx_\_cpu
+
+vector_entry bhb_irq_sp_elx_\_cpu
+ b irq_sp_elx
+end_vector_entry bhb_irq_sp_elx_\_cpu
+
+vector_entry bhb_fiq_sp_elx_\_cpu
+ b fiq_sp_elx
+end_vector_entry bhb_fiq_sp_elx_\_cpu
+
+vector_entry bhb_serror_sp_elx_\_cpu
+ b serror_sp_elx
+end_vector_entry bhb_serror_sp_elx_\_cpu
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry bhb_sync_exception_aarch64_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b sync_exception_aarch64
+end_vector_entry bhb_sync_exception_aarch64_\_cpu
+
+vector_entry bhb_irq_aarch64_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b irq_aarch64
+end_vector_entry bhb_irq_aarch64_\_cpu
+
+vector_entry bhb_fiq_aarch64_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b fiq_aarch64
+end_vector_entry bhb_fiq_aarch64_\_cpu
+
+vector_entry bhb_serror_aarch64_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b serror_aarch64
+end_vector_entry bhb_serror_aarch64_\_cpu
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry bhb_sync_exception_aarch32_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b sync_exception_aarch32
+end_vector_entry bhb_sync_exception_aarch32_\_cpu
+
+vector_entry bhb_irq_aarch32_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b irq_aarch32
+end_vector_entry bhb_irq_aarch32_\_cpu
+
+vector_entry bhb_fiq_aarch32_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b fiq_aarch32
+end_vector_entry bhb_fiq_aarch32_\_cpu
+
+vector_entry bhb_serror_aarch32_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b serror_aarch32
+end_vector_entry bhb_serror_aarch32_\_cpu
+ .endm
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index e812c07..c7630fb 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -24,6 +24,7 @@
WORKAROUND_CVE_2017_5715 ?=1
WORKAROUND_CVE_2018_3639 ?=1
DYNAMIC_WORKAROUND_CVE_2018_3639 ?=0
+WORKAROUND_CVE_2022_23960 ?=1
# Flags to indicate internal or external Last level cache
# By default internal
@@ -56,6 +57,10 @@
$(eval $(call assert_boolean,DYNAMIC_WORKAROUND_CVE_2018_3639))
$(eval $(call add_define,DYNAMIC_WORKAROUND_CVE_2018_3639))
+# Process WORKAROUND_CVE_2022_23960 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2022_23960))
+$(eval $(call add_define,WORKAROUND_CVE_2022_23960))
+
$(eval $(call assert_boolean,NEOVERSE_Nx_EXTERNAL_LLC))
$(eval $(call add_define,NEOVERSE_Nx_EXTERNAL_LLC))
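
Since the flag is assigned with "?=", platforms with no affected cores can
override the default on the build command line, for example (platform name
illustrative):

    make PLAT=fvp WORKAROUND_CVE_2022_23960=0 all
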
diff --git a/lib/gpt_rme/gpt_rme.c b/lib/gpt_rme/gpt_rme.c
index e424fe2..d6fbc04 100644
--- a/lib/gpt_rme/gpt_rme.c
+++ b/lib/gpt_rme/gpt_rme.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -95,9 +95,8 @@
if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
return true;
- } else {
- return false;
}
+ return false;
}
/*
@@ -117,9 +116,8 @@
{
if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) {
return true;
- } else {
- return false;
}
+ return false;
}
/*
@@ -434,14 +432,14 @@
gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));
/* Start index of this region in L0 GPTs */
- idx = pas->base_pa >> GPT_L0_IDX_SHIFT;
+ idx = GPT_L0_IDX(pas->base_pa);
/*
* Determine number of L0 GPT descriptors covered by
* this PAS region and use the count to populate these
* descriptors.
*/
- end_idx = (pas->base_pa + pas->size) >> GPT_L0_IDX_SHIFT;
+ end_idx = GPT_L0_IDX(pas->base_pa + pas->size);
/* Generate the needed block descriptors. */
for (; idx < end_idx; idx++) {
@@ -471,8 +469,8 @@
uintptr_t cur_idx;
uintptr_t end_idx;
- cur_idx = cur_pa >> GPT_L0_IDX_SHIFT;
- end_idx = end_pa >> GPT_L0_IDX_SHIFT;
+ cur_idx = GPT_L0_IDX(cur_pa);
+ end_idx = GPT_L0_IDX(end_pa);
assert(cur_idx <= end_idx);
@@ -770,7 +768,7 @@
/* Validate other parameters. */
ret = gpt_validate_l0_params(pps, l0_mem_base, l0_mem_size);
- if (ret < 0) {
+ if (ret != 0) {
return ret;
}
@@ -849,7 +847,7 @@
if (l1_gpt_cnt > 0) {
ret = gpt_validate_l1_params(l1_mem_base, l1_mem_size,
l1_gpt_cnt);
- if (ret < 0) {
+ if (ret != 0) {
return ret;
}
@@ -958,55 +956,170 @@
static spinlock_t gpt_lock;
/*
- * Check if caller is allowed to transition a PAS.
+ * Helper to write the GPI value (target_pas << gpi_shift) into the L1
+ * descriptor at the given index of gpt_l1_addr.
+ */
+static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
+ unsigned int gpi_shift, unsigned int idx,
+ unsigned int target_pas)
+{
+ *gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
+ *gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
+ gpt_l1_addr[idx] = *gpt_l1_desc;
+}
+
+/*
+ * Helper to retrieve the gpt_l1_* information for a base address; the
+ * results are returned in gpi_info.
+ */
+static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
+{
+ uint64_t gpt_l0_desc, *gpt_l0_base;
+
+ gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
+ gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
+ if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
+ VERBOSE("[GPT] Granule is not covered by a table descriptor!\n");
+ VERBOSE(" Base=0x%" PRIx64 "\n", base);
+ return -EINVAL;
+ }
+
+ /* Get the table index and GPI shift from PA. */
+ gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
+ gpi_info->idx = GPT_L1_IDX(gpt_config.p, base);
+ gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;
+
+ gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];
+ gpi_info->gpi = (gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
+ GPT_L1_GRAN_DESC_GPI_MASK;
+ return 0;
+}
+
+/*
+ * This function is the granule transition delegate service. When a granule
+ * transition request occurs, it is routed to this function, where the
+ * request, if valid, is fulfilled following section A1.1.1 (Delegate) of
+ * the RME supplement.
*
- * - Secure world caller can only request S <-> NS transitions on a
- * granule that is already in either S or NS PAS.
- *
- * - Realm world caller can only request R <-> NS transitions on a
- * granule that is already in either R or NS PAS.
+ * TODO: implement support for transitioning multiple granules at once.
*
* Parameters
+ * base Base address of the region to transition, must be
+ * aligned to granule size.
+ * size Size of region to transition, must be aligned to granule
+ * size.
* src_sec_state Security state of the caller.
- * current_gpi Current GPI of the granule.
- * target_gpi Requested new GPI for the granule.
*
* Return
* Negative Linux error code in the event of a failure, 0 for success.
*/
-static int gpt_check_transition_gpi(unsigned int src_sec_state,
- unsigned int current_gpi,
- unsigned int target_gpi)
+int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
- unsigned int check_gpi;
+ gpi_info_t gpi_info;
+ uint64_t nse;
+ int res;
+ unsigned int target_pas;
- /* Cannot transition a granule to the state it is already in. */
- if (current_gpi == target_gpi) {
+ /* Ensure that the tables have been set up before taking requests. */
+ assert(gpt_config.plat_gpt_l0_base != 0UL);
+
+ /* Ensure that caches are enabled. */
+ assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
+
+ /* Delegate request can only come from REALM or SECURE */
+ assert(src_sec_state == SMC_FROM_REALM ||
+ src_sec_state == SMC_FROM_SECURE);
+
+ /* Only a single-granule transition is supported at present. */
+ if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
return -EINVAL;
}
- /* Check security state, only secure and realm can transition. */
- if (src_sec_state == SMC_FROM_REALM) {
- check_gpi = GPT_GPI_REALM;
- } else if (src_sec_state == SMC_FROM_SECURE) {
- check_gpi = GPT_GPI_SECURE;
+ /* Check for address range overflow. */
+ if ((ULONG_MAX - base) < size) {
+ VERBOSE("[GPT] Transition request address overflow!\n");
+ VERBOSE(" Base=0x%" PRIx64 "\n", base);
+ VERBOSE(" Size=0x%lx\n", size);
+ return -EINVAL;
+ }
+
+ /* Make sure base and size are valid. */
+ if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
+ ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
+ (size == 0UL) ||
+ ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
+ VERBOSE("[GPT] Invalid granule transition address range!\n");
+ VERBOSE(" Base=0x%" PRIx64 "\n", base);
+ VERBOSE(" Size=0x%lx\n", size);
+ return -EINVAL;
+ }
+
+ target_pas = GPT_GPI_REALM;
+ if (src_sec_state == SMC_FROM_SECURE) {
+ target_pas = GPT_GPI_SECURE;
+ }
+
+ /*
+ * Access to L1 tables is controlled by a global lock to ensure
+ * that no more than one CPU is allowed to make changes at any
+ * given time.
+ */
+ spin_lock(&gpt_lock);
+ res = get_gpi_params(base, &gpi_info);
+ if (res != 0) {
+ spin_unlock(&gpt_lock);
+ return res;
+ }
+
+ /* Check that the target granule is currently in the NS state. */
+ if (gpi_info.gpi != GPT_GPI_NS) {
+ VERBOSE("[GPT] Only Granule in NS state can be delegated.\n");
+ VERBOSE(" Caller: %u, Current GPI: %u\n", src_sec_state,
+ gpi_info.gpi);
+ spin_unlock(&gpt_lock);
+ return -EINVAL;
+ }
+
+ if (src_sec_state == SMC_FROM_SECURE) {
+ nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
} else {
- return -EINVAL;
+ nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
}
- /* Make sure security state is allowed to make the transition. */
- if ((target_gpi != check_gpi) && (target_gpi != GPT_GPI_NS)) {
- return -EINVAL;
- }
- if ((current_gpi != check_gpi) && (current_gpi != GPT_GPI_NS)) {
- return -EINVAL;
- }
+ /*
+ * In order to maintain mutual distrust between Realm and Secure
+ * states, remove any data speculatively fetched into the target
+ * physical address space by issuing DC CIPAPA over the address range.
+ */
+ flush_dcache_to_popa_range(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+
+ write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
+ gpi_info.gpi_shift, gpi_info.idx, target_pas);
+ dsboshst();
+
+ gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ dsbosh();
+
+ nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
+
+ flush_dcache_to_popa_range(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+
+ /* Unlock access to the L1 tables. */
+ spin_unlock(&gpt_lock);
+
+ /*
+ * The isb() will be done as part of context
+ * synchronization when returning to lower EL
+ */
+ VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
+ base, gpi_info.gpi, target_pas);
return 0;
}
/*
- * This function is the core of the granule transition service. When a granule
+ * This function is the granule transition undelegate service. When a granule
* transition request occurs it is routed to this function where the request is
* validated then fulfilled if possible.
*
@@ -1018,29 +1131,32 @@
* size Size of region to transition, must be aligned to granule
* size.
* src_sec_state Security state of the caller.
- * target_pas Target PAS of the specified memory region.
*
* Return
* Negative Linux error code in the event of a failure, 0 for success.
*/
-int gpt_transition_pas(uint64_t base, size_t size, unsigned int src_sec_state,
- unsigned int target_pas)
+int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
- int idx;
- unsigned int gpi_shift;
- unsigned int gpi;
- uint64_t gpt_l0_desc;
- uint64_t gpt_l1_desc;
- uint64_t *gpt_l1_addr;
- uint64_t *gpt_l0_base;
+ gpi_info_t gpi_info;
+ uint64_t nse;
+ int res;
/* Ensure that the tables have been set up before taking requests. */
- assert(gpt_config.plat_gpt_l0_base != 0U);
+ assert(gpt_config.plat_gpt_l0_base != 0UL);
- /* Ensure that MMU and data caches are enabled. */
- assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
+ /* Ensure that MMU and caches are enabled. */
+ assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
- /* Check for address range overflow. */
+ /* Undelegate request can only come from REALM or SECURE */
+ assert(src_sec_state == SMC_FROM_REALM ||
+ src_sec_state == SMC_FROM_SECURE);
+
+ /* Only a single-granule transition is supported at present. */
+ if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
+ return -EINVAL;
+ }
+
+ /* Check for address range overflow. */
if ((ULONG_MAX - base) < size) {
VERBOSE("[GPT] Transition request address overflow!\n");
VERBOSE(" Base=0x%" PRIx64 "\n", base);
@@ -1049,9 +1165,9 @@
}
/* Make sure base and size are valid. */
- if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0U) ||
- ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0U) ||
- (size == 0U) ||
+ if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
+ ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
+ (size == 0UL) ||
((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
VERBOSE("[GPT] Invalid granule transition address range!\n");
VERBOSE(" Base=0x%" PRIx64 "\n", base);
@@ -1059,66 +1175,81 @@
return -EINVAL;
}
- /* See if this is a single granule transition or a range of granules. */
- if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
- /*
- * TODO: Add support for transitioning multiple granules with a
- * single call to this function.
- */
- panic();
- }
-
- /* Get the L0 descriptor and make sure it is for a table. */
- gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
- gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
- if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
- VERBOSE("[GPT] Granule is not covered by a table descriptor!\n");
- VERBOSE(" Base=0x%" PRIx64 "\n", base);
- return -EINVAL;
- }
-
- /* Get the table index and GPI shift from PA. */
- gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
- idx = GPT_L1_IDX(gpt_config.p, base);
- gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;
-
/*
* Access to L1 tables is controlled by a global lock to ensure
* that no more than one CPU is allowed to make changes at any
* given time.
*/
spin_lock(&gpt_lock);
- gpt_l1_desc = gpt_l1_addr[idx];
- gpi = (gpt_l1_desc >> gpi_shift) & GPT_L1_GRAN_DESC_GPI_MASK;
- /* Make sure caller state and source/target PAS are allowed. */
- if (gpt_check_transition_gpi(src_sec_state, gpi, target_pas) < 0) {
+ res = get_gpi_params(base, &gpi_info);
+ if (res != 0) {
spin_unlock(&gpt_lock);
- VERBOSE("[GPT] Invalid caller state and PAS combo!\n");
- VERBOSE(" Caller: %u, Current GPI: %u, Target GPI: %u\n",
- src_sec_state, gpi, target_pas);
- return -EPERM;
+ return res;
}
- /* Clear existing GPI encoding and transition granule. */
- gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
- gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
- gpt_l1_addr[idx] = gpt_l1_desc;
+ /* Check that the granule is currently delegated to the caller's PAS. */
+ if ((src_sec_state == SMC_FROM_REALM &&
+ gpi_info.gpi != GPT_GPI_REALM) ||
+ (src_sec_state == SMC_FROM_SECURE &&
+ gpi_info.gpi != GPT_GPI_SECURE)) {
+ VERBOSE("[GPT] Only Granule in REALM or SECURE state can be undelegated.\n");
+ VERBOSE(" Caller: %u, Current GPI: %u\n", src_sec_state,
+ gpi_info.gpi);
+ spin_unlock(&gpt_lock);
+ return -EINVAL;
+ }
- /* Ensure that the write operation will be observed by GPC */
- dsbishst();
+
+ /*
+ * In order to maintain mutual distrust between Realm and Secure
+ * states, remove access now, so that writes to the currently
+ * accessible physical address space cannot later become
+ * observable.
+ */
+ write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
+ gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);
+ dsboshst();
+
+ gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ dsbosh();
+
+ if (src_sec_state == SMC_FROM_SECURE) {
+ nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
+ } else {
+ nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
+ }
+
+ /* Ensure that the scrubbed data has made it past the PoPA */
+ flush_dcache_to_popa_range(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+
+ /*
+ * Remove any data speculatively loaded into NS space
+ * from before the scrubbing.
+ */
+ nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
+
+ flush_dcache_to_popa_range(nse | base,
+ GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+
+ /* Clear existing GPI encoding and transition granule. */
+ write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
+ gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);
+ dsboshst();
+
+ /* Ensure that all agents observe the new NS configuration */
+ gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
+ dsbosh();
/* Unlock access to the L1 tables. */
spin_unlock(&gpt_lock);
- gpt_tlbi_by_pa(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
- dsbishst();
/*
* The isb() will be done as part of context
* synchronization when returning to lower EL
*/
- VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n", base, gpi,
- target_pas);
+ VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
+ base, gpi_info.gpi, GPT_GPI_NS);
return 0;
}
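
The "nse | base" operands passed to flush_dcache_to_popa_range() above place
the target PAS encoding in bits [63:62] of the address argument, per the
GPT_NSE_* definitions added to gpt_rme.h in this patch. A small
self-contained illustration of the composition (the granule PA is made up):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GPT_NSE_REALM UINT64_C(3) /* U(0b11) in gpt_rme.h */
    #define GPT_NSE_SHIFT 62

    int main(void)
    {
        uint64_t base = UINT64_C(0x880000000); /* illustrative granule PA */
        uint64_t nse = GPT_NSE_REALM << GPT_NSE_SHIFT;

        /* Bits [63:62] select which PAS the clean+invalidate targets. */
        printf("DC CIPAPA operand = 0x%" PRIx64 "\n", nse | base);
        return 0;
    }
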
diff --git a/lib/gpt_rme/gpt_rme_private.h b/lib/gpt_rme/gpt_rme_private.h
index 4203bba..3c817f3 100644
--- a/lib/gpt_rme/gpt_rme_private.h
+++ b/lib/gpt_rme/gpt_rme_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -106,6 +106,17 @@
PGS_64KB_P = 16U
} gpt_p_val_e;
+/*
+ * Internal structure used to return the L1 GPT lookup values
+ * from get_gpi_params().
+ */
+typedef struct gpi_info {
+ uint64_t gpt_l1_desc;
+ uint64_t *gpt_l1_addr;
+ unsigned int idx;
+ unsigned int gpi_shift;
+ unsigned int gpi;
+} gpi_info_t;
+
/* Max valid value for PGS. */
#define GPT_PGS_MAX (2U)
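
For orientation, the fields captured in gpi_info_t follow from the L1 GPT
layout: with a 4KB granule, each 64-bit L1 descriptor packs sixteen 4-bit
GPIs, which is why get_gpi_params() computes gpi_shift as
GPT_L1_GPI_IDX(...) << 2. A minimal worked example of that arithmetic,
ignoring the L0-table offset that the real GPT_L1_IDX macro accounts for
(the PA is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pa = UINT64_C(0x880005000); /* illustrative PA */
        uint64_t granule = pa >> 12;         /* 4KB granule number */
        unsigned int idx = (unsigned int)(granule / 16U);
        unsigned int gpi_shift = (unsigned int)((granule % 16U) << 2);

        /* granule = 0x880005; 0x880005 % 16 = 5, so this granule's GPI
         * occupies bits [23:20] of L1 descriptor index 0x88000. */
        printf("idx=%u gpi_shift=%u\n", idx, gpi_shift);
        return 0;
    }
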
diff --git a/plat/arm/board/fvp/fdts/optee_sp_manifest.dts b/plat/arm/board/fvp/fdts/optee_sp_manifest.dts
index 551efe6..b803340 100644
--- a/plat/arm/board/fvp/fdts/optee_sp_manifest.dts
+++ b/plat/arm/board/fvp/fdts/optee_sp_manifest.dts
@@ -40,11 +40,5 @@
pages-count = <1>;
attributes = <0x3>; /* read-write */
};
-
- gicd {
- base-address = <0x00000000 0x2f000000>;
- pages-count = <16>;
- attributes = <0x3>; /* read-write */
- };
};
};
diff --git a/plat/arm/board/fvp/fvp_bl31_setup.c b/plat/arm/board/fvp/fvp_bl31_setup.c
index f9ee449..a94a4f4 100644
--- a/plat/arm/board/fvp/fvp_bl31_setup.c
+++ b/plat/arm/board/fvp/fvp_bl31_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -20,6 +20,9 @@
void __init bl31_early_platform_setup2(u_register_t arg0,
u_register_t arg1, u_register_t arg2, u_register_t arg3)
{
+ /* Initialize the console to provide early debug support */
+ arm_console_boot_init();
+
#if !RESET_TO_BL31 && !BL2_AT_EL3
const struct dyn_cfg_dtb_info_t *soc_fw_config_info;
diff --git a/plat/arm/common/arm_console.c b/plat/arm/common/arm_console.c
index af5f11e..51830c9 100644
--- a/plat/arm/common/arm_console.c
+++ b/plat/arm/common/arm_console.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -25,6 +25,11 @@
/* Initialize the console to provide early debug support */
void __init arm_console_boot_init(void)
{
+ /* If the console was initialized already, don't initialize again */
+ if (arm_boot_console.base == PLAT_ARM_BOOT_UART_BASE) {
+ return;
+ }
+
int rc = console_pl011_register(PLAT_ARM_BOOT_UART_BASE,
PLAT_ARM_BOOT_UART_CLK_IN_HZ,
ARM_CONSOLE_BAUDRATE,
diff --git a/plat/brcm/board/common/board_common.mk b/plat/brcm/board/common/board_common.mk
index 3b3e92d..24a27ed 100644
--- a/plat/brcm/board/common/board_common.mk
+++ b/plat/brcm/board/common/board_common.mk
@@ -214,14 +214,12 @@
endif
# Include mbedtls if it can be located
-MBEDTLS_DIR := mbedtls
-MBEDTLS_CHECK := $(shell find ${MBEDTLS_DIR}/include -name '${MBEDTLS_DIR}')
+MBEDTLS_DIR ?= mbedtls
+MBEDTLS_CHECK := $(shell find ${MBEDTLS_DIR}/include -name '$(notdir ${MBEDTLS_DIR})')
ifneq (${MBEDTLS_CHECK},)
$(info Found mbedTLS at ${MBEDTLS_DIR})
PLAT_INCLUDES += -I${MBEDTLS_DIR}/include/mbedtls
-# Specify mbedTLS configuration file
-MBEDTLS_CONFIG_FILE := "<brcm_mbedtls_config.h>"
# By default, use RSA keys
KEY_ALG := rsa_1_5
diff --git a/plat/st/common/bl2_stm32_io_storage.c b/plat/st/common/bl2_stm32_io_storage.c
index 2d68a50..4391195 100644
--- a/plat/st/common/bl2_stm32_io_storage.c
+++ b/plat/st/common/bl2_stm32_io_storage.c
@@ -379,19 +379,21 @@
stm32_sdmmc2_mmc_get_device_size();
#if STM32MP_EMMC_BOOT
- magic = get_boot_part_ssbl_header();
+ if (mmc_dev_type == MMC_IS_EMMC) {
+ magic = get_boot_part_ssbl_header();
- if (magic == BOOT_API_IMAGE_HEADER_MAGIC_NB) {
- VERBOSE("%s, header found, jump to emmc load\n", __func__);
- idx = IMG_IDX_BL33;
- part = &stm32image_dev_info_spec.part_info[idx];
- part->part_offset = PLAT_EMMC_BOOT_SSBL_OFFSET;
- part->bkp_offset = 0U;
- mmc_device_spec.use_boot_part = true;
+ if (magic == BOOT_API_IMAGE_HEADER_MAGIC_NB) {
+ VERBOSE("%s, header found, jump to emmc load\n", __func__);
+ idx = IMG_IDX_BL33;
+ part = &stm32image_dev_info_spec.part_info[idx];
+ part->part_offset = PLAT_EMMC_BOOT_SSBL_OFFSET;
+ part->bkp_offset = 0U;
+ mmc_device_spec.use_boot_part = true;
- goto emmc_boot;
- } else {
- WARN("%s: Can't find STM32 header on a boot partition\n", __func__);
+ goto emmc_boot;
+ } else {
+ WARN("%s: Can't find STM32 header on a boot partition\n", __func__);
+ }
}
#endif
diff --git a/services/std_svc/rmmd/rmmd_main.c b/services/std_svc/rmmd/rmmd_main.c
index c4ea706..28d0b01 100644
--- a/services/std_svc/rmmd/rmmd_main.c
+++ b/services/std_svc/rmmd/rmmd_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -325,30 +325,6 @@
/* Subscribe to PSCI CPU on to initialize RMM on secondary */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);
-static int gtsi_transition_granule(uint64_t pa,
- unsigned int src_sec_state,
- unsigned int target_pas)
-{
- int ret;
-
- ret = gpt_transition_pas(pa, PAGE_SIZE_4KB, src_sec_state, target_pas);
-
- /* Convert TF-A error codes into GTSI error codes */
- if (ret == -EINVAL) {
- ERROR("[GTSI] Transition failed: invalid %s\n", "address");
- ERROR(" PA: 0x%" PRIx64 ", SRC: %d, PAS: %d\n", pa,
- src_sec_state, target_pas);
- ret = GRAN_TRANS_RET_BAD_ADDR;
- } else if (ret == -EPERM) {
- ERROR("[GTSI] Transition failed: invalid %s\n", "caller/PAS");
- ERROR(" PA: 0x%" PRIx64 ", SRC: %d, PAS: %d\n", pa,
- src_sec_state, target_pas);
- ret = GRAN_TRANS_RET_BAD_PAS;
- }
-
- return ret;
-}
-
/*******************************************************************************
* This function handles all SMCs in the range reserved for GTF.
******************************************************************************/
@@ -357,6 +333,7 @@
void *handle, uint64_t flags)
{
uint32_t src_sec_state;
+ int ret;
/* Determine which security state this SMC originated from */
src_sec_state = caller_sec_state(flags);
@@ -368,13 +345,27 @@
switch (smc_fid) {
case SMC_ASC_MARK_REALM:
- SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM,
- GPT_GPI_REALM));
+ ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
+ break;
case SMC_ASC_MARK_NONSECURE:
- SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM,
- GPT_GPI_NS));
+ ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
+ break;
default:
WARN("RMM: Unsupported GTF call 0x%08x\n", smc_fid);
SMC_RET1(handle, SMC_UNK);
}
+
+ if (ret == -EINVAL) {
+ ERROR("[GTSI] Transition failed: invalid %s\n", "address");
+ ERROR(" PA: 0x%"PRIx64 ", SRC: %d, PAS: %d\n", x1,
+ SMC_FROM_REALM, smc_fid);
+ ret = GRAN_TRANS_RET_BAD_ADDR;
+ } else if (ret == -EPERM) {
+ ERROR("[GTSI] Transition failed: invalid %s\n", "caller/PAS");
+ ERROR(" PA: 0x%"PRIx64 ", SRC: %d, PAS: %d\n", x1,
+ SMC_FROM_REALM, smc_fid);
+ ret = GRAN_TRANS_RET_BAD_PAS;
+ }
+
+ SMC_RET1(handle, ret);
}