Merge changes from topic "for-lts-v2.10.15" into lts-v2.10

* changes:
  fix(cpus): fix clang compilation issue
  fix(cpus): remove errata setting PF_MODE to conservative
  chore(cpus): rearrange the errata and cve in order in Cortex-X4
  chore(cpus): rearrange cve and errata order in Cortex-X3
  chore(cpus): fix cve order in Neoverse-V2
  chore(cpus): rearrange the errata and cve order in Neoverse-N2
  chore(cpus): rearrange the errata and cve in order in Neoverse-V3
  chore(cpus): rearrange the errata and cve in order in Cortex-A710
  chore(cpus): rearrange cve in order in Cortex-X1
  chore(cpus): fix cve order in Neoverse-V1
  chore(cpus): fix cve order in Cortex-X2
  chore(cpus): fix cve order in Cortex-A78C
  chore(cpus): fix cve order in Cortex-A78_AE
  chore(cpus): fix cve order in Cortex-A78
  chore(cpus): fix cve order in Cortex-A77
  refactor(cpus): don't panic if errata out of order
  fix(errata): workaround for Cortex-A510 erratum 2971420
  fix(cpus): workaround for Cortex-A715 erratum 2804830
  fix(errata-abi): add support for handling split workarounds
  refactor(cpus): declare runtime errata correctly
  perf(cpus): make reset errata do fewer branches
  perf(cpus): inline the init_cpu_data_ptr function
  perf(cpus): inline the reset function
  perf(cpus): inline the cpu_get_rev_var call
  perf(cpus): inline cpu_rev_var checks
  refactor(cpus): register DSU errata with the errata framework's wrappers
  refactor(cpus): convert checker functions to standard helpers
  refactor(cpus): convert the Cortex-A65 to use the errata framework
  refactor(cpus): remove cpu specific errata funcs
  refactor(cpus): directly invoke errata reporter
  fix(deps): remove deprecated husky commands
  refactor(cpus): undo errata mitigations
diff --git a/.husky/prepare-commit-msg b/.husky/prepare-commit-msg
index 617400a..e38252e 100755
--- a/.husky/prepare-commit-msg
+++ b/.husky/prepare-commit-msg
@@ -1,8 +1,5 @@
 #!/bin/sh
 
-# shellcheck source=./_/husky.sh
-. "$(dirname "$0")/_/husky.sh"
-
 if ! git config --get tf-a.disableCommitizen > /dev/null; then
     "$(dirname "$0")/prepare-commit-msg.cz" "$@"
 fi
diff --git a/bl1/bl1.mk b/bl1/bl1.mk
index 67b4bf8..f871395 100644
--- a/bl1/bl1.mk
+++ b/bl1/bl1.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -17,8 +17,7 @@
 				${MBEDTLS_SOURCES}
 
 ifeq (${ARCH},aarch64)
-BL1_SOURCES		+=	lib/cpus/aarch64/dsu_helpers.S		\
-				lib/el3_runtime/aarch64/context.S	\
+BL1_SOURCES		+=	lib/el3_runtime/aarch64/context.S	\
 				lib/cpus/errata_common.c
 endif
 
diff --git a/bl2/bl2.mk b/bl2/bl2.mk
index b70a3fb..1b944bb 100644
--- a/bl2/bl2.mk
+++ b/bl2/bl2.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -43,13 +43,9 @@
 				bl2/${ARCH}/bl2_run_next_image.S        \
 				lib/cpus/${ARCH}/cpu_helpers.S
 
-ifeq (${ARCH},aarch64)
-BL2_SOURCES		+=	lib/cpus/aarch64/dsu_helpers.S
-endif
-
 BL2_DEFAULT_LINKER_SCRIPT_SOURCE := bl2/bl2_el3.ld.S
 endif
 
 ifeq (${ENABLE_PMF},1)
 BL2_SOURCES		+=	lib/pmf/pmf_main.c
-endif
\ No newline at end of file
+endif
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index c1b6718..f0a1988 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -43,7 +43,6 @@
 				bl31/bl31_traps.c				\
 				common/runtime_svc.c				\
 				lib/cpus/errata_common.c			\
-				lib/cpus/aarch64/dsu_helpers.S			\
 				plat/common/aarch64/platform_mp_stack.S		\
 				services/arm_arch_svc/arm_arch_svc_setup.c	\
 				services/std_svc/std_svc_setup.c		\
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index bac11c7..7ad8285 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -311,10 +311,6 @@
 -  ``ERRATA_A78_1952683``: This applies errata 1952683 workaround to Cortex-A78
    CPU. This needs to be enabled for revision r0p0, it is fixed in r1p0.
 
--  ``ERRATA_A78_2132060``: This applies errata 2132060 workaround to Cortex-A78
-   CPU. This needs to be enabled for revisions r0p0, r1p0, r1p1, and r1p2. It
-   is still open.
-
 -  ``ERRATA_A78_2242635``: This applies errata 2242635 workaround to Cortex-A78
    CPU. This needs to be enabled for revisions r1p0, r1p1, and r1p2. The issue
    is present in r0p0 but there is no workaround. It is still open.
@@ -377,10 +373,6 @@
   Cortex-A78C CPU. This needs to be enabled for revision r0p0. The erratum is
   fixed in r0p1.
 
-- ``ERRATA_A78C_2132064`` : This applies errata 2132064 workaround to
-  Cortex-A78C CPU. This needs to be enabled for revisions r0p1, r0p2 and
-  it is still open.
-
 - ``ERRATA_A78C_2242638`` : This applies errata 2242638 workaround to
   Cortex-A78C CPU. This needs to be enabled for revisions r0p1, r0p2 and
   it is still open.
@@ -505,10 +497,6 @@
    CPU. This needs to be enabled for revisions r0p0, r1p0, and r1p1 of the
    CPU.  It is still open.
 
--  ``ERRATA_V1_2108267``: This applies errata 2108267 workaround to Neoverse-V1
-   CPU. This needs to be enabled for revisions r0p0, r1p0, and r1p1 of the CPU.
-   It is still open.
-
 -  ``ERRATA_V1_2216392``: This applies errata 2216392 workaround to Neoverse-V1
    CPU. This needs to be enabled for revisions r1p0 and r1p1 of the CPU, the
    issue is present in r0p0 as well but there is no workaround for that
@@ -545,10 +533,6 @@
 
 For Neoverse V2, the following errata build flags are defined :
 
--  ``ERRATA_V2_2331132``: This applies errata 2331132 workaround to Neoverse-V2
-   CPU. This needs to be enabled for revisions r0p0, r0p1 and r0p2. It is still
-   open.
-
 -  ``ERRATA_V2_2618597``: This applies errata 2618597 workaround to Neoverse-V2
    CPU. This needs to be enabled for revisions r0p0 and r0p1. It is fixed in
    r0p2.
@@ -609,10 +593,6 @@
    Cortex-A710 CPU. This needs to be enabled for revision r2p0 of the CPU and
    is still open.
 
--  ``ERRATA_A710_2058056``: This applies errata 2058056 workaround to
-   Cortex-A710 CPU. This needs to be enabled for revisions r0p0, r1p0 and r2p0
-   and r2p1 of the CPU and is still open.
-
 -  ``ERRATA_A710_2267065``: This applies errata 2267065 workaround to
    Cortex-A710 CPU. This needs to be enabled for revisions r0p0, r1p0 and r2p0
    of the CPU and is fixed in r2p1.
@@ -686,9 +666,6 @@
 -  ``ERRATA_N2_2138956``: This applies errata 2138956 workaround to Neoverse-N2
    CPU. This needs to be enabled for revision r0p0 of the CPU and is fixed in r0p1.
 
--  ``ERRATA_N2_2138953``: This applies errata 2138953 workaround to Neoverse-N2
-   CPU. This needs to be enabled for revisions r0p0, r0p1, r0p2, r0p3 and is still open.
-
 -  ``ERRATA_N2_2242415``: This applies errata 2242415 workaround to Neoverse-N2
    CPU. This needs to be enabled for revision r0p0 of the CPU and is fixed in r0p1.
 
@@ -752,10 +729,6 @@
    CPU. This needs to be enabled for revisions r0p0, r1p0, and r2p0 of the CPU,
    it is still open.
 
--  ``ERRATA_X2_2058056``: This applies errata 2058056 workaround to Cortex-X2
-   CPU. This needs to be enabled for revisions r0p0, r1p0, r2p0 and r2p1 of the CPU,
-   it is still open.
-
 -  ``ERRATA_X2_2083908``: This applies errata 2083908 workaround to Cortex-X2
    CPU. This needs to be enabled for revision r2p0 of the CPU, it is still open.
 
@@ -806,10 +779,6 @@
 
 For Cortex-X3, the following errata build flags are defined :
 
-- ``ERRATA_X3_2070301``: This applies errata 2070301 workaround to the Cortex-X3
-  CPU. This needs to be enabled only for revisions r0p0, r1p0, r1p1 and r1p2 of
-  the CPU and is still open.
-
 - ``ERRATA_X3_2266875``: This applies errata 2266875 workaround to the Cortex-X3
   CPU. This needs to be enabled only for revisions r0p0 and r1p0 of the CPU, it
   is fixed in r1p1.
@@ -956,6 +925,10 @@
    Cortex-A510 CPU. This needs to be applied to revision r0p0, r0p1, r0p2,
    r0p3, r1p0, r1p1 and r1p2. It is fixed in r1p3.
 
+-  ``ERRATA_A510_2971420``: This applies erratum 2971420 workaround to
+   Cortex-A510 CPU. This needs to be applied to revisions r0p1, r0p2, r0p3,
+   r1p0, r1p1, r1p2 and r1p3 and is still open.
+
 For Cortex-A520, the following errata build flags are defined :
 
 -  ``ERRATA_A520_2630792``: This applies errata 2630792 workaround to
@@ -1001,9 +974,13 @@
    Cortex-A715 CPU. This needs to be enabled for revisions r0p0, r1p0
    and r1p1. It is fixed in r1p2.
 
+-  ``ERRATA_A715_2804830``: This applies errata 2804830 workaround to
+   Cortex-A715 CPU. This needs to be enabled for revisions r0p0, r1p0,
+   r1p1 and r1p2. It is fixed in r1p3.
+
 -  ``ERRATA_A715_3699560``: This applies errata 3699560 workaround to
    Cortex-A715 CPU. This needs to be enabled for revisions r0p0, r1p0,
-   r1p2, r1p3. It is still open.
+   r1p2 and r1p3. It is still open.
 
 For Cortex-A720, the following errata build flags are defined :
 
diff --git a/docs/design/firmware-design.rst b/docs/design/firmware-design.rst
index 3fce393..2030fd1 100644
--- a/docs/design/firmware-design.rst
+++ b/docs/design/firmware-design.rst
@@ -247,7 +247,7 @@
 
 -  CPU initialization
 
-   BL1 calls the ``reset_handler()`` function which in turn calls the CPU
+   BL1 calls the ``reset_handler`` macro/function which in turn calls the CPU
    specific reset handler function (see the section: "CPU specific operations
    framework").
 
@@ -1304,7 +1304,7 @@
 
 TF-A implements a framework that allows CPU and platform ports to perform
 actions very early after a CPU is released from reset in both the cold and warm
-boot paths. This is done by calling the ``reset_handler()`` function in both
+boot paths. This is done by calling the ``reset_handler`` macro/function in both
 the BL1 and BL31 images. It in turn calls the platform and CPU specific reset
 handling functions.
 
@@ -1448,7 +1448,9 @@
 handling for that CPU and also any errata workarounds enabled by the platform.
 
 It should be defined using the ``cpu_reset_func_{start,end}`` macros and its
-body may only clobber x0 to x14 with x14 being the cpu_rev parameter.
+body may only clobber x0 to x14 with x14 being the cpu_rev parameter. The cpu
+file should also include a call to ``cpu_reset_prologue`` at the start of the
+file for errata to work correctly.
 
 CPU specific power down sequence
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -2860,7 +2862,7 @@
 
 --------------
 
-*Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.*
+*Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.*
 
 .. _SMCCC: https://developer.arm.com/docs/den0028/latest
 .. _PSCI: https://developer.arm.com/documentation/den0022/latest/
diff --git a/include/arch/aarch64/asm_macros.S b/include/arch/aarch64/asm_macros.S
index d09ad0f..4705673 100644
--- a/include/arch/aarch64/asm_macros.S
+++ b/include/arch/aarch64/asm_macros.S
@@ -8,6 +8,7 @@
 
 #include <arch.h>
 #include <common/asm_macros_common.S>
+#include <lib/cpus/cpu_ops.h>
 #include <lib/spinlock.h>
 
 /*
@@ -317,4 +318,25 @@
 #endif
 	.endm
 
+.macro call_reset_handler
+#if !(defined(IMAGE_BL2) && ENABLE_RME)
+	/* ---------------------------------------------------------------------
+	 * It is a cold boot.
+	 * Perform any processor specific actions upon reset e.g. cache, TLB
+	 * invalidations etc.
+	 * ---------------------------------------------------------------------
+	 */
+	/* The plat_reset_handler can clobber x0 - x18, x30 */
+	bl	plat_reset_handler
+
+	/* Get the matching cpu_ops pointer */
+	bl	get_cpu_ops_ptr
+
+	/* Get the cpu_ops reset handler */
+	ldr	x2, [x0, #CPU_RESET_FUNC]
+
+	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
+	blr	x2
+#endif
+.endm
 #endif /* ASM_MACROS_S */
diff --git a/include/arch/aarch64/el2_common_macros.S b/include/arch/aarch64/el2_common_macros.S
index 9609c0d..19ab529 100644
--- a/include/arch/aarch64/el2_common_macros.S
+++ b/include/arch/aarch64/el2_common_macros.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2023, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -293,7 +293,7 @@
 	 * invalidations etc.
 	 * ---------------------------------------------------------------------
 	 */
-	bl	reset_handler
+	call_reset_handler
 
 	el2_arch_init_common
 
diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S
index 26c7578..4917648 100644
--- a/include/arch/aarch64/el3_common_macros.S
+++ b/include/arch/aarch64/el3_common_macros.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -49,7 +49,9 @@
 	 * due to a NULL TPIDR_EL3.
 	 * ---------------------------------------------------------------------
 	 */
-	bl	init_cpu_data_ptr
+	bl	plat_my_core_pos
+	bl	_cpu_data_by_index
+	msr	tpidr_el3, x0
 #endif /* IMAGE_BL31 */
 
 	/* ---------------------------------------------------------------------
@@ -271,15 +273,7 @@
 	msr	vbar_el3, x0
 	isb
 
-#if !(defined(IMAGE_BL2) && ENABLE_RME)
-	/* ---------------------------------------------------------------------
-	 * It is a cold boot.
-	 * Perform any processor specific actions upon reset e.g. cache, TLB
-	 * invalidations etc.
-	 * ---------------------------------------------------------------------
-	 */
-	bl	reset_handler
-#endif
+	call_reset_handler
 
 	el3_arch_init_common
 
diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S
index 096e0b1..31f8811 100644
--- a/include/lib/cpus/aarch32/cpu_macros.S
+++ b/include/lib/cpus/aarch32/cpu_macros.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -48,8 +48,7 @@
 	 * _midr:
 	 *	Numeric value expected to read from CPU's MIDR
 	 * _resetfunc:
-	 *	Reset function for the CPU. If there's no CPU reset function,
-	 *	specify CPU_NO_RESET_FUNC
+	 *	Reset function for the CPU
 	 * _power_down_ops:
 	 *	Comma-separated list of functions to perform power-down
 	 *	operations on the CPU. At least one, and up to
@@ -115,11 +114,6 @@
 	  .popsection
 	.endif
 
-	/*
-	 * Mandatory errata status printing function for CPUs of
-	 * this class.
-	 */
-	.word \_name\()_errata_report
 	.word \_name\()_cpu_str
 
 #ifdef IMAGE_BL32
@@ -130,45 +124,6 @@
 #endif
 	.endm
 
-#if REPORT_ERRATA
-	/*
-	 * Print status of a CPU errata
-	 *
-	 * _chosen:
-	 *	Identifier indicating whether or not a CPU errata has been
-	 *	compiled in.
-	 * _cpu:
-	 *	Name of the CPU
-	 * _id:
-	 *	Errata identifier
-	 * _rev_var:
-	 *	Register containing the combined value CPU revision and variant
-	 *	- typically the return value of cpu_get_rev_var
-	 */
-	.macro report_errata _chosen, _cpu, _id, _rev_var=r4
-	/* Stash a string with errata ID */
-	.pushsection .rodata
-	\_cpu\()_errata_\_id\()_str:
-	.asciz	"\_id"
-	.popsection
-
-	/* Check whether errata applies */
-	mov	r0, \_rev_var
-	bl	check_errata_\_id
-
-	.ifeq \_chosen
-	/*
-	 * Errata workaround has not been compiled in. If the errata would have
-	 * applied had it been compiled in, print its status as missing.
-	 */
-	cmp	r0, #0
-	movne	r0, #ERRATA_MISSING
-	.endif
-	ldr	r1, =\_cpu\()_cpu_str
-	ldr	r2, =\_cpu\()_errata_\_id\()_str
-	bl	errata_print_msg
-	.endm
-#endif
 	/*
 	 * Helper macro that reads the part number of the current CPU and jumps
 	 * to the given label if it matches the CPU MIDR provided.
@@ -217,11 +172,6 @@
 		\_cpu\()_errata_list_start:
 		.endif
 
-		/* unused on AArch32, maintain for portability */
-		.word	0
-		/* TODO(errata ABI): this prevents all checker functions from
-		 * being optimised away. Can be done away with unless the ABI
-		 * needs them */
 		.ifnb \_special
 			.word	check_errata_\_special
 		.elseif \_cve
@@ -233,27 +183,8 @@
 		.word	\_id
 		.hword	\_cve
 		.byte	\_chosen
-		/* TODO(errata ABI): mitigated field for known but unmitigated
-		 * errata*/
-		.byte	0x1
+		.byte	0x0 /* alignment */
 	.popsection
 .endm
 
-/*
- * Maintain compatibility with the old scheme of "each cpu has its own reporter".
- * TODO remove entirely once all cpus have been converted. This includes the
- * cpu_ops entry, as print_errata_status can call this directly for all cpus
- */
-.macro errata_report_shim _cpu:req
-	#if REPORT_ERRATA
-	func \_cpu\()_errata_report
-		push	{r12, lr}
-
-		bl generic_errata_report
-
-		pop	{r12, lr}
-		bx	lr
-	endfunc \_cpu\()_errata_report
-	#endif
-.endm
 #endif /* CPU_MACROS_S */
diff --git a/include/lib/cpus/aarch64/cortex_a510.h b/include/lib/cpus/aarch64/cortex_a510.h
index 337aac3..fb09411 100644
--- a/include/lib/cpus/aarch64/cortex_a510.h
+++ b/include/lib/cpus/aarch64/cortex_a510.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -52,4 +52,12 @@
 #define CORTEX_A510_CPUACTLR_EL1_DATA_CORRUPT_SHIFT		U(18)
 #define CORTEX_A510_CPUACTLR_EL1_DATA_CORRUPT_WIDTH		U(1)
 
+#ifndef __ASSEMBLER__
+
+#if ERRATA_A510_2971420
+long check_erratum_cortex_a510_2971420(long cpu_rev);
+#endif
+
+#endif /* __ASSEMBLER__ */
+
 #endif /* CORTEX_A510_H */
diff --git a/include/lib/cpus/aarch64/cortex_a710.h b/include/lib/cpus/aarch64/cortex_a710.h
index 650193c..a47a47e 100644
--- a/include/lib/cpus/aarch64/cortex_a710.h
+++ b/include/lib/cpus/aarch64/cortex_a710.h
@@ -52,14 +52,6 @@
 #define CORTEX_A710_CPUACTLR5_EL1_BIT_44			(ULL(1) << 44)
 
 /*******************************************************************************
- * CPU Auxiliary Control register specific definitions.
- ******************************************************************************/
-#define CORTEX_A710_CPUECTLR2_EL1				S3_0_C15_C1_5
-#define CORTEX_A710_CPUECTLR2_EL1_PF_MODE_CNSRV			ULL(9)
-#define CPUECTLR2_EL1_PF_MODE_LSB				U(11)
-#define CPUECTLR2_EL1_PF_MODE_WIDTH				U(4)
-
-/*******************************************************************************
  * CPU Selected Instruction Private register specific definitions.
  ******************************************************************************/
 #define CORTEX_A710_CPUPSELR_EL3				S3_6_C15_C8_0
diff --git a/include/lib/cpus/aarch64/cortex_a715.h b/include/lib/cpus/aarch64/cortex_a715.h
index e9bd886..9980214 100644
--- a/include/lib/cpus/aarch64/cortex_a715.h
+++ b/include/lib/cpus/aarch64/cortex_a715.h
@@ -13,20 +13,14 @@
 #define CORTEX_A715_BHB_LOOP_COUNT				U(38)
 
 /*******************************************************************************
- * CPU Auxiliary Control register 1 specific definitions.
+ * CPU Register Mappings
  ******************************************************************************/
+#define CORTEX_A715_CPUCFR_EL1					S3_0_C15_C0_0
 #define CORTEX_A715_CPUACTLR_EL1				S3_0_C15_C1_0
-
-/*******************************************************************************
- * CPU Auxiliary Control register 2 specific definitions.
- ******************************************************************************/
 #define CORTEX_A715_CPUACTLR2_EL1				S3_0_C15_C1_1
-
-/*******************************************************************************
- * CPU Extended Control register specific definitions
- ******************************************************************************/
+#define CORTEX_A715_CPUACTLR3_EL1				S3_0_C15_C1_2
 #define CORTEX_A715_CPUECTLR_EL1				S3_0_C15_C1_4
-
+#define CORTEX_A715_CPUECTLR2_EL1				S3_0_C15_C1_5
 #define CORTEX_A715_CPUPSELR_EL3				S3_6_C15_C8_0
 #define CORTEX_A715_CPUPCR_EL3					S3_6_C15_C8_1
 #define CORTEX_A715_CPUPOR_EL3					S3_6_C15_C8_2
diff --git a/include/lib/cpus/aarch64/cortex_a78.h b/include/lib/cpus/aarch64/cortex_a78.h
index 2984f82..203bdfd 100644
--- a/include/lib/cpus/aarch64/cortex_a78.h
+++ b/include/lib/cpus/aarch64/cortex_a78.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -19,9 +19,6 @@
  ******************************************************************************/
 #define CORTEX_A78_CPUECTLR_EL1				S3_0_C15_C1_4
 #define CORTEX_A78_CPUECTLR_EL1_BIT_8			(ULL(1) << 8)
-#define CORTEX_A78_CPUECTLR_EL1_PF_MODE_CNSRV		ULL(3)
-#define CPUECTLR_EL1_PF_MODE_LSB			U(6)
-#define CPUECTLR_EL1_PF_MODE_WIDTH			U(2)
 
 /*******************************************************************************
  * CPU Power Control register specific definitions
diff --git a/include/lib/cpus/aarch64/cortex_a78c.h b/include/lib/cpus/aarch64/cortex_a78c.h
index d600eca..2033120 100644
--- a/include/lib/cpus/aarch64/cortex_a78c.h
+++ b/include/lib/cpus/aarch64/cortex_a78c.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -24,8 +24,6 @@
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
 #define CORTEX_A78C_CPUECTLR_EL1		        S3_0_C15_C1_4
-#define CORTEX_A78C_CPUECTLR_EL1_BIT_6		        (ULL(1) << 6)
-#define CORTEX_A78C_CPUECTLR_EL1_BIT_7		        (ULL(1) << 7)
 #define CORTEX_A78C_CPUECTLR_EL1_MM_ASP_EN		(ULL(1) << 53)
 
 /*******************************************************************************
diff --git a/include/lib/cpus/aarch64/cortex_x2.h b/include/lib/cpus/aarch64/cortex_x2.h
index 9ec5177..4516339 100644
--- a/include/lib/cpus/aarch64/cortex_x2.h
+++ b/include/lib/cpus/aarch64/cortex_x2.h
@@ -19,15 +19,6 @@
 #define CORTEX_X2_CPUECTLR_EL1_PFSTIDIS_BIT			(ULL(1) << 8)
 
 /*******************************************************************************
- * CPU Extended Control register 2 specific definitions
- ******************************************************************************/
-#define CORTEX_X2_CPUECTLR2_EL1					S3_0_C15_C1_5
-
-#define CORTEX_X2_CPUECTLR2_EL1_PF_MODE_SHIFT			U(11)
-#define CORTEX_X2_CPUECTLR2_EL1_PF_MODE_WIDTH			U(4)
-#define CORTEX_X2_CPUECTLR2_EL1_PF_MODE_CNSRV			ULL(0x9)
-
-/*******************************************************************************
  * CPU Auxiliary Control register 3 specific definitions.
  ******************************************************************************/
 #define CORTEX_X2_CPUACTLR3_EL1				S3_0_C15_C1_2
diff --git a/include/lib/cpus/aarch64/cortex_x3.h b/include/lib/cpus/aarch64/cortex_x3.h
index 8834db1..2869ec8 100644
--- a/include/lib/cpus/aarch64/cortex_x3.h
+++ b/include/lib/cpus/aarch64/cortex_x3.h
@@ -49,15 +49,6 @@
 #define CORTEX_X3_CPUACTLR6_EL1			S3_0_C15_C8_1
 
 /*******************************************************************************
- * CPU Extended Control register 2 specific definitions.
- ******************************************************************************/
-#define CORTEX_X3_CPUECTLR2_EL1			S3_0_C15_C1_5
-
-#define CORTEX_X3_CPUECTLR2_EL1_PF_MODE_LSB	U(11)
-#define CORTEX_X3_CPUECTLR2_EL1_PF_MODE_WIDTH	U(4)
-#define CORTEX_X3_CPUECTLR2_EL1_PF_MODE_CNSRV	ULL(0x9)
-
-/*******************************************************************************
  * CPU Auxiliary Control register 3 specific definitions.
  ******************************************************************************/
 #define CORTEX_X3_CPUACTLR3_EL1			S3_0_C15_C1_2
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
index 19f45e8..3c01893 100644
--- a/include/lib/cpus/aarch64/cpu_macros.S
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -49,8 +49,7 @@
 	 * _midr:
 	 *	Numeric value expected to read from CPU's MIDR
 	 * _resetfunc:
-	 *	Reset function for the CPU. If there's no CPU reset function,
-	 *	specify CPU_NO_RESET_FUNC
+	 *	Reset function for the CPU.
 	 * _extra1:
 	 *	This is a placeholder for future per CPU operations.  Currently,
 	 *	some CPUs use this entry to set a test function to determine if
@@ -138,12 +137,6 @@
 	  .popsection
 	.endif
 
-
-	/*
-	 * Mandatory errata status printing function for CPUs of
-	 * this class.
-	 */
-	.quad \_name\()_errata_report
 	.quad \_name\()_cpu_str
 
 #ifdef IMAGE_BL31
@@ -184,49 +177,6 @@
 			\_extra1, \_extra2, \_extra3, \_extra4, 0, \_power_down_ops
 	.endm
 
-/* TODO can be deleted once all CPUs have been converted */
-#if REPORT_ERRATA
-	/*
-	 * Print status of a CPU errata
-	 *
-	 * _chosen:
-	 *	Identifier indicating whether or not a CPU errata has been
-	 *	compiled in.
-	 * _cpu:
-	 *	Name of the CPU
-	 * _id:
-	 *	Errata identifier
-	 * _rev_var:
-	 *	Register containing the combined value CPU revision and variant
-	 *	- typically the return value of cpu_get_rev_var
-	 */
-	.macro report_errata _chosen, _cpu, _id, _rev_var=x8
-	/* Stash a string with errata ID */
-	.pushsection .rodata
-	\_cpu\()_errata_\_id\()_str:
-	.asciz	"\_id"
-	.popsection
-
-	/* Check whether errata applies */
-	mov	x0, \_rev_var
-	/* Shall clobber: x0-x7 */
-	bl	check_errata_\_id
-
-	.ifeq \_chosen
-	/*
-	 * Errata workaround has not been compiled in. If the errata would have
-	 * applied had it been compiled in, print its status as missing.
-	 */
-	cbz	x0, 900f
-	mov	x0, #ERRATA_MISSING
-	.endif
-900:
-	adr	x1, \_cpu\()_cpu_str
-	adr	x2, \_cpu\()_errata_\_id\()_str
-	bl	errata_print_msg
-	.endm
-#endif
-
 	/*
 	 * This macro is used on some CPUs to detect if they are vulnerable
 	 * to CVE-2017-5715.
@@ -285,53 +235,27 @@
  * _chosen:
  *	Compile time flag on whether the erratum is included
  *
- * _apply_at_reset:
- *	Whether the erratum should be automatically applied at reset
+ * _split_wa:
+ *	Flag that indicates whether an erratum has split workaround or not.
+ *	Default value is 0.
  */
-.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
+.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _split_wa=0
+#if REPORT_ERRATA || ERRATA_ABI_SUPPORT
 	.pushsection .rodata.errata_entries
 		.align	3
 		.ifndef \_cpu\()_errata_list_start
 		\_cpu\()_errata_list_start:
 		.endif
 
-		/* check if unused and compile out if no references */
-		.if \_apply_at_reset && \_chosen
-			.quad	erratum_\_cpu\()_\_id\()_wa
-		.else
-			.quad	0
-		.endif
-		/* TODO(errata ABI): this prevents all checker functions from
-		 * being optimised away. Can be done away with unless the ABI
-		 * needs them */
 		.quad	check_erratum_\_cpu\()_\_id
 		/* Will fit CVEs with up to 10 character in the ID field */
 		.word	\_id
 		.hword	\_cve
-		.byte	\_chosen
-		/* TODO(errata ABI): mitigated field for known but unmitigated
-		 * errata */
-		.byte	0x1
+		/* bit magic that appends chosen field based on _split_wa */
+		.byte	((\_chosen * 0b11) & ((\_split_wa << 1) | \_chosen))
+		.byte	0x0 /* alignment */
 	.popsection
-.endm
-
-.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
-	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset
-
-	func erratum_\_cpu\()_\_id\()_wa
-		mov	x8, x30
-
-		/* save rev_var for workarounds that might need it but don't
-		 * restore to x0 because few will care */
-		mov	x7, x0
-		bl	check_erratum_\_cpu\()_\_id
-		cbz	x0, erratum_\_cpu\()_\_id\()_skip
-.endm
-
-.macro _workaround_end _cpu:req, _id:req
-	erratum_\_cpu\()_\_id\()_skip:
-		ret	x8
-	endfunc erratum_\_cpu\()_\_id\()_wa
+#endif
 .endm
 
 /*******************************************************************************
@@ -354,14 +278,35 @@
  * _chosen:
  *	Compile time flag on whether the erratum is included
  *
+ * _split_wa:
+ *	Flag that indicates whether an erratum has split workaround or not.
+ *	Default value is 0.
+ *
  * in body:
  *	clobber x0 to x7 (please only use those)
  *	argument x7 - cpu_rev_var
  *
  * _wa clobbers: x0-x8 (PCS compliant)
  */
-.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
-	_workaround_start \_cpu, \_cve, \_id, \_chosen, 1
+.macro workaround_reset_start _cpu:req, _cve:req, _id:req, \
+	_chosen:req, _split_wa=0
+
+	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_split_wa
+
+	.if \_chosen
+		/* put errata directly into the reset function */
+		.pushsection .text.asm.\_cpu\()_reset_func, "ax"
+	.else
+		/* or something else that will get garbage collected by the
+		 * linker */
+		.pushsection .text.asm.erratum_\_cpu\()_\_id\()_wa, "ax"
+	.endif
+		/* revision is stored in x14, get it */
+		mov	x0, x14
+		bl	check_erratum_\_cpu\()_\_id
+		/* save rev_var for workarounds that might need it */
+		mov	x7, x14
+		cbz	x0, erratum_\_cpu\()_\_id\()_skip_reset
 .endm
 
 /*
@@ -372,6 +317,10 @@
  *	for errata applied in generic code
  */
 .macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
+	add_erratum_entry \_cpu, \_cve, \_id, \_chosen
+
+	func erratum_\_cpu\()_\_id\()_wa
+		mov	x8, x30
 	/*
 	 * Let errata specify if they need MIDR checking. Sadly, storing the
 	 * MIDR in an .equ to retrieve automatically blows up as it stores some
@@ -379,11 +328,15 @@
 	 */
 	.ifnb \_midr
 		jump_if_cpu_midr \_midr, 1f
-		b	erratum_\_cpu\()_\_id\()_skip
+		b	erratum_\_cpu\()_\_id\()_skip_runtime
 
 		1:
 	.endif
-	_workaround_start \_cpu, \_cve, \_id, \_chosen, 0
+		/* save rev_var for workarounds that might need it but don't
+		 * restore to x0 because few will care */
+		mov	x7, x0
+		bl	check_erratum_\_cpu\()_\_id
+		cbz	x0, erratum_\_cpu\()_\_id\()_skip_runtime
 .endm
 
 /*
@@ -391,7 +344,8 @@
  * is kept here so the same #define can be used as that macro
  */
 .macro workaround_reset_end _cpu:req, _cve:req, _id:req
-	_workaround_end \_cpu, \_id
+	erratum_\_cpu\()_\_id\()_skip_reset:
+	.popsection
 .endm
 
 /*
@@ -411,7 +365,9 @@
 	.ifb \_no_isb
 		isb
 	.endif
-	_workaround_end \_cpu, \_id
+	erratum_\_cpu\()_\_id\()_skip_runtime:
+		ret	x8
+	endfunc erratum_\_cpu\()_\_id\()_wa
 .endm
 
 /*******************************************************************************
@@ -470,6 +426,29 @@
 .endm
 
 /*
+ * Extract CPU revision and variant, and combine them into a single numeric for
+ * easier comparison.
+ *
+ * _res:
+ *	register where the result will be placed
+ * _tmp:
+ *	register to clobber for temporaries
+ */
+.macro get_rev_var _res:req, _tmp:req
+	mrs	\_tmp, midr_el1
+
+	/*
+	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
+	 * as variant[7:4] and revision[3:0] of x0.
+	 *
+	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
+	 * extract x1[3:0] into x0[3:0] retaining other bits.
+	 */
+	ubfx	\_res, \_tmp, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
+	bfxil	\_res, \_tmp, #MIDR_REV_SHIFT, #MIDR_REV_BITS
+.endm
+
+/*
  * Apply erratum
  *
  * _cpu:
@@ -492,7 +471,7 @@
  * clobbers: x0-x10 (PCS compliant)
  */
 .macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
-	.if (\_chosen & \_get_rev)
+	.if (\_chosen && \_get_rev)
 		mov	x9, x30
 		bl	cpu_get_rev_var
 		mov	x10, x0
@@ -508,8 +487,69 @@
 .endm
 
 /*
- * Helpers to select which revisions errata apply to. Don't leave a link
- * register as the cpu_rev_var_*** will call the ret and we can save on one.
+ * Helpers to report if an erratum applies. Compares the given revision variant
+ * to the given value. Return ERRATA_APPLIES or ERRATA_NOT_APPLIES accordingly.
+ *
+ * _rev_num: the given revision variant. Or
+ * _rev_num_lo,_rev_num_hi: the lower and upper bounds of the revision variant
+ *
+ * in body:
+ *	clobber: x0
+ *	argument: x0 - cpu_rev_var
+ */
+.macro cpu_rev_var_ls _rev_num:req
+	cmp	x0, #\_rev_num
+	cset	x0, ls
+.endm
+
+.macro cpu_rev_var_hs _rev_num:req
+	cmp	x0, #\_rev_num
+	cset	x0, hs
+.endm
+
+.macro cpu_rev_var_range _rev_num_lo:req, _rev_num_hi:req
+	cmp	x0, #\_rev_num_lo
+	mov	x1, #\_rev_num_hi
+	ccmp	x0, x1, #2, hs
+	cset	x0, ls
+.endm
+
+
+#if __clang_major__ < 17
+/*
+ * A problem with clang version < 17 can cause resolving nested
+ * 'cfi_startproc' to fail compilation.
+ * So add a compatibility variant for start and endfunc expansions
+ * to ignore `cfi_startproc` and `cfi_endproc`, this to be used only with
+ * check_errata/reset macros if we build TF-A with clang version < 17
+ */
+
+.macro func_compat _name, _align=2
+	.section .text.asm.\_name, "ax"
+	.type \_name, %function
+	.align \_align
+	\_name:
+#if ENABLE_BTI
+	bti	jc
+#endif
+.endm
+
+/*
+ * This macro is used to mark the end of a function.
+ */
+.macro endfunc_compat _name
+	.size \_name, . - \_name
+.endm
+
+#else
+
+#define func_compat func
+#define endfunc_compat endfunc
+
+#endif /* __clang_major__ < 17 */
+
+/*
+ * Helpers to select which revisions errata apply to.
  *
  * _cpu:
  *	Name of cpu as given to declare_cpu_ops
@@ -525,58 +565,73 @@
  *	Revision to apply to
  *
  * in body:
- *	clobber: x0 to x4
+ *	clobber: x0 to x1
  *	argument: x0 - cpu_rev_var
  */
 .macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
-	func check_erratum_\_cpu\()_\_id
-		mov	x1, #\_rev_num
-		b	cpu_rev_var_ls
-	endfunc check_erratum_\_cpu\()_\_id
+	func_compat check_erratum_\_cpu\()_\_id
+		cpu_rev_var_ls \_rev_num
+		ret
+	endfunc_compat check_erratum_\_cpu\()_\_id
 .endm
 
 .macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
-	func check_erratum_\_cpu\()_\_id
-		mov	x1, #\_rev_num
-		b	cpu_rev_var_hs
-	endfunc check_erratum_\_cpu\()_\_id
+	func_compat check_erratum_\_cpu\()_\_id
+		cpu_rev_var_hs \_rev_num
+		ret
+	endfunc_compat check_erratum_\_cpu\()_\_id
 .endm
 
 .macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
-	func check_erratum_\_cpu\()_\_id
-		mov	x1, #\_rev_num_lo
-		mov	x2, #\_rev_num_hi
-		b	cpu_rev_var_range
-	endfunc check_erratum_\_cpu\()_\_id
+	func_compat check_erratum_\_cpu\()_\_id
+		cpu_rev_var_range \_rev_num_lo, \_rev_num_hi
+		ret
+	endfunc_compat check_erratum_\_cpu\()_\_id
 .endm
 
 .macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
-	func check_erratum_\_cpu\()_\_id
+	func_compat check_erratum_\_cpu\()_\_id
 		.if \_chosen
 			mov	x0, #ERRATA_APPLIES
 		.else
 			mov	x0, #ERRATA_MISSING
 		.endif
 		ret
-	endfunc check_erratum_\_cpu\()_\_id
+	endfunc_compat check_erratum_\_cpu\()_\_id
 .endm
 
-/* provide a shorthand for the name format for annoying errata */
+/*
+ * provide a shorthand for the name format for annoying errata
+ * body: clobber x0 to x4
+ */
 .macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
-	func check_erratum_\_cpu\()_\_id
+	func_compat check_erratum_\_cpu\()_\_id
 .endm
 
 .macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
-	endfunc check_erratum_\_cpu\()_\_id
+	endfunc_compat check_erratum_\_cpu\()_\_id
 .endm
 
-
 /*******************************************************************************
  * CPU reset function wrapper
  ******************************************************************************/
 
 /*
- * Wrapper to automatically apply all reset-time errata. Will end with an isb.
+ * Helper to register a cpu with the errata framework. Begins the definition of
+ * the reset function.
+ *
+ * _cpu:
+ *	Name of cpu as given to declare_cpu_ops
+ */
+.macro cpu_reset_prologue _cpu:req
+	func_compat \_cpu\()_reset_func
+		mov	x15, x30
+		get_rev_var x14, x0
+.endm
+
+/*
+ * Wrapper of the reset function to automatically apply all reset-time errata.
+ * Will end with an isb.
  *
  * _cpu:
  *	Name of cpu as given to declare_cpu_ops
@@ -586,64 +641,15 @@
  *	argument x14 - cpu_rev_var
  */
 .macro cpu_reset_func_start _cpu:req
-	func \_cpu\()_reset_func
-		mov	x15, x30
-		bl	cpu_get_rev_var
-		mov	x14, x0
-
-		/* short circuit the location to avoid searching the list */
-		adrp	x12, \_cpu\()_errata_list_start
-		add	x12, x12, :lo12:\_cpu\()_errata_list_start
-		adrp	x13, \_cpu\()_errata_list_end
-		add	x13, x13, :lo12:\_cpu\()_errata_list_end
-
-	errata_begin:
-		/* if head catches up with end of list, exit */
-		cmp	x12, x13
-		b.eq	errata_end
-
-		ldr	x10, [x12, #ERRATUM_WA_FUNC]
-		/* TODO(errata ABI): check mitigated and checker function fields
-		 * for 0 */
-		ldrb	w11, [x12, #ERRATUM_CHOSEN]
-
-		/* skip if not chosen */
-		cbz	x11, 1f
-		/* skip if runtime erratum */
-		cbz	x10, 1f
-
-		/* put cpu revision in x0 and call workaround */
-		mov	x0, x14
-		blr	x10
-	1:
-		add	x12, x12, #ERRATUM_ENTRY_SIZE
-		b	errata_begin
-	errata_end:
+	/* the func/endfunc macros will change sections. So change the section
+	 * back to the reset function's */
+	.section .text.asm.\_cpu\()_reset_func, "ax"
 .endm
 
 .macro cpu_reset_func_end _cpu:req
 		isb
 		ret	x15
-	endfunc \_cpu\()_reset_func
+	endfunc_compat \_cpu\()_reset_func
 .endm
 
-/*
- * Maintain compatibility with the old scheme of each cpu has its own reporting.
- * TODO remove entirely once all cpus have been converted. This includes the
- * cpu_ops entry, as print_errata_status can call this directly for all cpus
- */
-.macro errata_report_shim _cpu:req
-	#if REPORT_ERRATA
-	func \_cpu\()_errata_report
-		/* normal stack frame for pretty debugging */
-		stp	x29, x30, [sp, #-16]!
-		mov	x29, sp
-
-		bl	generic_errata_report
-
-		ldp	x29, x30, [sp], #16
-		ret
-	endfunc \_cpu\()_errata_report
-	#endif
-.endm
 #endif /* CPU_MACROS_S */
diff --git a/include/lib/cpus/aarch64/dsu_def.h b/include/lib/cpus/aarch64/dsu_def.h
index 577de61..a251bec 100644
--- a/include/lib/cpus/aarch64/dsu_def.h
+++ b/include/lib/cpus/aarch64/dsu_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -39,4 +39,23 @@
  ********************************************************************/
 #define DSU_ERRATA_936184_MASK	(U(0x3) << 15)
 
+#define CPUCFR_EL1		S3_0_C15_C0_0
+/* SCU bit of CPU Configuration Register, EL1 */
+#define SCU_SHIFT		U(2)
+
+#ifndef __ASSEMBLER__
+DEFINE_RENAME_SYSREG_RW_FUNCS(clusterpwrctlr_el1, CLUSTERPWRCTLR_EL1);
+
+/* ---------------------------------------------
+ * controls power features of the cluster
+ * 1. Cache portion power not request
+ * 2. Disable the retention circuit
+ * ---------------------------------------------
+ */
+static inline void dsu_pwr_dwn(void)
+{
+	write_clusterpwrctlr_el1(0);
+	isb();
+}
+#endif
 #endif /* DSU_DEF_H */
diff --git a/include/lib/cpus/aarch64/dsu_macros.S b/include/lib/cpus/aarch64/dsu_macros.S
new file mode 100644
index 0000000..6c8cb69
--- /dev/null
+++ b/include/lib/cpus/aarch64/dsu_macros.S
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2019-2025, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef DSU_MACROS_S
+#define DSU_MACROS_S
+
+#include <asm_macros.S>
+#include <dsu_def.h>
+#include <lib/cpus/errata.h>
+
+.macro check_errata_dsu_798953_impl
+	mov	x2, #ERRATA_APPLIES
+	mov	x3, #ERRATA_NOT_APPLIES
+
+	/* Check if DSU is equal to r0p0 */
+	mrs	x1, CLUSTERIDR_EL1
+
+	/* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+	ubfx	x0, x1, #CLUSTERIDR_REV_SHIFT,\
+			#(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
+	mov	x1, #(0x0 << CLUSTERIDR_REV_SHIFT)
+	cmp	x0, x1
+	csel	x0, x2, x3, EQ
+.endm
+
+.macro errata_dsu_798953_wa_impl
+	/* If erratum applies, disable high-level clock gating */
+	mrs	x0, CLUSTERACTLR_EL1
+	orr	x0, x0, #CLUSTERACTLR_EL1_DISABLE_CLOCK_GATING
+	msr	CLUSTERACTLR_EL1, x0
+.endm
+
+.macro branch_if_scu_not_present _target:req
+	/* Check if the SCU L3 Unit is present on the DSU */
+	mrs	x0, CPUCFR_EL1
+	ubfx	x0, x0, #SCU_SHIFT, #1
+	eor	x0, x0, #1
+	/* If SCU is not present, return without applying patch */
+	cmp	x0, xzr
+	mov	x0, #ERRATA_NOT_APPLIES
+	b.eq	\_target
+.endm
+
+.macro check_errata_dsu_936184_impl
+	mov	x0, #ERRATA_NOT_APPLIES
+	/* Erratum applies only if DSU has the ACP interface */
+	mrs	x1, CLUSTERCFR_EL1
+	ubfx	x1, x1, #CLUSTERCFR_ACP_SHIFT, #1
+	cbz	x1, 1f
+
+	/* If ACP is present, check if DSU is older than r2p0 */
+	mrs	x1, CLUSTERIDR_EL1
+
+	/* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+	ubfx	x2, x1, #CLUSTERIDR_REV_SHIFT,\
+			#(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
+	cmp x2, #(0x2 << CLUSTERIDR_VAR_SHIFT)
+	b.hs	1f
+	mov	x0, #ERRATA_APPLIES
+1:
+.endm
+
+.macro errata_dsu_936184_wa_impl
+	/* If erratum applies, we set a mask to a DSU control register */
+	mrs	x0, CLUSTERACTLR_EL1
+	ldr	x1, =DSU_ERRATA_936184_MASK
+	orr	x0, x0, x1
+	msr	CLUSTERACTLR_EL1, x0
+.endm
+
+.macro check_errata_dsu_2313941_impl
+	mov	x2, #ERRATA_APPLIES
+	mov	x3, #ERRATA_NOT_APPLIES
+
+	/* Check if DSU version is less than or equal to r3p1 */
+	mrs	x1, CLUSTERIDR_EL1
+
+	mov	x0, #ERRATA_NOT_APPLIES
+	/* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+	ubfx	x0, x1, #CLUSTERIDR_REV_SHIFT,\
+			#(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
+	mov	x1, #(0x31 << CLUSTERIDR_REV_SHIFT)
+	cmp	x0, x1
+	csel	x0, x2, x3, LS
+1:
+.endm
+
+.macro errata_dsu_2313941_wa_impl
+	/* If erratum applies, disable high-level clock gating */
+	mrs	x0, CLUSTERACTLR_EL1
+	orr	x0, x0, #CLUSTERACTLR_EL1_DISABLE_SCLK_GATING
+	msr	CLUSTERACTLR_EL1, x0
+.endm
+#endif /* DSU_MACROS_S */
diff --git a/include/lib/cpus/aarch64/neoverse_n2.h b/include/lib/cpus/aarch64/neoverse_n2.h
index f5837d4..e4487c4 100644
--- a/include/lib/cpus/aarch64/neoverse_n2.h
+++ b/include/lib/cpus/aarch64/neoverse_n2.h
@@ -62,9 +62,6 @@
  * CPU Auxiliary Control register specific definitions.
  ******************************************************************************/
 #define NEOVERSE_N2_CPUECTLR2_EL1			S3_0_C15_C1_5
-#define NEOVERSE_N2_CPUECTLR2_EL1_PF_MODE_CNSRV		ULL(9)
-#define CPUECTLR2_EL1_PF_MODE_LSB			U(11)
-#define CPUECTLR2_EL1_PF_MODE_WIDTH			U(4)
 #define CPUECTLR2_EL1_TXREQ_STATIC_FULL 		ULL(0)
 #define CPUECTLR2_EL1_TXREQ_LSB				U(0)
 #define CPUECTLR2_EL1_TXREQ_WIDTH			U(3)
diff --git a/include/lib/cpus/aarch64/neoverse_n_common.h b/include/lib/cpus/aarch64/neoverse_n_common.h
deleted file mode 100644
index 7cb91cd..0000000
--- a/include/lib/cpus/aarch64/neoverse_n_common.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef NEOVERSE_N_COMMON_H
-#define NEOVERSE_N_COMMON_H
-
-/******************************************************************************
- * Neoverse Nx CPU Configuration register definitions
- *****************************************************************************/
-#define CPUCFR_EL1		S3_0_C15_C0_0
-
-/* SCU bit of CPU Configuration Register, EL1 */
-#define SCU_SHIFT		U(2)
-
-#endif /* NEOVERSE_N_COMMON_H */
diff --git a/include/lib/cpus/aarch64/neoverse_v1.h b/include/lib/cpus/aarch64/neoverse_v1.h
index 1e2d7ea..bbba2a7 100644
--- a/include/lib/cpus/aarch64/neoverse_v1.h
+++ b/include/lib/cpus/aarch64/neoverse_v1.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -22,9 +22,6 @@
 #define NEOVERSE_V1_CPUPCR_EL3					S3_6_C15_C8_1
 #define NEOVERSE_V1_CPUECTLR_EL1_BIT_8				(ULL(1) << 8)
 #define NEOVERSE_V1_CPUECTLR_EL1_BIT_53				(ULL(1) << 53)
-#define NEOVERSE_V1_CPUECTLR_EL1_PF_MODE_CNSRV			ULL(3)
-#define CPUECTLR_EL1_PF_MODE_LSB				U(6)
-#define CPUECTLR_EL1_PF_MODE_WIDTH				U(2)
 
 /*******************************************************************************
  * CPU Power Control register specific definitions
diff --git a/include/lib/cpus/aarch64/neoverse_v2.h b/include/lib/cpus/aarch64/neoverse_v2.h
index a0e7130..fc89334 100644
--- a/include/lib/cpus/aarch64/neoverse_v2.h
+++ b/include/lib/cpus/aarch64/neoverse_v2.h
@@ -31,9 +31,6 @@
  * CPU Extended Control register 2 specific definitions.
  ******************************************************************************/
 #define NEOVERSE_V2_CPUECTLR2_EL1			S3_0_C15_C1_5
-#define NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_CNSRV		ULL(9)
-#define NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_LSB		U(11)
-#define NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_WIDTH		U(4)
 #define NEOVERSE_V2_CPUECTLR2_EL1_TXREQ_STATIC_FULL	ULL(0)
 #define NEOVERSE_V2_CPUECTLR2_EL1_TXREQ_LSB		U(0)
 #define NEOVERSE_V2_CPUECTLR2_EL1_TXREQ_WIDTH		U(3)
diff --git a/include/lib/cpus/cpu_ops.h b/include/lib/cpus/cpu_ops.h
index 3fce66a..0b08919 100644
--- a/include/lib/cpus/cpu_ops.h
+++ b/include/lib/cpus/cpu_ops.h
@@ -21,8 +21,6 @@
 
 /* The number of CPU operations allowed */
 #define CPU_MAX_PWR_DWN_OPS		2
-/* Special constant to specify that CPU has no reset function */
-#define CPU_NO_RESET_FUNC		0
 
 #if __aarch64__
 #define CPU_NO_EXTRA1_FUNC		0
@@ -59,7 +57,6 @@
 #define CPU_ERRATA_LIST_END_SIZE	CPU_WORD_SIZE
 /* Fields required to print errata status  */
 #if REPORT_ERRATA
-#define CPU_ERRATA_FUNC_SIZE	CPU_WORD_SIZE
 #define CPU_CPU_STR_SIZE	CPU_WORD_SIZE
 /* BL1 doesn't require mutual exclusion and printed flag. */
 #if defined(IMAGE_BL31) || defined(IMAGE_BL32)
@@ -70,7 +67,6 @@
 #define CPU_ERRATA_PRINTED_SIZE	0
 #endif /* defined(IMAGE_BL31) || defined(IMAGE_BL32) */
 #else
-#define CPU_ERRATA_FUNC_SIZE	0
 #define CPU_CPU_STR_SIZE	0
 #define CPU_ERRATA_LOCK_SIZE	0
 #define CPU_ERRATA_PRINTED_SIZE	0
@@ -101,8 +97,7 @@
 #endif /* __aarch64__ */
 #define CPU_ERRATA_LIST_START	CPU_PWR_DWN_OPS + CPU_PWR_DWN_OPS_SIZE
 #define CPU_ERRATA_LIST_END	CPU_ERRATA_LIST_START + CPU_ERRATA_LIST_START_SIZE
-#define CPU_ERRATA_FUNC		CPU_ERRATA_LIST_END + CPU_ERRATA_LIST_END_SIZE
-#define CPU_CPU_STR		CPU_ERRATA_FUNC + CPU_ERRATA_FUNC_SIZE
+#define CPU_CPU_STR		CPU_ERRATA_LIST_END + CPU_ERRATA_LIST_END_SIZE
 #define CPU_ERRATA_LOCK		CPU_CPU_STR + CPU_CPU_STR_SIZE
 #define CPU_ERRATA_PRINTED	CPU_ERRATA_LOCK + CPU_ERRATA_LOCK_SIZE
 #if __aarch64__
@@ -134,7 +129,6 @@
 	void *errata_list_start;
 	void *errata_list_end;
 #if REPORT_ERRATA
-	void (*errata_func)(void);
 	char *cpu_str;
 #if defined(IMAGE_BL31) || defined(IMAGE_BL32)
 	spinlock_t *errata_lock;
diff --git a/include/lib/cpus/errata.h b/include/lib/cpus/errata.h
index e0ea741..8e28d46 100644
--- a/include/lib/cpus/errata.h
+++ b/include/lib/cpus/errata.h
@@ -9,20 +9,18 @@
 
 #include <lib/cpus/cpu_ops.h>
 
-#define ERRATUM_WA_FUNC_SIZE	CPU_WORD_SIZE
 #define ERRATUM_CHECK_FUNC_SIZE	CPU_WORD_SIZE
 #define ERRATUM_ID_SIZE		4
 #define ERRATUM_CVE_SIZE	2
 #define ERRATUM_CHOSEN_SIZE	1
-#define ERRATUM_MITIGATED_SIZE	1
+#define ERRATUM_ALIGNMENT_SIZE	1
 
-#define ERRATUM_WA_FUNC		0
-#define ERRATUM_CHECK_FUNC	ERRATUM_WA_FUNC + ERRATUM_WA_FUNC_SIZE
+#define ERRATUM_CHECK_FUNC	0
 #define ERRATUM_ID		ERRATUM_CHECK_FUNC + ERRATUM_CHECK_FUNC_SIZE
 #define ERRATUM_CVE		ERRATUM_ID + ERRATUM_ID_SIZE
 #define ERRATUM_CHOSEN		ERRATUM_CVE + ERRATUM_CVE_SIZE
-#define ERRATUM_MITIGATED	ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE
-#define ERRATUM_ENTRY_SIZE	ERRATUM_MITIGATED + ERRATUM_MITIGATED_SIZE
+#define ERRATUM_ALIGNMENT	ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE
+#define ERRATUM_ENTRY_SIZE	ERRATUM_ALIGNMENT + ERRATUM_ALIGNMENT_SIZE
 
 /* Errata status */
 #define ERRATA_NOT_APPLIES	0
@@ -33,22 +31,25 @@
 #include <lib/cassert.h>
 
 void print_errata_status(void);
-void errata_print_msg(unsigned int status, const char *cpu, const char *id);
 
 /*
  * NOTE that this structure will be different on AArch32 and AArch64. The
  * uintptr_t will reflect the change and the alignment will be correct in both.
  */
 struct erratum_entry {
-	uintptr_t (*wa_func)(uint64_t cpu_rev);
 	uintptr_t (*check_func)(uint64_t cpu_rev);
 	/* Will fit CVEs with up to 10 character in the ID field */
 	uint32_t id;
 	/* Denote CVEs with their year or errata with 0 */
 	uint16_t cve;
+	/*
+	 * a bitfield:
+	 * bit 0 - denotes if the erratum is enabled in build.
+	 * bit 1 - denotes if the erratum workaround is split and
+	 * 	   also needs to be implemented at a lower EL.
+	 */
 	uint8_t chosen;
-	/* TODO(errata ABI): placeholder for the mitigated field */
-	uint8_t _mitigated;
+	uint8_t _alignment;
 } __packed;
 
 CASSERT(sizeof(struct erratum_entry) == ERRATUM_ENTRY_SIZE,
@@ -66,10 +67,8 @@
 }
 #endif
 
-#if ERRATA_A520_2938996 || ERRATA_X4_2726228
-unsigned int check_if_affected_core(void);
-#endif
 
+bool check_if_trbe_disable_affected_core(void);
 int check_wa_cve_2024_7881(void);
 bool errata_ich_vmcr_el2_applies(void);
 
@@ -101,4 +100,11 @@
 /* Macro to get CPU revision code for checking errata version compatibility. */
 #define CPU_REV(r, p)		((r << 4) | p)
 
+/* Used for errata that have split workaround */
+#define SPLIT_WA			1
+
+/* chosen bitfield entries */
+#define WA_ENABLED_MASK			BIT(0)
+#define SPLIT_WA_MASK			BIT(1)
+
 #endif /* ERRATA_H */
diff --git a/include/lib/el3_runtime/cpu_data.h b/include/lib/el3_runtime/cpu_data.h
index 2c7b619..8b302b2 100644
--- a/include/lib/el3_runtime/cpu_data.h
+++ b/include/lib/el3_runtime/cpu_data.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -213,7 +213,6 @@
  * APIs for initialising and accessing per-cpu data
  *************************************************************************/
 
-void init_cpu_data_ptr(void);
 void init_cpu_ops(void);
 
 #define get_cpu_data(_m)		   _cpu_data()->_m
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S
index 9f45e38..a424575 100644
--- a/lib/cpus/aarch32/aem_generic.S
+++ b/lib/cpus/aarch32/aem_generic.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -40,16 +40,11 @@
 	b	dcsw_op_all
 endfunc aem_generic_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for AEM. Must follow AAPCS.
- */
-func aem_generic_errata_report
+func aem_generic_reset_func
 	bx	lr
-endfunc aem_generic_errata_report
-#endif
+endfunc aem_generic_reset_func
 
 /* cpu_ops for Base AEM FVP */
-declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, aem_generic_reset_func, \
 	aem_generic_core_pwr_dwn, \
 	aem_generic_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a12.S b/lib/cpus/aarch32/cortex_a12.S
index 8eec27c..b95020e 100644
--- a/lib/cpus/aarch32/cortex_a12.S
+++ b/lib/cpus/aarch32/cortex_a12.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -73,8 +73,6 @@
 	b	cortex_a12_disable_smp
 endfunc cortex_a12_cluster_pwr_dwn
 
-errata_report_shim cortex_a12
-
 declare_cpu_ops cortex_a12, CORTEX_A12_MIDR, \
 	cortex_a12_reset_func, \
 	cortex_a12_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a15.S b/lib/cpus/aarch32/cortex_a15.S
index b41676d..53489ad 100644
--- a/lib/cpus/aarch32/cortex_a15.S
+++ b/lib/cpus/aarch32/cortex_a15.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -172,8 +172,6 @@
 	b	cortex_a15_disable_smp
 endfunc cortex_a15_cluster_pwr_dwn
 
-errata_report_shim cortex_a15
-
 declare_cpu_ops cortex_a15, CORTEX_A15_MIDR, \
 	cortex_a15_reset_func, \
 	cortex_a15_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a17.S b/lib/cpus/aarch32/cortex_a17.S
index 1877570..05e9616 100644
--- a/lib/cpus/aarch32/cortex_a17.S
+++ b/lib/cpus/aarch32/cortex_a17.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -106,8 +106,6 @@
 
 add_erratum_entry cortex_a17, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
 
-errata_report_shim cortex_a17
-
 func cortex_a17_reset_func
 	mov	r5, lr
 	bl	cpu_get_rev_var
diff --git a/lib/cpus/aarch32/cortex_a32.S b/lib/cpus/aarch32/cortex_a32.S
index d08b4ff..c92a8c1 100644
--- a/lib/cpus/aarch32/cortex_a32.S
+++ b/lib/cpus/aarch32/cortex_a32.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -117,8 +117,6 @@
 	b	cortex_a32_disable_smp
 endfunc cortex_a32_cluster_pwr_dwn
 
-errata_report_shim cortex_a32
-
 declare_cpu_ops cortex_a32, CORTEX_A32_MIDR, \
 	cortex_a32_reset_func, \
 	cortex_a32_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a5.S b/lib/cpus/aarch32/cortex_a5.S
index 625ea7b..146eb9c 100644
--- a/lib/cpus/aarch32/cortex_a5.S
+++ b/lib/cpus/aarch32/cortex_a5.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -69,8 +69,6 @@
 	b	cortex_a5_disable_smp
 endfunc cortex_a5_cluster_pwr_dwn
 
-errata_report_shim cortex_a5
-
 declare_cpu_ops cortex_a5, CORTEX_A5_MIDR, \
 	cortex_a5_reset_func, \
 	cortex_a5_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a53.S b/lib/cpus/aarch32/cortex_a53.S
index 89b238a..60be2b3 100644
--- a/lib/cpus/aarch32/cortex_a53.S
+++ b/lib/cpus/aarch32/cortex_a53.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -297,8 +297,6 @@
 	b	cortex_a53_disable_smp
 endfunc cortex_a53_cluster_pwr_dwn
 
-errata_report_shim cortex_a53
-
 declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
 	cortex_a53_reset_func, \
 	cortex_a53_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S
index 1e5377b..d563482 100644
--- a/lib/cpus/aarch32/cortex_a57.S
+++ b/lib/cpus/aarch32/cortex_a57.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -606,8 +606,6 @@
 	b	cortex_a57_disable_ext_debug
 endfunc cortex_a57_cluster_pwr_dwn
 
-errata_report_shim cortex_a57
-
 declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \
 	cortex_a57_reset_func, \
 	cortex_a57_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a7.S b/lib/cpus/aarch32/cortex_a7.S
index 4842ca6..f99ae79 100644
--- a/lib/cpus/aarch32/cortex_a7.S
+++ b/lib/cpus/aarch32/cortex_a7.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -73,8 +73,6 @@
 	b	cortex_a7_disable_smp
 endfunc cortex_a7_cluster_pwr_dwn
 
-errata_report_shim cortex_a7
-
 declare_cpu_ops cortex_a7, CORTEX_A7_MIDR, \
 	cortex_a7_reset_func, \
 	cortex_a7_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S
index 77cf84d..8d399fd 100644
--- a/lib/cpus/aarch32/cortex_a72.S
+++ b/lib/cpus/aarch32/cortex_a72.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -256,8 +256,6 @@
 	b	cortex_a72_disable_ext_debug
 endfunc cortex_a72_cluster_pwr_dwn
 
-errata_report_shim cortex_a72
-
 declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
 	cortex_a72_reset_func, \
 	cortex_a72_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a9.S b/lib/cpus/aarch32/cortex_a9.S
index 1e9757a..dc5ff27 100644
--- a/lib/cpus/aarch32/cortex_a9.S
+++ b/lib/cpus/aarch32/cortex_a9.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -57,8 +57,6 @@
 
 add_erratum_entry cortex_a9, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
 
-errata_report_shim cortex_a9
-
 func cortex_a9_reset_func
 #if IMAGE_BL32 && WORKAROUND_CVE_2017_5715
 	ldr	r0, =wa_cve_2017_5715_bpiall_vbar
diff --git a/lib/cpus/aarch64/a64fx.S b/lib/cpus/aarch64/a64fx.S
index 54c20c3..a53467a 100644
--- a/lib/cpus/aarch64/a64fx.S
+++ b/lib/cpus/aarch64/a64fx.S
@@ -16,15 +16,6 @@
 func a64fx_cluster_pwr_dwn
 endfunc a64fx_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for A64FX. Must follow AAPCS.
- */
-func a64fx_errata_report
-        ret
-endfunc a64fx_errata_report
-#endif
-
         /* ---------------------------------------------
          * This function provides cpu specific
          * register information for crash reporting.
@@ -38,12 +29,15 @@
 a64fx_regs:  /* The ascii list of register names to be reported */
         .asciz  ""
 
+cpu_reset_func_start a64fx
+cpu_reset_func_end a64fx
+
 func a64fx_cpu_reg_dump
         adr     x6, a64fx_regs
         ret
 endfunc a64fx_cpu_reg_dump
 
-declare_cpu_ops a64fx, A64FX_MIDR, CPU_NO_RESET_FUNC \
+declare_cpu_ops a64fx, A64FX_MIDR, a64fx_reset_func \
                 a64fx_core_pwr_dwn, \
                 a64fx_cluster_pwr_dwn
 
diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S
index d47279a..9843943 100644
--- a/lib/cpus/aarch64/aem_generic.S
+++ b/lib/cpus/aarch64/aem_generic.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2019, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,6 +8,8 @@
 #include <asm_macros.S>
 #include <cpu_macros.S>
 
+cpu_reset_prologue aem_generic
+
 func aem_generic_core_pwr_dwn
 	/* ---------------------------------------------
 	 * Disable the Data Cache.
@@ -74,14 +76,8 @@
 	b	dcsw_op_all
 endfunc aem_generic_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for AEM. Must follow AAPCS.
- */
-func aem_generic_errata_report
-	ret
-endfunc aem_generic_errata_report
-#endif
+cpu_reset_func_start aem_generic
+cpu_reset_func_end aem_generic
 
 	/* ---------------------------------------------
 	 * This function provides cpu specific
@@ -103,11 +99,11 @@
 
 
 /* cpu_ops for Base AEM FVP */
-declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, aem_generic_reset_func, \
 	aem_generic_core_pwr_dwn, \
 	aem_generic_cluster_pwr_dwn
 
 /* cpu_ops for Foundation FVP */
-declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, CPU_NO_RESET_FUNC, \
+declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, aem_generic_reset_func, \
 	aem_generic_core_pwr_dwn, \
 	aem_generic_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a35.S b/lib/cpus/aarch64/cortex_a35.S
index 6ffb944..40e6200 100644
--- a/lib/cpus/aarch64/cortex_a35.S
+++ b/lib/cpus/aarch64/cortex_a35.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,6 +11,7 @@
 #include <cpu_macros.S>
 #include <plat_macros.S>
 
+cpu_reset_prologue cortex_a35
 	/* ---------------------------------------------
 	 * Disable L1 data cache and unified L2 cache
 	 * ---------------------------------------------
@@ -111,8 +112,6 @@
 	b	cortex_a35_disable_smp
 endfunc cortex_a35_cluster_pwr_dwn
 
-errata_report_shim cortex_a35
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a35 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a510.S b/lib/cpus/aarch64/cortex_a510.S
index a59b92c..d6cf69a 100644
--- a/lib/cpus/aarch64/cortex_a510.S
+++ b/lib/cpus/aarch64/cortex_a510.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2023-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,7 @@
 #include <common/bl_common.h>
 #include <cortex_a510.h>
 #include <cpu_macros.S>
+#include <dsu_macros.S>
 #include <plat_macros.S>
 
 /* Hardware handled coherency */
@@ -21,6 +22,8 @@
 #error "Cortex-A510 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+cpu_reset_prologue cortex_a510
+
 workaround_reset_start cortex_a510, ERRATUM(1922240), ERRATA_A510_1922240
 	/* Apply the workaround by setting IMP_CMPXACTLR_EL1[11:10] = 0b11. */
 	sysreg_bitfield_insert CORTEX_A510_CMPXACTLR_EL1, CORTEX_A510_CMPXACTLR_EL1_SNPPREFERUNIQUE_DISABLE, \
@@ -180,15 +183,18 @@
 
 check_erratum_ls cortex_a510, ERRATUM(2684597), CPU_REV(1, 2)
 
-/*
- * ERRATA_DSU_2313941 :
- * The errata is defined in dsu_helpers.S but applies to cortex_a510
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a510_2313941, check_errata_dsu_2313941
-.equ erratum_cortex_a510_2313941_wa, errata_dsu_2313941_wa
-add_erratum_entry cortex_a510, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET
+workaround_reset_start cortex_a510, ERRATUM(2313941), ERRATA_DSU_2313941
+	errata_dsu_2313941_wa_impl
+workaround_reset_end cortex_a510, ERRATUM(2313941)
+
+check_erratum_custom_start cortex_a510, ERRATUM(2313941)
+	check_errata_dsu_2313941_impl
+	ret
+check_erratum_custom_end cortex_a510, ERRATUM(2313941)
+
+.global check_erratum_cortex_a510_2971420
+add_erratum_entry cortex_a510, ERRATUM(2971420), ERRATA_A510_2971420
+check_erratum_range cortex_a510, ERRATUM(2971420), CPU_REV(0, 1), CPU_REV(1, 3)
 
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
@@ -204,8 +210,6 @@
 	ret
 endfunc cortex_a510_core_pwr_dwn
 
-errata_report_shim cortex_a510
-
 cpu_reset_func_start cortex_a510
 	/* Disable speculative loads */
 	msr	SSBS, xzr
diff --git a/lib/cpus/aarch64/cortex_a520.S b/lib/cpus/aarch64/cortex_a520.S
index b8f1468..6714a53 100644
--- a/lib/cpus/aarch64/cortex_a520.S
+++ b/lib/cpus/aarch64/cortex_a520.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,7 +11,6 @@
 #include <cpu_macros.S>
 #include <plat_macros.S>
 
-/* .global erratum_cortex_a520_2938996_wa */
 .global check_erratum_cortex_a520_2938996
 
 /* Hardware handled coherency */
@@ -24,6 +23,8 @@
 #error "Cortex A520 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+cpu_reset_prologue cortex_a520
+
 workaround_reset_start cortex_a520, ERRATUM(2630792), ERRATA_A520_2630792
 	sysreg_bit_set CORTEX_A520_CPUACTLR_EL1, BIT(38)
 workaround_reset_end cortex_a520, ERRATUM(2630792)
@@ -36,23 +37,9 @@
 
 check_erratum_ls cortex_a520, ERRATUM(2858100), CPU_REV(0, 1)
 
-workaround_runtime_start cortex_a520, ERRATUM(2938996), ERRATA_A520_2938996, CORTEX_A520_MIDR
-workaround_runtime_end cortex_a520, ERRATUM(2938996)
+add_erratum_entry cortex_a520, ERRATUM(2938996), ERRATA_A520_2938996
 
-check_erratum_custom_start cortex_a520, ERRATUM(2938996)
-
-       /* This erratum needs to be enabled for r0p0 and r0p1.
-        * Check if revision is less than or equal to r0p1.
-        */
-
-#if ERRATA_A520_2938996
-       mov     x1, #1
-       b       cpu_rev_var_ls
-#else
-       mov     x0, #ERRATA_MISSING
-#endif
-       ret
-check_erratum_custom_end cortex_a520, ERRATUM(2938996)
+check_erratum_ls cortex_a520, ERRATUM(2938996), CPU_REV(0, 1)
 
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
@@ -68,8 +55,6 @@
 	ret
 endfunc cortex_a520_core_pwr_dwn
 
-errata_report_shim cortex_a520
-
 cpu_reset_func_start cortex_a520
 	/* Disable speculative loads */
 	msr	SSBS, xzr
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
index e6fb08a..dbfff87 100644
--- a/lib/cpus/aarch64/cortex_a53.S
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -12,6 +12,8 @@
 #include <plat_macros.S>
 #include <lib/cpus/errata.h>
 
+cpu_reset_prologue cortex_a53
+
 	/* ---------------------------------------------
 	 * Disable L1 data cache and unified L2 cache
 	 * ---------------------------------------------
@@ -36,12 +38,12 @@
 /* Due to the nature of the errata it is applied unconditionally when chosen */
 check_erratum_ls cortex_a53, ERRATUM(819472), CPU_REV(0, 1)
 /* erratum workaround is interleaved with generic code */
-add_erratum_entry cortex_a53, ERRATUM(819472), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a53, ERRATUM(819472), ERRATUM_ALWAYS_CHOSEN
 
 /* Due to the nature of the errata it is applied unconditionally when chosen */
 check_erratum_ls cortex_a53, ERRATUM(824069), CPU_REV(0, 2)
 /* erratum workaround is interleaved with generic code */
-add_erratum_entry cortex_a53, ERRATUM(824069), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a53, ERRATUM(824069), ERRATUM_ALWAYS_CHOSEN
 
 workaround_reset_start cortex_a53, ERRATUM(826319), ERRATA_A53_826319
 	mrs	x1, CORTEX_A53_L2ACTLR_EL1
@@ -55,7 +57,7 @@
 /* Due to the nature of the errata it is applied unconditionally when chosen */
 check_erratum_ls cortex_a53, ERRATUM(827319), CPU_REV(0, 2)
 /* erratum workaround is interleaved with generic code */
-add_erratum_entry cortex_a53, ERRATUM(827319), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a53, ERRATUM(827319), ERRATUM_ALWAYS_CHOSEN
 
 check_erratum_custom_start cortex_a53, ERRATUM(835769)
 	cmp	x0, CPU_REV(0, 4)
@@ -78,7 +80,7 @@
 check_erratum_custom_end cortex_a53, ERRATUM(835769)
 
 /* workaround at build time */
-add_erratum_entry cortex_a53, ERRATUM(835769), ERRATA_A53_835769, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a53, ERRATUM(835769), ERRATA_A53_835769
 
 	/*
 	 * Disable the cache non-temporal hint.
@@ -114,7 +116,7 @@
 check_erratum_custom_end cortex_a53, ERRATUM(843419)
 
 /* workaround at build time */
-add_erratum_entry cortex_a53, ERRATUM(843419), ERRATA_A53_843419, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a53, ERRATUM(843419), ERRATA_A53_843419
 
 	/*
 	 * Earlier revisions of the core are affected as well, but don't
@@ -131,7 +133,7 @@
 check_erratum_chosen cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924
 
 /* erratum has no workaround in the cpu. Generic code must take care */
-add_erratum_entry cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924
 
 cpu_reset_func_start cortex_a53
 	/* Enable the SMP bit. */
@@ -199,8 +201,6 @@
 	b	cortex_a53_disable_smp
 endfunc cortex_a53_cluster_pwr_dwn
 
-errata_report_shim cortex_a53
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a53 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S
index 712b6e0..cf91431 100644
--- a/lib/cpus/aarch64/cortex_a55.S
+++ b/lib/cpus/aarch64/cortex_a55.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,7 @@
 #include <common/bl_common.h>
 #include <cortex_a55.h>
 #include <cpu_macros.S>
+#include <dsu_macros.S>
 #include <plat_macros.S>
 
 /* Hardware handled coherency */
@@ -19,23 +20,25 @@
 	.globl cortex_a55_reset_func
 	.globl cortex_a55_core_pwr_dwn
 
-/* ERRATA_DSU_798953:
- * The errata is defined in dsu_helpers.S but applies to cortex_a55
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a55_798953, check_errata_dsu_798953
-.equ erratum_cortex_a55_798953_wa, errata_dsu_798953_wa
-add_erratum_entry cortex_a55, ERRATUM(798953), ERRATA_DSU_798953, APPLY_AT_RESET
+cpu_reset_prologue cortex_a55
 
-/* ERRATA_DSU_936184:
- * The errata is defined in dsu_helpers.S but applies to cortex_a55
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a55_936184, check_errata_dsu_936184
-.equ erratum_cortex_a55_936184_wa, errata_dsu_936184_wa
-add_erratum_entry cortex_a55, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+workaround_reset_start cortex_a55, ERRATUM(798953), ERRATA_DSU_798953
+	errata_dsu_798953_wa_impl
+workaround_reset_end cortex_a55, ERRATUM(798953)
+
+check_erratum_custom_start cortex_a55, ERRATUM(798953)
+	check_errata_dsu_798953_impl
+	ret
+check_erratum_custom_end cortex_a55, ERRATUM(798953)
+
+workaround_reset_start cortex_a55, ERRATUM(936184), ERRATA_DSU_936184
+	errata_dsu_936184_wa_impl
+workaround_reset_end cortex_a55, ERRATUM(936184)
+
+check_erratum_custom_start cortex_a55, ERRATUM(936184)
+	check_errata_dsu_936184_impl
+	ret
+check_erratum_custom_end cortex_a55, ERRATUM(936184)
 
 workaround_reset_start cortex_a55, ERRATUM(768277), ERRATA_A55_768277
 	sysreg_bit_set CORTEX_A55_CPUACTLR_EL1, CORTEX_A55_CPUACTLR_EL1_DISABLE_DUAL_ISSUE
@@ -50,8 +53,7 @@
 
 check_erratum_custom_start cortex_a55, ERRATUM(778703)
 	mov	x16, x30
-	mov	x1, #0x00
-	bl	cpu_rev_var_ls
+	cpu_rev_var_ls	CPU_REV(0, 0)
 	/*
 	 * Check that no private L2 cache is configured
 	 */
@@ -111,13 +113,11 @@
 check_erratum_chosen cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923
 
 /* erratum has no workaround in the cpu. Generic code must take care */
-add_erratum_entry cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923
 
 cpu_reset_func_start cortex_a55
 cpu_reset_func_end cortex_a55
 
-errata_report_shim cortex_a55
-
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ---------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index d2c12d8..adacc5c 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -13,6 +13,8 @@
 #include <cpu_macros.S>
 #include <plat_macros.S>
 
+cpu_reset_prologue cortex_a57
+
 	/* ---------------------------------------------
 	 * Disable L1 data cache and unified L2 cache
 	 * ---------------------------------------------
@@ -81,7 +83,7 @@
 
 /* erratum always worked around, but report it correctly */
 check_erratum_ls cortex_a57, ERRATUM(813419), CPU_REV(0, 0)
-add_erratum_entry cortex_a57, ERRATUM(813419), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a57, ERRATUM(813419), ERRATUM_ALWAYS_CHOSEN
 
 workaround_reset_start cortex_a57, ERRATUM(813420), ERRATA_A57_813420
 	sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_DCC_AS_DCCI
@@ -150,7 +152,7 @@
 
 check_erratum_chosen cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537
 /* erratum has no workaround in the cpu. Generic code must take care */
-add_erratum_entry cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537
 
 workaround_reset_start cortex_a57, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
 #if IMAGE_BL31
@@ -284,8 +286,6 @@
 	b	cortex_a57_disable_ext_debug
 endfunc cortex_a57_cluster_pwr_dwn
 
-errata_report_shim cortex_a57
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a57 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a65.S b/lib/cpus/aarch64/cortex_a65.S
index 666324c..3c32adb 100644
--- a/lib/cpus/aarch64/cortex_a65.S
+++ b/lib/cpus/aarch64/cortex_a65.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <common/debug.h>
 #include <cortex_a65.h>
 #include <cpu_macros.S>
+#include <dsu_macros.S>
 #include <plat_macros.S>
 
 /* Hardware handled coherency */
@@ -22,20 +23,19 @@
 #error "Cortex-A65 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
-/* -------------------------------------------------
- * The CPU Ops reset function for Cortex-A65.
- * Shall clobber: x0-x19
- * -------------------------------------------------
- */
-func cortex_a65_reset_func
-	mov	x19, x30
+cpu_reset_prologue cortex_a65
 
-#if ERRATA_DSU_936184
-	bl	errata_dsu_936184_wa
-#endif
+workaround_reset_start cortex_a65, ERRATUM(936184), ERRATA_DSU_936184
+	errata_dsu_936184_wa_impl
+workaround_reset_end cortex_a65, ERRATUM(936184)
 
-	ret	x19
-endfunc cortex_a65_reset_func
+check_erratum_custom_start cortex_a65, ERRATUM(936184)
+	check_errata_dsu_936184_impl
+	ret
+check_erratum_custom_end cortex_a65, ERRATUM(936184)
+
+cpu_reset_func_start cortex_a65
+cpu_reset_func_end cortex_a65
 
 func cortex_a65_cpu_pwr_dwn
 	mrs	x0, CORTEX_A65_CPUPWRCTLR_EL1
@@ -45,27 +45,6 @@
 	ret
 endfunc cortex_a65_cpu_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Cortex-A65. Must follow AAPCS.
- */
-func cortex_a65_errata_report
-	stp	x8, x30, [sp, #-16]!
-
-	bl	cpu_get_rev_var
-	mov	x8, x0
-
-	/*
-	 * Report all errata. The revision-variant information is passed to
-	 * checking functions of each errata.
-	 */
-	report_errata ERRATA_DSU_936184, cortex_a65, dsu_936184
-
-	ldp	x8, x30, [sp], #16
-	ret
-endfunc cortex_a65_errata_report
-#endif
-
 .section .rodata.cortex_a65_regs, "aS"
 cortex_a65_regs:  /* The ascii list of register names to be reported */
 	.asciz	"cpuectlr_el1", ""
diff --git a/lib/cpus/aarch64/cortex_a65ae.S b/lib/cpus/aarch64/cortex_a65ae.S
index 85d1894..f1a63b0 100644
--- a/lib/cpus/aarch64/cortex_a65ae.S
+++ b/lib/cpus/aarch64/cortex_a65ae.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,6 +11,7 @@
 #include <cortex_a65ae.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include <dsu_macros.S>
 
 /* Hardware handled coherency */
 #if !HW_ASSISTED_COHERENCY
@@ -22,15 +23,16 @@
 #error "Cortex-A65AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
- /*
-  * ERRATA_DSU_936184 :
-  * The errata is defined in dsu_helpers.S but applies to cortex_a65ae
-  * as well. Henceforth creating symbolic names to the already existing errata
-  * workaround functions to get them registered under the Errata Framework.
-  */
-.equ check_erratum_cortex_a65ae_936184, check_errata_dsu_936184
-.equ erratum_cortex_a65ae_936184_wa, errata_dsu_936184_wa
-add_erratum_entry cortex_a65ae, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+cpu_reset_prologue cortex_a65ae
+
+workaround_reset_start cortex_a65ae, ERRATUM(936184), ERRATA_DSU_936184
+	errata_dsu_936184_wa_impl
+workaround_reset_end cortex_a65ae, ERRATUM(936184)
+
+check_erratum_custom_start cortex_a65ae, ERRATUM(936184)
+	check_errata_dsu_936184_impl
+	ret
+check_erratum_custom_end cortex_a65ae, ERRATUM(936184)
 
 cpu_reset_func_start cortex_a65ae
 cpu_reset_func_end cortex_a65ae
@@ -41,8 +43,6 @@
 	ret
 endfunc cortex_a65ae_cpu_pwr_dwn
 
-errata_report_shim cortex_a65ae
-
 .section .rodata.cortex_a65ae_regs, "aS"
 cortex_a65ae_regs:  /* The ascii list of register names to be reported */
 	.asciz	"cpuectlr_el1", ""
diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S
index b9b9204..40e777a 100644
--- a/lib/cpus/aarch64/cortex_a710.S
+++ b/lib/cpus/aarch64/cortex_a710.S
@@ -9,6 +9,7 @@
 #include <common/bl_common.h>
 #include <cortex_a710.h>
 #include <cpu_macros.S>
+#include <dsu_macros.S>
 #include <plat_macros.S>
 #include "wa_cve_2022_23960_bhb_vector.S"
 
@@ -28,12 +29,7 @@
 	wa_cve_2022_23960_bhb_vector_table CORTEX_A710_BHB_LOOP_COUNT, cortex_a710
 #endif /* WORKAROUND_CVE_2022_23960 */
 
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_a710, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
-	sysreg_bit_set CORTEX_A710_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_a710,  CVE(2024, 5660)
-
-check_erratum_ls cortex_a710, CVE(2024, 5660), CPU_REV(2, 1)
+cpu_reset_prologue cortex_a710
 
 workaround_reset_start cortex_a710, ERRATUM(1987031), ERRATA_A710_1987031
 	ldr x0,=0x6
@@ -91,13 +87,6 @@
 
 check_erratum_range cortex_a710, ERRATUM(2055002), CPU_REV(1, 0), CPU_REV(2, 0)
 
-workaround_reset_start cortex_a710, ERRATUM(2058056), ERRATA_A710_2058056
-	sysreg_bitfield_insert CORTEX_A710_CPUECTLR2_EL1, CORTEX_A710_CPUECTLR2_EL1_PF_MODE_CNSRV, \
-		CPUECTLR2_EL1_PF_MODE_LSB, CPUECTLR2_EL1_PF_MODE_WIDTH
-workaround_reset_end cortex_a710, ERRATUM(2058056)
-
-check_erratum_ls cortex_a710, ERRATUM(2058056), CPU_REV(2, 1)
-
 workaround_reset_start cortex_a710, ERRATUM(2081180), ERRATA_A710_2081180
 	ldr	x0,=0x3
 	msr	S3_6_c15_c8_0,x0
@@ -164,6 +153,7 @@
 
 check_erratum_ls cortex_a710, ERRATUM(2282622), CPU_REV(2, 1)
 
+.global erratum_cortex_a710_2291219_wa
 workaround_runtime_start cortex_a710, ERRATUM(2291219), ERRATA_A710_2291219
 	/* Set bit 36 in ACTLR2_EL1 */
 	sysreg_bit_set CORTEX_A710_CPUACTLR2_EL1, CORTEX_A710_CPUACTLR2_EL1_BIT_36
@@ -171,14 +161,14 @@
 
 check_erratum_ls cortex_a710, ERRATUM(2291219), CPU_REV(2, 0)
 
-/*
- * ERRATA_DSU_2313941 is defined in dsu_helpers.S but applies to Cortex-A710 as
- * well. Create a symbollic link to existing errata workaround to get them
- * registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a710_2313941, check_errata_dsu_2313941
-.equ erratum_cortex_a710_2313941_wa, errata_dsu_2313941_wa
-add_erratum_entry cortex_a710, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET
+workaround_reset_start cortex_a710, ERRATUM(2313941), ERRATA_DSU_2313941
+	errata_dsu_2313941_wa_impl
+workaround_reset_end cortex_a710, ERRATUM(2313941)
+
+check_erratum_custom_start cortex_a710, ERRATUM(2313941)
+	check_errata_dsu_2313941_impl
+	ret
+check_erratum_custom_end cortex_a710, ERRATUM(2313941)
 
 workaround_reset_start cortex_a710, ERRATUM(2371105), ERRATA_A710_2371105
 	/* Set bit 40 in CPUACTLR2_EL1 */
@@ -208,6 +198,10 @@
 
 check_erratum_ls cortex_a710, ERRATUM(2778471), CPU_REV(2, 1)
 
+add_erratum_entry cortex_a710, ERRATUM(3701772), ERRATA_A710_3701772
+
+check_erratum_ls cortex_a710, ERRATUM(3701772), CPU_REV(2, 1)
+
 workaround_reset_start cortex_a710, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 #if IMAGE_BL31
 	/*
@@ -220,9 +214,12 @@
 
 check_erratum_chosen cortex_a710, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
-add_erratum_entry cortex_a710, ERRATUM(3701772), ERRATA_A710_3701772, NO_APPLY_AT_RESET
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_a710, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+	sysreg_bit_set CORTEX_A710_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_a710, CVE(2024, 5660)
 
-check_erratum_ls cortex_a710, ERRATUM(3701772), CPU_REV(2, 1)
+check_erratum_ls cortex_a710, CVE(2024, 5660), CPU_REV(2, 1)
 
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
@@ -242,8 +239,6 @@
 	ret
 endfunc cortex_a710_core_pwr_dwn
 
-errata_report_shim cortex_a710
-
 cpu_reset_func_start cortex_a710
 	/* Disable speculative loads */
 	msr	SSBS, xzr
diff --git a/lib/cpus/aarch64/cortex_a715.S b/lib/cpus/aarch64/cortex_a715.S
index 737d7b8..dcad5b7 100644
--- a/lib/cpus/aarch64/cortex_a715.S
+++ b/lib/cpus/aarch64/cortex_a715.S
@@ -28,6 +28,8 @@
 	wa_cve_2022_23960_bhb_vector_table CORTEX_A715_BHB_LOOP_COUNT, cortex_a715
 #endif /* WORKAROUND_CVE_2022_23960 */
 
+cpu_reset_prologue cortex_a715
+
 workaround_reset_start cortex_a715, ERRATUM(2331818), ERRATA_A715_2331818
         sysreg_bit_set CORTEX_A715_CPUACTLR2_EL1, BIT(20)
 workaround_reset_end cortex_a715, ERRATUM(2331818)
@@ -117,6 +119,29 @@
 
 check_erratum_ls cortex_a715, ERRATUM(2728106), CPU_REV(1, 1)
 
+workaround_reset_start cortex_a715, ERRATUM(2804830), ERRATA_A715_2804830
+	/* Workaround changes based on CORE_CACHE_PROTECTIONS field (bit 1) */
+	mrs x0, CORTEX_A715_CPUCFR_EL1
+	tbz x0, #1, wa_2804830_core_cache_prot_false
+
+	/* CORE_CACHE_PROTECTIONS==true */
+	sysreg_bit_set CORTEX_A715_CPUACTLR3_EL1, BIT(2)
+	sysreg_bit_set CORTEX_A715_CPUECTLR_EL1, BIT(23)
+	b wa_2804830_done
+
+	/* CORE_CACHE_PROTECTIONS==false */
+wa_2804830_core_cache_prot_false:
+	sysreg_bit_set CORTEX_A715_CPUECTLR2_EL1, BIT(7)
+
+wa_2804830_done:
+workaround_reset_end cortex_a715, ERRATUM(2804830)
+
+check_erratum_ls cortex_a715, ERRATUM(2804830), CPU_REV(1, 2)
+
+add_erratum_entry cortex_a715, ERRATUM(3699560), ERRATA_A715_3699560
+
+check_erratum_ls cortex_a715, ERRATUM(3699560), CPU_REV(1, 3)
+
 workaround_reset_start cortex_a715, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 #if IMAGE_BL31
 	/*
@@ -129,10 +154,6 @@
 
 check_erratum_chosen cortex_a715, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
-add_erratum_entry cortex_a715, ERRATUM(3699560), ERRATA_A715_3699560, NO_APPLY_AT_RESET
-
-check_erratum_ls cortex_a715, ERRATUM(3699560), CPU_REV(1, 3)
-
 cpu_reset_func_start cortex_a715
 	/* Disable speculative loads */
 	msr	SSBS, xzr
@@ -154,8 +175,6 @@
 	ret
 endfunc cortex_a715_core_pwr_dwn
 
-errata_report_shim cortex_a715
-
 	/* ---------------------------------------------
 	 * This function provides Cortex-A715 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index 997f261..fee28ee 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -15,6 +15,8 @@
 	wa_cve_2022_23960_bhb_vector_table CORTEX_A72_BHB_LOOP_COUNT, cortex_a72
 #endif /* WORKAROUND_CVE_2022_23960 */
 
+cpu_reset_prologue cortex_a72
+
 	/* ---------------------------------------------
 	 * Disable L1 data cache and unified L2 cache
 	 * ---------------------------------------------
@@ -92,7 +94,7 @@
 /* Due to the nature of the errata it is applied unconditionally when chosen */
 check_erratum_chosen cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367
 /* erratum workaround is interleaved with generic code */
-add_erratum_entry cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367
 
 workaround_reset_start cortex_a72, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
 #if IMAGE_BL31
@@ -271,8 +273,6 @@
 	b	cortex_a72_disable_ext_debug
 endfunc cortex_a72_cluster_pwr_dwn
 
-errata_report_shim cortex_a72
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a72 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a720.S b/lib/cpus/aarch64/cortex_a720.S
index e69ec24..2991f93 100644
--- a/lib/cpus/aarch64/cortex_a720.S
+++ b/lib/cpus/aarch64/cortex_a720.S
@@ -22,6 +22,8 @@
 #error "Cortex A720 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+cpu_reset_prologue cortex_a720
+
 .global check_erratum_cortex_a720_3699561
 
 #if WORKAROUND_CVE_2022_23960
@@ -74,7 +76,7 @@
 
 check_erratum_chosen cortex_a720, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
-add_erratum_entry cortex_a720, ERRATUM(3699561), ERRATA_A720_3699561, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a720, ERRATUM(3699561), ERRATA_A720_3699561
 
 check_erratum_ls cortex_a720, ERRATUM(3699561), CPU_REV(0, 2)
 
@@ -98,8 +100,6 @@
 	ret
 endfunc cortex_a720_core_pwr_dwn
 
-errata_report_shim cortex_a720
-
 	/* ---------------------------------------------
 	 * This function provides Cortex A720-specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a725.S b/lib/cpus/aarch64/cortex_a725.S
index 7960521..a8c0db2 100644
--- a/lib/cpus/aarch64/cortex_a725.S
+++ b/lib/cpus/aarch64/cortex_a725.S
@@ -21,9 +21,11 @@
 #error "Cortex-A725 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+cpu_reset_prologue cortex_a725
+
 .global check_erratum_cortex_a725_3699564
 
-add_erratum_entry cortex_a725, ERRATUM(3699564), ERRATA_A725_3699564, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a725, ERRATUM(3699564), ERRATA_A725_3699564
 
 check_erratum_ls cortex_a725, ERRATUM(3699564), CPU_REV(0, 1)
 
@@ -46,8 +48,6 @@
 	ret
 endfunc cortex_a725_core_pwr_dwn
 
-errata_report_shim cortex_a725
-
 	/* ---------------------------------------------
 	 * This function provides Cortex-A725 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index 3a6b922..d1fc6d4 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,8 @@
 #include <cpu_macros.S>
 #include <plat_macros.S>
 
+cpu_reset_prologue cortex_a73
+
 	/* ---------------------------------------------
 	 * Disable L1 data cache
 	 * ---------------------------------------------
@@ -178,9 +180,6 @@
 	b	cortex_a73_disable_smp
 endfunc cortex_a73_cluster_pwr_dwn
 
-
-errata_report_shim cortex_a73
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a73 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 5369f10..13599ca 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,7 @@
 #include <cortex_a75.h>
 #include <cpuamu.h>
 #include <cpu_macros.S>
+#include <dsu_macros.S>
 
 .global check_erratum_cortex_a75_764081
 
@@ -17,6 +18,8 @@
 #error "Cortex-A75 must be compiled with HW_ASSISTED_COHERENCY enabled"
 #endif
 
+cpu_reset_prologue cortex_a75
+
 workaround_reset_start cortex_a75, ERRATUM(764081), ERRATA_A75_764081
 	sysreg_bit_set sctlr_el3, SCTLR_IESB_BIT
 workaround_reset_end cortex_a75, ERRATUM(764081)
@@ -29,23 +32,23 @@
 
 check_erratum_ls cortex_a75, ERRATUM(790748), CPU_REV(0, 0)
 
-/* ERRATA_DSU_798953 :
- * The errata is defined in dsu_helpers.S but applies to cortex_a75
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a75_798953, check_errata_dsu_798953
-.equ erratum_cortex_a75_798953_wa, errata_dsu_798953_wa
-add_erratum_entry cortex_a75, ERRATUM(798953), ERRATA_DSU_798953, APPLY_AT_RESET
+workaround_reset_start cortex_a75, ERRATUM(798953), ERRATA_DSU_798953
+	errata_dsu_798953_wa_impl
+workaround_reset_end cortex_a75, ERRATUM(798953)
 
-/* ERRATA_DSU_936184 :
- * The errata is defined in dsu_helpers.S but applies to cortex_a75
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a75_936184, check_errata_dsu_936184
-.equ erratum_cortex_a75_936184_wa, errata_dsu_936184_wa
-add_erratum_entry cortex_a75, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+check_erratum_custom_start cortex_a75, ERRATUM(798953)
+	check_errata_dsu_798953_impl
+	ret
+check_erratum_custom_end cortex_a75, ERRATUM(798953)
+
+workaround_reset_start cortex_a75, ERRATUM(936184), ERRATA_DSU_936184
+	errata_dsu_936184_wa_impl
+workaround_reset_end cortex_a75, ERRATUM(936184)
+
+check_erratum_custom_start cortex_a75, ERRATUM(936184)
+	check_errata_dsu_936184_impl
+	ret
+check_erratum_custom_end cortex_a75, ERRATUM(936184)
 
 workaround_reset_start cortex_a75, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
 #if IMAGE_BL31
@@ -148,8 +151,6 @@
 	ret
 endfunc cortex_a75_core_pwr_dwn
 
-errata_report_shim cortex_a75
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a75 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index 46a4e3c..822ef05 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,7 @@
 #include <common/bl_common.h>
 #include <cortex_a76.h>
 #include <cpu_macros.S>
+#include <dsu_macros.S>
 #include <plat_macros.S>
 #include <services/arm_arch_svc.h>
 #include "wa_cve_2022_23960_bhb.S"
@@ -29,6 +30,8 @@
 #define ESR_EL3_A64_SMC0	0x5e000000
 #define ESR_EL3_A32_SMC0	0x4e000000
 
+cpu_reset_prologue cortex_a76
+
 #if DYNAMIC_WORKAROUND_CVE_2018_3639
 	/*
 	 * This macro applies the mitigation for CVE-2018-3639.
@@ -344,11 +347,10 @@
 check_erratum_custom_start cortex_a76, ERRATUM(1286807)
 #if ERRATA_A76_1286807
 	mov x0, #ERRATA_APPLIES
-	ret
 #else
-	mov	x1, #0x30
-	b	cpu_rev_var_ls
+	cpu_rev_var_ls	CPU_REV(3, 0)
 #endif
+	ret
 check_erratum_custom_end cortex_a76, ERRATUM(1286807)
 
 workaround_reset_start cortex_a76, ERRATUM(1791580), ERRATA_A76_1791580
@@ -419,35 +421,34 @@
 check_erratum_custom_start cortex_a76, ERRATUM(1165522)
 #if ERRATA_A76_1165522
 	mov	x0, #ERRATA_APPLIES
-	ret
 #else
-	mov	x1, #0x30
-	b	cpu_rev_var_ls
+	cpu_rev_var_ls	CPU_REV(3, 0)
 #endif
+	ret
 check_erratum_custom_end cortex_a76, ERRATUM(1165522)
 
 check_erratum_chosen cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
 /* erratum has no workaround in the cpu. Generic code must take care */
-add_erratum_entry cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
-/* ERRATA_DSU_798953 :
- * The errata is defined in dsu_helpers.S but applies to cortex_a76
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a76_798953, check_errata_dsu_798953
-.equ erratum_cortex_a76_798953_wa, errata_dsu_798953_wa
-add_erratum_entry cortex_a76, ERRATUM(798953), ERRATA_DSU_798953, APPLY_AT_RESET
+workaround_reset_start cortex_a76, ERRATUM(798953), ERRATA_DSU_798953
+	errata_dsu_798953_wa_impl
+workaround_reset_end cortex_a76, ERRATUM(798953)
 
-/* ERRATA_DSU_936184 :
- * The errata is defined in dsu_helpers.S but applies to cortex_a76
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a76_936184, check_errata_dsu_936184
-.equ erratum_cortex_a76_936184_wa, errata_dsu_936184_wa
-add_erratum_entry cortex_a76, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+check_erratum_custom_start cortex_a76, ERRATUM(798953)
+	check_errata_dsu_798953_impl
+	ret
+check_erratum_custom_end cortex_a76, ERRATUM(798953)
+
+workaround_reset_start cortex_a76, ERRATUM(936184), ERRATA_DSU_936184
+	errata_dsu_936184_wa_impl
+workaround_reset_end cortex_a76, ERRATUM(936184)
+
+check_erratum_custom_start cortex_a76, ERRATUM(936184)
+	check_errata_dsu_936184_impl
+	ret
+check_erratum_custom_end cortex_a76, ERRATUM(936184)
 
 cpu_reset_func_start cortex_a76
 
@@ -511,8 +512,6 @@
 	ret
 endfunc cortex_a76_core_pwr_dwn
 
-errata_report_shim cortex_a76
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a76 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a76ae.S b/lib/cpus/aarch64/cortex_a76ae.S
index 08a6ef9..54af9a0 100644
--- a/lib/cpus/aarch64/cortex_a76ae.S
+++ b/lib/cpus/aarch64/cortex_a76ae.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -21,6 +21,8 @@
 #error "Cortex-A76AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+cpu_reset_prologue cortex_a76ae
+
 #if WORKAROUND_CVE_2022_23960
 	wa_cve_2022_23960_bhb_vector_table CORTEX_A76AE_BHB_LOOP_COUNT, cortex_a76ae
 #endif /* WORKAROUND_CVE_2022_23960 */
@@ -41,8 +43,6 @@
 cpu_reset_func_start cortex_a76ae
 cpu_reset_func_end cortex_a76ae
 
-errata_report_shim cortex_a76ae
-
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S
index 78fc496..82a20ec 100644
--- a/lib/cpus/aarch64/cortex_a77.S
+++ b/lib/cpus/aarch64/cortex_a77.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -17,6 +17,8 @@
 #error "Cortex-A77 must be compiled with HW_ASSISTED_COHERENCY enabled"
 #endif
 
+cpu_reset_prologue cortex_a77
+
 /* 64-bit only core */
 #if CTX_INCLUDE_AARCH32_REGS == 1
 #error "Cortex-A77 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
@@ -26,18 +28,10 @@
 	wa_cve_2022_23960_bhb_vector_table CORTEX_A77_BHB_LOOP_COUNT, cortex_a77
 #endif /* WORKAROUND_CVE_2022_23960 */
 
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_a77, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
-	sysreg_bit_set CORTEX_A77_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_a77, CVE(2024, 5660)
-
-check_erratum_ls cortex_a77, CVE(2024, 5660), CPU_REV(1, 1)
-
 workaround_reset_start cortex_a77, ERRATUM(1508412), ERRATA_A77_1508412
 	/* move cpu revision in again and compare against r0p0 */
 	mov	x0, x7
-	mov	x1, #CPU_REV(0, 0)
-	bl	cpu_rev_var_ls
+	cpu_rev_var_ls	CPU_REV(0, 0)
 	cbz	x0, 1f
 
 	ldr	x0, =0x0
@@ -149,6 +143,13 @@
 
 check_erratum_chosen cortex_a77, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_a77, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+	sysreg_bit_set CORTEX_A77_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_a77, CVE(2024, 5660)
+
+check_erratum_ls cortex_a77, CVE(2024, 5660), CPU_REV(1, 1)
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A77. Must follow AAPCS.
 	 * -------------------------------------------------
@@ -174,7 +175,6 @@
 	ret
 endfunc cortex_a77_core_pwr_dwn
 
-errata_report_shim cortex_a77
 	/* ---------------------------------------------
 	 * This function provides Cortex-A77 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
index 917643d..b166823 100644
--- a/lib/cpus/aarch64/cortex_a78.S
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -24,12 +24,7 @@
 	wa_cve_2022_23960_bhb_vector_table CORTEX_A78_BHB_LOOP_COUNT, cortex_a78
 #endif /* WORKAROUND_CVE_2022_23960 */
 
-/* Disable hardware page aggregation.Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_a78, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
-	sysreg_bit_set CORTEX_A78_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_a78, CVE(2024, 5660)
-
-check_erratum_ls cortex_a78, CVE(2024, 5660), CPU_REV(1, 2)
+cpu_reset_prologue cortex_a78
 
 workaround_reset_start cortex_a78, ERRATUM(1688305), ERRATA_A78_1688305
 	sysreg_bit_set CORTEX_A78_ACTLR2_EL1, CORTEX_A78_ACTLR2_EL1_BIT_1
@@ -104,16 +99,6 @@
 
 check_erratum_ls cortex_a78, ERRATUM(1952683), CPU_REV(0, 0)
 
-workaround_reset_start cortex_a78, ERRATUM(2132060), ERRATA_A78_2132060
-	/* Apply the workaround. */
-	mrs	x1, CORTEX_A78_CPUECTLR_EL1
-	mov	x0, #CORTEX_A78_CPUECTLR_EL1_PF_MODE_CNSRV
-	bfi	x1, x0, #CPUECTLR_EL1_PF_MODE_LSB, #CPUECTLR_EL1_PF_MODE_WIDTH
-	msr	CORTEX_A78_CPUECTLR_EL1, x1
-workaround_reset_end cortex_a78, ERRATUM(2132060)
-
-check_erratum_ls cortex_a78, ERRATUM(2132060), CPU_REV(1, 2)
-
 workaround_reset_start cortex_a78, ERRATUM(2242635), ERRATA_A78_2242635
 	ldr	x0, =0x5
 	msr	S3_6_c15_c8_0, x0 /* CPUPSELR_EL3 */
@@ -174,6 +159,13 @@
 
 check_erratum_chosen cortex_a78, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_a78, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+	sysreg_bit_set CORTEX_A78_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_a78, CVE(2024, 5660)
+
+check_erratum_ls cortex_a78, CVE(2024, 5660), CPU_REV(1, 2)
+
 cpu_reset_func_start cortex_a78
 #if ENABLE_FEAT_AMU
 	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
@@ -205,8 +197,6 @@
 	ret
 endfunc cortex_a78_core_pwr_dwn
 
-errata_report_shim cortex_a78
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a78 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a78_ae.S b/lib/cpus/aarch64/cortex_a78_ae.S
index 71639fe..63bc936 100644
--- a/lib/cpus/aarch64/cortex_a78_ae.S
+++ b/lib/cpus/aarch64/cortex_a78_ae.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
  * Copyright (c) 2021-2023, NVIDIA Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -22,12 +22,7 @@
 	wa_cve_2022_23960_bhb_vector_table CORTEX_A78_AE_BHB_LOOP_COUNT, cortex_a78_ae
 #endif /* WORKAROUND_CVE_2022_23960 */
 
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_a78_ae, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
-	sysreg_bit_set CORTEX_A78_AE_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_a78_ae, CVE(2024, 5660)
-
-check_erratum_ls cortex_a78_ae, CVE(2024, 5660), CPU_REV(0, 3)
+cpu_reset_prologue cortex_a78_ae
 
 workaround_reset_start cortex_a78_ae, ERRATUM(1941500), ERRATA_A78_AE_1941500
 	sysreg_bit_set CORTEX_A78_AE_CPUECTLR_EL1, CORTEX_A78_AE_CPUECTLR_EL1_BIT_8
@@ -103,6 +98,13 @@
 
 check_erratum_chosen cortex_a78_ae, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_a78_ae, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+	sysreg_bit_set CORTEX_A78_AE_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_a78_ae, CVE(2024, 5660)
+
+check_erratum_ls cortex_a78_ae, CVE(2024, 5660), CPU_REV(0, 3)
+
 cpu_reset_func_start cortex_a78_ae
 #if ENABLE_FEAT_AMU
 	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
@@ -135,8 +137,6 @@
 	ret
 endfunc cortex_a78_ae_core_pwr_dwn
 
-errata_report_shim cortex_a78_ae
-
 	/* -------------------------------------------------------
 	 * This function provides cortex_a78_ae specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a78c.S b/lib/cpus/aarch64/cortex_a78c.S
index 2b6ec83..19d988e 100644
--- a/lib/cpus/aarch64/cortex_a78c.S
+++ b/lib/cpus/aarch64/cortex_a78c.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -21,12 +21,7 @@
 	wa_cve_2022_23960_bhb_vector_table CORTEX_A78C_BHB_LOOP_COUNT, cortex_a78c
 #endif /* WORKAROUND_CVE_2022_23960 */
 
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_a78c, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
-	sysreg_bit_set CORTEX_A78C_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_a78c, CVE(2024, 5660)
-
-check_erratum_ls cortex_a78c, CVE(2024, 5660), CPU_REV(0, 2)
+cpu_reset_prologue cortex_a78c
 
 workaround_reset_start cortex_a78c, ERRATUM(1827430), ERRATA_A78C_1827430
 	/* Disable allocation of splintered pages in the L2 TLB */
@@ -42,18 +37,6 @@
 
 check_erratum_ls cortex_a78c, ERRATUM(1827440), CPU_REV(0, 0)
 
-workaround_reset_start cortex_a78c, ERRATUM(2132064), ERRATA_A78C_2132064
-	/* --------------------------------------------------------
-	 * Place the data prefetcher in the most conservative mode
-	 * to reduce prefetches by writing the following bits to
-	 * the value indicated: ecltr[7:6], PF_MODE = 2'b11
-	 * --------------------------------------------------------
-	 */
-	sysreg_bit_set CORTEX_A78C_CPUECTLR_EL1, (CORTEX_A78C_CPUECTLR_EL1_BIT_6 | CORTEX_A78C_CPUECTLR_EL1_BIT_7)
-workaround_reset_end cortex_a78c, ERRATUM(2132064)
-
-check_erratum_range cortex_a78c, ERRATUM(2132064), CPU_REV(0, 1), CPU_REV(0, 2)
-
 workaround_reset_start cortex_a78c, ERRATUM(2242638), ERRATA_A78C_2242638
 	ldr	x0, =0x5
 	msr	CORTEX_A78C_IMP_CPUPSELR_EL3, x0
@@ -125,11 +108,16 @@
 #endif /* IMAGE_BL31 */
 workaround_reset_end cortex_a78c, CVE(2022, 23960)
 
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_a78c, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+	sysreg_bit_set CORTEX_A78C_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_a78c, CVE(2024, 5660)
+
+check_erratum_ls cortex_a78c, CVE(2024, 5660), CPU_REV(0, 2)
+
 cpu_reset_func_start cortex_a78c
 cpu_reset_func_end cortex_a78c
 
-errata_report_shim cortex_a78c
-
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_gelas.S b/lib/cpus/aarch64/cortex_gelas.S
index dc704f2..61026bf 100644
--- a/lib/cpus/aarch64/cortex_gelas.S
+++ b/lib/cpus/aarch64/cortex_gelas.S
@@ -21,6 +21,8 @@
 #error "Gelas supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+cpu_reset_prologue cortex_gelas
+
 cpu_reset_func_start cortex_gelas
 	/* ----------------------------------------------------
 	 * Disable speculative loads
@@ -58,8 +60,6 @@
 	ret
 endfunc cortex_gelas_core_pwr_dwn
 
-errata_report_shim cortex_gelas
-
 	/* ---------------------------------------------
 	 * This function provides Gelas specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_x1.S b/lib/cpus/aarch64/cortex_x1.S
index 82bdadc..cb759cc 100644
--- a/lib/cpus/aarch64/cortex_x1.S
+++ b/lib/cpus/aarch64/cortex_x1.S
@@ -23,12 +23,7 @@
 	wa_cve_2022_23960_bhb_vector_table CORTEX_X1_BHB_LOOP_COUNT, cortex_x1
 #endif /* WORKAROUND_CVE_2022_23960 */
 
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_x1, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
-	sysreg_bit_set CORTEX_X1_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_x1, CVE(2024, 5660)
-
-check_erratum_ls cortex_x1, CVE(2024, 5660), CPU_REV(1, 2)
+cpu_reset_prologue cortex_x1
 
 workaround_reset_start cortex_x1, ERRATUM(1688305), ERRATA_X1_1688305
 	sysreg_bit_set CORTEX_X1_ACTLR2_EL1, BIT(1)
@@ -60,6 +55,13 @@
 #endif /* IMAGE_BL31 */
 workaround_reset_end cortex_x1, CVE(2022, 23960)
 
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_x1, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+	sysreg_bit_set CORTEX_X1_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_x1, CVE(2024, 5660)
+
+check_erratum_ls cortex_x1, CVE(2024, 5660), CPU_REV(1, 2)
+
 cpu_reset_func_start cortex_x1
 cpu_reset_func_end cortex_x1
 
@@ -73,8 +75,6 @@
 	ret
 endfunc cortex_x1_core_pwr_dwn
 
-errata_report_shim cortex_x1
-
        /* ---------------------------------------------
 	* This function provides Cortex X1 specific
 	* register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S
index c226d51..8af6867 100644
--- a/lib/cpus/aarch64/cortex_x2.S
+++ b/lib/cpus/aarch64/cortex_x2.S
@@ -9,6 +9,7 @@
 #include <common/bl_common.h>
 #include <cortex_x2.h>
 #include <cpu_macros.S>
+#include <dsu_macros.S>
 #include <plat_macros.S>
 #include "wa_cve_2022_23960_bhb_vector.S"
 
@@ -24,20 +25,11 @@
 
 .global check_erratum_cortex_x2_3701772
 
-add_erratum_entry cortex_x2, ERRATUM(3701772), ERRATA_X2_3701772, NO_APPLY_AT_RESET
-
-check_erratum_ls cortex_x2, ERRATUM(3701772), CPU_REV(2, 1)
-
 #if WORKAROUND_CVE_2022_23960
 	wa_cve_2022_23960_bhb_vector_table CORTEX_X2_BHB_LOOP_COUNT, cortex_x2
 #endif /* WORKAROUND_CVE_2022_23960 */
 
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_x2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
-	sysreg_bit_set CORTEX_X2_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_x2, CVE(2024, 5660)
-
-check_erratum_ls cortex_x2, CVE(2024, 5660), CPU_REV(2, 1)
+cpu_reset_prologue cortex_x2
 
 workaround_reset_start cortex_x2, ERRATUM(2002765), ERRATA_X2_2002765
 	ldr	x0, =0x6
@@ -58,13 +50,6 @@
 
 check_erratum_ls cortex_x2, ERRATUM(2017096), CPU_REV(2, 0)
 
-workaround_reset_start cortex_x2, ERRATUM(2058056), ERRATA_X2_2058056
-	sysreg_bitfield_insert CORTEX_X2_CPUECTLR2_EL1, CORTEX_X2_CPUECTLR2_EL1_PF_MODE_CNSRV, \
-	CORTEX_X2_CPUECTLR2_EL1_PF_MODE_SHIFT, CORTEX_X2_CPUECTLR2_EL1_PF_MODE_WIDTH
-workaround_reset_end cortex_x2, ERRATUM(2058056)
-
-check_erratum_ls cortex_x2, ERRATUM(2058056), CPU_REV(2, 1)
-
 workaround_reset_start cortex_x2, ERRATUM(2081180), ERRATA_X2_2081180
 	/* Apply instruction patching sequence */
 	ldr	x0, =0x3
@@ -124,6 +109,15 @@
 
 check_erratum_ls cortex_x2, ERRATUM(2282622), CPU_REV(2, 1)
 
+workaround_reset_start cortex_x2, ERRATUM(2313941), ERRATA_DSU_2313941
+	errata_dsu_2313941_wa_impl
+workaround_reset_end cortex_x2, ERRATUM(2313941)
+
+check_erratum_custom_start cortex_x2, ERRATUM(2313941)
+	check_errata_dsu_2313941_impl
+	ret
+check_erratum_custom_end cortex_x2, ERRATUM(2313941)
+
 workaround_reset_start cortex_x2, ERRATUM(2371105), ERRATA_X2_2371105
 	/* Set bit 40 in CPUACTLR2_EL1 */
 	sysreg_bit_set CORTEX_X2_CPUACTLR2_EL1, CORTEX_X2_CPUACTLR2_EL1_BIT_40
@@ -152,6 +146,10 @@
 
 check_erratum_ls cortex_x2, ERRATUM(2778471), CPU_REV(2, 1)
 
+add_erratum_entry cortex_x2, ERRATUM(3701772), ERRATA_X2_3701772
+
+check_erratum_ls cortex_x2, ERRATUM(3701772), CPU_REV(2, 1)
+
 workaround_reset_start cortex_x2, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 #if IMAGE_BL31
 	/*
@@ -164,15 +162,12 @@
 
 check_erratum_chosen cortex_x2, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
-/*
- * ERRATA_DSU_2313941 :
- * The errata is defined in dsu_helpers.S but applies to cortex_x2
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_x2_2313941, check_errata_dsu_2313941
-.equ erratum_cortex_x2_2313941_wa, errata_dsu_2313941_wa
-add_erratum_entry cortex_x2, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_x2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+	sysreg_bit_set CORTEX_X2_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_x2, CVE(2024, 5660)
+
+check_erratum_ls cortex_x2, CVE(2024, 5660), CPU_REV(2, 1)
 
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
@@ -190,8 +185,6 @@
 	ret
 endfunc cortex_x2_core_pwr_dwn
 
-errata_report_shim cortex_x2
-
 cpu_reset_func_start cortex_x2
 	/* Disable speculative loads */
 	msr	SSBS, xzr
diff --git a/lib/cpus/aarch64/cortex_x3.S b/lib/cpus/aarch64/cortex_x3.S
index 2d2caf1..dd01789 100644
--- a/lib/cpus/aarch64/cortex_x3.S
+++ b/lib/cpus/aarch64/cortex_x3.S
@@ -24,27 +24,11 @@
 
 .global check_erratum_cortex_x3_3701769
 
-add_erratum_entry cortex_x3, ERRATUM(3701769), ERRATA_X3_3701769, NO_APPLY_AT_RESET
-
-check_erratum_ls cortex_x3, ERRATUM(3701769), CPU_REV(1, 2)
-
 #if WORKAROUND_CVE_2022_23960
 	wa_cve_2022_23960_bhb_vector_table CORTEX_X3_BHB_LOOP_COUNT, cortex_x3
 #endif /* WORKAROUND_CVE_2022_23960 */
 
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_x3, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
-	sysreg_bit_set CORTEX_X3_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_x3, CVE(2024, 5660)
-
-check_erratum_ls cortex_x3, CVE(2024, 5660), CPU_REV(1, 2)
-
-workaround_reset_start cortex_x3, ERRATUM(2070301), ERRATA_X3_2070301
-	sysreg_bitfield_insert CORTEX_X3_CPUECTLR2_EL1, CORTEX_X3_CPUECTLR2_EL1_PF_MODE_CNSRV, \
-	CORTEX_X3_CPUECTLR2_EL1_PF_MODE_LSB, CORTEX_X3_CPUECTLR2_EL1_PF_MODE_WIDTH
-workaround_reset_end cortex_x3, ERRATUM(2070301)
-
-check_erratum_ls cortex_x3, ERRATUM(2070301), CPU_REV(1, 2)
+cpu_reset_prologue cortex_x3
 
 workaround_reset_start cortex_x3, ERRATUM(2266875), ERRATA_X3_2266875
         sysreg_bit_set CORTEX_X3_CPUACTLR_EL1, BIT(22)
@@ -58,6 +42,7 @@
 
 check_erratum_ls cortex_x3, ERRATUM(2302506), CPU_REV(1, 1)
 
+.global erratum_cortex_x3_2313909_wa
 workaround_runtime_start cortex_x3, ERRATUM(2313909), ERRATA_X3_2313909
 	sysreg_bit_set	CORTEX_X3_CPUACTLR2_EL1, CORTEX_X3_CPUACTLR2_EL1_BIT_36
 workaround_runtime_end cortex_x3, ERRATUM(2313909), NO_ISB
@@ -109,6 +94,10 @@
 
 check_erratum_ls cortex_x3, ERRATUM(2779509), CPU_REV(1, 1)
 
+add_erratum_entry cortex_x3, ERRATUM(3701769), ERRATA_X3_3701769
+
+check_erratum_ls cortex_x3, ERRATUM(3701769), CPU_REV(1, 2)
+
 workaround_reset_start cortex_x3, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 #if IMAGE_BL31
 	override_vector_table wa_cve_vbar_cortex_x3
@@ -117,6 +106,13 @@
 
 check_erratum_chosen cortex_x3, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_x3, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+	sysreg_bit_set CORTEX_X3_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_x3, CVE(2024, 5660)
+
+check_erratum_ls cortex_x3, CVE(2024, 5660), CPU_REV(1, 2)
+
 workaround_reset_start cortex_x3, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
 	/* ---------------------------------
 	 * Sets BIT41 of CPUACTLR6_EL1 which
@@ -149,8 +145,6 @@
 	ret
 endfunc cortex_x3_core_pwr_dwn
 
-errata_report_shim cortex_x3
-
 	/* ---------------------------------------------
 	 * This function provides Cortex-X3-
 	 * specific register information for crash
diff --git a/lib/cpus/aarch64/cortex_x4.S b/lib/cpus/aarch64/cortex_x4.S
index 6cb7726..74687a9 100644
--- a/lib/cpus/aarch64/cortex_x4.S
+++ b/lib/cpus/aarch64/cortex_x4.S
@@ -22,6 +22,8 @@
 #error "Cortex X4 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+cpu_reset_prologue cortex_x4
+
 .global check_erratum_cortex_x4_2726228
 .global check_erratum_cortex_x4_3701758
 
@@ -29,30 +31,9 @@
         wa_cve_2022_23960_bhb_vector_table CORTEX_X4_BHB_LOOP_COUNT, cortex_x4
 #endif /* WORKAROUND_CVE_2022_23960 */
 
-workaround_runtime_start cortex_x4, ERRATUM(2726228), ERRATA_X4_2726228, CORTEX_X4_MIDR
-workaround_runtime_end cortex_x4, ERRATUM(2726228)
+add_erratum_entry cortex_x4, ERRATUM(2726228), ERRATA_X4_2726228
 
-check_erratum_custom_start cortex_x4, ERRATUM(2726228)
-
-	/* This erratum needs to be enabled for r0p0 and r0p1.
-	 * Check if revision is less than or equal to r0p1.
-	 */
-
-#if ERRATA_X4_2726228
-	mov	x1, #1
-	b	cpu_rev_var_ls
-#else
-	mov	x0, #ERRATA_MISSING
-#endif
-	ret
-check_erratum_custom_end cortex_x4, ERRATUM(2726228)
-
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_x4, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
-	sysreg_bit_set CORTEX_X4_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_x4, CVE(2024, 5660)
-
-check_erratum_ls cortex_x4, CVE(2024, 5660), CPU_REV(0, 2)
+check_erratum_ls cortex_x4, ERRATUM(2726228), CPU_REV(0, 1)
 
 workaround_runtime_start cortex_x4, ERRATUM(2740089), ERRATA_X4_2740089
 	/* dsb before isb of power down sequence */
@@ -112,6 +93,10 @@
 
 check_erratum_ls cortex_x4, ERRATUM(3076789), CPU_REV(0, 1)
 
+add_erratum_entry cortex_x4, ERRATUM(3701758), ERRATA_X4_3701758
+
+check_erratum_ls cortex_x4, ERRATUM(3701758), CPU_REV(0, 3)
+
 workaround_reset_start cortex_x4, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 #if IMAGE_BL31
 	/*
@@ -124,6 +109,13 @@
 
 check_erratum_chosen cortex_x4, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_x4, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+	sysreg_bit_set CORTEX_X4_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_x4, CVE(2024, 5660)
+
+check_erratum_ls cortex_x4, CVE(2024, 5660), CPU_REV(0, 2)
+
 workaround_reset_start cortex_x4, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
 	/* ---------------------------------
 	 * Sets BIT41 of CPUACTLR6_EL1 which
@@ -135,10 +127,6 @@
 
 check_erratum_chosen cortex_x4, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
 
-add_erratum_entry cortex_x4, ERRATUM(3701758), ERRATA_X4_3701758, NO_APPLY_AT_RESET
-
-check_erratum_ls cortex_x4, ERRATUM(3701758), CPU_REV(0, 3)
-
 cpu_reset_func_start cortex_x4
 	/* Disable speculative loads */
 	msr	SSBS, xzr
@@ -161,8 +149,6 @@
 	ret
 endfunc cortex_x4_core_pwr_dwn
 
-errata_report_shim cortex_x4
-
 	/* ---------------------------------------------
 	 * This function provides Cortex X4-specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_x925.S b/lib/cpus/aarch64/cortex_x925.S
index a0fedb0..7dec375 100644
--- a/lib/cpus/aarch64/cortex_x925.S
+++ b/lib/cpus/aarch64/cortex_x925.S
@@ -21,9 +21,9 @@
 #error "Cortex-X925 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
-.global check_erratum_cortex_x925_3701747
+cpu_reset_prologue cortex_x925
 
-add_erratum_entry cortex_x925, ERRATUM(3701747), ERRATA_X925_3701747, NO_APPLY_AT_RESET
+add_erratum_entry cortex_x925, ERRATUM(3701747), ERRATA_X925_3701747
 
 check_erratum_ls cortex_x925, ERRATUM(3701747), CPU_REV(0, 1)
 
@@ -79,8 +79,6 @@
 	ret
 endfunc cortex_x925_core_pwr_dwn
 
-errata_report_shim cortex_x925
-
 	/* ---------------------------------------------
 	 * This function provides Cortex-X925 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 9c3c025..da83cfd 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -14,47 +14,6 @@
 #include <lib/cpus/errata.h>
 #include <lib/el3_runtime/cpu_data.h>
 
- /* Reset fn is needed in BL at reset vector */
-#if defined(IMAGE_BL1) || defined(IMAGE_BL31) ||	\
-	(defined(IMAGE_BL2) && RESET_TO_BL2)
-	/*
-	 * The reset handler common to all platforms.  After a matching
-	 * cpu_ops structure entry is found, the correponding reset_handler
-	 * in the cpu_ops is invoked.
-	 * Clobbers: x0 - x19, x30
-	 */
-	.globl	reset_handler
-func reset_handler
-	mov	x19, x30
-
-	/* The plat_reset_handler can clobber x0 - x18, x30 */
-	bl	plat_reset_handler
-
-	/* Get the matching cpu_ops pointer */
-	bl	get_cpu_ops_ptr
-
-#if ENABLE_ASSERTIONS
-	/*
-	 * Assert if invalid cpu_ops obtained. If this is not valid, it may
-	 * suggest that the proper CPU file hasn't been included.
-	 */
-	cmp	x0, #0
-	ASM_ASSERT(ne)
-#endif
-
-	/* Get the cpu_ops reset handler */
-	ldr	x2, [x0, #CPU_RESET_FUNC]
-	mov	x30, x19
-	cbz	x2, 1f
-
-	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
-	br	x2
-1:
-	ret
-endfunc reset_handler
-
-#endif
-
 #ifdef IMAGE_BL31 /* The power down core and cluster is needed only in  BL31 */
 	/*
 	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
@@ -213,83 +172,24 @@
 	b	1b
 error_exit:
 #endif
+#if ENABLE_ASSERTIONS
+	/*
+	 * Assert if invalid cpu_ops obtained. If this is not valid, it may
+	 * suggest that the proper CPU file hasn't been included.
+	 */
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif
 	ret
 endfunc get_cpu_ops_ptr
 
-/*
- * Extract CPU revision and variant, and combine them into a single numeric for
- * easier comparison.
- */
 	.globl	cpu_get_rev_var
 func cpu_get_rev_var
-	mrs	x1, midr_el1
-
-	/*
-	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
-	 * as variant[7:4] and revision[3:0] of x0.
-	 *
-	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
-	 * extract x1[3:0] into x0[3:0] retaining other bits.
-	 */
-	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
-	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
+	get_rev_var x0, x1
 	ret
 endfunc cpu_get_rev_var
 
 /*
- * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
- * application purposes. If the revision-variant is less than or same as a given
- * value, indicates that errata applies; otherwise not.
- *
- * Shall clobber: x0-x3
- */
-	.globl	cpu_rev_var_ls
-func cpu_rev_var_ls
-	mov	x2, #ERRATA_APPLIES
-	mov	x3, #ERRATA_NOT_APPLIES
-	cmp	x0, x1
-	csel	x0, x2, x3, ls
-	ret
-endfunc cpu_rev_var_ls
-
-/*
- * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
- * application purposes. If the revision-variant is higher than or same as a
- * given value, indicates that errata applies; otherwise not.
- *
- * Shall clobber: x0-x3
- */
-	.globl	cpu_rev_var_hs
-func cpu_rev_var_hs
-	mov	x2, #ERRATA_APPLIES
-	mov	x3, #ERRATA_NOT_APPLIES
-	cmp	x0, x1
-	csel	x0, x2, x3, hs
-	ret
-endfunc cpu_rev_var_hs
-
-/*
- * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for errata
- * application purposes. If the revision-variant is between or includes the given
- * values, this indicates that errata applies; otherwise not.
- *
- * Shall clobber: x0-x4
- */
-	.globl	cpu_rev_var_range
-func cpu_rev_var_range
-	mov	x3, #ERRATA_APPLIES
-	mov	x4, #ERRATA_NOT_APPLIES
-	cmp	x0, x1
-	csel	x1, x3, x4, hs
-	cbz	x1, 1f
-	cmp	x0, x2
-	csel	x1, x3, x4, ls
-1:
-	mov	x0, x1
-	ret
-endfunc cpu_rev_var_range
-
-/*
  * int check_wa_cve_2017_5715(void);
  *
  * This function returns:
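
The cpu_rev_var_ls/hs/range helpers and the open-coded MIDR extraction removed above are superseded by the get_rev_var macro and the generated per-erratum check functions, which keep the same revision-variant encoding. A minimal C sketch of that encoding and the "lower-or-same" comparison, for reference only (not code from this patch; ERRATA_APPLIES and ERRATA_NOT_APPLIES are the existing status values):

static unsigned int pack_rev_var(unsigned long midr)
{
	unsigned int var = (unsigned int)(midr >> 20) & 0xfU;	/* MIDR_EL1.Variant[23:20] */
	unsigned int rev = (unsigned int)midr & 0xfU;		/* MIDR_EL1.Revision[3:0] */

	/* e.g. r1p2 packs as 0x12, the value a CPU_REV(1, 2) bound is compared against */
	return (var << 4) | rev;
}

static int rev_var_ls(unsigned int rev_var, unsigned int max_rev_var)
{
	/* the erratum applies to every revision up to and including the bound */
	return (rev_var <= max_rev_var) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
}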
diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S
index 884281d..64158e7 100644
--- a/lib/cpus/aarch64/denver.S
+++ b/lib/cpus/aarch64/denver.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
  * Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -13,6 +13,8 @@
 #include <cpu_macros.S>
 #include <plat_macros.S>
 
+cpu_reset_prologue denver
+
 	/* -------------------------------------------------
 	 * CVE-2017-5715 mitigation
 	 *
@@ -296,8 +298,6 @@
 	ret
 endfunc denver_cluster_pwr_dwn
 
-errata_report_shim denver
-
 	/* ---------------------------------------------
 	 * This function provides Denver specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/dsu_helpers.S b/lib/cpus/aarch64/dsu_helpers.S
deleted file mode 100644
index 8e5b459..0000000
--- a/lib/cpus/aarch64/dsu_helpers.S
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (c) 2019-2023, Arm Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <asm_macros.S>
-#include <dsu_def.h>
-#include <lib/cpus/errata.h>
-
-	/* -----------------------------------------------------------------------
-	 * DSU erratum 798953 check function
-	 * Checks the DSU variant, revision and configuration to determine if
-	 * the erratum applies. Erratum applies on all configurations of the
-	 * DSU and if revision-variant is r0p0.
-	 *
-	 * The erratum was fixed in r0p1.
-	 *
-	 * This function is called from both assembly and C environment. So it
-	 * follows AAPCS.
-	 *
-	 * Clobbers: x0-x3
-	 * -----------------------------------------------------------------------
-	 */
-	.globl	check_errata_dsu_798953
-	.globl	errata_dsu_798953_wa
-
-func check_errata_dsu_798953
-	mov	x2, #ERRATA_APPLIES
-	mov	x3, #ERRATA_NOT_APPLIES
-
-	/* Check if DSU is equal to r0p0 */
-	mrs	x1, CLUSTERIDR_EL1
-
-	/* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
-	ubfx	x0, x1, #CLUSTERIDR_REV_SHIFT,\
-			#(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
-	mov	x1, #(0x0 << CLUSTERIDR_REV_SHIFT)
-	cmp	x0, x1
-	csel	x0, x2, x3, EQ
-	ret
-endfunc check_errata_dsu_798953
-
-	/* --------------------------------------------------
-	 * Errata Workaround for DSU erratum #798953.
-	 *
-	 * Can clobber only: x0-x8
-	 * --------------------------------------------------
-	 */
-func errata_dsu_798953_wa
-	mov	x8, x30
-	bl	check_errata_dsu_798953
-	cbz	x0, 1f
-
-	/* If erratum applies, disable high-level clock gating */
-	mrs	x0, CLUSTERACTLR_EL1
-	orr	x0, x0, #CLUSTERACTLR_EL1_DISABLE_CLOCK_GATING
-	msr	CLUSTERACTLR_EL1, x0
-	isb
-1:
-	ret	x8
-endfunc errata_dsu_798953_wa
-
-	/* -----------------------------------------------------------------------
-	 * DSU erratum 936184 check function
-	 * Checks the DSU variant, revision and configuration to determine if
-	 * the erratum applies. Erratum applies if ACP interface is present
-	 * in the DSU and revision-variant < r2p0.
-	 *
-	 * The erratum was fixed in r2p0.
-	 *
-	 * This function is called from both assembly and C environment. So it
-	 * follows AAPCS.
-	 *
-	 * Clobbers: x0-x4
-	 * -----------------------------------------------------------------------
-	 */
-	.globl	check_errata_dsu_936184
-	.globl	errata_dsu_936184_wa
-	.weak	is_scu_present_in_dsu
-
-	/* --------------------------------------------------------------------
-	 * Default behaviour respresents SCU is always present with DSU.
-	 * CPUs can override this definition if required.
-	 *
-	 * Can clobber only: x0-x3
-	 * --------------------------------------------------------------------
-	 */
-func is_scu_present_in_dsu
-	mov	x0, #1
-	ret
-endfunc is_scu_present_in_dsu
-
-func check_errata_dsu_936184
-	mov	x4, x30
-	bl	is_scu_present_in_dsu
-	cmp	x0, xzr
-	/* Default error status */
-	mov	x0, #ERRATA_NOT_APPLIES
-
-	/* If SCU is not present, return without applying patch */
-	b.eq	1f
-
-	/* Erratum applies only if DSU has the ACP interface */
-	mrs	x1, CLUSTERCFR_EL1
-	ubfx	x1, x1, #CLUSTERCFR_ACP_SHIFT, #1
-	cbz	x1, 1f
-
-	/* If ACP is present, check if DSU is older than r2p0 */
-	mrs	x1, CLUSTERIDR_EL1
-
-	/* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
-	ubfx	x2, x1, #CLUSTERIDR_REV_SHIFT,\
-			#(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
-	cmp x2, #(0x2 << CLUSTERIDR_VAR_SHIFT)
-	b.hs	1f
-	mov	x0, #ERRATA_APPLIES
-1:
-	ret	x4
-endfunc check_errata_dsu_936184
-
-	/* --------------------------------------------------
-	 * Errata Workaround for DSU erratum #936184.
-	 *
-	 * Can clobber only: x0-x8
-	 * --------------------------------------------------
-	 */
-func errata_dsu_936184_wa
-	mov	x8, x30
-	bl	check_errata_dsu_936184
-	cbz	x0, 1f
-
-	/* If erratum applies, we set a mask to a DSU control register */
-	mrs	x0, CLUSTERACTLR_EL1
-	ldr	x1, =DSU_ERRATA_936184_MASK
-	orr	x0, x0, x1
-	msr	CLUSTERACTLR_EL1, x0
-	isb
-1:
-	ret	x8
-endfunc errata_dsu_936184_wa
-
-	/* -----------------------------------------------------------------------
-	 * DSU erratum 2313941 check function
-	 * Checks the DSU variant, revision and configuration to determine if
-	 * the erratum applies. Erratum applies on all configurations of the
-	 * DSU and if revision-variant is r0p0, r1p0, r2p0, r2p1, r3p0, r3p1.
-	 *
-	 * The erratum is still open.
-	 *
-	 * This function is called from both assembly and C environment. So it
-	 * follows AAPCS.
-	 *
-	 * Clobbers: x0-x4
-	 * -----------------------------------------------------------------------
-	 */
-	.globl	check_errata_dsu_2313941
-	.globl	errata_dsu_2313941_wa
-
-func check_errata_dsu_2313941
-	mov	x4, x30
-	bl	is_scu_present_in_dsu
-	cmp	x0, xzr
-	/* Default error status */
-	mov	x0, #ERRATA_NOT_APPLIES
-
-	/* If SCU is not present, return without applying patch */
-	b.eq	1f
-
-	mov	x2, #ERRATA_APPLIES
-	mov	x3, #ERRATA_NOT_APPLIES
-
-	/* Check if DSU version is less than or equal to r3p1 */
-	mrs	x1, CLUSTERIDR_EL1
-
-	/* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
-	ubfx	x0, x1, #CLUSTERIDR_REV_SHIFT,\
-			#(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
-	mov	x1, #(0x31 << CLUSTERIDR_REV_SHIFT)
-	cmp	x0, x1
-	csel	x0, x2, x3, LS
-1:
-	ret	x4
-endfunc check_errata_dsu_2313941
-
-	/* --------------------------------------------------
-	 * Errata Workaround for DSU erratum #2313941.
-	 *
-	 * Can clobber only: x0-x8
-	 * --------------------------------------------------
-	 */
-func errata_dsu_2313941_wa
-	mov	x8, x30
-	bl	check_errata_dsu_2313941
-	cbz	x0, 1f
-
-	/* If erratum applies, disable high-level clock gating */
-	mrs	x0, CLUSTERACTLR_EL1
-	orr	x0, x0, #CLUSTERACTLR_EL1_DISABLE_SCLK_GATING
-	msr	CLUSTERACTLR_EL1, x0
-	isb
-1:
-	ret	x8
-endfunc errata_dsu_2313941_wa
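
The checks and workarounds deleted with dsu_helpers.S reappear as macros in dsu_macros.S (see the errata_dsu_936184_wa_impl / check_errata_dsu_936184_impl uses and the 2313941 equivalents in the Neoverse files below). A hedged C sketch of the 936184 condition those macros have to reproduce — scu_present_in_dsu() and the register accessors are illustrative names only, not real TF-A helpers:

static int dsu_936184_applies(void)
{
	if (!scu_present_in_dsu())
		return ERRATA_NOT_APPLIES;

	/* the erratum only applies if the DSU has an ACP interface... */
	if (((read_clustercfr_el1() >> CLUSTERCFR_ACP_SHIFT) & 1UL) == 0UL)
		return ERRATA_NOT_APPLIES;

	/*
	 * ...and the DSU is older than r2p0 (variant/revision are adjacent
	 * bitfields in CLUSTERIDR_EL1, so r2p0 encodes as 0x20)
	 */
	if (((read_clusteridr_el1() >> CLUSTERIDR_REV_SHIFT) & 0xffUL) >= 0x20UL)
		return ERRATA_NOT_APPLIES;

	return ERRATA_APPLIES;
}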
diff --git a/lib/cpus/aarch64/generic.S b/lib/cpus/aarch64/generic.S
index ef1f048..0a10eed 100644
--- a/lib/cpus/aarch64/generic.S
+++ b/lib/cpus/aarch64/generic.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,6 +11,8 @@
 #include <cpu_macros.S>
 #include <plat_macros.S>
 
+cpu_reset_prologue generic
+
 	/* ---------------------------------------------
 	 * Disable L1 data cache and unified L2 cache
 	 * ---------------------------------------------
@@ -79,9 +81,10 @@
  * Unimplemented functions.
  * ---------------------------------------------
  */
-.equ	generic_errata_report,		0
 .equ	generic_cpu_reg_dump,		0
-.equ	generic_reset_func,		0
+
+cpu_reset_func_start generic
+cpu_reset_func_end generic
 
 declare_cpu_ops generic, AARCH64_GENERIC_MIDR, \
 	generic_reset_func, \
diff --git a/lib/cpus/aarch64/neoverse_e1.S b/lib/cpus/aarch64/neoverse_e1.S
index 45bd8d3..f37bb28 100644
--- a/lib/cpus/aarch64/neoverse_e1.S
+++ b/lib/cpus/aarch64/neoverse_e1.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,6 +7,7 @@
 #include <asm_macros.S>
 #include <common/bl_common.h>
 #include <common/debug.h>
+#include <dsu_macros.S>
 #include <neoverse_e1.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
@@ -21,15 +22,18 @@
 #error "Neoverse-E1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
-/*
- * ERRATA_DSU_936184:
- * The errata is defined in dsu_helpers.S and applies to neoverse_e1.
- * Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_neoverse_e1_936184, check_errata_dsu_936184
-.equ erratum_neoverse_e1_936184_wa, errata_dsu_936184_wa
-add_erratum_entry neoverse_e1, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+cpu_reset_prologue neoverse_e1
+
+workaround_reset_start neoverse_e1, ERRATUM(936184), ERRATA_DSU_936184
+	errata_dsu_936184_wa_impl
+workaround_reset_end neoverse_e1, ERRATUM(936184)
+
+check_erratum_custom_start neoverse_e1, ERRATUM(936184)
+	branch_if_scu_not_present 2f /* label 1 is used in the macro */
+	check_errata_dsu_936184_impl
+	2:
+	ret
+check_erratum_custom_end neoverse_e1, ERRATUM(936184)
 
 cpu_reset_func_start neoverse_e1
 cpu_reset_func_end neoverse_e1
@@ -42,8 +46,6 @@
 	ret
 endfunc neoverse_e1_cpu_pwr_dwn
 
-errata_report_shim neoverse_e1
-
 .section .rodata.neoverse_e1_regs, "aS"
 neoverse_e1_regs:  /* The ascii list of register names to be reported */
 	.asciz	"cpuectlr_el1", ""
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
index f149b5b..5868bf2 100644
--- a/lib/cpus/aarch64/neoverse_n1.S
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,6 +8,7 @@
 #include <asm_macros.S>
 #include <cpuamu.h>
 #include <cpu_macros.S>
+#include <dsu_macros.S>
 #include <neoverse_n1.h>
 #include "wa_cve_2022_23960_bhb_vector.S"
 
@@ -27,15 +28,18 @@
 	wa_cve_2022_23960_bhb_vector_table NEOVERSE_N1_BHB_LOOP_COUNT, neoverse_n1
 #endif /* WORKAROUND_CVE_2022_23960 */
 
-/*
- * ERRATA_DSU_936184:
- * The errata is defined in dsu_helpers.S and applies to Neoverse N1.
- * Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_neoverse_n1_936184, check_errata_dsu_936184
-.equ erratum_neoverse_n1_936184_wa, errata_dsu_936184_wa
-add_erratum_entry neoverse_n1, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+cpu_reset_prologue neoverse_n1
+
+workaround_reset_start neoverse_n1, ERRATUM(936184), ERRATA_DSU_936184
+	errata_dsu_936184_wa_impl
+workaround_reset_end neoverse_n1, ERRATUM(936184)
+
+check_erratum_custom_start neoverse_n1, ERRATUM(936184)
+	branch_if_scu_not_present 2f /* label 1 is used in the macro */
+	check_errata_dsu_936184_impl
+	2:
+	ret
+check_erratum_custom_end neoverse_n1, ERRATUM(936184)
 
 workaround_reset_start neoverse_n1, ERRATUM(1043202), ERRATA_N1_1043202
 	/* Apply instruction patching sequence */
@@ -112,7 +116,7 @@
 
 check_erratum_ls neoverse_n1, ERRATUM(1315703), CPU_REV(3, 0)
 
-workaround_reset_start neoverse_n1, ERRATUM(1542419), ERRATA_N1_1542419
+workaround_reset_start neoverse_n1, ERRATUM(1542419), ERRATA_N1_1542419, SPLIT_WA
 	/* Apply instruction patching sequence */
 	ldr	x0, =0x0
 	msr	CPUPSELR_EL3, x0
@@ -242,8 +246,6 @@
 	ret
 endfunc neoverse_n1_core_pwr_dwn
 
-errata_report_shim neoverse_n1
-
 /*
  * Handle trap of EL0 IC IVAU instructions to EL3 by executing a TLB
  * inner-shareable invalidation to an arbitrary address followed by a DSB.
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
index 8b9d64c..df00d61 100644
--- a/lib/cpus/aarch64/neoverse_n2.S
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -7,6 +7,7 @@
 #include <arch.h>
 #include <asm_macros.S>
 #include <cpu_macros.S>
+#include <dsu_macros.S>
 #include <neoverse_n2.h>
 #include "wa_cve_2022_23960_bhb_vector.S"
 
@@ -22,30 +23,11 @@
 
 .global check_erratum_neoverse_n2_3701773
 
-add_erratum_entry neoverse_n2, ERRATUM(3701773), ERRATA_N2_3701773, NO_APPLY_AT_RESET
-
-check_erratum_ls neoverse_n2, ERRATUM(3701773), CPU_REV(0, 3)
-
 #if WORKAROUND_CVE_2022_23960
 	wa_cve_2022_23960_bhb_vector_table NEOVERSE_N2_BHB_LOOP_COUNT, neoverse_n2
 #endif /* WORKAROUND_CVE_2022_23960 */
 
-/*
- * ERRATA_DSU_2313941:
- * The errata is defined in dsu_helpers.S and applies to Neoverse N2.
- * Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_neoverse_n2_2313941, check_errata_dsu_2313941
-.equ erratum_neoverse_n2_2313941_wa, errata_dsu_2313941_wa
-add_erratum_entry neoverse_n2, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET
-
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start neoverse_n2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
-	sysreg_bit_set NEOVERSE_N2_CPUECTLR_EL1, BIT(46)
-workaround_reset_end neoverse_n2, CVE(2024, 5660)
-
-check_erratum_ls neoverse_n2, CVE(2024, 5660), CPU_REV(0, 3)
+cpu_reset_prologue neoverse_n2
 
 workaround_reset_start neoverse_n2, ERRATUM(2002655), ERRATA_N2_2002655
 	/* Apply instruction patching sequence */
@@ -69,18 +51,6 @@
 
 check_erratum_ls neoverse_n2, ERRATUM(2002655), CPU_REV(0, 0)
 
-workaround_reset_start neoverse_n2, ERRATUM(2025414), ERRATA_N2_2025414
-	sysreg_bit_set NEOVERSE_N2_CPUECTLR_EL1, NEOVERSE_N2_CPUECTLR_EL1_PFSTIDIS_BIT
-workaround_reset_end neoverse_n2, ERRATUM(2025414)
-
-check_erratum_ls neoverse_n2, ERRATUM(2025414), CPU_REV(0, 0)
-
-workaround_reset_start neoverse_n2, ERRATUM(2067956), ERRATA_N2_2067956
-	sysreg_bit_set NEOVERSE_N2_CPUACTLR_EL1, NEOVERSE_N2_CPUACTLR_EL1_BIT_46
-workaround_reset_end neoverse_n2, ERRATUM(2067956)
-
-check_erratum_ls neoverse_n2, ERRATUM(2067956), CPU_REV(0, 0)
-
 workaround_runtime_start neoverse_n2, ERRATUM(2009478), ERRATA_N2_2009478
 	/* Stash ERRSELR_EL1 in x2 */
 	mrs     x2, ERRSELR_EL1
@@ -97,15 +67,17 @@
 
 check_erratum_ls neoverse_n2, ERRATUM(2009478), CPU_REV(0, 0)
 
-workaround_reset_start neoverse_n2, ERRATUM(2138953), ERRATA_N2_2138953
-	/* Apply instruction patching sequence */
-	mrs	x1, NEOVERSE_N2_CPUECTLR2_EL1
-	mov	x0, #NEOVERSE_N2_CPUECTLR2_EL1_PF_MODE_CNSRV
-	bfi	x1, x0, #CPUECTLR2_EL1_PF_MODE_LSB, #CPUECTLR2_EL1_PF_MODE_WIDTH
-	msr	NEOVERSE_N2_CPUECTLR2_EL1, x1
-workaround_reset_end neoverse_n2, ERRATUM(2138953)
+workaround_reset_start neoverse_n2, ERRATUM(2025414), ERRATA_N2_2025414
+	sysreg_bit_set NEOVERSE_N2_CPUECTLR_EL1, NEOVERSE_N2_CPUECTLR_EL1_PFSTIDIS_BIT
+workaround_reset_end neoverse_n2, ERRATUM(2025414)
 
-check_erratum_ls neoverse_n2, ERRATUM(2138953), CPU_REV(0, 3)
+check_erratum_ls neoverse_n2, ERRATUM(2025414), CPU_REV(0, 0)
+
+workaround_reset_start neoverse_n2, ERRATUM(2067956), ERRATA_N2_2067956
+	sysreg_bit_set NEOVERSE_N2_CPUACTLR_EL1, NEOVERSE_N2_CPUACTLR_EL1_BIT_46
+workaround_reset_end neoverse_n2, ERRATUM(2067956)
+
+check_erratum_ls neoverse_n2, ERRATUM(2067956), CPU_REV(0, 0)
 
 workaround_reset_start neoverse_n2, ERRATUM(2138956), ERRATA_N2_2138956
 	/* Apply instruction patching sequence */
@@ -171,6 +143,18 @@
 
 check_erratum_ls neoverse_n2, ERRATUM(2280757), CPU_REV(0, 0)
 
+workaround_reset_start neoverse_n2, ERRATUM(2313941), ERRATA_DSU_2313941
+	errata_dsu_2313941_wa_impl
+workaround_reset_end neoverse_n2, ERRATUM(2313941)
+
+check_erratum_custom_start neoverse_n2, ERRATUM(2313941)
+	branch_if_scu_not_present 2f /* label 1 is used in the macro */
+	check_errata_dsu_2313941_impl
+	2:
+	ret
+check_erratum_custom_end neoverse_n2, ERRATUM(2313941)
+
+.global erratum_neoverse_n2_2326639_wa
 workaround_runtime_start neoverse_n2, ERRATUM(2326639), ERRATA_N2_2326639
 	/* Set bit 36 in ACTLR2_EL1 */
 	sysreg_bit_set NEOVERSE_N2_CPUACTLR2_EL1, NEOVERSE_N2_CPUACTLR2_EL1_BIT_36
@@ -234,6 +218,10 @@
 
 check_erratum_ls neoverse_n2, ERRATUM(2779511), CPU_REV(0, 2)
 
+add_erratum_entry neoverse_n2, ERRATUM(3701773), ERRATA_N2_3701773
+
+check_erratum_ls neoverse_n2, ERRATUM(3701773), CPU_REV(0, 3)
+
 workaround_reset_start neoverse_n2, CVE(2022,23960), WORKAROUND_CVE_2022_23960
 #if IMAGE_BL31
 	/*
@@ -246,6 +234,13 @@
 
 check_erratum_chosen neoverse_n2, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start neoverse_n2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+	sysreg_bit_set NEOVERSE_N2_CPUECTLR_EL1, BIT(46)
+workaround_reset_end neoverse_n2, CVE(2024, 5660)
+
+check_erratum_ls neoverse_n2, CVE(2024, 5660), CPU_REV(0, 3)
+
 	/* -------------------------------------------
 	 * The CPU Ops reset function for Neoverse N2.
 	 * -------------------------------------------
@@ -294,8 +289,6 @@
 	ret
 endfunc neoverse_n2_core_pwr_dwn
 
-errata_report_shim neoverse_n2
-
 	/* ---------------------------------------------
 	 * This function provides Neoverse N2 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/neoverse_n3.S b/lib/cpus/aarch64/neoverse_n3.S
index b484b26..f46e45d 100644
--- a/lib/cpus/aarch64/neoverse_n3.S
+++ b/lib/cpus/aarch64/neoverse_n3.S
@@ -21,9 +21,11 @@
 #error "Neoverse-N3 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+cpu_reset_prologue neoverse_n3
+
 .global check_erratum_neoverse_n3_3699563
 
-add_erratum_entry neoverse_n3, ERRATUM(3699563), ERRATA_N3_3699563, NO_APPLY_AT_RESET
+add_erratum_entry neoverse_n3, ERRATUM(3699563), ERRATA_N3_3699563
 
 check_erratum_ls neoverse_n3, ERRATUM(3699563), CPU_REV(0, 0)
 
@@ -46,8 +48,6 @@
 	ret
 endfunc neoverse_n3_core_pwr_dwn
 
-errata_report_shim neoverse_n3
-
 	/* ---------------------------------------------
 	 * This function provides Neoverse-N3 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/neoverse_n_common.S b/lib/cpus/aarch64/neoverse_n_common.S
deleted file mode 100644
index b816342..0000000
--- a/lib/cpus/aarch64/neoverse_n_common.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <asm_macros.S>
-#include <neoverse_n_common.h>
-
-	.global is_scu_present_in_dsu
-
-/*
- * Check if the SCU L3 Unit is present on the DSU
- * 1-> SCU present
- * 0-> SCU not present
- *
- * This function is implemented as weak on dsu_helpers.S and must be
- * overwritten for Neoverse Nx cores.
- */
-
-func is_scu_present_in_dsu
-	mrs	x0, CPUCFR_EL1
-	ubfx	x0, x0, #SCU_SHIFT, #1
-	eor	x0, x0, #1
-	ret
-endfunc is_scu_present_in_dsu
diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S
index b131cf8..f975be0 100644
--- a/lib/cpus/aarch64/neoverse_v1.S
+++ b/lib/cpus/aarch64/neoverse_v1.S
@@ -22,17 +22,12 @@
 #error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+cpu_reset_prologue neoverse_v1
+
 #if WORKAROUND_CVE_2022_23960
 	wa_cve_2022_23960_bhb_vector_table NEOVERSE_V1_BHB_LOOP_COUNT, neoverse_v1
 #endif /* WORKAROUND_CVE_2022_23960 */
 
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start neoverse_v1, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
-	sysreg_bit_set NEOVERSE_V1_CPUECTLR_EL1, BIT(46)
-workaround_reset_end neoverse_v1, CVE(2024, 5660)
-
-check_erratum_ls neoverse_v1, CVE(2024, 5660), CPU_REV(1, 2)
-
 workaround_reset_start neoverse_v1, ERRATUM(1618635), ERRATA_V1_1618635
 	/* Inserts a DMB SY before and after MRS PAR_EL1 */
 	ldr	x0, =0x0
@@ -161,15 +156,6 @@
 
 check_erratum_range neoverse_v1, ERRATUM(1966096), CPU_REV(1, 0), CPU_REV(1, 1)
 
-workaround_reset_start neoverse_v1, ERRATUM(2108267), ERRATA_V1_2108267
-	mrs	x1, NEOVERSE_V1_CPUECTLR_EL1
-	mov	x0, #NEOVERSE_V1_CPUECTLR_EL1_PF_MODE_CNSRV
-	bfi	x1, x0, #CPUECTLR_EL1_PF_MODE_LSB, #CPUECTLR_EL1_PF_MODE_WIDTH
-	msr	NEOVERSE_V1_CPUECTLR_EL1, x1
-workaround_reset_end neoverse_v1, ERRATUM(2108267)
-
-check_erratum_ls neoverse_v1, ERRATUM(2108267), CPU_REV(1, 2)
-
 workaround_reset_start neoverse_v1, ERRATUM(2139242), ERRATA_V1_2139242
 	mov	x0, #0x3
 	msr	S3_6_C15_C8_0, x0
@@ -250,6 +236,13 @@
 
 check_erratum_chosen neoverse_v1, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start neoverse_v1, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+	sysreg_bit_set NEOVERSE_V1_CPUECTLR_EL1, BIT(46)
+workaround_reset_end neoverse_v1, CVE(2024, 5660)
+
+check_erratum_ls neoverse_v1, CVE(2024, 5660), CPU_REV(1, 2)
+
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ---------------------------------------------
@@ -266,8 +259,6 @@
 	ret
 endfunc neoverse_v1_core_pwr_dwn
 
-errata_report_shim neoverse_v1
-
 cpu_reset_func_start neoverse_v1
 	/* Disable speculative loads */
 	msr	SSBS, xzr
diff --git a/lib/cpus/aarch64/neoverse_v2.S b/lib/cpus/aarch64/neoverse_v2.S
index 9f14155..e6147a9 100644
--- a/lib/cpus/aarch64/neoverse_v2.S
+++ b/lib/cpus/aarch64/neoverse_v2.S
@@ -22,19 +22,7 @@
 #error "Neoverse V2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start neoverse_v2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
-	sysreg_bit_set NEOVERSE_V2_CPUECTLR_EL1, BIT(46)
-workaround_reset_end neoverse_v2, CVE(2024, 5660)
-
-check_erratum_ls neoverse_v2, CVE(2024, 5660), CPU_REV(0, 2)
-
-workaround_reset_start neoverse_v2, ERRATUM(2331132), ERRATA_V2_2331132
-	sysreg_bitfield_insert NEOVERSE_V2_CPUECTLR2_EL1, NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_CNSRV, \
-		NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_LSB, NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_WIDTH
-workaround_reset_end neoverse_v2, ERRATUM(2331132)
-
-check_erratum_ls neoverse_v2, ERRATUM(2331132), CPU_REV(0, 2)
+cpu_reset_prologue neoverse_v2
 
 workaround_reset_start neoverse_v2, ERRATUM(2618597), ERRATA_V2_2618597
         /* Disable retention control for WFI and WFE. */
@@ -93,6 +81,13 @@
 
 check_erratum_chosen neoverse_v2, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start neoverse_v2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+	sysreg_bit_set NEOVERSE_V2_CPUECTLR_EL1, BIT(46)
+workaround_reset_end neoverse_v2, CVE(2024, 5660)
+
+check_erratum_ls neoverse_v2, CVE(2024, 5660), CPU_REV(0, 2)
+
 #if WORKAROUND_CVE_2022_23960
 	wa_cve_2022_23960_bhb_vector_table NEOVERSE_V2_BHB_LOOP_COUNT, neoverse_v2
 #endif /* WORKAROUND_CVE_2022_23960 */
@@ -129,7 +124,6 @@
 	msr	SSBS, xzr
 cpu_reset_func_end neoverse_v2
 
-errata_report_shim neoverse_v2
 	/* ---------------------------------------------
 	 * This function provides Neoverse V2-
 	 * specific register information for crash
diff --git a/lib/cpus/aarch64/neoverse_v3.S b/lib/cpus/aarch64/neoverse_v3.S
index 9303e39..2ead062 100644
--- a/lib/cpus/aarch64/neoverse_v3.S
+++ b/lib/cpus/aarch64/neoverse_v3.S
@@ -22,12 +22,10 @@
 #error "Neoverse V3 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+cpu_reset_prologue neoverse_v3
+
 .global check_erratum_neoverse_v3_3701767
 
-add_erratum_entry neoverse_v3, ERRATUM(3701767), ERRATA_V3_3701767, NO_APPLY_AT_RESET
-
-check_erratum_ls neoverse_v3, ERRATUM(3701767), CPU_REV(0, 2)
-
 workaround_reset_start neoverse_v3, ERRATUM(2970647), ERRATA_V3_2970647
 	/* Add ISB before MRS reads of MPIDR_EL1/MIDR_EL1 */
 	ldr x0, =0x1
@@ -43,17 +41,14 @@
 
 check_erratum_ls neoverse_v3, ERRATUM(2970647), CPU_REV(0, 0)
 
+add_erratum_entry neoverse_v3, ERRATUM(3701767), ERRATA_V3_3701767
+
+check_erratum_ls neoverse_v3, ERRATUM(3701767), CPU_REV(0, 2)
+
 #if WORKAROUND_CVE_2022_23960
 	wa_cve_2022_23960_bhb_vector_table NEOVERSE_V3_BHB_LOOP_COUNT, neoverse_v3
 #endif /* WORKAROUND_CVE_2022_23960 */
 
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start neoverse_v3, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
-	sysreg_bit_set NEOVERSE_V3_CPUECTLR_EL1, BIT(46)
-workaround_reset_end neoverse_v3, CVE(2024, 5660)
-
-check_erratum_ls neoverse_v3, CVE(2024, 5660), CPU_REV(0, 1)
-
 workaround_reset_start neoverse_v3, CVE(2022,23960), WORKAROUND_CVE_2022_23960
 #if IMAGE_BL31
 	/*
@@ -67,6 +62,13 @@
 
 check_erratum_chosen neoverse_v3, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start neoverse_v3, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+	sysreg_bit_set NEOVERSE_V3_CPUECTLR_EL1, BIT(46)
+workaround_reset_end neoverse_v3, CVE(2024, 5660)
+
+check_erratum_ls neoverse_v3, CVE(2024, 5660), CPU_REV(0, 1)
+
 workaround_reset_start neoverse_v3, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
        /* ---------------------------------
         * Sets BIT41 of CPUACTLR6_EL1 which
@@ -99,8 +101,6 @@
 	msr	SSBS, xzr
 cpu_reset_func_end neoverse_v3
 
-errata_report_shim neoverse_v3
-
 	/* ---------------------------------------------
 	 * This function provides Neoverse V3 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/nevis.S b/lib/cpus/aarch64/nevis.S
index 36830a9..0d04e65 100644
--- a/lib/cpus/aarch64/nevis.S
+++ b/lib/cpus/aarch64/nevis.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2023-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -21,6 +21,8 @@
 #error "Nevis supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+cpu_reset_prologue nevis
+
 cpu_reset_func_start nevis
 	/* ----------------------------------------------------
 	 * Disable speculative loads
@@ -40,8 +42,6 @@
 	ret
 endfunc nevis_core_pwr_dwn
 
-errata_report_shim nevis
-
 .section .rodata.nevis_regs, "aS"
 nevis_regs: /* The ASCII list of register names to be reported */
 	.asciz	"cpuectlr_el1", ""
diff --git a/lib/cpus/aarch64/qemu_max.S b/lib/cpus/aarch64/qemu_max.S
index 00963bc..a727379 100644
--- a/lib/cpus/aarch64/qemu_max.S
+++ b/lib/cpus/aarch64/qemu_max.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,6 +8,8 @@
 #include <cpu_macros.S>
 #include <qemu_max.h>
 
+cpu_reset_prologue qemu_max
+
 func qemu_max_core_pwr_dwn
 	/* ---------------------------------------------
 	 * Disable the Data Cache.
@@ -47,7 +49,8 @@
 	b	dcsw_op_all
 endfunc qemu_max_cluster_pwr_dwn
 
-errata_report_shim qemu_max
+cpu_reset_func_start qemu_max
+cpu_reset_func_end qemu_max
 
 	/* ---------------------------------------------
 	 * This function provides cpu specific
@@ -69,6 +72,6 @@
 
 
 /* cpu_ops for QEMU MAX */
-declare_cpu_ops qemu_max, QEMU_MAX_MIDR, CPU_NO_RESET_FUNC, \
+declare_cpu_ops qemu_max, QEMU_MAX_MIDR, qemu_max_reset_func, \
 	qemu_max_core_pwr_dwn, \
 	qemu_max_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/rainier.S b/lib/cpus/aarch64/rainier.S
index c770f54..9ad9362 100644
--- a/lib/cpus/aarch64/rainier.S
+++ b/lib/cpus/aarch64/rainier.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -21,6 +21,8 @@
 #error "Rainier CPU supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+cpu_reset_prologue rainier
+
 /* --------------------------------------------------
  * Disable speculative loads if Rainier supports
  * SSBS.
@@ -80,8 +82,6 @@
 	ret
 endfunc rainier_core_pwr_dwn
 
-errata_report_shim rainier
-
 	/* ---------------------------------------------
 	 * This function provides Rainier specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/travis.S b/lib/cpus/aarch64/travis.S
index 2abefe9..99731e2 100644
--- a/lib/cpus/aarch64/travis.S
+++ b/lib/cpus/aarch64/travis.S
@@ -21,6 +21,8 @@
 #error "Travis supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+cpu_reset_prologue travis
+
 cpu_reset_func_start travis
 	/* ----------------------------------------------------
 	 * Disable speculative loads
@@ -54,8 +56,6 @@
 	ret
 endfunc travis_core_pwr_dwn
 
-errata_report_shim travis
-
 .section .rodata.travis_regs, "aS"
 travis_regs: /* The ASCII list of register names to be reported */
 	.asciz	"cpuectlr_el1", ""
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 4b9e56e..b749d99 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -314,10 +314,6 @@
 # to revision r0p0 of the A78 cpu and was fixed in the revision r1p0.
 CPU_FLAG_LIST += ERRATA_A78_1952683
 
-# Flag to apply erratum 2132060 workaround during reset. This erratum applies
-# to revisions r0p0, r1p0, r1p1, and r1p2 of the A78 cpu. It is still open.
-CPU_FLAG_LIST += ERRATA_A78_2132060
-
 # Flag to apply erratum 2242635 workaround during reset. This erratum applies
 # to revisions r1p0, r1p1, and r1p2 of the A78 cpu and is open. The issue is
 # present in r0p0 as well but there is no workaround for that revision.
@@ -379,10 +375,6 @@
 # It is still open.
 CPU_FLAG_LIST += ERRATA_A78_AE_2712574
 
-# Flag to apply erratum 2132064 workaround during reset. This erratum applies
-# to revisions r0p1 and r0p2 of the A78C cpu. It is still open.
-CPU_FLAG_LIST += ERRATA_A78C_2132064
-
 # Flag to apply erratum 2242638 workaround during reset. This erratum applies
 # to revisions r0p1 and r0p2 of the A78C cpu. It is still open.
 CPU_FLAG_LIST += ERRATA_A78C_2242638
@@ -522,10 +514,6 @@
 # to revisions r0p0, r1p0, and r1p1 of the Neoverse V1 cpu and is still open.
 CPU_FLAG_LIST += ERRATA_V1_2139242
 
-# Flag to apply erratum 2108267 workaround during reset. This erratum applies
-# to revisions r0p0, r1p0, and r1p1 of the Neoverse V1 cpu and is still open.
-CPU_FLAG_LIST += ERRATA_V1_2108267
-
 # Flag to apply erratum 2216392 workaround during reset. This erratum applies
 # to revisions r1p0 and r1p1 of the Neoverse V1 cpu and is still open. This
 # issue exists in r0p0 as well but there is no workaround for that revision.
@@ -584,11 +572,6 @@
 # to revision r2p0 of the Cortex-A710 cpu and is still open.
 CPU_FLAG_LIST += ERRATA_A710_2083908
 
-# Flag to apply erratum 2058056 workaround during reset. This erratum applies
-# to revisions r0p0, r1p0, r2p0 and r2p1 of the Cortex-A710 cpu and is still
-# open.
-CPU_FLAG_LIST += ERRATA_A710_2058056
-
 # Flag to apply erratum 2055002 workaround during reset. This erratum applies
 # to revision r1p0, r2p0 of the Cortex-A710 cpu and is still open.
 CPU_FLAG_LIST += ERRATA_A710_2055002
@@ -679,10 +662,6 @@
 # to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
 CPU_FLAG_LIST += ERRATA_N2_2138956
 
-# Flag to apply erratum 2138953 workaround during reset. This erratum applies
-# to revision r0p0, r0p1, r0p2, r0p3 of the Neoverse N2 cpu and is still open.
-CPU_FLAG_LIST += ERRATA_N2_2138953
-
 # Flag to apply erratum 2242415 workaround during reset. This erratum applies
 # to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
 CPU_FLAG_LIST += ERRATA_N2_2242415
@@ -750,10 +729,6 @@
 # to revisions r0p0, r1p0, and r2p0 of the Cortex-X2 cpu and is still open.
 CPU_FLAG_LIST += ERRATA_X2_2002765
 
-# Flag to apply erratum 2058056 workaround during reset. This erratum applies
-# to revisions r0p0, r1p0, r2p0 and r2p1 of the Cortex-X2 cpu and is still open.
-CPU_FLAG_LIST += ERRATA_X2_2058056
-
 # Flag to apply erratum 2083908 workaround during reset. This erratum applies
 # to revision r2p0 of the Cortex-X2 cpu and is still open.
 CPU_FLAG_LIST += ERRATA_X2_2083908
@@ -809,11 +784,6 @@
 # of the Cortex-X2 cpu and is still open.
 CPU_FLAG_LIST += ERRATA_X2_3701772
 
-# Flag to apply erratum 2070301 workaround on reset. This erratum applies
-# to revisions r0p0, r1p0, r1p1 and r1p2 of the Cortex-X3 cpu and is
-# still open.
-CPU_FLAG_LIST += ERRATA_X3_2070301
-
 # Flag to apply erratum 2266875 workaround during reset. This erratum applies
 # to revisions r0p0 and r1p0 of the Cortex-X3 cpu, it is fixed in r1p1.
 CPU_FLAG_LIST += ERRATA_X3_2266875
@@ -965,6 +935,11 @@
 # Cortex-A510 cpu and is fixed in r1p3.
 CPU_FLAG_LIST += ERRATA_A510_2684597
 
+# Flag to apply erratum 2971420 workaround during context switch. This erratum
+# applies to revisions r0p1, r0p2, r0p3, r1p0, r1p1, r1p2 and r1p3 of the
+# Cortex-A510 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_A510_2971420
+
 # Flag to apply erratum 2630792 workaround during reset. This erratum applies
 # to revisions r0p0, r0p1 of the Cortex-A520 cpu and is still open.
 CPU_FLAG_LIST += ERRATA_A520_2630792
@@ -977,10 +952,6 @@
 # applies to revision r0p0 and r0p1 of the Cortex-A520 cpu and is fixed in r0p2.
 CPU_FLAG_LIST += ERRATA_A520_2938996
 
-# Flag to apply erratum 2331132 workaround during reset. This erratum applies
-# to revisions r0p0, r0p1 and r0p2. It is still open.
-CPU_FLAG_LIST += ERRATA_V2_2331132
-
 # Flag to apply erratum 2618597 workaround during reset. This erratum applies
 # to revisions r0p0 and r0p1. It is fixed in r0p2.
 CPU_FLAG_LIST += ERRATA_V2_2618597
@@ -1037,6 +1008,10 @@
 # only to revision r0p0, r1p0 and r1p1. It is fixed in r1p2.
 CPU_FLAG_LIST += ERRATA_A715_2728106
 
+# Flag to apply erratum 2804830 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1 and r1p2. It is fixed in r1p3.
+CPU_FLAG_LIST += ERRATA_A715_2804830
+
 # Flag to apply erratum 3699560 workaround during context save/restore of
 # ICH_VMCR_EL2 reg. This erratum applies to revisions r0p0, r1p0, r1p2, r1p3
 # of the Cortex-A715 cpu and is still open.
diff --git a/lib/cpus/errata_common.c b/lib/cpus/errata_common.c
index ba9c676..7dcc699 100644
--- a/lib/cpus/errata_common.c
+++ b/lib/cpus/errata_common.c
@@ -9,6 +9,7 @@
 #include <arch.h>
 #include <arch_helpers.h>
 #include <cortex_a75.h>
+#include <cortex_a510.h>
 #include <cortex_a520.h>
 #include <cortex_a710.h>
 #include <cortex_a715.h>
@@ -24,21 +25,26 @@
 #include <neoverse_n3.h>
 #include <neoverse_v3.h>
 
-#if ERRATA_A520_2938996 || ERRATA_X4_2726228
-unsigned int check_if_affected_core(void)
+bool check_if_trbe_disable_affected_core(void)
 {
-	uint32_t midr_val = read_midr();
-	long rev_var  = cpu_get_rev_var();
-
-	if (EXTRACT_PARTNUM(midr_val) == EXTRACT_PARTNUM(CORTEX_A520_MIDR)) {
-		return check_erratum_cortex_a520_2938996(rev_var);
-	} else if (EXTRACT_PARTNUM(midr_val) == EXTRACT_PARTNUM(CORTEX_X4_MIDR)) {
-		return check_erratum_cortex_x4_2726228(rev_var);
-	}
-
-	return ERRATA_NOT_APPLIES;
-}
+	switch (EXTRACT_PARTNUM(read_midr())) {
+#if ERRATA_A520_2938996
+	case EXTRACT_PARTNUM(CORTEX_A520_MIDR):
+		return check_erratum_cortex_a520_2938996(cpu_get_rev_var()) == ERRATA_APPLIES;
 #endif
+#if ERRATA_X4_2726228
+	case EXTRACT_PARTNUM(CORTEX_X4_MIDR):
+		return check_erratum_cortex_x4_2726228(cpu_get_rev_var()) == ERRATA_APPLIES;
+#endif
+#if ERRATA_A510_2971420
+	case EXTRACT_PARTNUM(CORTEX_A510_MIDR):
+		return check_erratum_cortex_a510_2971420(cpu_get_rev_var()) == ERRATA_APPLIES;
+#endif
+	default:
+		break;
+	}
+	return false;
+}
 
 #if ERRATA_A75_764081
 bool errata_a75_764081_applies(void)
diff --git a/lib/cpus/errata_report.c b/lib/cpus/errata_report.c
index 4e9bdfc..03b6a28 100644
--- a/lib/cpus/errata_report.c
+++ b/lib/cpus/errata_report.c
@@ -67,7 +67,7 @@
  * save space. This functionality is only useful on development and platform
  * bringup builds, when FEATURE_DETECTION should be used anyway
  */
-void __unused generic_errata_report(void)
+void generic_errata_report(void)
 {
 	struct cpu_ops *cpu_ops = get_cpu_ops_ptr();
 	struct erratum_entry *entry = cpu_ops->errata_list_start;
@@ -77,7 +77,6 @@
 	uint32_t last_erratum_id = 0;
 	uint16_t last_cve_yr = 0;
 	bool check_cve = false;
-	bool failed = false;
 #endif /* FEATURE_DETECTION */
 
 	for (; entry != end; entry += 1) {
@@ -100,30 +99,20 @@
 		if (entry->cve) {
 			if (last_cve_yr > entry->cve ||
 			   (last_cve_yr == entry->cve && last_erratum_id >= entry->id)) {
-				ERROR("CVE %u_%u was out of order!\n",
+				WARN("CVE %u_%u was out of order!\n",
 				      entry->cve, entry->id);
-				failed = true;
 			}
 			check_cve = true;
 			last_cve_yr = entry->cve;
 		} else {
 			if (last_erratum_id >= entry->id || check_cve) {
-				ERROR("Erratum %u was out of order!\n",
+				WARN("Erratum %u was out of order!\n",
 				      entry->id);
-				failed = true;
 			}
 		}
 		last_erratum_id = entry->id;
 #endif /* FEATURE_DETECTION */
 	}
-
-#if FEATURE_DETECTION
-	/*
-	 * enforce errata and CVEs are in ascending order and that CVEs are
-	 * after errata
-	 */
-	assert(!failed);
-#endif /* FEATURE_DETECTION */
 }
 
 /*
@@ -159,70 +148,16 @@
  */
 void print_errata_status(void)
 {
-	struct cpu_ops *cpu_ops;
 #ifdef IMAGE_BL1
-	/*
-	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
-	 * directly.
-	 */
-	cpu_ops = get_cpu_ops_ptr();
-
-	if (cpu_ops->errata_func != NULL) {
-		cpu_ops->errata_func();
-	}
+	generic_errata_report();
 #else /* IMAGE_BL1 */
-	cpu_ops = (void *) get_cpu_data(cpu_ops_ptr);
+	struct cpu_ops *cpu_ops = (void *) get_cpu_data(cpu_ops_ptr);
 
 	assert(cpu_ops != NULL);
 
-	if (cpu_ops->errata_func == NULL) {
-		return;
-	}
-
 	if (errata_needs_reporting(cpu_ops->errata_lock, cpu_ops->errata_reported)) {
-		cpu_ops->errata_func();
+		generic_errata_report();
 	}
 #endif /* IMAGE_BL1 */
 }
-
-/*
- * Old errata status message printer
- * TODO: remove once all cpus have been converted to the new printing method
- */
-void __unused errata_print_msg(unsigned int status, const char *cpu, const char *id)
-{
-	/* Errata status strings */
-	static const char *const errata_status_str[] = {
-		[ERRATA_NOT_APPLIES] = "not applied",
-		[ERRATA_APPLIES] = "applied",
-		[ERRATA_MISSING] = "missing!"
-	};
-	static const char *const __unused bl_str = BL_STRING;
-	const char *msg __unused;
-
-
-	assert(status < ARRAY_SIZE(errata_status_str));
-	assert(cpu != NULL);
-	assert(id != NULL);
-
-	msg = errata_status_str[status];
-
-	switch (status) {
-	case ERRATA_NOT_APPLIES:
-		VERBOSE(ERRATA_FORMAT, bl_str, cpu, id, msg);
-		break;
-
-	case ERRATA_APPLIES:
-		INFO(ERRATA_FORMAT, bl_str, cpu, id, msg);
-		break;
-
-	case ERRATA_MISSING:
-		WARN(ERRATA_FORMAT, bl_str, cpu, id, msg);
-		break;
-
-	default:
-		WARN(ERRATA_FORMAT, bl_str, cpu, id, "unknown");
-		break;
-	}
-}
 #endif /* !REPORT_ERRATA */
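
The loop in generic_errata_report() above now only warns on mis-ordered entries instead of asserting. The invariant it checks — plain errata first in ascending id order, then CVEs ordered by year and id — written out as a C sketch for clarity (struct erratum_entry and its cve/id fields are the existing ones; the helper itself is illustrative, not part of the patch):

static bool entry_in_order(const struct erratum_entry *prev,
			   const struct erratum_entry *cur)
{
	if (cur->cve == 0U) {
		/* plain errata: ascending ids, and no CVE may precede them */
		return (prev->cve == 0U) && (prev->id < cur->id);
	}
	/* CVEs: ascending by year, then by id within the same year */
	return (prev->cve < cur->cve) ||
	       ((prev->cve == cur->cve) && (prev->id < cur->id));
}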
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 7472d61..d2fdfdd 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -1425,7 +1425,7 @@
 {
 #ifdef IMAGE_BL31
 #if ERRATA_A520_2938996 || ERRATA_X4_2726228
-	if (check_if_affected_core() == ERRATA_APPLIES) {
+	if (check_if_trbe_disable_affected_core()) {
 		if (is_feat_trbe_supported()) {
 			trbe_disable();
 		}
diff --git a/lib/el3_runtime/aarch64/cpu_data.S b/lib/el3_runtime/aarch64/cpu_data.S
index 313f882..02d9415 100644
--- a/lib/el3_runtime/aarch64/cpu_data.S
+++ b/lib/el3_runtime/aarch64/cpu_data.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,29 +7,9 @@
 #include <asm_macros.S>
 #include <lib/el3_runtime/cpu_data.h>
 
-.globl	init_cpu_data_ptr
 .globl	_cpu_data_by_index
 
 /* -----------------------------------------------------------------
- * void init_cpu_data_ptr(void)
- *
- * Initialise the TPIDR_EL3 register to refer to the cpu_data_t
- * for the calling CPU. This must be called before cm_get_cpu_data()
- *
- * This can be called without a valid stack. It assumes that
- * plat_my_core_pos() does not clobber register x10.
- * clobbers: x0, x1, x10
- * -----------------------------------------------------------------
- */
-func init_cpu_data_ptr
-	mov	x10, x30
-	bl	plat_my_core_pos
-	bl	_cpu_data_by_index
-	msr	tpidr_el3, x0
-	ret	x10
-endfunc init_cpu_data_ptr
-
-/* -----------------------------------------------------------------
  * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
  *
  * Return the cpu_data structure for the CPU with given linear index
diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S
index cca08c1..088ab43 100644
--- a/lib/psci/aarch64/psci_helpers.S
+++ b/lib/psci/aarch64/psci_helpers.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -131,4 +131,12 @@
 1:
 	wfi
 	b	1b
+
+	/*
+	 * in case the WFI wasn't terminal, we have to undo errata mitigations.
+	 * These will be smart enough to handle being called the same way
+	 */
+	apply_erratum cortex_a710, ERRATUM(2291219), ERRATA_A710_2291219
+	apply_erratum cortex_x3,   ERRATUM(2313909), ERRATA_X3_2313909, NO_GET_CPU_REV
+	apply_erratum neoverse_n2, ERRATUM(2326639), ERRATA_N2_2326639, NO_GET_CPU_REV
 endfunc psci_power_down_wfi
diff --git a/plat/arm/board/arm_fpga/platform.mk b/plat/arm/board/arm_fpga/platform.mk
index 9d24563..32fb3f5 100644
--- a/plat/arm/board/arm_fpga/platform.mk
+++ b/plat/arm/board/arm_fpga/platform.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+# Copyright (c) 2021-2025, Arm Limited. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -73,7 +73,6 @@
 				lib/cpus/aarch64/cortex_a720.S			\
 				lib/cpus/aarch64/cortex_x3.S 			\
 				lib/cpus/aarch64/cortex_x4.S			\
-				lib/cpus/aarch64/neoverse_n_common.S		\
 				lib/cpus/aarch64/neoverse_n1.S			\
 				lib/cpus/aarch64/neoverse_n2.S			\
 				lib/cpus/aarch64/neoverse_v1.S			\
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 604b1a0..f867da8 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -210,7 +210,6 @@
 					lib/cpus/aarch64/cortex_a710.S		\
 					lib/cpus/aarch64/cortex_a715.S		\
 					lib/cpus/aarch64/cortex_a720.S		\
-					lib/cpus/aarch64/neoverse_n_common.S	\
 					lib/cpus/aarch64/neoverse_n1.S		\
 					lib/cpus/aarch64/neoverse_n2.S		\
 					lib/cpus/aarch64/neoverse_v1.S		\
diff --git a/plat/arm/board/rdn2/platform.mk b/plat/arm/board/rdn2/platform.mk
index f5fc1ad..5c9951f 100644
--- a/plat/arm/board/rdn2/platform.mk
+++ b/plat/arm/board/rdn2/platform.mk
@@ -103,7 +103,6 @@
 ERRATA_N2_2025414	:=	1
 ERRATA_N2_2189731	:=	1
 ERRATA_N2_2138956	:=	1
-ERRATA_N2_2138953	:=	1
 ERRATA_N2_2242415	:=	1
 ERRATA_N2_2138958	:=	1
 ERRATA_N2_2242400	:=	1
diff --git a/plat/mediatek/mt8188/plat_config.mk b/plat/mediatek/mt8188/plat_config.mk
index 2e3392f..765b743 100644
--- a/plat/mediatek/mt8188/plat_config.mk
+++ b/plat/mediatek/mt8188/plat_config.mk
@@ -27,7 +27,6 @@
 ERRATA_A78_1941498 := 1
 ERRATA_A78_1951500 := 1
 ERRATA_A78_1821534 := 1
-ERRATA_A78_2132060 := 1
 ERRATA_A78_2242635 := 1
 ERRATA_A78_2376745 := 1
 ERRATA_A78_2395406 := 1
diff --git a/plat/mediatek/mt8195/platform.mk b/plat/mediatek/mt8195/platform.mk
index 48dafa3..e604d4f 100644
--- a/plat/mediatek/mt8195/platform.mk
+++ b/plat/mediatek/mt8195/platform.mk
@@ -99,7 +99,6 @@
 ERRATA_A78_1941498 := 1
 ERRATA_A78_1951500 := 1
 ERRATA_A78_1821534 := 1
-ERRATA_A78_2132060 := 1
 ERRATA_A78_2242635 := 1
 
 # indicate the reset vector address can be programmed
diff --git a/plat/qemu/common/common.mk b/plat/qemu/common/common.mk
index 2dcac69..ae314a5 100644
--- a/plat/qemu/common/common.mk
+++ b/plat/qemu/common/common.mk
@@ -22,7 +22,6 @@
 				lib/cpus/aarch64/cortex_a72.S		\
 				lib/cpus/aarch64/cortex_a76.S		\
 				lib/cpus/aarch64/cortex_a710.S		\
-				lib/cpus/aarch64/neoverse_n_common.S	\
 				lib/cpus/aarch64/neoverse_n1.S		\
 				lib/cpus/aarch64/neoverse_v1.S		\
 				lib/cpus/aarch64/neoverse_n2.S		\
diff --git a/plat/qti/common/src/aarch64/qti_kryo4_gold.S b/plat/qti/common/src/aarch64/qti_kryo4_gold.S
index 9bcdf54..49b7cf0 100644
--- a/plat/qti/common/src/aarch64/qti_kryo4_gold.S
+++ b/plat/qti/common/src/aarch64/qti_kryo4_gold.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -41,16 +41,6 @@
 	ret
 endfunc qti_kryo4_gold_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Kryo4 Gold. Must follow AAPCS.
- */
-func qti_kryo4_gold_errata_report
-	/* TODO : Need to add support. Required only for debug bl31 image.*/
-	ret
-endfunc qti_kryo4_gold_errata_report
-#endif
-
 /* ---------------------------------------------
  * This function provides kryo4_gold specific
  * register information for crash reporting.
diff --git a/plat/qti/common/src/aarch64/qti_kryo4_silver.S b/plat/qti/common/src/aarch64/qti_kryo4_silver.S
index 36374b7..4a98912 100644
--- a/plat/qti/common/src/aarch64/qti_kryo4_silver.S
+++ b/plat/qti/common/src/aarch64/qti_kryo4_silver.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -35,17 +35,6 @@
 	ret
 endfunc qti_kryo4_silver_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Kryo4 Silver. Must follow AAPCS.
- */
-func qti_kryo4_silver_errata_report
-	/* TODO : Need to add support. Required only for debug bl31 image.*/
-	ret
-endfunc qti_kryo4_silver_errata_report
-#endif
-
-
 /* ---------------------------------------------
  * This function provides kryo4_silver specific
  * register information for crash reporting.
diff --git a/plat/qti/common/src/aarch64/qti_kryo6_gold.S b/plat/qti/common/src/aarch64/qti_kryo6_gold.S
index 577e7ff..5f9463f 100644
--- a/plat/qti/common/src/aarch64/qti_kryo6_gold.S
+++ b/plat/qti/common/src/aarch64/qti_kryo6_gold.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
  * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -41,16 +41,6 @@
 	ret
 endfunc qti_kryo6_gold_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Kryo4 Gold. Must follow AAPCS.
- */
-func qti_kryo6_gold_errata_report
-	/* TODO : Need to add support. Required only for debug bl31 image.*/
-	ret
-endfunc qti_kryo6_gold_errata_report
-#endif
-
 /* ---------------------------------------------
  * This function provides kryo4_gold specific
  * register information for crash reporting.
diff --git a/plat/qti/common/src/aarch64/qti_kryo6_silver.S b/plat/qti/common/src/aarch64/qti_kryo6_silver.S
index 6ad0bca..4a54a64 100644
--- a/plat/qti/common/src/aarch64/qti_kryo6_silver.S
+++ b/plat/qti/common/src/aarch64/qti_kryo6_silver.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
  * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -35,17 +35,6 @@
 	ret
 endfunc qti_kryo6_silver_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Kryo4 Silver. Must follow AAPCS.
- */
-func qti_kryo6_silver_errata_report
-	/* TODO : Need to add support. Required only for debug bl31 image.*/
-	ret
-endfunc qti_kryo6_silver_errata_report
-#endif
-
-
 /* ---------------------------------------------
  * This function provides kryo4_silver specific
  * register information for crash reporting.
diff --git a/plat/qti/sc7280/platform.mk b/plat/qti/sc7280/platform.mk
index 3d7d728..0b5ae52 100644
--- a/plat/qti/sc7280/platform.mk
+++ b/plat/qti/sc7280/platform.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017-2018, Arm Limited and Contributors. All rights reserved.
+# Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
 # Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
@@ -20,7 +20,6 @@
 ERRATA_A55_1530923 		:=	1
 ERRATA_A78_1941498 		:=	1
 ERRATA_A78_1951500 		:=	1
-ERRATA_A78_2132060 		:=	1
 
 # Disable the PSCI platform compatibility layer
 ENABLE_PLAT_COMPAT		:=	0
diff --git a/services/std_svc/errata_abi/errata_abi_main.c b/services/std_svc/errata_abi/errata_abi_main.c
index 0a1d4f3..d688d7c 100644
--- a/services/std_svc/errata_abi/errata_abi_main.c
+++ b/services/std_svc/errata_abi/errata_abi_main.c
@@ -197,8 +197,11 @@
 	while ((entry <= end) && (ret_val == EM_UNKNOWN_ERRATUM)) {
 		if (entry->id == errata_id) {
 			if (entry->check_func(rev_var)) {
-				if (entry->chosen)
-					return EM_HIGHER_EL_MITIGATION;
+				if (entry->chosen & WA_ENABLED_MASK)
+					if (entry->chosen & SPLIT_WA_MASK)
+						return EM_AFFECTED;
+					else
+						return EM_HIGHER_EL_MITIGATION;
 				else
 					return EM_AFFECTED;
 			}