Merge "Revert "libc/memset: Implement function in assembler"" into integration
diff --git a/common/backtrace/backtrace.c b/common/backtrace/backtrace.c
index ef57500..a07c066 100644
--- a/common/backtrace/backtrace.c
+++ b/common/backtrace/backtrace.c
@@ -70,7 +70,7 @@
 	} else if (el == 2U) {
 		ats1e2r(addr);
 	} else {
-		ats1e1r(addr);
+		AT(ats1e1r, addr);
 	}
 
 	isb();
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index 6b6c639..33b5090 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -127,6 +127,9 @@
    Earlier revisions of the CPU have other errata which require the same
    workaround in software, so they should be covered anyway.
 
+-  ``ERRATA_A53_1530924``: This applies errata 1530924 workaround to all
+   revisions of Cortex-A53 CPU.
+
 For Cortex-A55, the following errata build flags are defined :
 
 -  ``ERRATA_A55_768277``: This applies errata 768277 workaround to Cortex-A55
@@ -147,6 +150,9 @@
 -  ``ERRATA_A55_1221012``: This applies errata 1221012 workaround to Cortex-A55
    CPU. This needs to be enabled only for revision <= r1p0 of the CPU.
 
+-  ``ERRATA_A55_1530923``: This applies errata 1530923 workaround to all
+   revisions of Cortex-A55 CPU.
+
 For Cortex-A57, the following errata build flags are defined :
 
 -  ``ERRATA_A57_806969``: This applies errata 806969 workaround to Cortex-A57
@@ -182,12 +188,17 @@
 -  ``ERRATA_A57_859972``: This applies errata 859972 workaround to Cortex-A57
    CPU. This needs to be enabled only for revision <= r1p3 of the CPU.
 
+-  ``ERRATA_A57_1319537``: This applies errata 1319537 workaround to all
+   revisions of Cortex-A57 CPU.
 
 For Cortex-A72, the following errata build flags are defined :
 
 -  ``ERRATA_A72_859971``: This applies errata 859971 workaround to Cortex-A72
    CPU. This needs to be enabled only for revision <= r0p3 of the CPU.
 
+-  ``ERRATA_A72_1319367``: This applies errata 1319367 workaround to all
+   revisions of Cortex-A72 CPU.
+
 For Cortex-A73, the following errata build flags are defined :
 
 -  ``ERRATA_A73_852427``: This applies errata 852427 workaround to Cortex-A73
@@ -233,6 +244,11 @@
 -  ``ERRATA_A76_1800710``: This applies errata 1800710 workaround to Cortex-A76
    CPU. This needs to be enabled only for revision <= r4p0 of the CPU.
 
+-  ``ERRATA_A76_1165522``: This applies errata 1165522 workaround to all
+   revisions of Cortex-A76 CPU. This erratum is fixed in r3p0 but, due to a
+   limitation of the errata framework, the workaround is applied to all
+   revisions of Cortex-A76 CPU.
+
 For Cortex-A77, the following errata build flags are defined :
 
 -  ``ERRATA_A77_1800714``: This applies errata 1800714 workaround to Cortex-A77
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index ae328c7..b4fe404 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -694,28 +694,36 @@
    default value of this flag is ``no``. Note this option must be enabled only
    for ARM architecture greater than Armv8.5-A.
 
--  ``ERRATA_SPECULATIVE_AT``: This flag enables/disables page table walk during
-   context restore as speculative AT instructions using an out-of-context
-   translation regime could cause subsequent requests to generate an incorrect
-   translation.
-   System registers are not updated during context save, hence this workaround
-   need not be applied in the context save path.
+-  ``ERRATA_SPECULATIVE_AT``: This flag determines whether to enable the ``AT``
+   speculative errata workaround or not. It accepts 2 values: ``1`` and ``0``.
+   The default value of this flag is ``0``.
+
+   The ``AT`` speculative errata workaround disables the stage 1 page table walk
+   for lower ELs (EL1 and EL0) in EL3, so that a speculative ``AT`` fetch at any
+   point produces either the correct result or a failure without TLB allocation.
 
    This boolean option enables errata for all below CPUs.
 
-   +---------+--------------+
-   | Errata  |      CPU     |
-   +=========+==============+
-   | 1165522 |  Cortex-A76  |
-   +---------+--------------+
-   | 1319367 |  Cortex-A72  |
-   +---------+--------------+
-   | 1319537 |  Cortex-A57  |
-   +---------+--------------+
-   | 1530923 |  Cortex-A55  |
-   +---------+--------------+
-   | 1530924 |  Cortex-A53  |
-   +---------+--------------+
+   +---------+--------------+-------------------------+
+   | Errata  |      CPU     |     Workaround Define   |
+   +=========+==============+=========================+
+   | 1165522 |  Cortex-A76  |  ``ERRATA_A76_1165522`` |
+   +---------+--------------+-------------------------+
+   | 1319367 |  Cortex-A72  |  ``ERRATA_A72_1319367`` |
+   +---------+--------------+-------------------------+
+   | 1319537 |  Cortex-A57  |  ``ERRATA_A57_1319537`` |
+   +---------+--------------+-------------------------+
+   | 1530923 |  Cortex-A55  |  ``ERRATA_A55_1530923`` |
+   +---------+--------------+-------------------------+
+   | 1530924 |  Cortex-A53  |  ``ERRATA_A53_1530924`` |
+   +---------+--------------+-------------------------+
+
+   .. note::
+      This option is enabled by the build system only if the platform sets any
+      of the defines listed in the 'Workaround Define' column of the table above.
+      If this option is enabled for the EL3 software, then the EL2 software must
+      also implement this workaround, due to the behaviour of the errata
+      described in the new SDEN document, which will be published soon.
 
 - ``RAS_TRAP_LOWER_EL_ERR_ACCESS``: This flag enables/disables the SCR_EL3.TERR
   bit, to trap access to the RAS ERR and RAS ERX registers from lower ELs.
diff --git a/docs/glossary.rst b/docs/glossary.rst
index e087079..08add3a 100644
--- a/docs/glossary.rst
+++ b/docs/glossary.rst
@@ -18,6 +18,9 @@
    API
       Application Programming Interface
 
+   AT
+      Address Translation
+
    BTI
       Branch Target Identification. An Armv8.5 extension providing additional
       control flow integrity around indirect branches and their targets.
diff --git a/docs/process/coding-guidelines.rst b/docs/process/coding-guidelines.rst
index f7d53a9..9708604 100644
--- a/docs/process/coding-guidelines.rst
+++ b/docs/process/coding-guidelines.rst
@@ -95,10 +95,13 @@
 be compiled into debug builds and all statements with a log level
 ``<= LOG_LEVEL_NOTICE`` will be compiled into release builds. This can be
 overridden from the command line or by the platform makefile (although it may be
-necessary to clean the build directory first). For example, to enable
-``VERBOSE`` logging on FVP:
+necessary to clean the build directory first).
 
-``make PLAT=fvp LOG_LEVEL=50 all``
+For example, to enable ``VERBOSE`` logging on FVP:
+
+.. code:: shell
+
+  make PLAT=fvp LOG_LEVEL=50 all
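
For illustration, a short C sketch of how the levels behave at the call site
(the function name is hypothetical; VERBOSE() and NOTICE() come from
common/debug.h):

    #include <common/debug.h>

    void report_clock_rate(unsigned int rate_hz)
    {
        /* Compiled in only when LOG_LEVEL >= LOG_LEVEL_VERBOSE (50). */
        VERBOSE("clock rate: %u Hz\n", rate_hz);

        /* LOG_LEVEL_NOTICE is 20, so this survives in release builds too. */
        NOTICE("clock configured\n");
    }
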
 
 Use const data where possible
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/drivers/allwinner/axp/common.c b/drivers/allwinner/axp/common.c
index 13437fe..e98b16f 100644
--- a/drivers/allwinner/axp/common.c
+++ b/drivers/allwinner/axp/common.c
@@ -105,6 +105,25 @@
 	return false;
 }
 
+static bool board_uses_usb0_host_mode(const void *fdt)
+{
+	int node, length;
+	const char *prop;
+
+	node = fdt_node_offset_by_compatible(fdt, -1,
+					     "allwinner,sun8i-a33-musb");
+	if (node < 0) {
+		return false;
+	}
+
+	prop = fdt_getprop(fdt, node, "dr_mode", &length);
+	if (!prop) {
+		return false;
+	}
+
+	return !strncmp(prop, "host", length);
+}
+
 void axp_setup_regulators(const void *fdt)
 {
 	int node;
@@ -121,7 +140,8 @@
 	}
 
 	/* This applies to AXP803 only. */
-	if (fdt_getprop(fdt, node, "x-powers,drive-vbus-en", NULL)) {
+	if (fdt_getprop(fdt, node, "x-powers,drive-vbus-en", NULL) &&
+	    board_uses_usb0_host_mode(fdt)) {
 		axp_clrbits(0x8f, BIT(4));
 		axp_setbits(0x30, BIT(2));
 		INFO("PMIC: Enabling DRIVEVBUS\n");
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
index 4bff0f6..1f2f4a9 100644
--- a/include/arch/aarch64/arch_helpers.h
+++ b/include/arch/aarch64/arch_helpers.h
@@ -590,4 +590,24 @@
 #define read_clusterpwrdn()	read_clusterpwrdn_el1()
 #define write_clusterpwrdn(_v)	write_clusterpwrdn_el1(_v)
 
+#if ERRATA_SPECULATIVE_AT
+/*
+ * Assuming SCTLR.M bit is already enabled
+ * 1. Enable page table walk by clearing TCR_EL1.EPDx bits
+ * 2. Execute AT instruction for lower EL1/0
+ * 3. Disable page table walk by setting TCR_EL1.EPDx bits
+ */
+#define AT(_at_inst, _va)	\
+{	\
+	assert((read_sctlr_el1() & SCTLR_M_BIT) != 0ULL);	\
+	write_tcr_el1(read_tcr_el1() & ~(TCR_EPD0_BIT | TCR_EPD1_BIT));	\
+	isb();	\
+	_at_inst(_va);	\
+	write_tcr_el1(read_tcr_el1() | (TCR_EPD0_BIT | TCR_EPD1_BIT));	\
+	isb();	\
+}
+#else
+#define AT(_at_inst, _va)	_at_inst(_va);
+#endif
+
 #endif /* ARCH_HELPERS_H */
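
For illustration, a minimal sketch of how a caller would wrap an address
translation instruction with the AT() macro introduced above (the helper
function below is hypothetical; ats1e1r(), isb() and read_par_el1() are
existing AArch64 accessors used elsewhere in this patch):

    #include <assert.h>
    #include <stdint.h>
    #include <arch_helpers.h>

    /* Hypothetical helper: translate a lower-EL VA and return PAR_EL1. */
    static uint64_t translate_el1_va(uintptr_t va)
    {
        /*
         * With ERRATA_SPECULATIVE_AT=1 this expands to: clear the
         * TCR_EL1.EPD0/EPD1 bits, isb, issue AT S1E1R, set the EPDx bits
         * again, isb. Otherwise it is a plain ats1e1r(va).
         */
        AT(ats1e1r, va);
        isb();

        return read_par_el1();
    }
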
diff --git a/include/services/ffa_svc.h b/include/services/ffa_svc.h
index 7285077..0513eab 100644
--- a/include/services/ffa_svc.h
+++ b/include/services/ffa_svc.h
@@ -138,4 +138,43 @@
  */
 #define FFA_PARAM_MBZ			U(0x0)
 
+/*
+ * Maximum FF-A endpoint id value
+ */
+#define FFA_ENDPOINT_ID_MAX			U(1 << 16)
+
+/*
+ * Mask for source and destination endpoint id in
+ * a direct message request/response.
+ */
+#define FFA_DIRECT_MSG_ENDPOINT_ID_MASK		U(0xffff)
+
+/*
+ * Bit shift for destination endpoint id in a direct message request/response.
+ */
+#define FFA_DIRECT_MSG_DESTINATION_SHIFT	U(0)
+
+/*
+ * Bit shift for source endpoint id in a direct message request/response.
+ */
+#define FFA_DIRECT_MSG_SOURCE_SHIFT		U(16)
+
+/******************************************************************************
+ * ffa_endpoint_destination
+ *****************************************************************************/
+static inline uint16_t ffa_endpoint_destination(unsigned int ep)
+{
+	return (ep >> FFA_DIRECT_MSG_DESTINATION_SHIFT) &
+		FFA_DIRECT_MSG_ENDPOINT_ID_MASK;
+}
+
+/******************************************************************************
+ * ffa_endpoint_source
+ *****************************************************************************/
+static inline uint16_t ffa_endpoint_source(unsigned int ep)
+{
+	return (ep >> FFA_DIRECT_MSG_SOURCE_SHIFT) &
+		FFA_DIRECT_MSG_ENDPOINT_ID_MASK;
+}
+
 #endif /* FFA_SVC_H */
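
For illustration, the inverse of the two helpers above: packing a source and
destination endpoint id into the 32-bit argument of a direct message request.
The packing helper itself is not part of this patch and is only a sketch:

    #include <stdint.h>
    #include <services/ffa_svc.h>

    /* Hypothetical helper: compose the w1 endpoint field of a direct message. */
    static inline uint32_t ffa_endpoints_pack(uint16_t source, uint16_t destination)
    {
        return ((uint32_t)source << FFA_DIRECT_MSG_SOURCE_SHIFT) |
               ((uint32_t)destination << FFA_DIRECT_MSG_DESTINATION_SHIFT);
    }

ffa_endpoint_source() and ffa_endpoint_destination() recover the two halves of
such a value, as used by the SPMD when routing direct messages.
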
diff --git a/include/services/spm_core_manifest.h b/include/services/spm_core_manifest.h
index 64ecce0..453b21c 100644
--- a/include/services/spm_core_manifest.h
+++ b/include/services/spm_core_manifest.h
@@ -44,7 +44,7 @@
 	uint32_t binary_size;
 
 	/*
-	 * ID of the SPMD (mandatory)
+	 * ID of the SPMC (mandatory)
 	 */
 	uint16_t spmc_id;
 
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
index b105de2..df11d86 100644
--- a/lib/cpus/aarch64/cortex_a53.S
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -239,6 +239,20 @@
 	ret
 endfunc check_errata_843419
 
+	/* --------------------------------------------------
+	 * Errata workaround for Cortex A53 Errata #1530924.
+	 * This applies to all revisions of Cortex A53.
+	 * --------------------------------------------------
+	 */
+func check_errata_1530924
+#if ERRATA_A53_1530924
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_1530924
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A53.
 	 * Shall clobber: x0-x19
@@ -359,6 +373,7 @@
 	report_errata ERRATA_A53_836870, cortex_a53, disable_non_temporal_hint
 	report_errata ERRATA_A53_843419, cortex_a53, 843419
 	report_errata ERRATA_A53_855873, cortex_a53, 855873
+	report_errata ERRATA_A53_1530924, cortex_a53, 1530924
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S
index 8e13824..7838304 100644
--- a/lib/cpus/aarch64/cortex_a55.S
+++ b/lib/cpus/aarch64/cortex_a55.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -222,6 +222,20 @@
 	b	cpu_rev_var_ls
 endfunc check_errata_1221012
 
+	/* --------------------------------------------------
+	 * Errata workaround for Cortex A55 Errata #1530923.
+	 * This applies to all revisions of Cortex A55.
+	 * --------------------------------------------------
+	 */
+func check_errata_1530923
+#if ERRATA_A55_1530923
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_1530923
+
 func cortex_a55_reset_func
 	mov	x19, x30
 
@@ -306,6 +320,7 @@
 	report_errata ERRATA_A55_846532, cortex_a55, 846532
 	report_errata ERRATA_A55_903758, cortex_a55, 903758
 	report_errata ERRATA_A55_1221012, cortex_a55, 1221012
+	report_errata ERRATA_A55_1530923, cortex_a55, 1530923
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index 3fee470..8ef0f92 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
  * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -396,6 +396,20 @@
 	ret
 endfunc check_errata_cve_2018_3639
 
+	/* --------------------------------------------------
+	 * Errata workaround for Cortex A57 Errata #1319537.
+	 * This applies to all revisions of Cortex A57.
+	 * --------------------------------------------------
+	 */
+func check_errata_1319537
+#if ERRATA_A57_1319537
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_1319537
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A57.
 	 * Shall clobber: x0-x19
@@ -613,6 +627,7 @@
 	report_errata ERRATA_A57_829520, cortex_a57, 829520
 	report_errata ERRATA_A57_833471, cortex_a57, 833471
 	report_errata ERRATA_A57_859972, cortex_a57, 859972
+	report_errata ERRATA_A57_1319537, cortex_a57, 1319537
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715
 	report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639
 
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index 38b76b9..aff6072 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -119,6 +119,20 @@
 	ret
 endfunc check_errata_cve_2018_3639
 
+	/* --------------------------------------------------
+	 * Errata workaround for Cortex A72 Errata #1319367.
+	 * This applies to all revisions of Cortex A72.
+	 * --------------------------------------------------
+	 */
+func check_errata_1319367
+#if ERRATA_A72_1319367
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_1319367
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A72.
 	 * -------------------------------------------------
@@ -282,6 +296,7 @@
 	 * checking functions of each errata.
 	 */
 	report_errata ERRATA_A72_859971, cortex_a72, 859971
+	report_errata ERRATA_A72_1319367, cortex_a72, 1319367
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
 	report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
 
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index 10011f7..0895946 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -465,6 +465,23 @@
 	ret
 endfunc cortex_a76_disable_wa_cve_2018_3639
 
+	/* --------------------------------------------------------------
+	 * Errata Workaround for Cortex A76 Errata #1165522.
+	 * This applies only to revisions <= r3p0 of Cortex A76.
+	 * Due to the nature of the erratum it is applied unconditionally
+	 * when built in, so report it as applicable in this case.
+	 * --------------------------------------------------------------
+	 */
+func check_errata_1165522
+#if ERRATA_A76_1165522
+	mov	x0, #ERRATA_APPLIES
+	ret
+#else
+	mov	x1, #0x30
+	b	cpu_rev_var_ls
+#endif
+endfunc check_errata_1165522
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A76.
 	 * Shall clobber: x0-x19
@@ -597,6 +614,7 @@
 	report_errata ERRATA_A76_1286807, cortex_a76, 1286807
 	report_errata ERRATA_A76_1791580, cortex_a76, 1791580
 	report_errata ERRATA_A76_1800710, cortex_a76, 1800710
+	report_errata ERRATA_A76_1165522, cortex_a76, 1165522
 	report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
 	report_errata ERRATA_DSU_798953, cortex_a76, dsu_798953
 	report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index e494375..8fc3b60 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -130,6 +130,10 @@
 # of by the rich OS.
 ERRATA_A53_855873	?=0
 
+# Flag to apply erratum 1530924 workaround during reset. This erratum applies
+# to all revisions of Cortex A53 cpu.
+ERRATA_A53_1530924	?=0
+
 # Flag to apply erratum 768277 workaround during reset. This erratum applies
 # only to revision r0p0 of the Cortex A55 cpu.
 ERRATA_A55_768277	?=0
@@ -154,6 +158,10 @@
 # only to revision <= r1p0 of the Cortex A55 cpu.
 ERRATA_A55_1221012	?=0
 
+# Flag to apply erratum 1530923 workaround during reset. This erratum applies
+# to all revisions of Cortex A55 cpu.
+ERRATA_A55_1530923	?=0
+
 # Flag to apply erratum 806969 workaround during reset. This erratum applies
 # only to revision r0p0 of the Cortex A57 cpu.
 ERRATA_A57_806969	?=0
@@ -198,10 +206,18 @@
 # only to revision <= r1p3 of the Cortex A57 cpu.
 ERRATA_A57_859972	?=0
 
+# Flag to apply erratum 1319537 workaround during reset. This erratum applies
+# to all revisions of Cortex A57 cpu.
+ERRATA_A57_1319537	?=0
+
 # Flag to apply erratum 855971 workaround during reset. This erratum applies
 # only to revision <= r0p3 of the Cortex A72 cpu.
 ERRATA_A72_859971	?=0
 
+# Flag to apply erratum 1319367 workaround during reset. This erratum applies
+# to all revisions of Cortex A72 cpu.
+ERRATA_A72_1319367	?=0
+
 # Flag to apply erratum 852427 workaround during reset. This erratum applies
 # only to revision r0p0 of the Cortex A73 cpu.
 ERRATA_A73_852427	?=0
@@ -258,6 +274,10 @@
 # only to revision <= r4p0 of the Cortex A76 cpu.
 ERRATA_A76_1800710	?=0
 
+# Flag to apply erratum 1165522 workaround during reset. This erratum applies
+# to all revisions of Cortex A76 cpu.
+ERRATA_A76_1165522	?=0
+
 # Flag to apply erratum 1800714 workaround during reset. This erratum applies
 # only to revision <= r1p1 of the Cortex A77 cpu.
 ERRATA_A77_1800714	?=0
@@ -379,6 +399,10 @@
 $(eval $(call assert_boolean,ERRATA_A53_855873))
 $(eval $(call add_define,ERRATA_A53_855873))
 
+# Process ERRATA_A53_1530924 flag
+$(eval $(call assert_boolean,ERRATA_A53_1530924))
+$(eval $(call add_define,ERRATA_A53_1530924))
+
 # Process ERRATA_A55_768277 flag
 $(eval $(call assert_boolean,ERRATA_A55_768277))
 $(eval $(call add_define,ERRATA_A55_768277))
@@ -403,6 +427,10 @@
 $(eval $(call assert_boolean,ERRATA_A55_1221012))
 $(eval $(call add_define,ERRATA_A55_1221012))
 
+# Process ERRATA_A55_1530923 flag
+$(eval $(call assert_boolean,ERRATA_A55_1530923))
+$(eval $(call add_define,ERRATA_A55_1530923))
+
 # Process ERRATA_A57_806969 flag
 $(eval $(call assert_boolean,ERRATA_A57_806969))
 $(eval $(call add_define,ERRATA_A57_806969))
@@ -447,10 +475,18 @@
 $(eval $(call assert_boolean,ERRATA_A57_859972))
 $(eval $(call add_define,ERRATA_A57_859972))
 
+# Process ERRATA_A57_1319537 flag
+$(eval $(call assert_boolean,ERRATA_A57_1319537))
+$(eval $(call add_define,ERRATA_A57_1319537))
+
 # Process ERRATA_A72_859971 flag
 $(eval $(call assert_boolean,ERRATA_A72_859971))
 $(eval $(call add_define,ERRATA_A72_859971))
 
+# Process ERRATA_A72_1319367 flag
+$(eval $(call assert_boolean,ERRATA_A72_1319367))
+$(eval $(call add_define,ERRATA_A72_1319367))
+
 # Process ERRATA_A73_852427 flag
 $(eval $(call assert_boolean,ERRATA_A73_852427))
 $(eval $(call add_define,ERRATA_A73_852427))
@@ -507,6 +543,10 @@
 $(eval $(call assert_boolean,ERRATA_A76_1800710))
 $(eval $(call add_define,ERRATA_A76_1800710))
 
+# Process ERRATA_A76_1165522 flag
+$(eval $(call assert_boolean,ERRATA_A76_1165522))
+$(eval $(call add_define,ERRATA_A76_1165522))
+
 # Process ERRATA_A77_1800714 flag
 $(eval $(call assert_boolean,ERRATA_A77_1800714))
 $(eval $(call add_define,ERRATA_A77_1800714))
@@ -580,3 +620,10 @@
 TF_CFLAGS_aarch64	+= -mfix-cortex-a53-835769
 TF_LDFLAGS_aarch64	+= --fix-cortex-a53-835769
 endif
+
+ifneq ($(filter 1,${ERRATA_A53_1530924} ${ERRATA_A55_1530923}	\
+	${ERRATA_A57_1319537} ${ERRATA_A72_1319367} ${ERRATA_A76_1165522}),)
+ERRATA_SPECULATIVE_AT	:= 1
+else
+ERRATA_SPECULATIVE_AT	:= 0
+endif
diff --git a/plat/allwinner/common/allwinner-common.mk b/plat/allwinner/common/allwinner-common.mk
index e60ebc6..997aaa6 100644
--- a/plat/allwinner/common/allwinner-common.mk
+++ b/plat/allwinner/common/allwinner-common.mk
@@ -5,6 +5,8 @@
 #
 
 include lib/xlat_tables_v2/xlat_tables.mk
+include lib/libfdt/libfdt.mk
+include drivers/arm/gic/v2/gicv2.mk
 
 AW_PLAT			:=	plat/allwinner
 
@@ -12,8 +14,6 @@
 				-I${AW_PLAT}/common/include		\
 				-I${AW_PLAT}/${PLAT}/include
 
-include lib/libfdt/libfdt.mk
-
 PLAT_BL_COMMON_SOURCES	:=	drivers/ti/uart/${ARCH}/16550_console.S	\
 				${XLAT_TABLES_LIB_SRCS}			\
 				${AW_PLAT}/common/plat_helpers.S	\
@@ -22,9 +22,7 @@
 BL31_SOURCES		+=	drivers/allwinner/axp/common.c		\
 				drivers/allwinner/sunxi_msgbox.c	\
 				drivers/arm/css/scpi/css_scpi.c		\
-				drivers/arm/gic/common/gic_common.c	\
-				drivers/arm/gic/v2/gicv2_helpers.c	\
-				drivers/arm/gic/v2/gicv2_main.c		\
+				${GICV2_SOURCES}			\
 				drivers/delay_timer/delay_timer.c	\
 				drivers/delay_timer/generic_delay_timer.c \
 				lib/cpus/${ARCH}/cortex_a53.S		\
diff --git a/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts b/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts
index a1c9094..ca42da0 100644
--- a/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts
+++ b/plat/arm/board/fvp/fdts/fvp_spmc_manifest.dts
@@ -5,6 +5,14 @@
  */
 /dts-v1/;
 
+#define AFF 00
+
+#include "fvp-defs.dtsi"
+#undef POST
+#define POST \
+		enable-method = "psci"; \
+		};
+
 / {
 	compatible = "arm,ffa-core-manifest-1.0";
 	#address-cells = <2>;
@@ -17,6 +25,7 @@
 		exec_state = <0x0>;
 		load_address = <0x0 0x6000000>;
 		entrypoint = <0x0 0x6000000>;
+		binary_size = <0x80000>;
 	};
 
 	chosen {
@@ -38,28 +47,28 @@
 			vcpu_count = <2>;
 			mem_size = <1048576>;
 		};
+		vm3 {
+			is_ffa_partition;
+			debug_name = "cactus-tertiary";
+			load_address = <0x7200000>;
+			vcpu_count = <2>;
+			mem_size = <1048576>;
+		};
 	};
 
 	cpus {
 		#address-cells = <0x2>;
 		#size-cells = <0x0>;
 
-		cpu-map {
-			cluster0 {
-				core0 {
-					cpu = <0x2>;
-				};
-			};
-		};
-
-		cpu@0 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x0>;
-			enable-method = "psci";
-			next-level-cache = <0xc>;
-			phandle = <0x2>;
-		};
+		CPU_0
+		/* SPM (Hafnium) requires that secondary CPU nodes be declared in descending order */
+		CPU_7
+		CPU_6
+		CPU_5
+		CPU_4
+		CPU_3
+		CPU_2
+		CPU_1
 	};
 
 	memory@60000000 {
diff --git a/plat/arm/board/fvp/fdts/fvp_tb_fw_config.dts b/plat/arm/board/fvp/fdts/fvp_tb_fw_config.dts
index 1ee7285..692f5a9 100644
--- a/plat/arm/board/fvp/fdts/fvp_tb_fw_config.dts
+++ b/plat/arm/board/fvp/fdts/fvp_tb_fw_config.dts
@@ -92,6 +92,11 @@
 			load-address = <0x7100000>;
 			owner = "Plat";
 		};
+
+		cactus-tertiary {
+			uuid = <0x735cb579 0xb9448c1d 0xe1619385 0xd2d80a77>;
+			load-address = <0x7200000>;
+		};
 #endif
 	};
 
diff --git a/plat/arm/common/arm_common.c b/plat/arm/common/arm_common.c
index f7ee7a8..296aaf8 100644
--- a/plat/arm/common/arm_common.c
+++ b/plat/arm/common/arm_common.c
@@ -216,7 +216,7 @@
 		 * Translate entry point to Physical Address using the EL1&0
 		 * translation regime, including stage 2.
 		 */
-		ats12e1r(ep);
+		AT(ats12e1r, ep);
 	}
 	isb();
 	par = read_par_el1();
diff --git a/plat/imx/imx8m/gpc_common.c b/plat/imx/imx8m/gpc_common.c
index babcecf..1e55f05 100644
--- a/plat/imx/imx8m/gpc_common.c
+++ b/plat/imx/imx8m/gpc_common.c
@@ -18,6 +18,8 @@
 
 static uint32_t gpc_imr_offset[] = { IMR1_CORE0_A53, IMR1_CORE1_A53, IMR1_CORE2_A53, IMR1_CORE3_A53, };
 
+DEFINE_BAKERY_LOCK(gpc_lock);
+
 #pragma weak imx_set_cpu_pwr_off
 #pragma weak imx_set_cpu_pwr_on
 #pragma weak imx_set_cpu_lpm
@@ -38,16 +40,27 @@
 
 void imx_set_cpu_pwr_off(unsigned int core_id)
 {
+
+	bakery_lock_get(&gpc_lock);
+
 	/* enable the wfi power down of the core */
 	mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id));
+
+	bakery_lock_release(&gpc_lock);
+
 	/* assert the pcg pcr bit of the core */
 	mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
 }
 
 void imx_set_cpu_pwr_on(unsigned int core_id)
 {
+	bakery_lock_get(&gpc_lock);
+
 	/* clear the wfi power down bit of the core */
 	mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id));
+
+	bakery_lock_release(&gpc_lock);
+
 	/* assert the ncpuporeset */
 	mmio_clrbits_32(IMX_SRC_BASE + SRC_A53RCR1, (1 << core_id));
 	/* assert the pcg pcr bit of the core */
@@ -67,6 +80,8 @@
 
 void imx_set_cpu_lpm(unsigned int core_id, bool pdn)
 {
+	bakery_lock_get(&gpc_lock);
+
 	if (pdn) {
 		/* enable the core WFI PDN & IRQ PUP */
 		mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
@@ -80,6 +95,8 @@
 		/* deassert the pcg pcr bit of the core */
 		mmio_clrbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
 	}
+
+	bakery_lock_release(&gpc_lock);
 }
 
 /*
diff --git a/plat/imx/imx8m/imx8mq/gpc.c b/plat/imx/imx8m/imx8mq/gpc.c
index 942ae45..367c941 100644
--- a/plat/imx/imx8m/imx8mq/gpc.c
+++ b/plat/imx/imx8m/imx8mq/gpc.c
@@ -19,9 +19,14 @@
 /* use wfi power down the core */
 void imx_set_cpu_pwr_off(unsigned int core_id)
 {
+	bakery_lock_get(&gpc_lock);
+
 	/* enable the wfi power down of the core */
 	mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
 			(1 << (core_id + 20)));
+
+	bakery_lock_release(&gpc_lock);
+
 	/* assert the pcg pcr bit of the core */
 	mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
 };
@@ -29,6 +34,8 @@
 /* if out of lpm, we need to do reverse steps */
 void imx_set_cpu_lpm(unsigned int core_id, bool pdn)
 {
+	bakery_lock_get(&gpc_lock);
+
 	if (pdn) {
 		/* enable the core WFI PDN & IRQ PUP */
 		mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
@@ -42,6 +49,8 @@
 		/* deassert the pcg pcr bit of the core */
 		mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
 	}
+
+	bakery_lock_release(&gpc_lock);
 }
 
 void imx_pup_pdn_slot_config(int last_core, bool pdn)
diff --git a/plat/imx/imx8m/include/gpc.h b/plat/imx/imx8m/include/gpc.h
index 6f86e1d..075da91 100644
--- a/plat/imx/imx8m/include/gpc.h
+++ b/plat/imx/imx8m/include/gpc.h
@@ -54,6 +54,8 @@
 	bool always_on;
 };
 
+DECLARE_BAKERY_LOCK(gpc_lock);
+
 /* function declare */
 void imx_gpc_init(void);
 void imx_set_cpu_secure_entry(unsigned int core_index, uintptr_t sec_entrypoint);
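
The gpc_lock added in this patch serialises read-modify-write updates of the
shared LPCR_A53_AD register, which several cores may otherwise modify
concurrently via mmio_setbits_32()/mmio_clrbits_32(); the per-core
COREx_PGC_PCR writes stay outside the lock. For illustration, the general
pattern with the existing bakery lock API (the register and function names
below are placeholders):

    #include <stdint.h>
    #include <lib/bakery_lock.h>
    #include <lib/mmio.h>

    /* One lock protecting a register that several cores read-modify-write. */
    DEFINE_BAKERY_LOCK(shared_reg_lock);

    static void set_shared_reg_bits(uintptr_t reg, uint32_t mask)
    {
        bakery_lock_get(&shared_reg_lock);

        /* The read-modify-write is now atomic with respect to other cores. */
        mmio_setbits_32(reg, mask);

        bakery_lock_release(&shared_reg_lock);
    }
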
diff --git a/services/spd/tlkd/tlkd_common.c b/services/spd/tlkd/tlkd_common.c
index dbe6c2e..820bd8a 100644
--- a/services/spd/tlkd/tlkd_common.c
+++ b/services/spd/tlkd/tlkd_common.c
@@ -38,16 +38,16 @@
 	int at = type & AT_MASK;
 	switch (at) {
 	case 0:
-		ats12e1r(va);
+		AT(ats12e1r, va);
 		break;
 	case 1:
-		ats12e1w(va);
+		AT(ats12e1w, va);
 		break;
 	case 2:
-		ats12e0r(va);
+		AT(ats12e0r, va);
 		break;
 	case 3:
-		ats12e0w(va);
+		AT(ats12e0w, va);
 		break;
 	default:
 		assert(0); /* Unreachable */
diff --git a/services/std_svc/spmd/spmd.mk b/services/std_svc/spmd/spmd.mk
index 38d43f1..73f7c85 100644
--- a/services/std_svc/spmd/spmd.mk
+++ b/services/std_svc/spmd/spmd.mk
@@ -10,6 +10,7 @@
 
 SPMD_SOURCES	+=	$(addprefix services/std_svc/spmd/,	\
 			${ARCH}/spmd_helpers.S			\
+			spmd_pm.c				\
 			spmd_main.c)
 
 # Let the top-level Makefile know that we intend to include a BL32 image
diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c
index 4c2b58d..6ed2098 100644
--- a/services/std_svc/spmd/spmd_main.c
+++ b/services/std_svc/spmd/spmd_main.c
@@ -42,13 +42,35 @@
 static entry_point_info_t *spmc_ep_info;
 
 /*******************************************************************************
+ * SPM Core context on CPU based on mpidr.
+ ******************************************************************************/
+spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
+{
+	return &spm_core_context[plat_core_pos_by_mpidr(mpidr)];
+}
+
+/*******************************************************************************
  * SPM Core context on current CPU get helper.
  ******************************************************************************/
 spmd_spm_core_context_t *spmd_get_context(void)
 {
-	unsigned int linear_id = plat_my_core_pos();
+	return spmd_get_context_by_mpidr(read_mpidr());
+}
 
-	return &spm_core_context[linear_id];
+/*******************************************************************************
+ * SPM Core entry point information get helper.
+ ******************************************************************************/
+entry_point_info_t *spmd_spmc_ep_info_get(void)
+{
+	return spmc_ep_info;
+}
+
+/*******************************************************************************
+ * SPM Core ID getter.
+ ******************************************************************************/
+uint16_t spmd_spmc_id_get(void)
+{
+	return spmc_attrs.spmc_id;
 }
 
 /*******************************************************************************
@@ -125,9 +147,19 @@
 {
 	spmd_spm_core_context_t *ctx = spmd_get_context();
 	uint64_t rc;
+	unsigned int linear_id = plat_my_core_pos();
+	unsigned int core_id;
 
 	VERBOSE("SPM Core init start.\n");
-	ctx->state = SPMC_STATE_RESET;
+	ctx->state = SPMC_STATE_ON_PENDING;
+
+	/* Set the SPMC context state on other CPUs to OFF */
+	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
+		if (core_id != linear_id) {
+			spm_core_context[core_id].state = SPMC_STATE_OFF;
+			spm_core_context[core_id].secondary_ep.entry_point = 0UL;
+		}
+	}
 
 	rc = spmd_spm_core_sync_entry(ctx);
 	if (rc != 0ULL) {
@@ -135,7 +167,8 @@
 		return 0;
 	}
 
-	ctx->state = SPMC_STATE_IDLE;
+	ctx->state = SPMC_STATE_ON;
+
 	VERBOSE("SPM Core init end.\n");
 
 	return 1;
@@ -248,6 +281,9 @@
 
 	INFO("SPM Core setup done.\n");
 
+	/* Register power management hooks with PSCI */
+	psci_register_spd_pm_hook(&spmd_pm);
+
 	/* Register init function for deferred init. */
 	bl31_register_bl32_init(&spmd_init);
 
@@ -301,8 +337,8 @@
 				 uint64_t x4,
 				 void *handle)
 {
-	uint32_t secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
-	uint32_t secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
+	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
+	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
 
 	/* Save incoming security state */
 	cm_el1_sysregs_context_save(secure_state_in);
@@ -335,6 +371,46 @@
 }
 
 /*******************************************************************************
+ * spmd_check_address_in_binary_image
+ ******************************************************************************/
+bool spmd_check_address_in_binary_image(uint64_t address)
+{
+	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));
+
+	return ((address >= spmc_attrs.load_address) &&
+		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
+}
+
+/******************************************************************************
+ * spmd_is_spmc_message
+ *****************************************************************************/
+static bool spmd_is_spmc_message(unsigned int ep)
+{
+	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
+		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
+}
+
+/******************************************************************************
+ * spmd_handle_spmc_message
+ *****************************************************************************/
+static int spmd_handle_spmc_message(unsigned long long msg,
+		unsigned long long parm1, unsigned long long parm2,
+		unsigned long long parm3, unsigned long long parm4)
+{
+	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
+		msg, parm1, parm2, parm3, parm4);
+
+	switch (msg) {
+	case SPMD_DIRECT_MSG_SET_ENTRY_POINT:
+		return spmd_pm_secondary_core_set_ep(parm1, parm2, parm3);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+/*******************************************************************************
  * This function handles all SMCs in the range reserved for FFA. Each call is
  * either forwarded to the other security state or handled by the SPM dispatcher
  ******************************************************************************/
@@ -367,7 +443,7 @@
 		 * this CPU. If so, then indicate that the SPM Core initialised
 		 * unsuccessfully.
 		 */
-		if (secure_origin && (ctx->state == SPMC_STATE_RESET)) {
+		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
 			spmd_spm_core_sync_exit(x2);
 		}
 
@@ -451,6 +527,35 @@
 
 		break; /* not reached */
 
+	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
+		if (secure_origin && spmd_is_spmc_message(x1)) {
+			ret = spmd_handle_spmc_message(x3, x4,
+				SMC_GET_GP(handle, CTX_GPREG_X5),
+				SMC_GET_GP(handle, CTX_GPREG_X6),
+				SMC_GET_GP(handle, CTX_GPREG_X7));
+
+			SMC_RET8(handle, FFA_SUCCESS_SMC32,
+				FFA_TARGET_INFO_MBZ, ret,
+				FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+				FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+				FFA_PARAM_MBZ);
+		} else {
+			/* Forward direct message to the other world */
+			return spmd_smc_forward(smc_fid, secure_origin,
+				x1, x2, x3, x4, handle);
+		}
+		break; /* Not reached */
+
+	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
+		if (secure_origin && spmd_is_spmc_message(x1)) {
+			spmd_spm_core_sync_exit(0);
+		} else {
+			/* Forward direct message to the other world */
+			return spmd_smc_forward(smc_fid, secure_origin,
+				x1, x2, x3, x4, handle);
+		}
+		break; /* Not reached */
+
 	case FFA_RX_RELEASE:
 	case FFA_RXTX_MAP_SMC32:
 	case FFA_RXTX_MAP_SMC64:
@@ -466,9 +571,7 @@
 
 	case FFA_PARTITION_INFO_GET:
 	case FFA_MSG_SEND:
-	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
-	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
 	case FFA_MEM_DONATE_SMC32:
 	case FFA_MEM_DONATE_SMC64:
@@ -500,7 +603,7 @@
 		 * this CPU from the Secure world. If so, then indicate that the
 		 * SPM Core initialised successfully.
 		 */
-		if (secure_origin && (ctx->state == SPMC_STATE_RESET)) {
+		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
 			spmd_spm_core_sync_exit(0);
 		}
 
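Taken together with the definitions added to spmd_private.h, the new
FFA_MSG_SEND_DIRECT_REQ_SMC32 path above lets the SPMC ask the SPMD to record a
secondary core entry point. For illustration, a sketch of the register layout
such a request would use, derived from spmd_is_spmc_message() and
spmd_handle_spmc_message(); the SPMC-side helper below is hypothetical and the
SMC conduit used by the SPMC is not shown:

    #include <stdint.h>
    #include <services/ffa_svc.h>

    /* Hypothetical SPMC-side sketch: x0-x7 for a SET_ENTRY_POINT request. */
    static void fill_set_entry_point_request(uint64_t regs[8], uint16_t spmc_id,
                                             uint64_t mpidr, uintptr_t entry_point,
                                             uint64_t context)
    {
        regs[0] = FFA_MSG_SEND_DIRECT_REQ_SMC32;
        /* Source is the SPMC id, destination is the SPMD endpoint id. */
        regs[1] = ((uint64_t)spmc_id << FFA_DIRECT_MSG_SOURCE_SHIFT) |
                  (FFA_ENDPOINT_ID_MAX - 1U);
        regs[2] = FFA_PARAM_MBZ;
        regs[3] = 1U;           /* SPMD_DIRECT_MSG_SET_ENTRY_POINT */
        regs[4] = mpidr;        /* target core */
        regs[5] = entry_point;  /* PA inside the SPMC binary image */
        regs[6] = context;
        regs[7] = FFA_PARAM_MBZ;
    }

On success the SPMD replies with FFA_SUCCESS_SMC32 and places the handler's
return code in the third return register (x2), as shown in the
FFA_MSG_SEND_DIRECT_REQ_SMC32 case above.
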
diff --git a/services/std_svc/spmd/spmd_pm.c b/services/std_svc/spmd/spmd_pm.c
new file mode 100644
index 0000000..64ddbe5
--- /dev/null
+++ b/services/std_svc/spmd/spmd_pm.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include "spmd_private.h"
+
+/*******************************************************************************
+ * spmd_build_spmc_message
+ *
+ * Builds an SPMD to SPMC direct message request.
+ ******************************************************************************/
+static void spmd_build_spmc_message(gp_regs_t *gpregs, unsigned long long message)
+{
+	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
+	write_ctx_reg(gpregs, CTX_GPREG_X1,
+		(SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
+		spmd_spmc_id_get());
+	write_ctx_reg(gpregs, CTX_GPREG_X2, FFA_PARAM_MBZ);
+	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
+}
+
+/*******************************************************************************
+ * spmd_pm_secondary_core_set_ep
+ ******************************************************************************/
+int spmd_pm_secondary_core_set_ep(unsigned long long mpidr,
+		uintptr_t entry_point, unsigned long long context)
+{
+	int id = plat_core_pos_by_mpidr(mpidr);
+
+	if ((id < 0) || (id >= PLATFORM_CORE_COUNT)) {
+		ERROR("%s inconsistent MPIDR (%llx)\n", __func__, mpidr);
+		return -EINVAL;
+	}
+
+	/*
+	 * Check entry_point address is a PA within
+	 * load_address <= entry_point < load_address + binary_size
+	 */
+	if (!spmd_check_address_in_binary_image(entry_point)) {
+		ERROR("%s entry point is not within image boundaries (%llx)\n",
+		      __func__, mpidr);
+		return -EINVAL;
+	}
+
+	spmd_spm_core_context_t *ctx = spmd_get_context_by_mpidr(mpidr);
+	spmd_pm_secondary_ep_t *secondary_ep = &ctx->secondary_ep;
+	if (secondary_ep->locked) {
+		ERROR("%s entry locked (%llx)\n", __func__, mpidr);
+		return -EINVAL;
+	}
+
+	/* Fill new entry to corresponding secondary core id and lock it */
+	secondary_ep->entry_point = entry_point;
+	secondary_ep->context = context;
+	secondary_ep->locked = true;
+
+	VERBOSE("%s %d %llx %lx %llx\n",
+		__func__, id, mpidr, entry_point, context);
+
+	return 0;
+}
+
+/*******************************************************************************
+ * This CPU has been turned on. Enter the SPMC to initialise S-EL1 or S-EL2. As
+ * part of its initialisation path, the SPMC will initialise any SPs that it
+ * manages. Entry into the SPMC is done after initialising minimal architectural
+ * state that guarantees safe execution.
+ ******************************************************************************/
+static void spmd_cpu_on_finish_handler(u_register_t unused)
+{
+	entry_point_info_t *spmc_ep_info = spmd_spmc_ep_info_get();
+	spmd_spm_core_context_t *ctx = spmd_get_context();
+	unsigned int linear_id = plat_my_core_pos();
+	uint64_t rc;
+
+	assert(ctx != NULL);
+	assert(ctx->state != SPMC_STATE_ON);
+	assert(spmc_ep_info != NULL);
+
+	/*
+	 * TODO: this might require locking the spmc_ep_info structure,
+	 * or provisioning one structure per cpu
+	 */
+	if (ctx->secondary_ep.entry_point == 0UL) {
+		goto exit;
+	}
+
+	spmc_ep_info->pc = ctx->secondary_ep.entry_point;
+	cm_setup_context(&ctx->cpu_ctx, spmc_ep_info);
+	write_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx), CTX_GPREG_X0,
+		      ctx->secondary_ep.context);
+
+	/* Mark CPU as initiating ON operation */
+	ctx->state = SPMC_STATE_ON_PENDING;
+
+	rc = spmd_spm_core_sync_entry(ctx);
+	if (rc != 0ULL) {
+		ERROR("%s failed (%llu) on CPU%u\n", __func__, rc,
+			linear_id);
+		ctx->state = SPMC_STATE_OFF;
+		return;
+	}
+
+exit:
+	ctx->state = SPMC_STATE_ON;
+
+	VERBOSE("CPU %u on!\n", linear_id);
+}
+
+/*******************************************************************************
+ * spmd_cpu_off_handler
+ ******************************************************************************/
+static int32_t spmd_cpu_off_handler(u_register_t unused)
+{
+	spmd_spm_core_context_t *ctx = spmd_get_context();
+	unsigned int linear_id = plat_my_core_pos();
+	int64_t rc;
+
+	assert(ctx != NULL);
+	assert(ctx->state != SPMC_STATE_OFF);
+
+	if (ctx->secondary_ep.entry_point == 0UL) {
+		goto exit;
+	}
+
+	/* Build an SPMD to SPMC direct message request. */
+	spmd_build_spmc_message(get_gpregs_ctx(&ctx->cpu_ctx), PSCI_CPU_OFF);
+
+	rc = spmd_spm_core_sync_entry(ctx);
+	if (rc != 0ULL) {
+		ERROR("%s failed (%llu) on CPU%u\n", __func__, rc, linear_id);
+	}
+
+	/* TODO expect FFA_DIRECT_MSG_RESP returned from SPMC */
+
+exit:
+	ctx->state = SPMC_STATE_OFF;
+
+	VERBOSE("CPU %u off!\n", linear_id);
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Structure populated by the SPM Dispatcher to perform any bookkeeping before
+ * PSCI executes a power management operation.
+ ******************************************************************************/
+const spd_pm_ops_t spmd_pm = {
+	.svc_on_finish = spmd_cpu_on_finish_handler,
+	.svc_off = spmd_cpu_off_handler
+};
diff --git a/services/std_svc/spmd/spmd_private.h b/services/std_svc/spmd/spmd_private.h
index 4946309..eff0dd9 100644
--- a/services/std_svc/spmd/spmd_private.h
+++ b/services/std_svc/spmd/spmd_private.h
@@ -30,14 +30,24 @@
 #define SPMD_C_RT_CTX_ENTRIES		(SPMD_C_RT_CTX_SIZE >> DWORD_SHIFT)
 
 #ifndef __ASSEMBLER__
-#include <services/ffa_svc.h>
 #include <stdint.h>
+#include <lib/psci/psci_lib.h>
+#include <plat/common/platform.h>
+#include <services/ffa_svc.h>
 
 typedef enum spmc_state {
 	SPMC_STATE_RESET = 0,
-	SPMC_STATE_IDLE
+	SPMC_STATE_OFF,
+	SPMC_STATE_ON_PENDING,
+	SPMC_STATE_ON
 } spmc_state_t;
 
+typedef struct spmd_pm_secondary_ep {
+	uintptr_t entry_point;
+	uintptr_t context;
+	bool locked;
+} spmd_pm_secondary_ep_t;
+
 /*
  * Data structure used by the SPM dispatcher (SPMD) in EL3 to track context of
  * the SPM core (SPMC) at the next lower EL.
@@ -46,16 +56,20 @@
 	uint64_t c_rt_ctx;
 	cpu_context_t cpu_ctx;
 	spmc_state_t state;
+	spmd_pm_secondary_ep_t secondary_ep;
 } spmd_spm_core_context_t;
 
 /*
  * Reserve ID for NS physical FFA Endpoint.
  */
-#define FFA_NS_ENDPOINT_ID		U(0)
+#define FFA_NS_ENDPOINT_ID			U(0)
 
-/* Mask and shift to check valid secure FFA Endpoint ID. */
-#define SPMC_SECURE_ID_MASK		U(1)
-#define SPMC_SECURE_ID_SHIFT		U(15)
+/* Mask and shift to check valid secure FF-A Endpoint ID. */
+#define SPMC_SECURE_ID_MASK			U(1)
+#define SPMC_SECURE_ID_SHIFT			U(15)
+
+#define SPMD_DIRECT_MSG_ENDPOINT_ID		U(FFA_ENDPOINT_ID_MAX - 1)
+#define SPMD_DIRECT_MSG_SET_ENTRY_POINT		U(1)
 
 /* Functions used to enter/exit SPMC synchronously */
 uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *ctx);
@@ -65,9 +79,25 @@
 uint64_t spmd_spm_core_enter(uint64_t *c_rt_ctx);
 void __dead2 spmd_spm_core_exit(uint64_t c_rt_ctx, uint64_t ret);
 
+/* SPMD SPD power management handlers */
+extern const spd_pm_ops_t spmd_pm;
+
+/* SPMC entry point information helper */
+entry_point_info_t *spmd_spmc_ep_info_get(void);
+
+/* SPMC ID getter */
+uint16_t spmd_spmc_id_get(void);
+
+/* SPMC context on CPU based on mpidr */
+spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr);
+
 /* SPMC context on current CPU get helper */
 spmd_spm_core_context_t *spmd_get_context(void);
 
+int spmd_pm_secondary_core_set_ep(unsigned long long mpidr,
+		uintptr_t entry_point, unsigned long long context);
+bool spmd_check_address_in_binary_image(uint64_t address);
+
 #endif /* __ASSEMBLER__ */
 
 #endif /* SPMD_PRIVATE_H */