Merge "feat(tcr2): add asymmetric feature testing for FEAT_TCR2"
diff --git a/docs/plat/index.rst b/docs/plat/index.rst
index 33c0ab9..b0ebc46 100644
--- a/docs/plat/index.rst
+++ b/docs/plat/index.rst
@@ -6,6 +6,7 @@
    :caption: Contents
    :hidden:
 
+   xilinx-versal2
    xilinx-versal_net
    xilinx-versal
    xilinx-zynqmp
diff --git a/docs/plat/xilinx-versal2.rst b/docs/plat/xilinx-versal2.rst
new file mode 100644
index 0000000..841340e
--- /dev/null
+++ b/docs/plat/xilinx-versal2.rst
@@ -0,0 +1,26 @@
+..
+  Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+
+  SPDX-License-Identifier: BSD-3-Clause
+
+
+AMD Versal Gen 2
+================
+
+- Logs are available only on console and not saved in memory (No NVM support).
+- Versal Gen 2 Platform uses TTC Timer
+
+
+Build Command
+-------------
+For individual tests/test suite:
+
+.. code-block:: shell
+
+        make CROSS_COMPILE=aarch64-none-elf- PLAT=versal2 TESTS=<required tests> tftf
+
+For Versal2 specific tests (includes AMD-Xilinx test cases + Standard Test Suite):
+
+.. code-block:: shell
+
+        make CROSS_COMPILE=aarch64-none-elf- PLAT=versal2 TESTS=versal tftf
diff --git a/include/runtime_services/ffa_helpers.h b/include/runtime_services/ffa_helpers.h
index 917885f..4dc3f53 100644
--- a/include/runtime_services/ffa_helpers.h
+++ b/include/runtime_services/ffa_helpers.h
@@ -867,6 +867,21 @@
 void ffa_hypervisor_retrieve_request_init(struct ffa_memory_region *region,
 					  ffa_memory_handle_t handle);
 
+static inline uint32_t ffa_mem_retrieve_res_total_size(struct ffa_value ret)
+{
+	return ret.arg1;
+}
+
+static inline uint32_t ffa_mem_retrieve_res_frag_size(struct ffa_value ret)
+{
+	return ret.arg2;
+}
+
+static inline uint32_t ffa_mem_frag_tx_frag_size(struct ffa_value ret)
+{
+	return ret.arg3;
+}
+
 uint32_t ffa_memory_region_init(
 	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
 	ffa_id_t sender, struct ffa_memory_access receivers[],
diff --git a/include/runtime_services/spm_common.h b/include/runtime_services/spm_common.h
index 3fe154a..c794cb0 100644
--- a/include/runtime_services/spm_common.h
+++ b/include/runtime_services/spm_common.h
@@ -116,6 +116,10 @@
 		     uint32_t receiver_count, ffa_memory_region_flags_t flags,
 		     bool is_normal_memory);
 
+bool hypervisor_retrieve_request_continue(
+	struct mailbox_buffers *mb, uint64_t handle, void *out, uint32_t out_size,
+	uint32_t total_size, uint32_t fragment_offset, bool release_rx);
+
 bool hypervisor_retrieve_request(struct mailbox_buffers *mb, uint64_t handle,
 				 void *out, uint32_t out_size);
 
diff --git a/plat/amd/versal2/aarch64/plat_helpers.S b/plat/amd/versal2/aarch64/plat_helpers.S
new file mode 100644
index 0000000..715d5f1
--- /dev/null
+++ b/plat/amd/versal2/aarch64/plat_helpers.S
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+	.global platform_get_core_pos
+	.global plat_crash_console_init
+	.global plat_crash_console_flush
+	.global plat_crash_console_putc
+
+/*----------------------------------------------------------------------
+ * unsigned int platform_get_core_pos(u_register_t mpid)
+ *
+ * Function to calculate the core position.
+ *
+ * clobbers: x0 - x3
+ * ---------------------------------------------------------------------
+ */
+func platform_get_core_pos
+	/*
+	 * Check for MT bit in MPIDR. If not set, shift MPIDR to left to make it
+	 * look as if in a multi-threaded implementation.
+	 */
+	tst	x0, #MPIDR_MT_MASK
+	lsl	x3, x0, #MPIDR_AFFINITY_BITS
+	csel	x3, x3, x0, eq
+
+	/* Extract individual affinity fields from MPIDR */
+	ubfx	x0, x3, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS
+	ubfx	x1, x3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
+	ubfx	x2, x3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
+
+	/* Compute linear position */
+	mov	x3, #PLATFORM_CORE_COUNT_PER_CLUSTER
+	madd	x1, x2, x3, x1
+	mov	x3, #PLATFORM_MAX_PE_PER_CPU
+	madd	x0, x1, x3, x0
+	ret
+endfunc platform_get_core_pos
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_init(void)
+	 * Function to initialize the crash console
+	 * without a C Runtime to print crash report.
+	 * Clobber list : x0 - x4
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_init
+	mov_imm	x0, CRASH_CONSOLE_BASE
+	mov_imm	x1, PL011_UART_CLK_IN_HZ
+	mov_imm	x2, PL011_BAUDRATE
+	b	console_core_init
+endfunc plat_crash_console_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_putc(int c)
+	 * Function to print a character on the crash
+	 * console without a C Runtime.
+	 * Clobber list : x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_putc
+	mov_imm	x1, CRASH_CONSOLE_BASE
+	b	console_core_putc
+endfunc plat_crash_console_putc
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_flush()
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * Out : return -1 on error else return 0.
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_flush
+	mov_imm	x1, CRASH_CONSOLE_BASE
+	b	console_core_flush
+endfunc plat_crash_console_flush
diff --git a/plat/amd/versal2/include/platform_def.h b/plat/amd/versal2/include/platform_def.h
new file mode 100644
index 0000000..645a5eb
--- /dev/null
+++ b/plat/amd/versal2/include/platform_def.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include <arch.h>
+
+#define PLATFORM_LINKER_FORMAT			"elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH			aarch64
+
+#define TFTF_BASE				U(0x8000000)
+
+#define CACHE_WRITEBACK_GRANULE			U(0x40)
+
+#define PLATFORM_CLUSTER_COUNT			U(4)
+#define PLATFORM_CORE_COUNT_PER_CLUSTER		U(2)
+#define PLATFORM_MAX_PE_PER_CPU			U(1)
+/* Because of make_mpid from include/lib/tftf_lib.h */
+#define PLAT_MAX_PE_PER_CPU		PLATFORM_MAX_PE_PER_CPU
+
+#define PLATFORM_CORE_COUNT			(PLATFORM_CLUSTER_COUNT * \
+						PLATFORM_CORE_COUNT_PER_CLUSTER)
+#define PLATFORM_NUM_AFFS			(PLATFORM_CORE_COUNT + \
+						PLATFORM_CLUSTER_COUNT + 1)
+#define PLATFORM_MAX_AFFLVL			MPIDR_AFFLVL2
+#define PLAT_MAX_PWR_LEVEL			MPIDR_AFFLVL2
+#define PLAT_MAX_PWR_STATES_PER_LVL		U(2)
+
+
+#define PLATFORM_STACK_SIZE			U(0x440)
+#define PCPU_DV_MEM_STACK_SIZE			U(0x440)
+
+
+#define PLAT_VIRT_ADDR_SPACE_SIZE		(1ULL << 32)
+#define PLAT_PHY_ADDR_SPACE_SIZE		(1ULL << 32)
+#define MAX_XLAT_TABLES				U(8)
+#define MAX_MMAP_REGIONS			U(16)
+
+#define DRAM_BASE				U(0x0)
+#define DRAM_SIZE				U(0x80000000)
+
+/*
+ * TFTF_NVM_OFFSET/SIZE correspond to the NVM partition in the partition
+ * table
+ */
+#define TFTF_NVM_SIZE				U(0x600000)
+#define TFTF_NVM_OFFSET				U(0x20000000)
+
+/* Local state bit width for each level in the state-ID field of power state */
+#define PLAT_LOCAL_PSTATE_WIDTH			U(4)
+
+/* GIC related addresses from datasheet */
+#define GICD_REG_BASE				U(0xe2000000)
+#define GICR_REG_BASE				U(0xe2060000)
+
+/* GICv3 is used, dummy definition to resolve build failure */
+#define GICC_REG_BASE				U(0xe2000000)
+
+/*
+ * Memory mapped devices that we must create MMU mappings for them
+ */
+#define GIC_BASE				GICD_REG_BASE
+#define GIC_SIZE				U(0x01000000)
+
+#define TTC_BASE				U(0xF1E70000)
+#define TTC_SIZE				U(0x00010000)
+
+#define SYS_CNT_BASE1				TTC_BASE
+#define SYS_CNT_SIZE				TTC_SIZE
+
+#define LPD_IOU_SLCR				U(0xEB410000)
+#define LPD_IOU_SLCR_SIZE			U(0x00010000)
+
+/* ARM PL011 UART */
+#define PL011_UART0_BASE			U(0xf1920000)
+#define PL011_BAUDRATE				U(115200)
+#define PL011_UART_CLK_IN_HZ			U(100000000)
+
+#define PLAT_ARM_UART_BASE                      PL011_UART0_BASE
+#define PLAT_ARM_UART_SIZE                      U(0x1000)
+
+#define CRASH_CONSOLE_BASE			PL011_UART0_BASE
+#define CRASH_CONSOLE_SIZE			PLAT_ARM_UART_SIZE
+
+/*******************************************************************************
+ * Non-Secure Software Generated Interrupts IDs
+ ******************************************************************************/
+#define IRQ_NS_SGI_0				0
+#define IRQ_NS_SGI_1				1
+#define IRQ_NS_SGI_2				2
+#define IRQ_NS_SGI_3				3
+#define IRQ_NS_SGI_4				4
+#define IRQ_NS_SGI_5				5
+#define IRQ_NS_SGI_6				6
+#define IRQ_NS_SGI_7				7
+
+/* Per-CPU Hypervisor Timer Interrupt ID */
+#define IRQ_PCPU_HP_TIMER			U(29)
+/* Datasheet: TIME00 event */
+#define IRQ_CNTPSIRQ1				U(29)
+
+/* Refer to AM011(v1.5), Chapter 50, Page 430 */
+#define PLAT_MAX_SPI_OFFSET_ID			U(223)
+
+/*
+ * Times(in ms) used by test code for completion of different events.
+ */
+#define PLAT_SUSPEND_ENTRY_TIME			U(15)
+#define PLAT_SUSPEND_ENTRY_EXIT_TIME		U(30)
+
+/*
+ * Dummy definitions that we need just to compile...
+ */
+#define ARM_SECURE_SERVICE_BUFFER_BASE		U(0)
+#define ARM_SECURE_SERVICE_BUFFER_SIZE		U(100)
+
+/* LPD_SWDT_INT, AM011(v1.5), Chapter 50, Page 428 */
+#define IRQ_TWDOG_INTID				U(0x51)
+
+#define TTC_TIMER_IRQ				U(75)
+
+#endif /* PLATFORM_DEF_H */
diff --git a/plat/amd/versal2/platform.mk b/plat/amd/versal2/platform.mk
new file mode 100644
index 0000000..2304d2f
--- /dev/null
+++ b/plat/amd/versal2/platform.mk
@@ -0,0 +1,29 @@
+#
+# Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+XLNX_COMMON_PATH :=	plat/xilinx/common
+VERSAL2_PATH :=	plat/amd/versal2
+
+PLAT_INCLUDES	:=	-I${XLNX_COMMON_PATH}/include/			\
+			-I${VERSAL2_PATH}/include/
+
+PLAT_SOURCES	:=	drivers/arm/gic/arm_gic_v2v3.c                  \
+			drivers/arm/gic/gic_common.c                    \
+			drivers/arm/gic/gic_v2.c                        \
+			drivers/arm/gic/gic_v3.c                        \
+			drivers/arm/pl011/${ARCH}/pl011_console.S       \
+			drivers/arm/timer/private_timer.c		\
+			drivers/console/console.c                       \
+			${VERSAL2_PATH}/versal2_setup.c		\
+			${VERSAL2_PATH}/versal2_pwr_state.c	\
+			${VERSAL2_PATH}/aarch64/plat_helpers.S	\
+			${XLNX_COMMON_PATH}/timer/timers.c
+
+PLAT_TESTS_SKIP_LIST    := ${VERSAL2_PATH}/tests_to_skip.txt
+
+ifeq ($(USE_NVM),1)
+$(error "Versal2 port of TFTF doesn't currently support USE_NVM=1")
+endif
diff --git a/plat/amd/versal2/tests_to_skip.txt b/plat/amd/versal2/tests_to_skip.txt
new file mode 100644
index 0000000..2c286ec
--- /dev/null
+++ b/plat/amd/versal2/tests_to_skip.txt
@@ -0,0 +1,64 @@
+#
+# Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+################################################################################
+# Disable the listed tests for Versal2 Platform.
+################################################################################
+#TESTS: tftf-validation
+Framework Validation/Events API
+Framework Validation/NVM serialisation
+
+#TESTS: Timer framework Validation
+Timer framework Validation
+
+#TESTS: Boot requirement tests
+Boot requirement tests
+
+ARM_ARCH_SVC
+PMU Leakage
+
+#TESTS: CPU extensions
+CPU extensions/PMUv3 SMC counter preservation
+
+#TESTS: Performance tests
+Performance tests/Test cluster power up latency
+
+#TESTS: FF-A
+FF-A Setup and Discovery/FF-A RXTX unmap SP rxtx buffer
+FF-A Setup and Discovery/Test FFA_PARTITION_INFO_GET v1.0
+FF-A Memory Sharing/Lend memory, clear flag set
+SIMD,SVE Registers context/Check that SIMD registers context is preserved
+FF-A Interrupt
+FF-A Notifications
+
+#TESTS: AMD-Xilinx tests
+AMD-Xilinx tests
+
+#TESTS: psci
+PSCI Affinity Info/Affinity info level0 powerdown
+PSCI CPU Suspend/CPU suspend to powerdown at level 0
+PSCI CPU Suspend/CPU suspend to powerdown at level 1
+PSCI CPU Suspend/CPU suspend to powerdown at level 2
+PSCI CPU Suspend/CPU suspend to standby at level 0
+PSCI CPU Suspend/CPU suspend to standby at level 1
+PSCI CPU Suspend/CPU suspend to standby at level 2
+PSCI CPU Suspend in OSI mode/CPU suspend to powerdown at level 0 in OSI mode
+PSCI CPU Suspend in OSI mode/CPU suspend to powerdown at level 1 in OSI mode
+PSCI System Suspend Validation/System suspend multiple times
+PSCI System Suspend Validation/system suspend from all cores
+PSCI System Suspend Validation/Validate suspend to RAM functionality
+PSCI System Suspend Validation/Validate PSCI System Suspend API
+
+#PSCI
+PSCI Features
+PSCI CPU Suspend in OSI mode
+
+#Query runtime services
+Query runtime services/Query Vendor-Specific Service
+Query runtime services/Probe PMF Version
+
+#TESTS: el3-power-state
+EL3 power state parser validation
diff --git a/plat/amd/versal2/versal2_pwr_state.c b/plat/amd/versal2/versal2_pwr_state.c
new file mode 100644
index 0000000..a9bff07
--- /dev/null
+++ b/plat/amd/versal2/versal2_pwr_state.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+#include <arch.h>
+#include <platform.h>
+#include <psci.h>
+
+/*
+ * State IDs for local power states.
+ */
+#define VERSAL2_RETENTION_STATE_ID	1	/* Valid for only CPUs */
+#define VERSAL2_OFF_STATE_ID		0	/* Valid for CPUs and Clusters */
+
+/*
+ * Suspend depth definitions for each power state
+ */
+typedef enum {
+	VERSAL2_RUN_DEPTH = 0,
+	VERSAL2_RETENTION_DEPTH,
+	VERSAL2_OFF_DEPTH,
+} suspend_depth_t;
+
+/* The state property array with details of idle state possible for the core */
+static const plat_state_prop_t core_state_prop[] = {
+	{VERSAL2_RETENTION_DEPTH, VERSAL2_RETENTION_STATE_ID, PSTATE_TYPE_STANDBY},
+	{VERSAL2_OFF_DEPTH, VERSAL2_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+	{0},
+};
+
+/* The state property array with details of idle state possible for the cluster */
+static const plat_state_prop_t cluster_state_prop[] = {
+	{VERSAL2_OFF_DEPTH, VERSAL2_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+	{0},
+};
+
+/* The state property array with details of idle state possible for the system level */
+static const plat_state_prop_t system_state_prop[] = {
+	{VERSAL2_OFF_DEPTH, VERSAL2_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+	{0},
+};
+
+const plat_state_prop_t *plat_get_state_prop(unsigned int level)
+{
+	switch (level) {
+	case MPIDR_AFFLVL0:
+		return core_state_prop;
+	case MPIDR_AFFLVL1:
+		return cluster_state_prop;
+	case MPIDR_AFFLVL2:
+		return system_state_prop;
+	default:
+		return NULL;
+	}
+}
diff --git a/plat/amd/versal2/versal2_setup.c b/plat/amd/versal2/versal2_setup.c
new file mode 100644
index 0000000..0db69d6
--- /dev/null
+++ b/plat/amd/versal2/versal2_setup.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch.h>
+#include <drivers/arm/arm_gic.h>
+#include <drivers/console.h>
+#include <platform.h>
+#include <tftf_lib.h>
+
+#include <platform_def.h>
+#include <util.h>
+
+static const struct {
+	unsigned int cluster_id;
+	unsigned int cpu_id;
+} versal2_cores[PLATFORM_CORE_COUNT] = {
+	CLUSTER_DEF(0),
+	CLUSTER_DEF(1),
+	CLUSTER_DEF(2),
+	CLUSTER_DEF(3)
+};
+
+
+static const mmap_region_t mmap[] = {
+	MAP_REGION_FLAT(DRAM_BASE + TFTF_NVM_OFFSET, TFTF_NVM_SIZE, MT_MEMORY | MT_RW | MT_NS),
+	MAP_REGION_FLAT(GIC_BASE, GIC_SIZE, MT_DEVICE | MT_RW | MT_NS),
+	MAP_REGION_FLAT(CRASH_CONSOLE_BASE, CRASH_CONSOLE_SIZE, MT_DEVICE | MT_RW | MT_NS),
+	MAP_REGION_FLAT(TTC_BASE, TTC_SIZE, MT_DEVICE | MT_RW | MT_NS),
+	MAP_REGION_FLAT(LPD_IOU_SLCR, LPD_IOU_SLCR_SIZE, MT_DEVICE | MT_RW | MT_NS),
+	{0}
+};
+
+/* Power Domain Tree Descriptor array */
+const unsigned char versal2_pwr_tree_desc[] = {
+	/* Number of root nodes */
+	1,
+	/* Number of clusters */
+	PLATFORM_CLUSTER_COUNT,
+	/* Number of children for the first cluster node */
+	PLATFORM_CORE_COUNT_PER_CLUSTER,
+	/* Number of children for the second cluster node */
+	PLATFORM_CORE_COUNT_PER_CLUSTER,
+	/* Number of children for the third cluster node */
+	PLATFORM_CORE_COUNT_PER_CLUSTER,
+	/* Number of children for the fourth cluster node */
+	PLATFORM_CORE_COUNT_PER_CLUSTER
+};
+
+
+const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void)
+{
+	return versal2_pwr_tree_desc;
+}
+
+/*
+ * Generate the MPID from the core position.
+ */
+uint64_t tftf_plat_get_mpidr(unsigned int core_pos)
+{
+	assert(core_pos < PLATFORM_CORE_COUNT);
+
+	return (uint64_t)make_mpid(versal2_cores[core_pos].cluster_id,
+				versal2_cores[core_pos].cpu_id);
+}
+
+void tftf_plat_arch_setup(void)
+{
+	tftf_plat_configure_mmu();
+}
+
+void tftf_early_platform_setup(void)
+{
+	console_init(CRASH_CONSOLE_BASE, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);
+}
+
+void tftf_platform_setup(void)
+{
+	arm_gic_init(GICC_REG_BASE, GICD_REG_BASE, GICR_REG_BASE);
+	arm_gic_setup_global();
+	arm_gic_setup_local();
+}
+
+const mmap_region_t *tftf_platform_get_mmap(void)
+{
+	return mmap;
+}
diff --git a/plat/xilinx/common/include/util.h b/plat/xilinx/common/include/util.h
new file mode 100644
index 0000000..c6d16ce
--- /dev/null
+++ b/plat/xilinx/common/include/util.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef UTIL_H
+#define UTIL_H
+
+#include <platform_def.h>
+
+#define CPU_DEF(cluster, cpu)	{ cluster, cpu }
+
+#if (PLATFORM_CORE_COUNT_PER_CLUSTER == 1U)
+#define CLUSTER_DEF(cluster)	\
+	CPU_DEF(cluster, 0)
+#elif (PLATFORM_CORE_COUNT_PER_CLUSTER == 2U)
+#define CLUSTER_DEF(cluster)	\
+	CPU_DEF(cluster, 0),		\
+	CPU_DEF(cluster, 1)
+#elif (PLATFORM_CORE_COUNT_PER_CLUSTER == 4U)
+#define CLUSTER_DEF(cluster)	\
+	CPU_DEF(cluster, 0),		\
+	CPU_DEF(cluster, 1),		\
+	CPU_DEF(cluster, 2),		\
+	CPU_DEF(cluster, 3)
+#endif
+
+#endif /* UTIL_H */
diff --git a/plat/xilinx/common/timer/timers.c b/plat/xilinx/common/timer/timers.c
index f53cd84..a6e1afa 100644
--- a/plat/xilinx/common/timer/timers.c
+++ b/plat/xilinx/common/timer/timers.c
@@ -27,6 +27,7 @@
 
 #define TTC_CNT_CNTRL_DISABLE_MASK	BIT(0)
 
+#define TTC_CLK_SEL_OFFSET		U(0x360)
 #define TTC_CLK_SEL_MASK		GENMASK(1, 0)
 
 #define TTC_CLK_SEL_PS_REF		BIT(0)
diff --git a/spm/common/sp_tests/sp_test_ffa.c b/spm/common/sp_tests/sp_test_ffa.c
index 6e7fe1a..e7dd720 100644
--- a/spm/common/sp_tests/sp_test_ffa.c
+++ b/spm/common/sp_tests/sp_test_ffa.c
@@ -33,6 +33,7 @@
 		.properties = (FFA_PARTITION_AARCH64_EXEC |
 			       FFA_PARTITION_DIRECT_REQ_RECV |
 			       FFA_PARTITION_DIRECT_REQ_SEND |
+			       FFA_PARTITION_INDIRECT_MSG |
 			       FFA_PARTITION_NOTIFICATION),
 		.uuid = {PRIMARY_UUID}
 	},
@@ -84,13 +85,16 @@
 	/* Get common features between tftf and cactus. */
 	unsigned int test_target_size =
 		get_ffa_feature_test_target(&func_id_targets);
-	struct ffa_features_test feature_id_targets[3] = {
+	/* Specific to SPs. */
+	struct ffa_features_test feature_id_targets[] = {
 		{"FFA_FEATURE_MEI", FFA_FEATURE_MEI, FFA_SUCCESS_SMC32, 0,
 			FFA_VERSION_1_1},
 		{"FFA_FEATURE_SRI", FFA_FEATURE_SRI, FFA_ERROR, 0,
 			FFA_VERSION_1_1},
 		{"FFA_FEATURE_NPI", FFA_FEATURE_NPI, FFA_SUCCESS_SMC32, 0,
 			FFA_VERSION_1_1},
+		{"FFA_YIELD_32", FFA_MSG_YIELD, FFA_SUCCESS_SMC32, 0,
+			FFA_VERSION_1_0},
 	};
 
 	INFO("Test FFA_FEATURES.\n");
diff --git a/tftf/tests/plat/amd/versal2/test_all_cores.c b/tftf/tests/plat/amd/versal2/test_all_cores.c
new file mode 100644
index 0000000..d7342bf
--- /dev/null
+++ b/tftf/tests/plat/amd/versal2/test_all_cores.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
+
+/*
+ * Test entry point function for non-lead CPUs.
+ * Specified by the lead CPU when bringing up other CPUs.
+ */
+static test_result_t non_lead_cpu_fn(void)
+{
+	unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int core_pos = platform_get_core_pos(mpid);
+
+	VERBOSE("%s\n", __func__);
+
+	/* Signal to the lead CPU that the calling CPU has entered the test */
+	tftf_send_event(&cpu_has_entered_test[core_pos]);
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Code for a test running on multiple CPUs.
+ *
+ * This "test" powers on all CPUs on the platform and report test success.
+ * The function test_multi_core() runs on the lead CPU only.
+ * The test entrypoint for other CPUs is non_lead_cpu_fn(), as specified when
+ * bringing them up.
+ * 1) Power on all secondary cores
+ * 2) after a delay power off all secondary cores
+ *
+ * The test is skipped if an error occurs during the bring-up of non-lead CPUs.
+ */
+test_result_t test_power_on_cores(void)
+{
+	unsigned int lead_mpid;
+	unsigned int cpu_mpid, cpu_node;
+	unsigned int core_pos;
+	int psci_ret;
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+	VERBOSE("%s: lead_mpid = 0x%x\n", __func__, lead_mpid);
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	/* Power on all CPUs */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already powered on */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		VERBOSE("%s getting cpu_mpid = 0x%x powered on\n", __func__,
+				cpu_mpid);
+		psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t) non_lead_cpu_fn, 0);
+		if (psci_ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf("Failed to power on CPU 0x%x (%d)\n",
+					cpu_mpid, psci_ret);
+			return TEST_RESULT_SKIPPED;
+		}
+	}
+
+	/* Wait for non-lead CPUs to enter the test */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/host_realm_managment/host_realm_rmi.c b/tftf/tests/runtime_services/host_realm_managment/host_realm_rmi.c
index 9cef234..fdb1d4a 100644
--- a/tftf/tests/runtime_services/host_realm_managment/host_realm_rmi.c
+++ b/tftf/tests/runtime_services/host_realm_managment/host_realm_rmi.c
@@ -910,14 +910,16 @@
 			int8_t cur_level = RMI_RETURN_INDEX(ret);
 
 			if ((int)cur_level < level) {
-				ret = host_rmi_create_rtt_levels(realm,
-								 start,
-								 cur_level,
-								 level);
-				if (ret != RMI_SUCCESS) {
+				u_register_t cret;
+
+				cret = host_rmi_create_rtt_levels(realm,
+								  start,
+								  cur_level,
+								  level);
+				if (cret != RMI_SUCCESS) {
 					ERROR("%s() failed, ret=0x%lx line=%u\n",
 						"host_rmi_create_rtt_levels",
-						ret, __LINE__);
+						cret, __LINE__);
 					return REALM_ERROR;
 				}
 
diff --git a/tftf/tests/runtime_services/secure_service/spm_common.c b/tftf/tests/runtime_services/secure_service/spm_common.c
index 06e76db..753d491 100644
--- a/tftf/tests/runtime_services/secure_service/spm_common.c
+++ b/tftf/tests/runtime_services/secure_service/spm_common.c
@@ -202,7 +202,6 @@
 		FFA_SUCCESS_SMC32},
 	{"FFA_NOTIFICATION_INFO_GET_64", FFA_NOTIFICATION_INFO_GET_SMC64,
 		FFA_SUCCESS_SMC32},
-	{"FFA_YIELD_32", FFA_MSG_YIELD, FFA_ERROR},
 	{"Check non-existent command", 0xFFFF, FFA_ERROR},
 };
 
@@ -334,6 +333,99 @@
 	return true;
 }
 
+/**
+ * Looping part of the fragmented retrieve request.
+ */
+bool hypervisor_retrieve_request_continue(
+	struct mailbox_buffers *mb, uint64_t handle, void *out, uint32_t out_size,
+	uint32_t total_size, uint32_t fragment_offset, bool release_rx)
+{
+	struct ffa_value ret;
+	uint32_t fragment_size;
+
+	if (mb == NULL) {
+		ERROR("Invalid parameters, please provide valid mailbox.\n");
+		return false;
+	}
+
+	while (fragment_offset < total_size) {
+		VERBOSE("Calling again. frag offset: %d; total: %d\n",
+			fragment_offset, total_size);
+
+		/* The first time it is called is controlled through arguments. */
+		if (release_rx) {
+			ret = ffa_rx_release();
+			if (ret.fid != FFA_SUCCESS_SMC32) {
+				ERROR("ffa_rx_release() failed: %d\n",
+				      ffa_error_code(ret));
+				return false;
+			}
+		} else {
+			release_rx = true;
+		}
+
+		ret = ffa_mem_frag_rx(handle, fragment_offset);
+		if (ret.fid != FFA_MEM_FRAG_TX) {
+			ERROR("ffa_mem_frag_rx() failed: %d\n",
+			      ffa_error_code(ret));
+			return false;
+		}
+
+		if (ffa_frag_handle(ret) != handle) {
+			ERROR("%s: fragment handle mismatch: expected %llu, got "
+			      "%llu\n",
+			      __func__, handle, ffa_frag_handle(ret));
+			return false;
+		}
+
+		/* Sender MBZ at physical instance. */
+		if (ffa_frag_sender(ret) != 0) {
+			ERROR("%s: fragment sender mismatch: expected %d, got "
+			      "%d\n",
+			      __func__, 0, ffa_frag_sender(ret));
+			return false;
+		}
+
+		fragment_size = ffa_mem_frag_tx_frag_size(ret);
+
+		if (fragment_size == 0) {
+			ERROR("%s: fragment size must not be 0\n", __func__);
+			return false;
+		}
+
+		if (out != NULL) {
+			if (fragment_offset + fragment_size > out_size) {
+				ERROR("%s: fragment is too big to fit in out buffer "
+				      "(%d > %d)\n",
+				      __func__, fragment_offset + fragment_size,
+				      out_size);
+				return false;
+			}
+
+			VERBOSE("Copying fragment at offset %d with size %d\n",
+				fragment_offset, fragment_size);
+			memcpy((uint8_t *)out + fragment_offset, mb->recv,
+			       fragment_size);
+		}
+
+		fragment_offset += fragment_size;
+	}
+
+	if (fragment_offset != total_size) {
+		ERROR("%s: fragment size mismatch: expected %d, got %d\n",
+		      __func__, total_size, fragment_offset);
+		return false;
+	}
+
+	ret = ffa_rx_release();
+	if (ret.fid != FFA_SUCCESS_SMC32) {
+		ERROR("ffa_rx_release() failed: %d\n", ffa_error_code(ret));
+		return false;
+	}
+
+	return true;
+}
+
 bool hypervisor_retrieve_request(struct mailbox_buffers *mb, uint64_t handle,
 				 void *out, uint32_t out_size)
 {
@@ -343,8 +435,8 @@
 	uint32_t fragment_offset;
 	struct ffa_memory_region *region_out = out;
 
-	if (out == NULL || mb == NULL) {
-		ERROR("Invalid parameters!\n");
+	if (mb == NULL) {
+		ERROR("Invalid parameters, please provide valid mailbox.\n");
 		return false;
 	}
 
@@ -364,112 +456,49 @@
 	 * fragments is equal to total_size, the memory transaction has been
 	 * completed.
 	 */
-	total_size = ret.arg1;
-	fragment_size = ret.arg2;
+	total_size = ffa_mem_retrieve_res_total_size(ret);
+	fragment_size = ffa_mem_retrieve_res_frag_size(ret);
+
 	fragment_offset = fragment_size;
 	VERBOSE("total_size=%d, fragment_size=%d, fragment_offset=%d\n",
 		total_size, fragment_size, fragment_offset);
 
-	if (fragment_size > PAGE_SIZE) {
-		ERROR("Fragment should be smaller than RX buffer!\n");
-		return false;
-	}
-	if (total_size > out_size) {
-		ERROR("output buffer is not large enough to store all "
-		      "fragments (total_size=%d, max_size=%d)\n",
-		      total_size, out_size);
-		return false;
-	}
-
-	/*
-	 * Copy the received message to the out buffer. This is necessary
-	 * because `mb->recv` will be overwritten if sending a fragmented
-	 * message.
-	 */
-	memcpy(out, mb->recv, fragment_size);
-
-	if (region_out->receiver_count == 0) {
-		VERBOSE("copied region has no recivers\n");
-		return false;
-	}
-
-	if (region_out->receiver_count > MAX_MEM_SHARE_RECIPIENTS) {
-		VERBOSE("SPMC memory sharing operations support max of %u "
-			"receivers!\n",
-			MAX_MEM_SHARE_RECIPIENTS);
-		return false;
-	}
-
-	while (fragment_offset < total_size) {
-		VERBOSE("Calling again. frag offset: %d; total: %d\n",
-			fragment_offset, total_size);
-		ret = ffa_rx_release();
-		if (ret.fid != FFA_SUCCESS_SMC32) {
-			ERROR("ffa_rx_release() failed: %d\n",
-			      ffa_error_code(ret));
+	if (out != NULL) {
+		if (fragment_size > PAGE_SIZE) {
+			ERROR("Fragment should be smaller than RX buffer!\n");
+			return false;
+		}
+		if (total_size > out_size) {
+			ERROR("Output buffer is not large enough to store all "
+			      "fragments (total_size=%d, max_size=%d)\n",
+			      total_size, out_size);
 			return false;
 		}
 
-		ret = ffa_mem_frag_rx(handle, fragment_offset);
-		if (ret.fid != FFA_MEM_FRAG_TX) {
-			ERROR("ffa_mem_frag_rx() failed: %d\n",
-			      ffa_error_code(ret));
+		/*
+		 * Copy the received message to the out buffer. This is necessary
+		 * because `mb->recv` will be overwritten if sending a fragmented
+		 * message.
+		 */
+		memcpy(out, mb->recv, fragment_size);
+
+		if (region_out->receiver_count == 0) {
+			VERBOSE("Copied region has no receivers\n");
 			return false;
 		}
 
-		if (ffa_frag_handle(ret) != handle) {
-			ERROR("%s: fragment handle mismatch: expected %llu, "
-			      "got "
-			      "%llu\n",
-			      __func__, handle, ffa_frag_handle(ret));
+		if (region_out->receiver_count > MAX_MEM_SHARE_RECIPIENTS) {
+			VERBOSE("SPMC memory sharing operations support max of %u "
+				"receivers!\n",
+				MAX_MEM_SHARE_RECIPIENTS);
 			return false;
 		}
-
-		/* Sender MBZ at physical instance. */
-		if (ffa_frag_sender(ret) != 0) {
-			ERROR("%s: fragment sender mismatch: expected %d, got "
-			      "%d\n",
-			      __func__, 0, ffa_frag_sender(ret));
-			return false;
-		}
-
-		fragment_size = ret.arg2;
-		if (fragment_size == 0) {
-			ERROR("%s: fragment size must not be 0\n", __func__);
-			return false;
-		}
-
-		if (fragment_offset + fragment_size > out_size) {
-			ERROR("%s: fragment is too big to fit in out buffer "
-			      "(%d > %d)\n",
-			      __func__, fragment_offset + fragment_size,
-			      out_size);
-			return false;
-		}
-
-		VERBOSE("copying fragment at offset %d with size %d\n",
-			fragment_offset, fragment_size);
-		memcpy((uint8_t *)out + fragment_offset, mb->recv,
-		       fragment_size);
-
-		fragment_offset += fragment_size;
+	} else {
+		VERBOSE("%s: No output buffer provided...\n", __func__);
 	}
 
-	if (fragment_offset != total_size) {
-		ERROR("%s: fragment size mismatch: expected %d, got %d\n",
-		      __func__, total_size, fragment_offset);
-		return false;
-	}
-
-	ret = ffa_rx_release();
-	if (ret.fid != FFA_SUCCESS_SMC32) {
-		ERROR("ffa_rx_release() failed: %d\n", ffa_error_code(ret));
-		return false;
-	}
-
-	VERBOSE("Memory Retrieved!\n");
-
-	return true;
+	return hypervisor_retrieve_request_continue(
+			mb, handle, out, out_size, total_size, fragment_offset, false);
 }
 
 bool memory_relinquish(struct ffa_mem_relinquish *m, uint64_t handle,
@@ -494,9 +523,8 @@
 	const struct ffa_memory_region_constituent constituents[],
 	uint32_t constituent_count, uint32_t remaining_constituent_count,
 	uint32_t sent_length, uint32_t total_length, bool allocator_is_spmc,
-	struct ffa_value ret)
+	struct ffa_value *ret)
 {
-
 	uint64_t handle;
 	uint64_t handle_mask;
 	uint64_t expected_handle_mask =
@@ -509,25 +537,24 @@
 	while (remaining_constituent_count != 0) {
 		VERBOSE("%s: %d constituents left to send.\n", __func__,
 			remaining_constituent_count);
-		if (ret.fid != FFA_MEM_FRAG_RX) {
+		if (ret->fid != FFA_MEM_FRAG_RX) {
 			ERROR("ffa_mem_frax_tx() failed: %d\n",
-			      ffa_error_code(ret));
+			      ffa_error_code(*ret));
 			return false;
 		}
 
 		if (fragment_handle == FFA_MEMORY_HANDLE_INVALID) {
-			fragment_handle = ffa_frag_handle(ret);
-		} else if (ffa_frag_handle(ret) != fragment_handle) {
-			ERROR("%s: fragment handle mismatch: expected %llu, "
-			      "got %llu\n",
-			      __func__, fragment_handle, ffa_frag_handle(ret));
+			fragment_handle = ffa_frag_handle(*ret);
+		} else if (ffa_frag_handle(*ret) != fragment_handle) {
+			ERROR("%s: fragment handle mismatch: expected %llu, got %llu\n",
+			      __func__, fragment_handle, ffa_frag_handle(*ret));
 			return false;
 		}
 
-		if (ret.arg3 != sent_length) {
+		if (ret->arg3 != sent_length) {
 			ERROR("%s: fragment length mismatch: expected %u, got "
 			      "%lu\n",
-			      __func__, sent_length, ret.arg3);
+			      __func__, sent_length, ret->arg3);
 			return false;
 		}
 
@@ -537,7 +564,7 @@
 				remaining_constituent_count,
 			remaining_constituent_count, &fragment_length);
 
-		ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
+		*ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
 		sent_length += fragment_length;
 	}
 
@@ -547,13 +574,13 @@
 		return false;
 	}
 
-	if (ret.fid != FFA_SUCCESS_SMC32) {
+	if (ret->fid != FFA_SUCCESS_SMC32) {
 		ERROR("%s: ffa_mem_frax_tx() failed: %d\n", __func__,
-		      ffa_error_code(ret));
+		      ffa_error_code(*ret));
 		return false;
 	}
 
-	handle = ffa_mem_success_handle(ret);
+	handle = ffa_mem_success_handle(*ret);
 	handle_mask = (handle >> FFA_MEMORY_HANDLE_ALLOCATOR_SHIFT) &
 		      FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
 
@@ -613,7 +640,7 @@
 	if (!send_fragmented_memory_region(
 		    send_buffer, constituents, constituent_count,
 		    remaining_constituent_count, fragment_length, total_length,
-		    true, *ret)) {
+		    true, ret)) {
 		return FFA_MEMORY_HANDLE_INVALID;
 	}
 
@@ -721,13 +748,13 @@
 	}
 
 	if (info->exec_context != expected->exec_context) {
-		ERROR("Wrong context. Expected %d, got %d\n",
+		ERROR("Wrong context. Expected %x, got %x\n",
 		      expected->exec_context,
 		      info->exec_context);
 		result = false;
 	}
 	if (info->properties != expected->properties) {
-		ERROR("Wrong properties. Expected %d, got %d\n",
+		ERROR("Wrong properties. Expected %x, got %x\n",
 		      expected->properties,
 		      info->properties);
 		result = false;
@@ -969,7 +996,6 @@
 		return false;
 	}
 
-
 	if (sender != NULL) {
 		*sender = source_vm_id;
 	}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c b/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
index ad4040a..b402c58 100644
--- a/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
@@ -1580,6 +1580,8 @@
 		return TEST_RESULT_FAIL;
 	}
 
+	ffa_rx_release();
+
 	return TEST_RESULT_SUCCESS;
 }
 
@@ -1606,3 +1608,308 @@
 {
 	return base_ffa_memory_retrieve_request_fail_buffer_realm(true, true);
 }
+
+/**
+ * Do a memory sharing operation over two fragments.
+ * Before the 2nd fragment the TX buffer is set in the realm PAS.
+ * The SPMC should fault, recover from it and return ffa_error(FFA_ERROR_ABORTED).
+ */
+test_result_t test_ffa_memory_share_fragmented_tx_realm(void)
+{
+	struct mailbox_buffers mb;
+	uint32_t remaining_constituent_count = 0;
+	uint32_t total_length;
+	uint32_t fragment_length;
+	struct ffa_memory_access receiver = ffa_memory_access_init_permissions_from_mem_func(
+						SP_ID(1), FFA_MEM_SHARE_SMC32);
+	struct ffa_memory_region_constituent constituents[] = {
+		{(void *)four_share_pages, 4, 0},
+		{(void *)share_page, 1, 0}
+	};
+	struct ffa_value ffa_ret;
+	u_register_t ret_rmm;
+	test_result_t ret;
+	uint64_t handle;
+
+	if (get_armv9_2_feat_rme_support() == 0U) {
+		return TEST_RESULT_SKIPPED;
+	}
+
+	CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+	GET_TFTF_MAILBOX(mb);
+
+	register_custom_sync_exception_handler(data_abort_handler);
+
+	/* Only send one constituent to start with. */
+	remaining_constituent_count = ffa_memory_region_init(
+		(struct ffa_memory_region *)mb.send, MAILBOX_SIZE, SENDER,
+		&receiver, 1, constituents, ARRAY_SIZE(constituents), 0,
+		0, FFA_MEMORY_NOT_SPECIFIED_MEM,
+		FFA_MEMORY_CACHE_WRITE_BACK,
+		FFA_MEMORY_INNER_SHAREABLE,
+		&total_length, &fragment_length);
+
+	/* It should have copied them all. */
+	if (remaining_constituent_count > 0) {
+		ERROR("Transaction descriptor initialization failed!\n");
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	/*
+	 * Take the size of a constituent from the fragment to force the
+	 * operation to be fragmented.
+	 */
+	fragment_length -= sizeof(struct ffa_memory_region_constituent);
+
+	ffa_ret = ffa_mem_share(total_length, fragment_length);
+
+	if (!is_expected_ffa_return(ffa_ret, FFA_MEM_FRAG_RX)) {
+		ERROR("Expected %s after the memory share.\n",
+		      ffa_func_name(FFA_MEM_FRAG_RX));
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	handle = ffa_frag_handle(ffa_ret);
+
+	if (handle == FFA_MEMORY_HANDLE_INVALID) {
+		ERROR("SPMC returned an invalid handle for the operation.\n");
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	/* Prepare the next fragment for the operation. */
+	remaining_constituent_count = ffa_memory_fragment_init(
+		mb.send, PAGE_SIZE, &constituents[1], 1, &fragment_length);
+
+	/*
+	 * Delegate send/tx buffer to a realm. This should make memory sharing operation
+	 * fail.
+	 */
+	ret_rmm = host_rmi_granule_delegate((u_register_t)mb.send);
+
+	if (ret_rmm != 0UL) {
+		INFO("Delegate operation returns 0x%lx for address %p\n",
+		     ret_rmm, mb.send);
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	ffa_ret = ffa_mem_frag_tx(handle, fragment_length);
+
+	if (!is_expected_ffa_error(ffa_ret, FFA_ERROR_ABORTED)) {
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	/* Undelegate to reestablish the same security state for PAS. */
+	ret_rmm = host_rmi_granule_undelegate((u_register_t)mb.send);
+	if (ret_rmm != 0UL) {
+		ERROR("Undelegate operation returns 0x%lx for address %llx\n",
+		      ret_rmm, (uint64_t)mb.send);
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	/* This time test should pass. */
+	ffa_ret = ffa_mem_frag_tx(handle, fragment_length);
+
+	if (is_ffa_call_error(ffa_ret)) {
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	/* Reclaim memory to be able to reuse it. */
+	ffa_ret = ffa_mem_reclaim(handle, 0);
+
+	if (is_ffa_call_error(ffa_ret)) {
+		ERROR("Failed to reclaim memory to be used in next test\n");
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	ret = TEST_RESULT_SUCCESS;
+
+exit:
+	unregister_custom_sync_exception_handler();
+
+	return ret;
+}
+
+/**
+ * Do a memory sharing operation over two fragments.
+ * Before the 2nd fragment the RX buffer is set in the realm PAS.
+ * The SPMC should fault, recover from it and return
+ * ffa_error(FFA_ERROR_ABORTED).
+ *
+ * Test Sequence:
+ * - Share memory with SP(1), using a force fragmented approach.
+ * - Initiate a hypervisor retrieve request, and retrieve only
+ *   the first fragment.
+ * - Change the physical address space of NWd RX buffer.
+ * - Invoke the FFA_MEM_FRAG_RX interface, which should abort because
+ *   of previous step.
+ * - Reestablish the PAS of the NWd RX buffer.
+ * - Continuing with the hypervisor retrieve request, obtain the 2nd
+ *   fragment.
+ * - Reclaim memory for clean-up of SPMC state.
+ */
+test_result_t test_ffa_memory_share_fragmented_rx_realm(void)
+{
+	struct mailbox_buffers mb;
+	uint32_t remaining_constituent_count = 0;
+	uint32_t total_size;
+	uint32_t fragment_size;
+	uint32_t fragment_offset;
+	struct ffa_memory_access receiver = ffa_memory_access_init_permissions_from_mem_func(
+						SP_ID(1), FFA_MEM_SHARE_SMC32);
+	struct ffa_memory_region_constituent constituents[] = {
+		{(void *)four_share_pages, 4, 0},
+		{(void *)share_page, 1, 0}
+	};
+	struct ffa_value ffa_ret;
+	u_register_t ret_rmm;
+	test_result_t ret;
+	uint64_t handle;
+
+	if (get_armv9_2_feat_rme_support() == 0U) {
+		return TEST_RESULT_SKIPPED;
+	}
+
+	CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+	GET_TFTF_MAILBOX(mb);
+
+	register_custom_sync_exception_handler(data_abort_handler);
+
+	/* Only send one constituent to start with. */
+	remaining_constituent_count = ffa_memory_region_init(
+		(struct ffa_memory_region *)mb.send, MAILBOX_SIZE, SENDER,
+		&receiver, 1, constituents, ARRAY_SIZE(constituents), 0,
+		0, FFA_MEMORY_NOT_SPECIFIED_MEM,
+		FFA_MEMORY_CACHE_WRITE_BACK,
+		FFA_MEMORY_INNER_SHAREABLE,
+		&total_size, &fragment_size);
+
+	/* It should have copied them all. */
+	if (remaining_constituent_count > 0) {
+		ERROR("Transaction descriptor initialization failed!\n");
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	/*
+	 * Take the size of a constituent from the fragment to force the
+	 * operation to be fragmented.
+	 */
+	fragment_size -= sizeof(struct ffa_memory_region_constituent);
+
+	ffa_ret = ffa_mem_share(total_size, fragment_size);
+
+	if (!is_expected_ffa_return(ffa_ret, FFA_MEM_FRAG_RX)) {
+		ERROR("Expected %s after the memory share.\n",
+		      ffa_func_name(FFA_MEM_FRAG_RX));
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	handle = ffa_frag_handle(ffa_ret);
+
+	if (handle == FFA_MEMORY_HANDLE_INVALID) {
+		ERROR("SPMC returned an invalid handle for the operation.\n");
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	/* Prepare the next fragment for the operation. */
+	remaining_constituent_count = ffa_memory_fragment_init(
+		mb.send, PAGE_SIZE, &constituents[1], 1, &fragment_size);
+
+	ffa_ret = ffa_mem_frag_tx(handle, fragment_size);
+
+	if (is_ffa_call_error(ffa_ret)) {
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	/*
+	 * Request the hypervisor retrieve request.
+	 * Response should be fragmented.
+	 */
+	ffa_hypervisor_retrieve_request_init(mb.send, handle);
+	ffa_ret = ffa_mem_retrieve_req(sizeof(struct ffa_memory_region),
+				   sizeof(struct ffa_memory_region));
+
+	if (ffa_func_id(ffa_ret) != FFA_MEM_RETRIEVE_RESP) {
+		ERROR("%s: couldn't retrieve the memory page. Error: %d\n",
+		      __func__, ffa_error_code(ffa_ret));
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	total_size = ffa_mem_retrieve_res_total_size(ffa_ret);
+	fragment_size = ffa_mem_retrieve_res_frag_size(ffa_ret);
+	fragment_offset = fragment_size;
+
+	ret_rmm = host_rmi_granule_delegate((u_register_t)mb.recv);
+
+	if (ret_rmm != 0UL) {
+		INFO("Delegate operation returns 0x%lx for address %p\n",
+		     ret_rmm, mb.send);
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	ffa_ret = ffa_rx_release();
+	if (is_ffa_call_error(ffa_ret)) {
+		ERROR("ffa_rx_release() failed.\n");
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	/* Call FFA_MEM_FRAG_RX but expect it to abort. */
+	ffa_ret = ffa_mem_frag_rx(handle, fragment_offset);
+
+	if (!is_expected_ffa_error(ffa_ret, FFA_ERROR_ABORTED)) {
+		ERROR("Expected FFA_MEM_FRAG_RX to have failed with"
+		      "FFA_ERROR_ABORTED.\n");
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	/* Undelegate to reestablish the same security state for PAS. */
+	ret_rmm = host_rmi_granule_undelegate((u_register_t)mb.recv);
+	if (ret_rmm != 0UL) {
+		ERROR("Undelegate operation returns 0x%lx for address %llx\n",
+		      ret_rmm, (uint64_t)mb.send);
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	/* Continue the hypervisor retrieve request. */
+	if (!hypervisor_retrieve_request_continue(
+			&mb, handle, NULL, 0, total_size, fragment_offset, false)) {
+		ERROR("Failed to continue hypervisor retrieve request after"
+		      " restablishing PAS.\n");
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	/* Reclaim memory to be able to reuse it. */
+	ffa_ret = ffa_mem_reclaim(handle, 0);
+
+	if (is_ffa_call_error(ffa_ret)) {
+		ERROR("Failed to reclaim memory to be used in next test\n");
+		ret = TEST_RESULT_FAIL;
+		goto exit;
+	}
+
+	ret = TEST_RESULT_SUCCESS;
+
+exit:
+	unregister_custom_sync_exception_handler();
+
+	return ret;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_setup_and_discovery.c b/tftf/tests/runtime_services/secure_service/test_ffa_setup_and_discovery.c
index 8ca57f9..5effe53 100644
--- a/tftf/tests/runtime_services/secure_service/test_ffa_setup_and_discovery.c
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_setup_and_discovery.c
@@ -45,6 +45,7 @@
 		.exec_context = PRIMARY_EXEC_CTX_COUNT,
 		.properties = FFA_PARTITION_AARCH64_EXEC |
 			      FFA_PARTITION_DIRECT_REQ_RECV |
+			      FFA_PARTITION_INDIRECT_MSG |
 			      FFA_PARTITION_NOTIFICATION,
 		.uuid = {PRIMARY_UUID}
 	},
@@ -95,6 +96,7 @@
 			FFA_VERSION_1_1},
 		{"FFA_FEATURE_NPI", FFA_FEATURE_NPI, FFA_ERROR, 0,
 			FFA_VERSION_1_1},
+		{"FFA_YIELD_32", FFA_MSG_YIELD, FFA_ERROR},
 	};
 	unsigned int test_target_size =
 		get_ffa_feature_test_target(&func_ids_target);
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_node_hw_state/test_node_hw_state.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_node_hw_state/test_node_hw_state.c
index 0b7cff1..d39cf5b 100644
--- a/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_node_hw_state/test_node_hw_state.c
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_node_hw_state/test_node_hw_state.c
@@ -86,14 +86,6 @@
  */
 static test_result_t test_self_cluster(void)
 {
-#if PLAT_fvp
-	/*
-	 * Skip this check for fvp plaform due to presence of a known bug
-	 * in 11.26-FVP
-	 */
-	return TEST_RESULT_SKIPPED;
-#endif
-
 	if (tftf_psci_node_hw_state(read_mpidr_el1(), 1) != PSCI_HW_STATE_ON) {
 		DBGMSG("%s: failed\n", __func__);
 		return TEST_RESULT_FAIL;
diff --git a/tftf/tests/tests-memory-access.xml b/tftf/tests/tests-memory-access.xml
index 200c5dd..fbc0e2e 100644
--- a/tftf/tests/tests-memory-access.xml
+++ b/tftf/tests/tests-memory-access.xml
@@ -83,6 +83,10 @@
                 function="test_ffa_hypervisor_retrieve_request_fail_tx_realm" />
       <testcase name="FF-A Memory Relinquish, NWd TX buffer is in realm PAS"
                 function="test_ffa_memory_relinquish_fail_tx_realm" />
+      <testcase name="FF-A Memory Frag Tx, NWd TX buffer is in realm PAS"
+                function="test_ffa_memory_share_fragmented_tx_realm" />
+      <testcase name="FF-A Memory Frag Rx, NWd RX buffer is in realm PAS"
+                function="test_ffa_memory_share_fragmented_rx_realm" />
   </testsuite>
 
 </testsuites>
diff --git a/tftf/tests/tests-versal2.mk b/tftf/tests/tests-versal2.mk
new file mode 100644
index 0000000..a76fdb7
--- /dev/null
+++ b/tftf/tests/tests-versal2.mk
@@ -0,0 +1,9 @@
+#
+# Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+TESTS_SOURCES   +=      $(addprefix tftf/tests/plat/amd/versal2/,          \
+	        test_all_cores.c                                           \
+		)
diff --git a/tftf/tests/tests-versal2.xml b/tftf/tests/tests-versal2.xml
new file mode 100644
index 0000000..1a47586
--- /dev/null
+++ b/tftf/tests/tests-versal2.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+  Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+
+  SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+  <!--
+     The "Versal2 Tests" testsuite groups AMD-Xilinx Versal Gen 2
+     platform-specific test cases (in addition to the standard test
+     suites), such as the multi-core power-on test.
+  -->
+  <testsuite name="Versal2 Tests" description="AMD-Xilinx Versal2 platform test code">
+     <testcase name="Multi core test" function="test_power_on_cores" />
+  </testsuite>
+
+</testsuites>