Trusted Firmware-A Tests, version 2.0

This is the first public version of the tests for the Trusted
Firmware-A project. Please see the documentation provided in the
source tree for more details.

Change-Id: I6f3452046a1351ac94a71b3525c30a4ca8db7867
Signed-off-by: Sandrine Bailleux <sandrine.bailleux@arm.com>
Co-authored-by: amobal01 <amol.balasokamble@arm.com>
Co-authored-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
Co-authored-by: Asha R <asha.r@arm.com>
Co-authored-by: Chandni Cherukuri <chandni.cherukuri@arm.com>
Co-authored-by: David Cunado <david.cunado@arm.com>
Co-authored-by: Dimitris Papastamos <dimitris.papastamos@arm.com>
Co-authored-by: Douglas Raillard <douglas.raillard@arm.com>
Co-authored-by: dp-arm <dimitris.papastamos@arm.com>
Co-authored-by: Jeenu Viswambharan <jeenu.viswambharan@arm.com>
Co-authored-by: Jonathan Wright <jonathan.wright@arm.com>
Co-authored-by: Kévin Petit <kevin.petit@arm.com>
Co-authored-by: Roberto Vargas <roberto.vargas@arm.com>
Co-authored-by: Sathees Balya <sathees.balya@arm.com>
Co-authored-by: Shawon Roy <Shawon.Roy@arm.com>
Co-authored-by: Soby Mathew <soby.mathew@arm.com>
Co-authored-by: Thomas Abraham <thomas.abraham@arm.com>
Co-authored-by: Vikram Kanigiri <vikram.kanigiri@arm.com>
Co-authored-by: Yatharth Kochar <yatharth.kochar@arm.com>
diff --git a/plat/common/aarch32/platform_helpers.S b/plat/common/aarch32/platform_helpers.S
new file mode 100644
index 0000000..1a57418
--- /dev/null
+++ b/plat/common/aarch32/platform_helpers.S
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <pl011.h>
+#include <platform_def.h>
+
+	.weak	platform_get_core_pos
+	.weak	plat_crash_console_init
+	.weak	plat_crash_console_putc
+	.weak	plat_crash_console_flush
+
+	/* -----------------------------------------------------
+	 *  int platform_get_core_pos(u_register_t mpidr);
+	 *  With this function: CorePos = (ClusterId * 4) +
+	 *				  CoreId
+	 * -----------------------------------------------------
+	 */
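+	/*
+	 * Illustrative example, assuming the cluster ID occupies MPIDR
+	 * bits [15:8]: ClusterId 1, CoreId 2 gives (0x100 >> 6) + 2 = 6.
+	 */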
+func platform_get_core_pos
+	and	r1, r0, #MPIDR_CPU_MASK
+	and	r0, r0, #MPIDR_CLUSTER_MASK
+	add	r0, r1, r0, LSR #6
+	bx	lr
+endfunc platform_get_core_pos
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_init(void)
+	 * Function to initialize the crash console
+	 * without a C Runtime to print crash report.
+	 * Clobber list : r0 - r4
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_init
+	ldr	r0, =PLAT_ARM_UART_BASE
+	ldr	r1, =PLAT_ARM_UART_CLK_IN_HZ
+	ldr	r2, =PL011_BAUDRATE
+	b	console_core_init
+endfunc plat_crash_console_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_putc(int c)
+	 * Function to print a character on the crash
+	 * console without a C Runtime.
+	 * Clobber list : r1, r2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_putc
+	ldr	r1, =PLAT_ARM_UART_BASE
+	b	console_core_putc
+endfunc plat_crash_console_putc
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_flush()
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * Out : return -1 on error else return 0.
+	 * Clobber list : r0 - r1
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_flush
+	ldr	r1, =PLAT_ARM_UART_BASE
+	b	console_core_flush
+endfunc plat_crash_console_flush
diff --git a/plat/common/aarch32/platform_mp_stack.S b/plat/common/aarch32/platform_mp_stack.S
new file mode 100644
index 0000000..c4ea895
--- /dev/null
+++ b/plat/common/aarch32/platform_mp_stack.S
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+
+	.local	pcpu_dv_mem_stack
+	.local	platform_normal_stacks
+	.weak	platform_set_stack
+	.weak	platform_get_stack
+	.weak	platform_set_coherent_stack
+
+	/* -----------------------------------------------------
+	 * void platform_set_coherent_stack (unsigned long mpidr)
+	 *
+	 * For a given CPU, this function sets the stack pointer
+	 * to a stack allocated in device memory. This stack can
+	 * be used by C code that enables/disables the SCTLR.M and
+	 * SCTLR.C bits, e.g. while powering down a CPU.
+	 * -----------------------------------------------------
+	 */
+func platform_set_coherent_stack
+	mov	r9, lr
+	get_mp_stack pcpu_dv_mem_stack, PCPU_DV_MEM_STACK_SIZE
+	mov	sp, r0
+	bx	r9
+endfunc platform_set_coherent_stack
+
+	/* -----------------------------------------------------
+	 * uintptr_t platform_get_stack (u_register_t mpidr)
+	 *
+	 * For a given CPU, this function returns the stack
+	 * pointer for a stack allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func platform_get_stack
+	mov	r9, lr
+	get_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+	bx	r9
+endfunc	platform_get_stack
+
+	/* -----------------------------------------------------
+	 * void platform_set_stack (u_register_t mpidr)
+	 *
+	 * For a given CPU, this function sets the stack
+	 * pointer to a stack allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func platform_set_stack
+	mov	r9, lr
+	get_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+	mov	sp, r0
+	bx	r9
+endfunc platform_set_stack
+
+	/* -----------------------------------------------------
+	 * Per-cpu stacks in normal memory.
+	 * Used for C code during runtime execution (when coherent
+	 * stacks are not required).
+	 * Each cpu gets a stack of PLATFORM_STACK_SIZE bytes.
+	 * -----------------------------------------------------
+	 */
+declare_stack platform_normal_stacks, tftf_normal_stacks, \
+		PLATFORM_STACK_SIZE, PLATFORM_CORE_COUNT
+
+	/* -----------------------------------------------------
+	 * Per-cpu stacks in device memory.
+	 * Used for C code just before power down or right after
+	 * power up when the MMU or caches need to be turned on
+	 * or off.
+	 * Each cpu gets a stack of PCPU_DV_MEM_STACK_SIZE bytes.
+	 * -----------------------------------------------------
+	 */
+declare_stack pcpu_dv_mem_stack, tftf_coherent_stacks, \
+		PCPU_DV_MEM_STACK_SIZE, PLATFORM_CORE_COUNT
diff --git a/plat/common/aarch32/platform_up_stack.S b/plat/common/aarch32/platform_up_stack.S
new file mode 100644
index 0000000..e67ded8
--- /dev/null
+++ b/plat/common/aarch32/platform_up_stack.S
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+
+	.local	platform_normal_stacks
+	.globl	platform_set_stack
+	.globl	platform_get_stack
+
+	/* -----------------------------------------------------
+	 * unsigned long platform_get_stack (unsigned long)
+	 *
+	 * For cold-boot images, only the primary CPU needs a
+	 * stack. This function returns the stack pointer for a
+	 * stack allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func platform_get_stack
+	get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+	bx	lr
+endfunc	platform_get_stack
+
+	/* -----------------------------------------------------
+	 * void platform_set_stack (unsigned long)
+	 *
+	 * For cold-boot images, only the primary CPU needs a
+	 * stack. This function sets the stack pointer to a stack
+	 * allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func platform_set_stack
+	get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+	mov	sp, r0
+	bx	lr
+endfunc platform_set_stack
+
+	/* -----------------------------------------------------
+	 * Single cpu stack in normal memory.
+	 * Used for C code during boot; PLATFORM_STACK_SIZE bytes
+	 * are allocated.
+	 * -----------------------------------------------------
+	 */
+declare_stack platform_normal_stacks, ns_bl_normal_stacks, \
+		PLATFORM_STACK_SIZE, 1
diff --git a/plat/common/aarch64/platform_helpers.S b/plat/common/aarch64/platform_helpers.S
new file mode 100644
index 0000000..2161638
--- /dev/null
+++ b/plat/common/aarch64/platform_helpers.S
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <pl011.h>
+#include <platform_def.h>
+
+	.weak	platform_get_core_pos
+	.weak	plat_crash_console_init
+	.weak	plat_crash_console_putc
+	.weak	plat_crash_console_flush
+
+	/* -----------------------------------------------------
+	 *  int platform_get_core_pos(u_register_t mpidr);
+	 *  With this function: CorePos = (ClusterId * 4) +
+	 *  				  CoreId
+	 * -----------------------------------------------------
+	 */
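+	/*
+	 * Illustrative example, assuming the cluster ID occupies MPIDR
+	 * bits [15:8]: ClusterId 1, CoreId 2 gives (0x100 >> 6) + 2 = 6.
+	 */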
+func platform_get_core_pos
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	add	x0, x1, x0, LSR #6
+	ret
+endfunc platform_get_core_pos
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_init(void)
+	 * Function to initialize the crash console
+	 * without a C Runtime to print crash report.
+	 * Clobber list : x0 - x4
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_init
+	mov_imm	x0, PLAT_ARM_UART_BASE
+	mov_imm	x1, PLAT_ARM_UART_CLK_IN_HZ
+	mov_imm	x2, PL011_BAUDRATE
+	b	console_core_init
+endfunc plat_crash_console_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_putc(int c)
+	 * Function to print a character on the crash
+	 * console without a C Runtime.
+	 * Clobber list : x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_putc
+	mov_imm	x1, PLAT_ARM_UART_BASE
+	b	console_core_putc
+endfunc plat_crash_console_putc
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_flush()
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * Out : return -1 on error else return 0.
+	 * Clobber list : x0 - x1
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_flush
+	mov_imm	x1, PLAT_ARM_UART_BASE
+	b	console_core_flush
+endfunc plat_crash_console_flush
diff --git a/plat/common/aarch64/platform_mp_stack.S b/plat/common/aarch64/platform_mp_stack.S
new file mode 100644
index 0000000..fe167cc
--- /dev/null
+++ b/plat/common/aarch64/platform_mp_stack.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+
+	.local	pcpu_dv_mem_stack
+	.local	platform_normal_stacks
+	.weak	platform_set_stack
+	.weak	platform_get_stack
+	.weak	platform_set_coherent_stack
+
+
+	/* -----------------------------------------------------
+	 * void platform_set_coherent_stack (unsigned long mpidr)
+	 *
+	 * For a given CPU, this function sets the stack pointer
+	 * to a stack allocated in device memory. This stack can
+	 * be used by C code that enables/disables the SCTLR.M and
+	 * SCTLR.C bits, e.g. while powering down a CPU.
+	 * -----------------------------------------------------
+	 */
+func platform_set_coherent_stack
+	mov x9, x30 // lr
+	get_mp_stack pcpu_dv_mem_stack, PCPU_DV_MEM_STACK_SIZE
+	mov sp, x0
+	ret x9
+endfunc platform_set_coherent_stack
+
+	/* -----------------------------------------------------
+	 * unsigned long platform_get_stack (unsigned long mpidr)
+	 *
+	 * For a given CPU, this function returns the stack
+	 * pointer for a stack allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func platform_get_stack
+	mov x10, x30 // lr
+	get_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+	ret x10
+endfunc platform_get_stack
+
+	/* -----------------------------------------------------
+	 * void platform_set_stack (unsigned long mpidr)
+	 *
+	 * For a given CPU, this function sets the stack pointer
+	 * to a stack allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func platform_set_stack
+	mov x9, x30 // lr
+	bl  platform_get_stack
+	mov sp, x0
+	ret x9
+endfunc platform_set_stack
+
+	/* -----------------------------------------------------
+	 * Per-cpu stacks in normal memory.
+	 * Used for C code during runtime execution (when coherent
+	 * stacks are not required).
+	 * Each cpu gets a stack of PLATFORM_STACK_SIZE bytes.
+	 * -----------------------------------------------------
+	 */
+declare_stack platform_normal_stacks, tftf_normal_stacks, \
+		PLATFORM_STACK_SIZE, PLATFORM_CORE_COUNT
+
+	/* -----------------------------------------------------
+	 * Per-cpu stacks in device memory.
+	 * Used for C code just before power down or right after
+	 * power up when the MMU or caches need to be turned on
+	 * or off.
+	 * Each cpu gets a stack of PCPU_DV_MEM_STACK_SIZE bytes.
+	 * -----------------------------------------------------
+	 */
+declare_stack pcpu_dv_mem_stack, tftf_coherent_stacks, \
+		PCPU_DV_MEM_STACK_SIZE, PLATFORM_CORE_COUNT
diff --git a/plat/common/aarch64/platform_up_stack.S b/plat/common/aarch64/platform_up_stack.S
new file mode 100644
index 0000000..c61b472
--- /dev/null
+++ b/plat/common/aarch64/platform_up_stack.S
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+
+	.local	platform_normal_stacks
+	.globl	platform_set_stack
+	.globl	platform_get_stack
+
+	/* -----------------------------------------------------
+	 * unsigned long platform_get_stack (unsigned long)
+	 *
+	 * For cold-boot images, only the primary CPU needs a
+	 * stack. This function returns the stack pointer for a
+	 * stack allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func platform_get_stack
+	get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+	ret
+endfunc platform_get_stack
+
+	/* -----------------------------------------------------
+	 * void platform_set_stack (unsigned long)
+	 *
+	 * For cold-boot images, only the primary CPU needs a
+	 * stack. This function sets the stack pointer to a stack
+	 * allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func platform_set_stack
+	get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+	mov sp, x0
+	ret
+endfunc platform_set_stack
+
+	/* -----------------------------------------------------
+	 * Single cpu stack in normal memory.
+	 * Used for C code during boot; PLATFORM_STACK_SIZE bytes
+	 * are allocated.
+	 * -----------------------------------------------------
+	 */
+declare_stack platform_normal_stacks, ns_bl_normal_stacks, \
+		PLATFORM_STACK_SIZE, 1
diff --git a/plat/common/fwu_nvm_accessors.c b/plat/common/fwu_nvm_accessors.c
new file mode 100644
index 0000000..5b32151
--- /dev/null
+++ b/plat/common/fwu_nvm_accessors.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <firmware_image_package.h>
+#include <fwu_nvm.h>
+#include <io_fip.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <status.h>
+#include <string.h>
+#include <uuid_utils.h>
+
+
+STATUS fwu_nvm_write(unsigned long long offset, const void *buffer, size_t size)
+{
+	uintptr_t nvm_handle;
+	int ret;
+	size_t length_write;
+
+	if (offset + size > FLASH_SIZE)
+		return STATUS_OUT_OF_RESOURCES;
+
+	/* Obtain a handle to the NVM by querying the platform layer */
+	plat_get_nvm_handle(&nvm_handle);
+
+	/* Seek to the given offset. */
+	ret = io_seek(nvm_handle, IO_SEEK_SET, offset);
+	if (ret != IO_SUCCESS)
+		return STATUS_FAIL;
+
+	/* Write to the given offset. */
+	ret = io_write(nvm_handle, (const uintptr_t)buffer,
+		size, &length_write);
+	if ((ret != IO_SUCCESS) || (size != length_write))
+		return STATUS_FAIL;
+
+	return STATUS_SUCCESS;
+}
+
+STATUS fwu_nvm_read(unsigned long long offset, void *buffer, size_t size)
+{
+	uintptr_t nvm_handle;
+	int ret;
+	size_t length_read;
+
+	if (offset + size > FLASH_SIZE)
+		return STATUS_OUT_OF_RESOURCES;
+
+	/* Obtain a handle to the NVM by querying the platform layer */
+	plat_get_nvm_handle(&nvm_handle);
+
+	/* Seek to the given offset. */
+	ret = io_seek(nvm_handle, IO_SEEK_SET, offset);
+	if (ret != IO_SUCCESS)
+		return STATUS_FAIL;
+
+	/* Read from the given offset. */
+	ret = io_read(nvm_handle, (const uintptr_t)buffer,
+		size, &length_read);
+	if ((ret != IO_SUCCESS) || (size != length_read))
+		return STATUS_FAIL;
+
+	return STATUS_SUCCESS;
+}
+
+
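+/*
+ * Update the FIP stored in NVM with the candidate FIP located at fip_addr in
+ * memory (or, when FWU_BL_TEST is enabled, at the address read back from the
+ * TFTF test case buffer). The candidate is validated by checking its ToC
+ * header, its total size is derived from the terminating NULL-UUID ToC entry,
+ * and the image is staged in DDR before being written to NVM. The ToC header
+ * is then read back from NVM to confirm the update.
+ */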
+STATUS fwu_update_fip(unsigned long fip_addr)
+{
+	uintptr_t nvm_handle;
+	int ret;
+	size_t bytes;
+	int fip_size;
+	unsigned int fip_read;
+	fip_toc_header_t *toc_header;
+	fip_toc_entry_t *toc_entry;
+
+	/* Obtain a handle to the NVM by querying the platform layer */
+	plat_get_nvm_handle(&nvm_handle);
+
+#if FWU_BL_TEST
+	/* Read the address of backup fip.bin for Firmware Update. */
+	ret = io_seek(nvm_handle, IO_SEEK_SET,
+			FWU_TFTF_TESTCASE_BUFFER_OFFSET);
+	if (ret != IO_SUCCESS)
+		return STATUS_FAIL;
+
+	ret = io_read(nvm_handle, (const uintptr_t)&fip_addr,
+			sizeof(bytes), &bytes);
+	if (ret != IO_SUCCESS)
+		return STATUS_FAIL;
+#endif /* FWU_BL_TEST */
+
+	/* If the new FIP address is 0 it means no update. */
+	if (fip_addr == 0)
+		return STATUS_SUCCESS;
+
+	/* Set the ToC Header at the base of the buffer */
+	toc_header = (fip_toc_header_t *)fip_addr;
+
+	/* Check if this FIP is Valid */
+	if ((toc_header->name != TOC_HEADER_NAME) ||
+		(toc_header->serial_number == 0))
+		return STATUS_LOAD_ERROR;
+
+	/* Get to the last NULL TOC entry */
+	toc_entry = (fip_toc_entry_t *)(toc_header + 1);
+	while (!is_uuid_null(&toc_entry->uuid))
+		toc_entry++;
+
+	/* get the total size of this FIP */
+	fip_size = (int)toc_entry->offset_address;
+
+	/* Copy the new FIP in DDR. */
+	memcpy((void *)FIP_IMAGE_TMP_DDR_ADDRESS, (void *)fip_addr, fip_size);
+
+	/* Update the FIP */
+	ret = io_seek(nvm_handle, IO_SEEK_SET, 0);
+	if (ret != IO_SUCCESS)
+		return STATUS_FAIL;
+
+	ret = io_write(nvm_handle, (const uintptr_t)FIP_IMAGE_TMP_DDR_ADDRESS,
+			fip_size, &bytes);
+	if ((ret != IO_SUCCESS) || fip_size != bytes)
+		return STATUS_LOAD_ERROR;
+
+	/* Read the TOC header after update. */
+	ret = io_seek(nvm_handle, IO_SEEK_SET, 0);
+	if (ret != IO_SUCCESS)
+		return STATUS_LOAD_ERROR;
+
+	ret = io_read(nvm_handle, (const uintptr_t)&fip_read,
+		sizeof(fip_read), &bytes);
+	if (ret != IO_SUCCESS)
+		return STATUS_FAIL;
+
+	/* Check if this FIP is Valid */
+	if (fip_read != TOC_HEADER_NAME)
+		return STATUS_LOAD_ERROR;
+
+#if FWU_BL_TEST
+	unsigned int done_flag = FIP_IMAGE_UPDATE_DONE_FLAG;
+	/* Update the TFTF test case buffer with DONE flag */
+	ret = io_seek(nvm_handle, IO_SEEK_SET,
+			FWU_TFTF_TESTCASE_BUFFER_OFFSET);
+	if (ret != IO_SUCCESS)
+		return STATUS_FAIL;
+
+	ret = io_write(nvm_handle, (const uintptr_t)&done_flag,
+			4, &bytes);
+	if (ret != IO_SUCCESS)
+		return STATUS_FAIL;
+#endif /* FWU_BL_TEST */
+
+	INFO("FWU Image update success\n");
+
+	return STATUS_SUCCESS;
+}
+
diff --git a/plat/common/image_loader.c b/plat/common/image_loader.c
new file mode 100644
index 0000000..9b27f6f
--- /dev/null
+++ b/plat/common/image_loader.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <firmware_image_package.h>
+#include <image_loader.h>
+#include <io_driver.h>
+#include <io_fip.h>
+#include <io_memmap.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <string.h>
+#include <tftf_lib.h>
+
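+/*
+ * Return the absolute address of the given image within the FWU FIP. The
+ * image is opened through the IO framework and the offset recorded in its
+ * FIP ToC entry is added to PLAT_ARM_FWU_FIP_BASE. Returns 0 if the image
+ * cannot be found or accessed.
+ */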
+unsigned long get_image_offset(unsigned int image_id)
+{
+	uintptr_t dev_handle;
+	uintptr_t image_handle;
+	uintptr_t image_spec;
+	unsigned long img_offset;
+	int io_result;
+	io_entity_t *entity;
+	fip_file_state_t *fp;
+
+	/* Obtain a reference to the image by querying the platform layer */
+	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+	if (io_result != IO_SUCCESS) {
+		WARN("Failed to obtain reference to image id=%u (%i)\n",
+			image_id, io_result);
+		return 0;
+	}
+
+	/* Attempt to access the image */
+	io_result = io_open(dev_handle, image_spec, &image_handle);
+	if (io_result != IO_SUCCESS) {
+		WARN("Failed to access image id=%u (%i)\n",
+			image_id, io_result);
+		return 0;
+	}
+
+	entity = (io_entity_t *)image_handle;
+
+	fp = (fip_file_state_t *)entity->info;
+	img_offset = PLAT_ARM_FWU_FIP_BASE + fp->entry.offset_address;
+
+	(void)io_close(image_handle);
+	(void)io_dev_close(dev_handle);
+
+	return img_offset;
+}
+
+
+unsigned long get_image_size(unsigned int image_id)
+{
+	uintptr_t dev_handle;
+	uintptr_t image_handle;
+	uintptr_t image_spec;
+	size_t image_size;
+	int io_result;
+
+	/* Obtain a reference to the image by querying the platform layer */
+	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+	if (io_result != IO_SUCCESS) {
+		WARN("Failed to obtain reference to image id=%u (%i)\n",
+			image_id, io_result);
+		return 0;
+	}
+
+	/* Attempt to access the image */
+	io_result = io_open(dev_handle, image_spec, &image_handle);
+	if (io_result != IO_SUCCESS) {
+		WARN("Failed to access image id=%u (%i)\n",
+			image_id, io_result);
+		return 0;
+	}
+
+	/* Find the size of the image */
+	io_result = io_size(image_handle, &image_size);
+	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
+		WARN("Failed to determine the size of the image id=%u (%i)\n",
+			image_id, io_result);
+	}
+	io_result = io_close(image_handle);
+	io_result = io_dev_close(dev_handle);
+
+	return image_size;
+}
+
+
+int load_image(unsigned int image_id, uintptr_t image_base)
+{
+	uintptr_t dev_handle;
+	uintptr_t image_handle;
+	uintptr_t image_spec;
+	size_t image_size;
+	size_t bytes_read;
+	int io_result;
+
+	/* Obtain a reference to the image by querying the platform layer */
+	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+	if (io_result != IO_SUCCESS) {
+		WARN("Failed to obtain reference to image id=%u (%i)\n",
+			image_id, io_result);
+		return io_result;
+	}
+
+	/* Attempt to access the image */
+	io_result = io_open(dev_handle, image_spec, &image_handle);
+	if (io_result != IO_SUCCESS) {
+		WARN("Failed to access image id=%u (%i)\n",
+			image_id, io_result);
+		return io_result;
+	}
+
+	INFO("Loading image id=%u at address %p\n", image_id, (void *)image_base);
+
+	/* Find the size of the image */
+	io_result = io_size(image_handle, &image_size);
+	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
+		WARN("Failed to determine the size of the image id=%u (%i)\n",
+			image_id, io_result);
+		goto exit;
+	}
+
+	/* Load the image now */
+	io_result = io_read(image_handle, image_base, image_size, &bytes_read);
+	if ((io_result != IO_SUCCESS) || (bytes_read < image_size)) {
+		WARN("Failed to load image id=%u (%i)\n", image_id, io_result);
+		goto exit;
+	}
+
+	/*
+	 * File has been successfully loaded.
+	 * Flush the image so that the next EL can see it.
+	 */
+	flush_dcache_range(image_base, image_size);
+
+	INFO("Image id=%u loaded: %p - %p\n", image_id, (void *)image_base,
+	     (void *)(image_base + image_size - 1));
+
+exit:
+	io_close(image_handle);
+	io_dev_close(dev_handle);
+	return io_result;
+}
+
+
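+/*
+ * Load an image in several chunks. The IO handles are kept in static
+ * variables so that they stay open across calls; they are only closed once
+ * 'is_last_block' is non-zero or an error occurs. Consequently, the blocks
+ * of a given image should not be interleaved with accesses to other images
+ * through this function.
+ */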
+int load_partial_image(unsigned int image_id,
+		uintptr_t image_base,
+		size_t image_size,
+		unsigned int is_last_block)
+{
+	static uintptr_t dev_handle;
+	static uintptr_t image_handle;
+	uintptr_t image_spec;
+	size_t bytes_read;
+	int io_result;
+
+	if (!image_handle) {
+		/* Obtain a reference to the image by querying the platform layer */
+		io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+		if (io_result != IO_SUCCESS) {
+			WARN("Failed to obtain reference to image id=%u (%i)\n",
+				image_id, io_result);
+			return io_result;
+		}
+
+		/* Attempt to access the image */
+		io_result = io_open(dev_handle, image_spec, &image_handle);
+		if (io_result != IO_SUCCESS) {
+			WARN("Failed to access image id=%u (%i)\n",
+				image_id, io_result);
+			return io_result;
+		}
+	}
+
+	INFO("Loading image id=%u at address %p\n", image_id, (void *)image_base);
+
+	io_result = io_read(image_handle, image_base, image_size, &bytes_read);
+	if ((io_result != IO_SUCCESS) || (bytes_read < image_size)) {
+		WARN("Failed to load image id=%u (%i)\n", image_id, io_result);
+		is_last_block = 0;
+		goto exit;
+	}
+
+	/*
+	 * File has been successfully loaded.
+	 * Flush the image so that the next EL can see it.
+	 */
+	flush_dcache_range(image_base, image_size);
+
+	INFO("Image id=%u loaded: %p - %p\n", image_id, (void *)image_base,
+	     (void *)(image_base + image_size - 1));
+
+exit:
+
+	if (is_last_block == 0) {
+		io_close(image_handle);
+		io_dev_close(dev_handle);
+		image_handle = 0;
+	}
+	return io_result;
+}
+
diff --git a/plat/common/plat_common.c b/plat/common/plat_common.c
new file mode 100644
index 0000000..5713871
--- /dev/null
+++ b/plat/common/plat_common.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <console.h>
+#include <debug.h>
+#include <platform.h>
+#include <sp805.h>
+#include <xlat_tables_v2.h>
+
+/*
+ * The following platform functions are all weakly defined. They provide typical
+ * implementations that may be re-used by multiple platforms but may also be
+ * overridden by a platform if required.
+ */
+
+#pragma weak tftf_platform_end
+#pragma weak tftf_platform_watchdog_set
+#pragma weak tftf_platform_watchdog_reset
+#pragma weak tftf_plat_configure_mmu
+#pragma weak tftf_plat_enable_mmu
+#pragma weak tftf_plat_reset
+#pragma weak plat_get_prot_regions
+
+#if IMAGE_TFTF
+
+#define IMAGE_RO_BASE	TFTF_BASE
+IMPORT_SYM(uintptr_t,	 __RO_END__,		IMAGE_RO_END);
+
+#define IMAGE_RW_BASE	IMAGE_RO_END
+IMPORT_SYM(uintptr_t,	__TFTF_END__,		IMAGE_RW_END);
+
+IMPORT_SYM(uintptr_t,	__COHERENT_RAM_START__,	COHERENT_RAM_START);
+IMPORT_SYM(uintptr_t,	__COHERENT_RAM_END__,	COHERENT_RAM_END);
+
+#elif IMAGE_NS_BL1U
+
+IMPORT_SYM(uintptr_t,	 __RO_END__,		IMAGE_RO_END_UNALIGNED);
+#define IMAGE_RO_BASE	NS_BL1U_RO_BASE
+#define IMAGE_RO_END	round_up(IMAGE_RO_END_UNALIGNED, PAGE_SIZE)
+
+#define IMAGE_RW_BASE	NS_BL1U_RW_BASE
+IMPORT_SYM(uintptr_t,	__NS_BL1U_RAM_END__,	IMAGE_RW_END);
+
+#elif IMAGE_NS_BL2U
+
+#define IMAGE_RO_BASE	NS_BL2U_BASE
+IMPORT_SYM(uintptr_t,	 __RO_END__,		IMAGE_RO_END);
+
+#define IMAGE_RW_BASE	IMAGE_RO_END
+IMPORT_SYM(uintptr_t,	__NS_BL2U_END__,	IMAGE_RW_END_UNALIGNED);
+#define IMAGE_RW_END	round_up(IMAGE_RW_END_UNALIGNED, PAGE_SIZE)
+
+#endif
+
+void tftf_platform_end(void)
+{
+	/*
+	 * Send EOT (End Of Transmission) on the UART.
+	 * This can be used to shut down a software model.
+	 */
+	static const char ascii_eot = 4;
+	console_putc(ascii_eot);
+}
+
+void tftf_platform_watchdog_set(void)
+{
+	/* Placeholder function which should be redefined by each platform */
+}
+
+void tftf_platform_watchdog_reset(void)
+{
+	/* Placeholder function which should be redefined by each platform */
+}
+
+void tftf_plat_configure_mmu(void)
+{
+	/* RO data + Code */
+	mmap_add_region(IMAGE_RO_BASE, IMAGE_RO_BASE,
+			IMAGE_RO_END - IMAGE_RO_BASE, MT_CODE);
+
+	/* Data + BSS */
+	mmap_add_region(IMAGE_RW_BASE, IMAGE_RW_BASE,
+			IMAGE_RW_END - IMAGE_RW_BASE, MT_RW_DATA);
+
+#if IMAGE_TFTF
+	mmap_add_region(COHERENT_RAM_START, COHERENT_RAM_START,
+			COHERENT_RAM_END - COHERENT_RAM_START,
+			MT_DEVICE | MT_RW | MT_NS);
+#endif
+
+	mmap_add(tftf_platform_get_mmap());
+	init_xlat_tables();
+
+	tftf_plat_enable_mmu();
+}
+
+void tftf_plat_enable_mmu(void)
+{
+#ifndef AARCH32
+	if (IS_IN_EL1())
+		enable_mmu_el1(0);
+	else if (IS_IN_EL2())
+		enable_mmu_el2(0);
+	else
+		panic();
+#else
+	if (IS_IN_HYP())
+		enable_mmu_hyp(0);
+	else
+		enable_mmu_svc_mon(0);
+#endif
+}
+
+void tftf_plat_reset(void)
+{
+	/*
+	 * SP805 peripheral interrupt is not serviced in TFTF. The reset signal
+	 * generated by it is used to reset the platform.
+	 */
+	sp805_wdog_start(1);
+
+	/*
+	 * The reset might take a few cycles to take effect, depending on the
+	 * ratio between the CPU clock frequency and the watchdog clock frequency.
+	 */
+	while (1)
+		;
+}
+
+const mem_region_t *plat_get_prot_regions(int *nelem)
+{
+	*nelem = 0;
+	return NULL;
+}
diff --git a/plat/common/plat_state_id.c b/plat/common/plat_state_id.c
new file mode 100644
index 0000000..ce43ba4
--- /dev/null
+++ b/plat/common/plat_state_id.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <psci.h>
+#include <tftf.h>
+
+static unsigned int pstate_initialized;
+
+/*
+ * Stores pointers to the plat_state_prop_t arrays for all implemented levels
+ */
+static const plat_state_prop_t *plat_state_ptr[PLAT_MAX_PWR_LEVEL + 1];
+
+/*
+ * Stores the number of implemented power states at each level
+ */
+static unsigned int power_states_per_level[PLAT_MAX_PWR_LEVEL + 1];
+
+void tftf_init_pstate_framework(void)
+{
+	int i, j;
+
+	if (pstate_initialized)
+		return;
+
+	/* Detect the PSCI power state format used. */
+	tftf_detect_psci_pstate_format();
+
+	/*
+	 * Get and save the pointers to plat_state_prop_t values for all
+	 * levels. Also, store the max number of local states possible for
+	 * each level in power_states_per_level.
+	 */
+	for (i = 0; i <= PLAT_MAX_PWR_LEVEL; i++) {
+		plat_state_ptr[i] = plat_get_state_prop(i);
+		assert(plat_state_ptr[i]);
+
+		for (j = 0; (plat_state_ptr[i]+j)->state_ID != 0; j++)
+			;
+
+		power_states_per_level[i] = j;
+	}
+
+	pstate_initialized = 1;
+}
+
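+/*
+ * Advance the array of per-level power state indices to the next combination,
+ * odometer-style: the index at power level 0 is incremented first and, when
+ * it wraps around the number of states available at that level, the carry
+ * propagates to the next level. Once every level up to 'power_level' has
+ * wrapped, all indices are reset to PWR_STATE_INIT_INDEX.
+ */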
+void tftf_set_next_state_id_idx(unsigned int power_level,
+					unsigned int pstate_id_idx[])
+{
+	unsigned int i;
+#if ENABLE_ASSERTIONS
+	/* Verify that this is a valid power level. */
+	assert(power_level <= PLAT_MAX_PWR_LEVEL);
+
+	/*
+	 * Verify that, if a level's index is PWR_STATE_INIT_INDEX, all higher
+	 * levels are at PWR_STATE_INIT_INDEX too. The top power level does not
+	 * need to be checked in the outer loop.
+	 */
+	for (i = 0; i < power_level; i++) {
+		if (pstate_id_idx[i] == PWR_STATE_INIT_INDEX) {
+			for ( ; i <= power_level; i++)
+				assert(pstate_id_idx[i] == PWR_STATE_INIT_INDEX);
+		}
+	}
+#endif
+
+	/* Increment the pstate_id_idx starting from the lowest power level */
+	for (i = 0; i <= power_level; i++) {
+		pstate_id_idx[i]++;
+
+		/*
+		 * Wrap the index around if the maximum number of power states
+		 * for that level is reached, and carry over to the next level.
+		 */
+		if (pstate_id_idx[i] == power_states_per_level[i])
+			pstate_id_idx[i] = 0;
+		else
+			break;
+	}
+
+	/*
+	 * Check if the requested power level has wrapped around. If it has,
+	 * reset pstate_id_idx.
+	 */
+	if (i > power_level) {
+		for (i = 0; i <= power_level; i++)
+			pstate_id_idx[i] = PWR_STATE_INIT_INDEX;
+	}
+}
+
+void tftf_set_deepest_pstate_idx(unsigned int power_level,
+				unsigned int pstate_id_idx[])
+{
+	int i;
+
+	/* Verify that this is a valid power level. */
+	assert(power_level <= PLAT_MAX_PWR_LEVEL);
+
+	/*
+	 * Assign the highest pstate_id_idx starting from the lowest power
+	 * level
+	 */
+	for (i = 0; i <= power_level; i++)
+		pstate_id_idx[i] = power_states_per_level[i] - 1;
+}
+
+
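+/*
+ * Compose the PSCI power state parameters from the current per-level state
+ * indices: each level's local state ID is OR-ed into the composite state ID
+ * at (level * PLAT_LOCAL_PSTATE_WIDTH), the suspend type is the deepest type
+ * requested by any level, and PSCI_E_INVALID_PARAMS is returned if a higher
+ * level requests a greater suspend depth than a lower one.
+ */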
+int tftf_get_pstate_vars(unsigned int *test_power_level,
+				unsigned int *test_suspend_type,
+				unsigned int *suspend_state_id,
+				unsigned int pstate_id_idx[])
+{
+	unsigned int i;
+	int state_id = 0;
+	int suspend_type;
+	int suspend_depth;
+	int psci_ret = PSCI_E_SUCCESS;
+	const plat_state_prop_t *local_state;
+
+	/* At least one entry must be valid to generate correct power state params */
+	assert(pstate_id_idx[0] != PWR_STATE_INIT_INDEX &&
+			pstate_id_idx[0] <= power_states_per_level[0]);
+
+	suspend_depth = (plat_state_ptr[0] + pstate_id_idx[0])->suspend_depth;
+	suspend_type = (plat_state_ptr[0] + pstate_id_idx[0])->is_pwrdown;
+
+	for (i = 0; i <= PLAT_MAX_PWR_LEVEL; i++) {
+
+		/* Reached all levels with the valid power index values */
+		if (pstate_id_idx[i] == PWR_STATE_INIT_INDEX)
+			break;
+
+		assert(pstate_id_idx[i] <= power_states_per_level[i]);
+
+		local_state = plat_state_ptr[i] + pstate_id_idx[i];
+		state_id |= (local_state->state_ID << i * PLAT_LOCAL_PSTATE_WIDTH);
+
+		if (local_state->is_pwrdown > suspend_type)
+			suspend_type = local_state->is_pwrdown;
+
+		if (local_state->suspend_depth > suspend_depth)
+			psci_ret = PSCI_E_INVALID_PARAMS;
+		else
+			suspend_depth = local_state->suspend_depth;
+	}
+
+	*test_suspend_type = suspend_type;
+	*suspend_state_id = state_id;
+	*test_power_level = --i;
+
+	return psci_ret;
+}
+
+void tftf_set_next_local_state_id_idx(unsigned int power_level,
+						unsigned int pstate_id_idx[])
+{
+	assert(power_level <= PLAT_MAX_PWR_LEVEL);
+
+	if (pstate_id_idx[power_level] + 1 >= power_states_per_level[power_level]) {
+		pstate_id_idx[power_level] = PWR_STATE_INIT_INDEX;
+		return;
+	}
+
+	pstate_id_idx[power_level]++;
+}
diff --git a/plat/common/plat_topology.c b/plat/common/plat_topology.c
new file mode 100644
index 0000000..a7920c3
--- /dev/null
+++ b/plat/common/plat_topology.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert.h>
+#include <debug.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <stdlib.h>
+
+#define CPU_INDEX_IS_VALID(_cpu_idx)	\
+	(((_cpu_idx) - tftf_pwr_domain_start_idx[0]) < PLATFORM_CORE_COUNT)
+
+#define IS_A_CPU_NODE(_cpu_idx)		(tftf_pd_nodes[(_cpu_idx)].level == 0)
+
+#define CPU_NODE_IS_VALID(_cpu_node)	\
+	(CPU_INDEX_IS_VALID(_cpu_node) && IS_A_CPU_NODE(_cpu_node))
+
+/*
+ * Global variable to check that the platform topology is not queried until it
+ * has been set up.
+ */
+static unsigned int topology_setup_done;
+
+/*
+ * Store the start indices of power domains at various levels. This array makes it
+ * easier to traverse the topology tree if the power domain level is known.
+ */
+unsigned int tftf_pwr_domain_start_idx[PLATFORM_MAX_AFFLVL + 1];
+
+/* The grand array to store the platform power domain topology */
+tftf_pwr_domain_node_t tftf_pd_nodes[PLATFORM_NUM_AFFS];
+
+#if DEBUG
+/*
+ * Debug function to display the platform topology.
+ * Does not print absent affinity instances.
+ */
+static void dump_topology(void)
+{
+	unsigned int cluster_idx, cpu_idx, count;
+
+	NOTICE("Platform topology:\n");
+
+	NOTICE("  %u cluster(s)\n", tftf_get_total_clusters_count());
+	NOTICE("  %u CPU(s) (total)\n\n", tftf_get_total_cpus_count());
+
+	for (cluster_idx = PWR_DOMAIN_INIT;
+	     cluster_idx = tftf_get_next_peer_domain(cluster_idx, 1),
+	     cluster_idx != PWR_DOMAIN_INIT;) {
+		count = 0;
+		for (cpu_idx = tftf_pd_nodes[cluster_idx].cpu_start_node;
+		     cpu_idx < (tftf_pd_nodes[cluster_idx].cpu_start_node +
+				     tftf_pd_nodes[cluster_idx].ncpus);
+		     cpu_idx++) {
+			if (tftf_pd_nodes[cpu_idx].is_present)
+				count++;
+		}
+		NOTICE("  Cluster #%u   [%u CPUs]\n",
+				cluster_idx - tftf_pwr_domain_start_idx[1],
+				count);
+		for (cpu_idx = PWR_DOMAIN_INIT;
+		     cpu_idx = tftf_get_next_cpu_in_pwr_domain(cluster_idx, cpu_idx),
+		     cpu_idx != PWR_DOMAIN_INIT;) {
+			NOTICE("    CPU #%u   [MPID: 0x%x]\n",
+					cpu_idx - tftf_pwr_domain_start_idx[0],
+					tftf_get_mpidr_from_node(cpu_idx));
+		}
+	}
+	NOTICE("\n");
+}
+#endif
+
+unsigned int tftf_get_total_aff_count(unsigned int aff_lvl)
+{
+	unsigned int count = 0;
+	unsigned int node_idx;
+
+	assert(topology_setup_done == 1);
+
+	if (aff_lvl > PLATFORM_MAX_AFFLVL)
+		return count;
+
+	node_idx = tftf_pwr_domain_start_idx[aff_lvl];
+
+	while (tftf_pd_nodes[node_idx].level == aff_lvl) {
+		if (tftf_pd_nodes[node_idx].is_present)
+			count++;
+		node_idx++;
+	}
+
+	return count;
+}
+
+unsigned int tftf_get_next_peer_domain(unsigned int pwr_domain_idx,
+				      unsigned int pwr_lvl)
+{
+	assert(topology_setup_done == 1);
+
+	assert(pwr_lvl <= PLATFORM_MAX_AFFLVL);
+
+	if (pwr_domain_idx == PWR_DOMAIN_INIT) {
+		pwr_domain_idx = tftf_pwr_domain_start_idx[pwr_lvl];
+		if (tftf_pd_nodes[pwr_domain_idx].is_present)
+			return pwr_domain_idx;
+	}
+
+	assert(pwr_domain_idx < PLATFORM_NUM_AFFS &&
+			tftf_pd_nodes[pwr_domain_idx].level == pwr_lvl);
+
+	for (++pwr_domain_idx; (pwr_domain_idx < PLATFORM_NUM_AFFS)
+				&& (tftf_pd_nodes[pwr_domain_idx].level == pwr_lvl);
+				pwr_domain_idx++) {
+		if (tftf_pd_nodes[pwr_domain_idx].is_present)
+			return pwr_domain_idx;
+	}
+
+	return PWR_DOMAIN_INIT;
+}
+
+unsigned int tftf_get_next_cpu_in_pwr_domain(unsigned int pwr_domain_idx,
+				      unsigned int cpu_node)
+{
+	unsigned int cpu_end_node;
+
+	assert(topology_setup_done == 1);
+	assert(pwr_domain_idx != PWR_DOMAIN_INIT
+			&& pwr_domain_idx < PLATFORM_NUM_AFFS);
+
+	if (cpu_node == PWR_DOMAIN_INIT) {
+		cpu_node = tftf_pd_nodes[pwr_domain_idx].cpu_start_node;
+		if (tftf_pd_nodes[cpu_node].is_present)
+			return cpu_node;
+	}
+
+	assert(CPU_NODE_IS_VALID(cpu_node));
+
+	cpu_end_node = tftf_pd_nodes[pwr_domain_idx].cpu_start_node
+			+ tftf_pd_nodes[pwr_domain_idx].ncpus - 1;
+
+	assert(cpu_end_node < PLATFORM_NUM_AFFS);
+
+	for (++cpu_node; cpu_node <= cpu_end_node; cpu_node++) {
+		if (tftf_pd_nodes[cpu_node].is_present)
+			return cpu_node;
+	}
+
+	return PWR_DOMAIN_INIT;
+}
+
+/*
+ * Helper function to get the parent nodes of a particular CPU power
+ * domain.
+ */
+static void get_parent_pwr_domain_nodes(unsigned int cpu_node,
+				      unsigned int end_lvl,
+				      unsigned int node_index[])
+{
+	unsigned int parent_node = tftf_pd_nodes[cpu_node].parent_node;
+	unsigned int i;
+
+	for (i = 1; i <= end_lvl; i++) {
+		node_index[i - 1] = parent_node;
+		parent_node = tftf_pd_nodes[parent_node].parent_node;
+	}
+}
+
+/*******************************************************************************
+ * This function updates the cpu_start_node and ncpus fields for each of the
+ * nodes in tftf_pd_nodes[]. It does so by comparing the parent nodes of each
+ * CPU with those of the previous CPU. The basic assumption for this to work
+ * is that children of the same parent are allocated adjacent indices. The
+ * platform should ensure this through a proper mapping of CPUs to indices
+ * via the platform_get_core_pos() API.
+ *
+ * It also updates the 'is_present' field for non-CPU power domains, by
+ * checking the 'is_present' field of the child CPU nodes and setting it if
+ * any of them are present.
+ *******************************************************************************/
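+/*
+ * Illustrative example, assuming two clusters of two CPUs each and a single
+ * power level above the CPUs: the cluster node of CPUs 0 and 1 ends up with
+ * cpu_start_node pointing at CPU 0's node and ncpus = 2, and likewise for
+ * the cluster node of CPUs 2 and 3.
+ */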
+static void update_pwrlvl_limits(void)
+{
+	int cpu_id, j, is_present;
+	unsigned int nodes_idx[PLATFORM_MAX_AFFLVL] = {-1};
+	unsigned int temp_index[PLATFORM_MAX_AFFLVL];
+
+	unsigned int cpu_node_offset = tftf_pwr_domain_start_idx[0];
+
+	for (cpu_id = 0; cpu_id < PLATFORM_CORE_COUNT; cpu_id++) {
+		get_parent_pwr_domain_nodes(cpu_id + cpu_node_offset,
+						PLATFORM_MAX_AFFLVL,
+						temp_index);
+		is_present = tftf_pd_nodes[cpu_id + cpu_node_offset].is_present;
+
+		for (j = PLATFORM_MAX_AFFLVL - 1; j >= 0; j--) {
+			if (temp_index[j] != nodes_idx[j]) {
+				nodes_idx[j] = temp_index[j];
+				tftf_pd_nodes[nodes_idx[j]].cpu_start_node
+							= cpu_id + cpu_node_offset;
+				if (!tftf_pd_nodes[nodes_idx[j]].is_present)
+					tftf_pd_nodes[nodes_idx[j]].is_present = is_present;
+			}
+			tftf_pd_nodes[nodes_idx[j]].ncpus++;
+		}
+	}
+}
+
+/******************************************************************************
+ * This function populates the power domain topology array 'tftf_pd_nodes[]'
+ * based on the power domain description retrieved from the platform layer.
+ * It also updates the start index of each power domain level in
+ * tftf_pwr_domain_start_idx[]. The uninitialized fields of 'tftf_pd_nodes[]'
+ * for the non CPU power domain will be initialized in update_pwrlvl_limits().
+ *****************************************************************************/
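+/*
+ * Illustrative example (not a real platform description): assuming
+ * PLATFORM_MAX_AFFLVL is 1 and two clusters of two CPUs each, the descriptor
+ * returned by tftf_plat_get_pwr_domain_tree_desc() would be { 2, 2, 2 }: two
+ * cluster nodes at the top level, each with two CPU children. Nodes are
+ * allocated level by level, so tftf_pd_nodes[0..1] would be the clusters and
+ * tftf_pd_nodes[2..5] the CPUs.
+ */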
+static void populate_power_domain_tree(void)
+{
+	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl,
+			node_index = 0, parent_idx = 0, num_children;
+	int num_level = PLATFORM_MAX_AFFLVL;
+	const unsigned char *plat_array;
+
+	plat_array = tftf_plat_get_pwr_domain_tree_desc();
+
+	/*
+	 * For each level the inputs are:
+	 * - number of nodes at this level in plat_array i.e. num_nodes_at_lvl
+	 *   This is the sum of values of nodes at the parent level.
+	 * - Index of first entry at this level in the plat_array i.e.
+	 *   parent_idx.
+	 * - Index of first free entry in tftf_pd_nodes[].
+	 */
+	while (num_level >= 0) {
+		num_nodes_at_next_lvl = 0;
+
+		/* Store the start index for every level */
+		tftf_pwr_domain_start_idx[num_level] = node_index;
+
+		/*
+		 * For each entry (parent node) at this level in the plat_array:
+		 * - Find the number of children
+		 * - Allocate a node in a power domain array for each child
+		 * - Set the parent of each child to parent_idx - 1
+		 * - Increment parent_idx to point to the next parent
+		 * - Accumulate the number of children at next level.
+		 */
+		for (i = 0; i < num_nodes_at_lvl; i++) {
+			assert(parent_idx <=
+				PLATFORM_NUM_AFFS - PLATFORM_CORE_COUNT);
+			num_children = plat_array[parent_idx];
+
+			for (j = node_index;
+				j < node_index + num_children; j++) {
+				/* Initialize the power domain node */
+				tftf_pd_nodes[j].parent_node = parent_idx - 1;
+				tftf_pd_nodes[j].level = num_level;
+
+				/* Additional initializations for CPU power domains */
+				if (num_level == 0) {
+					/* Calculate the cpu id from node index */
+					int cpu_id =  j - tftf_pwr_domain_start_idx[0];
+
+					assert(cpu_id < PLATFORM_CORE_COUNT);
+
+					/* Set the mpidr of cpu node */
+					tftf_pd_nodes[j].mpidr =
+						tftf_plat_get_mpidr(cpu_id);
+					if (tftf_pd_nodes[j].mpidr != INVALID_MPID)
+						tftf_pd_nodes[j].is_present = 1;
+
+					tftf_pd_nodes[j].cpu_start_node = j;
+					tftf_pd_nodes[j].ncpus = 1;
+				}
+			}
+			node_index = j;
+			num_nodes_at_next_lvl += num_children;
+			parent_idx++;
+		}
+
+		num_nodes_at_lvl = num_nodes_at_next_lvl;
+		num_level--;
+	}
+
+	/* Validate the sanity of the array exported by the platform */
+	assert(j == PLATFORM_NUM_AFFS);
+}
+
+
+void tftf_init_topology(void)
+{
+	populate_power_domain_tree();
+	update_pwrlvl_limits();
+	topology_setup_done = 1;
+#if DEBUG
+	dump_topology();
+#endif
+}
+
+unsigned int tftf_topology_next_cpu(unsigned int cpu_node)
+{
+	assert(topology_setup_done == 1);
+
+	if (cpu_node == PWR_DOMAIN_INIT) {
+		cpu_node = tftf_pwr_domain_start_idx[0];
+		if (tftf_pd_nodes[cpu_node].is_present)
+			return cpu_node;
+	}
+
+	assert(CPU_NODE_IS_VALID(cpu_node));
+
+	for (++cpu_node; cpu_node < PLATFORM_NUM_AFFS; cpu_node++) {
+		if (tftf_pd_nodes[cpu_node].is_present)
+			return cpu_node;
+	}
+
+	return PWR_DOMAIN_INIT;
+}
+
+
+unsigned int tftf_get_mpidr_from_node(unsigned int cpu_node)
+{
+	assert(topology_setup_done == 1);
+
+	assert(CPU_NODE_IS_VALID(cpu_node));
+
+	if (tftf_pd_nodes[cpu_node].is_present)
+		return tftf_pd_nodes[cpu_node].mpidr;
+
+	return INVALID_MPID;
+}
+
+unsigned int tftf_find_any_cpu_other_than(unsigned exclude_mpid)
+{
+	unsigned int cpu_node, mpidr;
+
+	for_each_cpu(cpu_node) {
+		mpidr = tftf_get_mpidr_from_node(cpu_node);
+		if (mpidr != exclude_mpid)
+			return mpidr;
+	}
+
+	return INVALID_MPID;
+}
+
+unsigned int tftf_find_random_cpu_other_than(unsigned int exclude_mpid)
+{
+	unsigned int cpu_node, mpidr;
+	unsigned int possible_cpus_cnt = 0;
+	unsigned int possible_cpus[PLATFORM_CORE_COUNT];
+
+	for_each_cpu(cpu_node) {
+		mpidr = tftf_get_mpidr_from_node(cpu_node);
+		if (mpidr != exclude_mpid)
+			possible_cpus[possible_cpus_cnt++] = mpidr;
+	}
+
+	if (possible_cpus_cnt == 0)
+		return INVALID_MPID;
+
+	return possible_cpus[rand() % possible_cpus_cnt];
+}
diff --git a/plat/common/tftf_nvm_accessors.c b/plat/common/tftf_nvm_accessors.c
new file mode 100644
index 0000000..f6d0031
--- /dev/null
+++ b/plat/common/tftf_nvm_accessors.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <spinlock.h>
+#include <status.h>
+#include <string.h>
+#include <tftf_lib.h>
+
+#if USE_NVM
+/* Used to serialize NVM accesses from different CPUs */
+static spinlock_t flash_access_lock;
+#endif
+
+STATUS tftf_nvm_write(unsigned long long offset, const void *buffer, size_t size)
+{
+#if USE_NVM
+	int ret;
+	uintptr_t nvm_handle;
+	size_t length_written;
+#endif
+
+	if (offset + size > TFTF_NVM_SIZE)
+		return STATUS_OUT_OF_RESOURCES;
+
+#if USE_NVM
+	/* Obtain a handle to the NVM by querying the platform layer */
+	plat_get_nvm_handle(&nvm_handle);
+
+	spin_lock(&flash_access_lock);
+
+	ret = io_seek(nvm_handle, IO_SEEK_SET,
+					offset + TFTF_NVM_OFFSET);
+	if (ret != IO_SUCCESS)
+		goto fail;
+
+	ret = io_write(nvm_handle, (const uintptr_t)buffer, size,
+							 &length_written);
+	if (ret != IO_SUCCESS)
+		goto fail;
+
+	assert(length_written == size);
+fail:
+	spin_unlock(&flash_access_lock);
+
+	if (ret != IO_SUCCESS)
+		return STATUS_FAIL;
+
+#else
+	uintptr_t addr = DRAM_BASE + TFTF_NVM_OFFSET + offset;
+	memcpy((void *)addr, buffer, size);
+#endif
+
+	return STATUS_SUCCESS;
+}
+
+STATUS tftf_nvm_read(unsigned long long offset, void *buffer, size_t size)
+{
+#if USE_NVM
+	int ret;
+	uintptr_t nvm_handle;
+	size_t length_read;
+#endif
+
+	if (offset + size > TFTF_NVM_SIZE)
+		return STATUS_OUT_OF_RESOURCES;
+
+#if USE_NVM
+	/* Obtain a handle to the NVM by querying the platform layer */
+	plat_get_nvm_handle(&nvm_handle);
+
+	spin_lock(&flash_access_lock);
+
+	ret = io_seek(nvm_handle, IO_SEEK_SET, TFTF_NVM_OFFSET + offset);
+	if (ret != IO_SUCCESS)
+		goto fail;
+
+	ret = io_read(nvm_handle, (uintptr_t)buffer, size, &length_read);
+	if (ret != IO_SUCCESS)
+		goto fail;
+
+	assert(length_read == size);
+fail:
+	spin_unlock(&flash_access_lock);
+
+	if (ret != IO_SUCCESS)
+		return STATUS_FAIL;
+#else
+	uintptr_t addr = DRAM_BASE + TFTF_NVM_OFFSET + offset;
+	memcpy(buffer, (void *)addr, size);
+#endif
+
+	return STATUS_SUCCESS;
+}
+