Synchronize files with TF-A repository

Bring the local copy of the xlat_tables_v2 library in line with the
current TF-A sources: the minimum virtual address space size is now
queried at run time through xlat_get_min_virt_addr_space_size() (which
accounts for ARMv8.4-TTST on AArch64), the direct MMU enable helpers
gain an EL2 variant, xlat_clean_dcache_range() is tagged as potentially
unused, and a new xlat_setup_dynamic_ctx() helper initialises a
translation context from caller-provided storage.

Change-Id: Ieb56d0639efd29c2695751b2b36cc98ce2c90dab
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index c4e4216..7ae3f96 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -42,6 +42,14 @@
 	/* Physical address space size for long descriptor format. */
 	return (1ULL << 40) - 1ULL;
 }
+
+/*
+ * Return minimum virtual address space size supported by the architecture
+ */
+uintptr_t xlat_get_min_virt_addr_space_size(void)
+{
+	return MIN_VIRT_ADDR_SPACE_SIZE;
+}
 #endif /* ENABLE_ASSERTIONS*/
 
 bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
@@ -190,7 +198,12 @@
 	if (max_va != UINT32_MAX) {
 		uintptr_t virtual_addr_space_size = max_va + 1U;
 
-		assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+		assert(virtual_addr_space_size >=
+			xlat_get_min_virt_addr_space_size());
+		assert(virtual_addr_space_size <=
+			MAX_VIRT_ADDR_SPACE_SIZE);
+		assert(IS_POWER_OF_TWO(virtual_addr_space_size));
+
 		/*
 		 * __builtin_ctzll(0) is undefined but here we are guaranteed
 		 * that virtual_addr_space_size is in the range [1, UINT32_MAX].
diff --git a/lib/xlat_tables_v2/aarch64/enable_mmu.S b/lib/xlat_tables_v2/aarch64/enable_mmu.S
index 504c03c..0f0aaa1 100644
--- a/lib/xlat_tables_v2/aarch64/enable_mmu.S
+++ b/lib/xlat_tables_v2/aarch64/enable_mmu.S
@@ -86,9 +86,10 @@
 	.endm
 
 	/*
-	 * Define MMU-enabling functions for EL1 and EL3:
+	 * Define MMU-enabling functions for EL1, EL2 and EL3:
 	 *
 	 *  enable_mmu_direct_el1
+	 *  enable_mmu_direct_el2
 	 *  enable_mmu_direct_el3
 	 */
 	define_mmu_enable_func 1
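
The hunk above only updates the comment block, but it reflects that the library now exposes an EL2 MMU-enable entry point alongside the EL1 and EL3 ones. Below is a minimal sketch of how an image running at EL2 could use it; the header path, the enable_mmu_el2() wrapper and the zero flags value are assumptions about the surrounding tree rather than part of this patch.

/*
 * Sketch only: enabling the MMU for an image running at EL2. The header
 * path, the enable_mmu_el2() wrapper and the zero flags value are
 * assumptions about the including tree, not part of this patch.
 */
#include <xlat_tables_v2.h>

void example_el2_mmu_setup(void)
{
	/* Build the translation tables of the active context. */
	init_xlat_tables();

	/*
	 * Configure and enable the MMU at EL2; this ends up in the
	 * enable_mmu_direct_el2 entry point generated by the macro above.
	 */
	enable_mmu_el2(0U);
}
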
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 08dd920..2ce3017 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -99,6 +99,21 @@
 
 	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
 }
+
+/*
+ * Return minimum virtual address space size supported by the architecture
+ */
+uintptr_t xlat_get_min_virt_addr_space_size(void)
+{
+	uintptr_t ret;
+
+	if (is_armv8_4_ttst_present())
+		ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST;
+	else
+		ret = MIN_VIRT_ADDR_SPACE_SIZE;
+
+	return ret;
+}
 #endif /* ENABLE_ASSERTIONS*/
 
 bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
@@ -219,7 +234,11 @@
 	assert(max_va < ((uint64_t)UINTPTR_MAX));
 
 	virtual_addr_space_size = (uintptr_t)max_va + 1U;
-	assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+
+	assert(virtual_addr_space_size >=
+		xlat_get_min_virt_addr_space_size());
+	assert(virtual_addr_space_size <= MAX_VIRT_ADDR_SPACE_SIZE);
+	assert(IS_POWER_OF_TWO(virtual_addr_space_size));
 
 	/*
 	 * __builtin_ctzll(0) is undefined but here we are guaranteed that
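
On AArch64 the minimum supported virtual address space size now depends on whether the ARMv8.4-TTST extension is implemented, which is why the single compile-time CHECK_VIRT_ADDR_SPACE_SIZE() macro is replaced by three separate assertions with the lower bound fetched at run time. The following self-contained sketch shows the shape of that check; the numeric bounds are placeholders standing in for MIN_VIRT_ADDR_SPACE_SIZE(_TTST) and MAX_VIRT_ADDR_SPACE_SIZE, not the library's actual limits.

/*
 * Self-contained sketch of the split address space size check. The
 * numeric bounds are placeholders, not the library's actual limits.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define IS_POWER_OF_TWO(x)	(((x) & ((x) - 1U)) == 0U)

static uintptr_t example_min_va_space_size(bool ttst_present)
{
	/* A smaller minimum applies when ARMv8.4-TTST is implemented. */
	return ttst_present ? ((uintptr_t)1 << 16) : ((uintptr_t)1 << 25);
}

static void example_check_va_space_size(uintptr_t size, bool ttst_present)
{
	assert(size >= example_min_va_space_size(ttst_present));
	assert(size <= ((uintptr_t)1 << 48));	/* placeholder maximum */
	assert(IS_POWER_OF_TWO(size));
}
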
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index 91d3a03..139b71d 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -19,7 +19,7 @@
 #include "xlat_tables_private.h"
 
 /* Helper function that cleans the data cache only if it is enabled. */
-static inline void xlat_clean_dcache_range(uintptr_t addr, size_t size)
+static inline __attribute__((unused)) void xlat_clean_dcache_range(uintptr_t addr, size_t size)
 {
 	if (is_dcache_enabled())
 		clean_dcache_range(addr, size);
@@ -863,7 +863,7 @@
 	 */
 	for (unsigned int level = ctx->base_level; level <= 2U; ++level) {
 
-		if (align_check & XLAT_BLOCK_MASK(level))
+		if ((align_check & XLAT_BLOCK_MASK(level)) != 0U)
 			continue;
 
 		mm->base_va = round_up(mm->base_va, XLAT_BLOCK_SIZE(level));
@@ -1101,6 +1101,36 @@
 	return 0;
 }
 
+void xlat_setup_dynamic_ctx(xlat_ctx_t *ctx, unsigned long long pa_max,
+			    uintptr_t va_max, struct mmap_region *mmap,
+			    unsigned int mmap_num, uint64_t **tables,
+			    unsigned int tables_num, uint64_t *base_table,
+			    int xlat_regime, int *mapped_regions)
+{
+	ctx->xlat_regime = xlat_regime;
+
+	ctx->pa_max_address = pa_max;
+	ctx->va_max_address = va_max;
+
+	ctx->mmap = mmap;
+	ctx->mmap_num = mmap_num;
+	memset(ctx->mmap, 0, sizeof(struct mmap_region) * mmap_num);
+
+	ctx->tables = (void *) tables;
+	ctx->tables_num = tables_num;
+
+	uintptr_t va_space_size = va_max + 1;
+	ctx->base_level = GET_XLAT_TABLE_LEVEL_BASE(va_space_size);
+	ctx->base_table = base_table;
+	ctx->base_table_entries = GET_NUM_BASE_LEVEL_ENTRIES(va_space_size);
+
+	ctx->tables_mapped_regions = mapped_regions;
+
+	ctx->max_pa = 0;
+	ctx->max_va = 0;
+	ctx->initialized = 0;
+}
+
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
 
 void __init init_xlat_tables_ctx(xlat_ctx_t *ctx)
@@ -1114,6 +1144,11 @@
 
 	mmap_region_t *mm = ctx->mmap;
 
+	assert(ctx->va_max_address >=
+		(xlat_get_min_virt_addr_space_size() - 1U));
+	assert(ctx->va_max_address <= (MAX_VIRT_ADDR_SPACE_SIZE - 1U));
+	assert(IS_POWER_OF_TWO(ctx->va_max_address + 1U));
+
 	xlat_mmap_print(mm);
 
 	/* All tables must be zeroed before mapping any region. */
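
The new xlat_setup_dynamic_ctx() helper added in this file fills an xlat_ctx_t from storage supplied by the caller instead of relying on the static context registration macros. Below is a minimal usage sketch: the array sizes, names and header path are illustrative assumptions, PLAT_PHY_ADDR_SPACE_SIZE/PLAT_VIRT_ADDR_SPACE_SIZE come from the platform port, while the function signature, the xlat_tables_v2 macros and the EL1_EL0_REGIME constant are taken from the library itself.

/*
 * Sketch only: wiring caller-allocated storage into a dynamic
 * translation context. Array sizes, names and the header path are
 * illustrative; PLAT_*_ADDR_SPACE_SIZE come from the platform port.
 */
#include <xlat_tables_v2.h>

#define EXAMPLE_MMAP_REGIONS	8U
#define EXAMPLE_XLAT_TABLES	4U

static mmap_region_t example_mmap[EXAMPLE_MMAP_REGIONS + 1U];
static uint64_t example_tables[EXAMPLE_XLAT_TABLES][XLAT_TABLE_ENTRIES]
	__aligned(XLAT_TABLE_SIZE);
static uint64_t example_base_table[
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)]
	__aligned(GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE) *
		  sizeof(uint64_t));
static int example_mapped_regions[EXAMPLE_XLAT_TABLES];
static xlat_ctx_t example_ctx;

static void example_dynamic_ctx_setup(void)
{
	xlat_setup_dynamic_ctx(&example_ctx,
			       PLAT_PHY_ADDR_SPACE_SIZE - 1ULL,
			       PLAT_VIRT_ADDR_SPACE_SIZE - 1UL,
			       example_mmap, EXAMPLE_MMAP_REGIONS,
			       (uint64_t **)example_tables, EXAMPLE_XLAT_TABLES,
			       example_base_table, EL1_EL0_REGIME,
			       example_mapped_regions);

	/*
	 * Regions can now be added with mmap_add_region_ctx() and the
	 * context initialised with init_xlat_tables_ctx().
	 */
}
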
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 528996a..8f51686 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -100,4 +100,9 @@
 /* Returns true if the data cache is enabled at the current EL. */
 bool is_dcache_enabled(void);
 
+/*
+ * Returns minimum virtual address space size supported by the architecture
+ */
+uintptr_t xlat_get_min_virt_addr_space_size(void);
+
 #endif /* XLAT_TABLES_PRIVATE_H */