Add PLAT_xxx_ADDR_SPACE_SIZE definitions

Added the definitions `PLAT_PHY_ADDR_SPACE_SIZE` and
`PLAT_VIRT_ADDR_SPACE_SIZE`, which specify the physical and virtual
address space sizes, respectively, that a platform can use.
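
For illustration, a platform could set them in its platform_def.h as
follows (the 36-bit physical / 32-bit virtual split below is only a
made-up example, not a value taken from any upstream platform):

    #define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 36)
    #define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 32)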

`ADDR_SPACE_SIZE` is now deprecated. To maintain compatibility, if
either of the new definitions is not present, the value of
`ADDR_SPACE_SIZE` is used instead.
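
A minimal sketch of how that fallback could be expressed with
preprocessor guards (where exactly it lives, e.g. common_def.h, is an
assumption of this sketch):

    /* Sketch: fall back to the deprecated ADDR_SPACE_SIZE if needed */
    #if !defined(PLAT_PHY_ADDR_SPACE_SIZE) && defined(ADDR_SPACE_SIZE)
    # define PLAT_PHY_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
    #endif
    #if !defined(PLAT_VIRT_ADDR_SPACE_SIZE) && defined(ADDR_SPACE_SIZE)
    # define PLAT_VIRT_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
    #endif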

For AArch64, the ID_AA64MMFR0_EL1 register is read to calculate the
maximum PA supported by the hardware and to verify that
`PLAT_PHY_ADDR_SPACE_SIZE` does not exceed it. For AArch32, a 40-bit
physical address space is assumed.
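
AArch32 has no ID_AA64MMFR0_EL1 to query, so the physical bound there
can only be the architectural one; a sketch of the intent, assuming the
40-bit limit mentioned above:

    /* Sketch: AArch32 long-descriptor translation supports 40-bit PAs at most */
    assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
    assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= ((1ULL << 40) - 1));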

Added asserts to check that the configured address space sizes are not
exceeded and, on AArch64, are supported by the hardware.

Porting guide updated.

Change-Id: Ie8ce1da5967993f0c94dbd4eb9841fc03d5ef8d6
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
index 5b639b7..a168636 100644
--- a/lib/xlat_tables/aarch64/xlat_tables.c
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -31,28 +31,33 @@
 #include <arch.h>
 #include <arch_helpers.h>
 #include <assert.h>
+#include <bl_common.h>
 #include <cassert.h>
+#include <common_def.h>
 #include <platform_def.h>
+#include <sys/types.h>
 #include <utils.h>
 #include <xlat_tables.h>
 #include "../xlat_tables_private.h"
 
 /*
  * Each platform can define the size of the virtual address space, which is
- * defined in ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the width of
- * said address space. The value of TCR.TxSZ must be in the range 16 to 39 [1],
- * which means that the virtual address space width must be in the range 48 to
- * 25 bits.
+ * defined in PLAT_VIRT_ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the
+ * width of said address space. The value of TCR.TxSZ must be in the range 16
+ * to 39 [1], which means that the virtual address space width must be in the
+ * range 48 to 25 bits.
  *
- * Here we calculate the initial lookup level from the value of ADDR_SPACE_SIZE.
- * For a 4 KB page size, level 0 supports virtual address spaces of widths 48 to
- * 40 bits, level 1 from 39 to 31, and level 2 from 30 to 25. Wider or narrower
- * address spaces are not supported. As a result, level 3 cannot be used as
- * initial lookup level with 4 KB granularity. [2]
+ * Here we calculate the initial lookup level from the value of
+ * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 0 supports virtual
+ * address spaces of widths 48 to 40 bits, level 1 from 39 to 31, and level 2
+ * from 30 to 25. Wider or narrower address spaces are not supported. As a
+ * result, level 3 cannot be used as initial lookup level with 4 KB
+ * granularity. [2]
  *
- * For example, for a 35-bit address space (i.e. ADDR_SPACE_SIZE == 1 << 35),
- * TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table D4-11 in
- * the ARM ARM, the initial lookup level for such an address space is 1.
+ * For example, for a 35-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
+ * 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
+ * D4-11 in the ARM ARM, the initial lookup level for such an address space
+ * is 1.
  *
  * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
  * information:
@@ -60,28 +65,31 @@
  * [2] Section D4.2.5
  */
 
-#if ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))
+#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))
 
-# error "ADDR_SPACE_SIZE is too big."
+# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
 
-#elif ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)
+#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)
 
 # define XLAT_TABLE_LEVEL_BASE	0
-# define NUM_BASE_LEVEL_ENTRIES	(ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)
+# define NUM_BASE_LEVEL_ENTRIES	\
+		(PLAT_VIRT_ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)
 
-#elif ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
+#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
 
 # define XLAT_TABLE_LEVEL_BASE	1
-# define NUM_BASE_LEVEL_ENTRIES	(ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
+# define NUM_BASE_LEVEL_ENTRIES	\
+		(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
 
-#elif ADDR_SPACE_SIZE >= (1 << (64 - TCR_TxSZ_MAX))
+#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (64 - TCR_TxSZ_MAX))
 
 # define XLAT_TABLE_LEVEL_BASE	2
-# define NUM_BASE_LEVEL_ENTRIES	(ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
+# define NUM_BASE_LEVEL_ENTRIES	\
+		(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
 
 #else
 
-# error "ADDR_SPACE_SIZE is too small."
+# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
 
 #endif
 
@@ -119,6 +127,25 @@
 	return TCR_PS_BITS_4GB;
 }
 
+#if DEBUG
+/* Physical Address ranges supported in the AArch64 Memory Model */
+static const unsigned int pa_range_bits_arr[] = {
+	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
+	PARANGE_0101
+};
+
+static unsigned long long get_max_supported_pa(void)
+{
+	u_register_t pa_range = read_id_aa64mmfr0_el1() &
+						ID_AA64MMFR0_EL1_PARANGE_MASK;
+
+	/* All other values are reserved */
+	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));
+
+	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
+}
+#endif
+
 void init_xlat_tables(void)
 {
 	unsigned long long max_pa;
@@ -126,8 +153,12 @@
 	print_mmap();
 	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
 			   &max_va, &max_pa);
+
+	assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
+	assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
+	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
+
 	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
-	assert(max_va < ADDR_SPACE_SIZE);
 }
 
 /*******************************************************************************
@@ -165,7 +196,7 @@
 		/* Set T0SZ to (64 - width of virtual address space) */	\
 		tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |	\
 			TCR_RGN_INNER_WBA |				\
-			(64 - __builtin_ctzl(ADDR_SPACE_SIZE));		\
+			(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
 		tcr |= _tcr_extra;					\
 		write_tcr_el##_el(tcr);					\
 									\
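
A worked instance of the comment and the preprocessor logic in the first
hunk above, for a 35-bit virtual address space with a 4 KB granule
(where L1_XLAT_ADDRESS_SHIFT is 30; the numbers are illustrative only):

    TCR.TxSZ               = 64 - 35            = 29  (within the valid 16..39 range)
    XLAT_TABLE_LEVEL_BASE  = 1                        (35 falls in the 31..39 range)
    NUM_BASE_LEVEL_ENTRIES = (1ULL << 35) >> 30 = 32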