Handle 16k and 64k translation granules

Enable Xlat to handle 16k and 64k translation granules along with
configurable VA bit counts.
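
For example, a 16k granule instance covering a 36-bit VA range can be
created as follows (illustrative sketch; assumes an existing PagePool
instance named page_pool):

    let xlat: Xlat<36> = Xlat::new(
        page_pool,
        unsafe { VirtualAddressRange::from_range(0x0000_0000..0x10_0000_0000) },
        TranslationRegime::EL1_0(RegimeVaRange::Lower, 0),
        TranslationGranule::Granule16k,
    );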

Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: Iab4fe066e813d5b75a5a6d45ba8498867cc5c541
diff --git a/src/address.rs b/src/address.rs
index a8f2c39..f1b5220 100644
--- a/src/address.rs
+++ b/src/address.rs
@@ -3,7 +3,7 @@
 
 use core::ops::Range;
 
-use super::Xlat;
+use super::TranslationGranule;
 
 #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
 pub struct PhysicalAddress(pub(super) usize);
@@ -62,12 +62,20 @@
         PhysicalAddress(self.0)
     }
 
-    pub const fn mask_for_level(self, level: usize) -> Self {
-        Self(self.0 & (Xlat::GRANULE_SIZES[level] - 1))
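+    /// Mask the address so that only the offset within a block of the given
+    /// lookup level remains.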
+    pub const fn mask_for_level<const VA_BITS: usize>(
+        self,
+        translation_granule: TranslationGranule<VA_BITS>,
+        level: isize,
+    ) -> Self {
+        Self(self.0 & (translation_granule.block_size_at_level(level) - 1))
     }
 
-    pub const fn get_level_index(self, level: usize) -> usize {
-        self.0 / Xlat::GRANULE_SIZES[level]
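+    /// Get the index of the translation table entry that the address selects
+    /// at the given lookup level.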
+    pub const fn get_level_index<const VA_BITS: usize>(
+        self,
+        translation_granule: TranslationGranule<VA_BITS>,
+        level: isize,
+    ) -> usize {
+        self.0 >> translation_granule.total_bits_at_level(level)
     }
 
     pub const fn mask_bits(self, mask: usize) -> Self {
diff --git a/src/descriptor.rs b/src/descriptor.rs
index 24ed8df..0731856 100644
--- a/src/descriptor.rs
+++ b/src/descriptor.rs
@@ -12,6 +12,7 @@
 use crate::MemoryAttributesIndex;
 
 use super::address::PhysicalAddress;
+use super::TranslationGranule;
 
 /// Memory shareability
 #[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
@@ -112,17 +113,14 @@
 impl Descriptor {
     const ATTR_MASK: u64 = 0xfff8_0000_0000_0ffc;
     const DESCRIPTOR_TYPE_MASK: u64 = 0b11;
-    pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];
     const INVALID_DESCRIPTOR_VALUE: u64 = 0x0;
     const NEXT_ATTR_MASK: u64 = 0xf800_0000_0000_0000;
-    const OA_MASK: u64 = 0x0000_ffff_ffff_f000;
     const TABLE_BIT: u64 = 0b10;
-    const TABLE_ENTRY_COUNT: usize = 512;
     const TA_MASK: u64 = 0x0000_ffff_ffff_f000;
     const VALID_BIT: u64 = 0b01;
 
     /// Query descriptor type
-    pub fn get_descriptor_type(&self, level: usize) -> DescriptorType {
+    pub fn get_descriptor_type(&self, level: isize) -> DescriptorType {
         assert!(level <= 3);
 
         let desc_type_bits = self.get() & Self::DESCRIPTOR_TYPE_MASK;
@@ -150,9 +148,10 @@
     // Block descriptor functions
 
     /// Set block descriptor
-    pub fn set_block_descriptor(
+    pub fn set_block_descriptor<const VA_BITS: usize>(
         &mut self,
-        level: usize,
+        granule: TranslationGranule<VA_BITS>,
+        level: isize,
         output_address: PhysicalAddress,
         attributes: Attributes,
     ) {
@@ -160,24 +159,107 @@
 
         assert!(level <= 3);
         assert!(self.get_descriptor_type(level) != DescriptorType::Table);
-        assert_eq!(0, output_address.0 & !Self::get_oa_mask(level));
         assert_eq!(0, attr & !Self::ATTR_MASK);
 
+        let oa_granule_mask = !(granule.block_size_at_level(level) - 1);
+
+        // Figure D8-14 VMSAv8-64 Block descriptor formats
+        let oa_bits = match granule {
+            // 4KB and 16KB granules, 52-bit OA
+            #[cfg(feature = "feat_lpa2")]
+            TranslationGranule::Granule4k | TranslationGranule::Granule16k => {
+                let oa_mask = oa_granule_mask & 0x000f_ffff_ffff_f000;
+                assert_eq!(0, output_address.0 & !oa_mask);
+
+                let address = output_address.0 & oa_mask;
+
+                // OA[49:n] remains in place, OA[51:50] is mapped to [9:8]
+                let lsbs = address & 0x0003_ffff_ffff_f000;
+                let msbs = ((address >> 50) & 0x3) << 8;
+
+                lsbs | msbs
+            }
+
+            // 64KB granule, 52-bit OA
+            #[cfg(feature = "feat_lpa")]
+            TranslationGranule::Granule64k => {
+                let oa_mask = oa_granule_mask & 0x000f_ffff_ffff_0000;
+                assert_eq!(0, output_address.0 & !oa_mask);
+
+                let address = output_address.0 & oa_mask;
+
+                // OA[47:n] remains in place, OA[51:48] is mapped to [15:12]
+                let lsbs = address & 0x0000_ffff_ffff_0000;
+                let msbs = ((address >> 48) & 0xf) << 12;
+
+                lsbs | msbs
+            }
+
+            // 4KB, 16KB, and 64KB granules, 48-bit OA
+            #[cfg(not(all(feature = "feat_lpa", feature = "feat_lpa2")))]
+            _ => {
+                let oa_mask = oa_granule_mask & 0x0000_ffff_ffff_f000;
+                assert_eq!(0, output_address.0 & !oa_mask);
+                output_address.0 & oa_mask
+            }
+        };
+
         let table_bit = if level < 3 { 0 } else { Self::TABLE_BIT };
 
-        self.set(Self::VALID_BIT | table_bit | output_address.0 as u64 | attr);
+        self.set(Self::VALID_BIT | table_bit | oa_bits as u64 | attr);
     }
 
     /// Get output address from the block descriptor
-    pub fn get_block_output_address(&self, level: usize) -> PhysicalAddress {
+    pub fn get_block_output_address<const VA_BITS: usize>(
+        &self,
+        granule: TranslationGranule<VA_BITS>,
+        level: isize,
+    ) -> PhysicalAddress {
         assert!(level <= 3);
         assert_eq!(DescriptorType::Block, self.get_descriptor_type(level));
 
-        PhysicalAddress((self.get() & Self::OA_MASK) as usize)
+        let oa_granule_mask = !(granule.block_size_at_level(level) - 1);
+        let descriptor_value = self.get();
+
+        // Figure D8-14 VMSAv8-64 Block descriptor formats
+        let pa = match granule {
+            // 4KB and 16KB granules, 52-bit OA
+            #[cfg(feature = "feat_lpa2")]
+            TranslationGranule::Granule4k | TranslationGranule::Granule16k => {
+                let oa_mask = oa_granule_mask & 0x000f_ffff_ffff_f000;
+
+                // OA[49:n] remains in place, OA[51:50] is mapped from [9:8]
+                let lsbs = descriptor_value & 0x0003_ffff_ffff_f000;
+                let msbs = ((descriptor_value >> 8) & 0x3) << 50;
+
+                (lsbs | msbs) as usize & oa_mask
+            }
+
+            // 64KB granule, 52-bit OA
+            #[cfg(feature = "feat_lpa")]
+            TranslationGranule::Granule64k => {
+                let oa_mask = oa_granule_mask & 0x000f_ffff_ffff_0000;
+
+                // OA[47:n] remains in place, OA[51:48] is mapped from [15:12]
+                let lsbs = descriptor_value & 0x0000_ffff_ffff_0000;
+                let msbs = ((descriptor_value >> 12) & 0xf) << 48;
+
+                (lsbs | msbs) as usize & oa_mask
+            }
+
+            // 4KB, 16KB, and 64KB granules, 48-bit OA
+            #[cfg(not(all(feature = "feat_lpa", feature = "feat_lpa2")))]
+            _ => {
+                let oa_mask = oa_granule_mask & 0x0000_ffff_ffff_f000;
+                descriptor_value as usize & oa_mask
+            }
+        };
+
+        PhysicalAddress(pa)
     }
 
     /// Set the attributes of the block descriptor
-    pub fn set_block_attributes(&mut self, level: usize, attributes: Attributes) {
+    pub fn set_block_attributes(&mut self, level: isize, attributes: Attributes) {
         assert!(level <= 3);
         let attr: u64 = attributes.into();
         assert_eq!(0, attr & !Self::ATTR_MASK);
@@ -187,7 +269,7 @@
     }
 
     /// Get the attributes of the block descriptor
-    pub fn get_block_attributes(&self, level: usize) -> Attributes {
+    pub fn get_block_attributes(&self, level: isize) -> Attributes {
         assert!(level <= 3);
         assert_eq!(DescriptorType::Block, self.get_descriptor_type(level));
 
@@ -195,7 +277,7 @@
     }
 
     /// Set block descriptor to invalid
-    pub fn set_block_descriptor_to_invalid(&mut self, level: usize) {
+    pub fn set_block_descriptor_to_invalid(&mut self, level: isize) {
         assert!(level <= 3);
         assert_eq!(DescriptorType::Block, self.get_descriptor_type(level));
 
@@ -203,7 +285,7 @@
     }
 
     /// Set block or invalid descriptor to invalid
-    pub fn set_block_or_invalid_descriptor_to_invalid(&mut self, level: usize) {
+    pub fn set_block_or_invalid_descriptor_to_invalid(&mut self, level: isize) {
         assert!(level <= 3);
         assert!(DescriptorType::Table != self.get_descriptor_type(level));
 
@@ -216,12 +298,11 @@
     /// the descriptor.
     pub unsafe fn set_table_descriptor(
         &mut self,
-        level: usize,
+        level: isize,
         next_level_table: &mut [Descriptor],
         next_level_attributes: Option<NextLevelAttributes>,
     ) {
         assert!(level <= 2);
-        assert_eq!(Self::TABLE_ENTRY_COUNT, next_level_table.len());
         assert!(self.get_descriptor_type(level) != DescriptorType::Table);
 
         let table_addr = KernelSpace::kernel_to_pa(next_level_table.as_ptr() as u64);
@@ -242,30 +323,38 @@
     ///
     /// Safety: The returned next level table is based on the address read from the descriptor.
     /// The caller has to ensure that no other references are being used of the table.
-    pub unsafe fn get_next_level_table(&self, level: usize) -> &[Descriptor] {
+    pub unsafe fn get_next_level_table<const VA_BITS: usize>(
+        &self,
+        granule: TranslationGranule<VA_BITS>,
+        level: isize,
+    ) -> &[Descriptor] {
         assert!(level <= 2);
         assert_eq!(DescriptorType::Table, self.get_descriptor_type(level));
 
         let table_address =
             KernelSpace::pa_to_kernel(self.get() & Self::TA_MASK) as *const Descriptor;
-        core::slice::from_raw_parts(table_address, Self::TABLE_ENTRY_COUNT)
+        core::slice::from_raw_parts(table_address, granule.entry_count_at_level(level + 1))
     }
 
     /// Get mutable next level table
     ///
     /// Safety: The returned next level table is based on the address read from the descriptor.
     /// The caller has to ensure that no other references are being used of the table.
-    pub unsafe fn get_next_level_table_mut(&mut self, level: usize) -> &mut [Descriptor] {
+    pub unsafe fn get_next_level_table_mut<const VA_BITS: usize>(
+        &mut self,
+        granule: TranslationGranule<VA_BITS>,
+        level: isize,
+    ) -> &mut [Descriptor] {
         assert!(level <= 2);
         assert_eq!(DescriptorType::Table, self.get_descriptor_type(level));
 
         let table_address =
             KernelSpace::pa_to_kernel(self.get() & Self::TA_MASK) as *mut Descriptor;
-        core::slice::from_raw_parts_mut(table_address, Self::TABLE_ENTRY_COUNT)
+        core::slice::from_raw_parts_mut(table_address, granule.entry_count_at_level(level + 1))
     }
 
     /// Get next level attributes
-    pub fn get_next_level_attributes(&self, level: usize) -> NextLevelAttributes {
+    pub fn get_next_level_attributes(&self, level: isize) -> NextLevelAttributes {
         assert!(level <= 2);
         assert_eq!(DescriptorType::Table, self.get_descriptor_type(level));
 
@@ -276,14 +365,18 @@
     ///
     /// **Unsafe:** The returned descriptor reference must be released by the caller, i.e. release
     /// to `PagePool`
-    pub unsafe fn set_table_descriptor_to_invalid(&mut self, level: usize) -> &mut [Descriptor] {
+    pub unsafe fn set_table_descriptor_to_invalid<const VA_BITS: usize>(
+        &mut self,
+        granule: TranslationGranule<VA_BITS>,
+        level: isize,
+    ) -> &mut [Descriptor] {
         assert!(level <= 2);
         assert_eq!(DescriptorType::Table, self.get_descriptor_type(level));
 
         let table_address =
             KernelSpace::pa_to_kernel(self.get() & Self::TA_MASK) as *mut Descriptor;
         self.set(Self::INVALID_DESCRIPTOR_VALUE);
-        core::slice::from_raw_parts_mut(table_address, Self::TABLE_ENTRY_COUNT)
+        core::slice::from_raw_parts_mut(table_address, granule.entry_count_at_level(level + 1))
     }
 
     /// Get raw descriptor value
@@ -309,11 +402,6 @@
     {
         self.set(f(self.get()))
     }
-
-    /// Get output address mask
-    fn get_oa_mask(level: usize) -> usize {
-        Self::OA_MASK as usize & !(Self::GRANULE_SIZES[level] - 1)
-    }
 }
 
 #[cfg(test)]
@@ -477,7 +565,12 @@
             cell: UnsafeCell::new(1),
         };
 
-        descriptor.set_block_descriptor(1, PhysicalAddress(0), Attributes::default());
+        descriptor.set_block_descriptor::<48>(
+            TranslationGranule::Granule4k,
+            1,
+            PhysicalAddress(0),
+            Attributes::default(),
+        );
         assert_eq!(0x1, descriptor.get());
     }
 
@@ -488,16 +581,23 @@
             cell: UnsafeCell::new(0),
         };
 
-        descriptor.set_block_descriptor(1, PhysicalAddress(1 << 63), Attributes::default());
+        descriptor.set_block_descriptor::<48>(
+            TranslationGranule::Granule4k,
+            1,
+            PhysicalAddress(1 << 63),
+            Attributes::default(),
+        );
     }
 
     #[test]
     fn test_descriptor_block() {
+        let granule: TranslationGranule<48> = TranslationGranule::Granule4k;
         let mut descriptor = Descriptor {
             cell: UnsafeCell::new(0),
         };
 
         descriptor.set_block_descriptor(
+            granule,
             1,
             PhysicalAddress(0x0000000f_c0000000),
             Attributes {
@@ -512,6 +612,7 @@
         };
 
         descriptor.set_block_descriptor(
+            granule,
             3,
             PhysicalAddress(0x0000000f_fffff000),
             Attributes {
@@ -523,7 +624,7 @@
 
         assert_eq!(
             PhysicalAddress(0x0000000f_fffff000),
-            descriptor.get_block_output_address(3)
+            descriptor.get_block_output_address(granule, 3)
         );
         assert_eq!(
             Attributes {
@@ -687,7 +788,9 @@
             cell: UnsafeCell::new(NEXT_LEVEL_ADDR | 0x8000_0000_0000_0003),
         };
         assert_eq!(KernelSpace::pa_to_kernel(NEXT_LEVEL_ADDR), unsafe {
-            descriptor.get_next_level_table(0).as_ptr() as u64
+            descriptor
+                .get_next_level_table::<36>(TranslationGranule::Granule4k, 0)
+                .as_ptr() as u64
         });
     }
 
@@ -698,7 +801,9 @@
             cell: UnsafeCell::new(NEXT_LEVEL_ADDR | 0x8000_0000_0000_0003),
         };
         assert_eq!(KernelSpace::pa_to_kernel(NEXT_LEVEL_ADDR), unsafe {
-            descriptor.get_next_level_table_mut(0).as_ptr() as *mut Descriptor as u64
+            descriptor
+                .get_next_level_table_mut::<36>(TranslationGranule::Granule4k, 0)
+                .as_ptr() as *mut Descriptor as u64
         });
     }
 
@@ -724,7 +829,9 @@
             cell: UnsafeCell::new(NEXT_LEVEL_ADDR | 0x8000_0000_0000_0003),
         };
         assert_eq!(KernelSpace::pa_to_kernel(NEXT_LEVEL_ADDR), unsafe {
-            descriptor.set_table_descriptor_to_invalid(0).as_ptr() as *mut Descriptor as u64
+            descriptor
+                .set_table_descriptor_to_invalid::<36>(TranslationGranule::Granule4k, 0)
+                .as_ptr() as *mut Descriptor as u64
         });
         assert_eq!(0, descriptor.get());
     }
diff --git a/src/kernel_space.rs b/src/kernel_space.rs
index d0be315..34433ff 100644
--- a/src/kernel_space.rs
+++ b/src/kernel_space.rs
@@ -11,12 +11,12 @@
 use super::{
     address::{PhysicalAddress, VirtualAddress, VirtualAddressRange},
     page_pool::{Page, PagePool},
-    MemoryAccessRights, RegimeVaRange, TranslationRegime, Xlat, XlatError,
+    MemoryAccessRights, RegimeVaRange, TranslationGranule, TranslationRegime, Xlat, XlatError,
 };
 
 #[derive(Clone)]
 pub struct KernelSpace {
-    xlat: Arc<Mutex<Xlat>>,
+    xlat: Arc<Mutex<Xlat<36>>>,
 }
 
 /// # Kernel space memory mapping
@@ -39,6 +39,7 @@
                 page_pool,
                 unsafe { VirtualAddressRange::from_range(0x0000_0000..0x10_0000_0000) },
                 TranslationRegime::EL1_0(RegimeVaRange::Upper, 0),
+                TranslationGranule::Granule4k,
             ))),
         }
     }
diff --git a/src/lib.rs b/src/lib.rs
index f5b1962..fec87cc 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -11,7 +11,6 @@
 use core::{fmt, panic};
 
 use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
-use alloc::boxed::Box;
 use alloc::format;
 use alloc::string::{String, ToString};
 use alloc::vec::Vec;
@@ -24,7 +23,7 @@
 
 use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
 use self::kernel_space::KernelSpace;
-use self::page_pool::{Page, PagePool, Pages};
+use self::page_pool::{PagePool, Pages};
 use self::region::{PhysicalRegion, VirtualRegion};
 use self::region_pool::{Region, RegionPool, RegionPoolError};
 
@@ -36,20 +35,6 @@
 mod region;
 mod region_pool;
 
-/// The first level of memory descriptors table which
-#[repr(C, align(512))]
-pub struct BaseTable {
-    pub descriptors: [Descriptor; 64],
-}
-
-impl BaseTable {
-    pub fn new() -> Self {
-        BaseTable {
-            descriptors: core::array::from_fn(|_| Descriptor::default()),
-        }
-    }
-}
-
 /// Translation table error type
 #[derive(Debug)]
 pub enum XlatError {
@@ -129,13 +114,12 @@
 struct Block {
     pa: PhysicalAddress,
     va: VirtualAddress,
-    granule: usize,
+    size: usize,
 }
 
 impl Block {
-    fn new(pa: PhysicalAddress, va: VirtualAddress, granule: usize) -> Self {
-        assert!(Xlat::GRANULE_SIZES.contains(&granule));
-        Self { pa, va, granule }
+    fn new(pa: PhysicalAddress, va: VirtualAddress, size: usize) -> Self {
+        Self { pa, va, size }
     }
 }
 
@@ -144,7 +128,7 @@
         f.debug_struct("Block")
             .field("pa", &format_args!("{:#010x}", self.pa.0))
             .field("va", &format_args!("{:#010x}", self.va.0))
-            .field("granule", &format_args!("{:#010x}", self.granule))
+            .field("size", &format_args!("{:#010x}", self.size))
             .finish()
     }
 }
@@ -164,11 +148,12 @@
 
 pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;
 
-pub struct Xlat {
-    base_table: Box<BaseTable>,
+pub struct Xlat<const VA_BITS: usize> {
+    base_table: Pages,
     page_pool: PagePool,
     regions: RegionPool<VirtualRegion>,
     regime: TranslationRegime,
+    granule: TranslationGranule<VA_BITS>,
 }
 
 /// Memory translation table handling
@@ -196,23 +181,32 @@
 /// * map block
 /// * unmap block
 /// * set access rights of block
-impl Xlat {
-    pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];
-
+impl<const VA_BITS: usize> Xlat<VA_BITS> {
     pub fn new(
         page_pool: PagePool,
         address: VirtualAddressRange,
         regime: TranslationRegime,
+        granule: TranslationGranule<VA_BITS>,
     ) -> Self {
+        let initial_lookup_level = granule.initial_lookup_level();
+
+        let base_table = page_pool
+            .allocate_pages(
+                granule.table_size::<Descriptor>(initial_lookup_level),
+                Some(granule.table_alignment::<Descriptor>(initial_lookup_level)),
+            )
+            .unwrap();
+
         let mut regions = RegionPool::new();
         regions
             .add(VirtualRegion::new(address.start, address.len().unwrap()))
             .unwrap();
         Self {
-            base_table: Box::new(BaseTable::new()),
+            base_table,
             page_pool,
             regions,
             regime,
+            granule,
         }
     }
 
@@ -230,13 +224,16 @@
         data: &[u8],
         access_rights: MemoryAccessRights,
     ) -> Result<VirtualAddress, XlatError> {
-        let mut pages = self.page_pool.allocate_pages(data.len()).map_err(|e| {
-            XlatError::AllocationError(format!(
-                "Cannot allocate pages for {} bytes ({:?})",
-                data.len(),
-                e
-            ))
-        })?;
+        let mut pages = self
+            .page_pool
+            .allocate_pages(data.len(), Some(self.granule as usize))
+            .map_err(|e| {
+                XlatError::AllocationError(format!(
+                    "Cannot allocate pages for {} bytes ({:?})",
+                    data.len(),
+                    e
+                ))
+            })?;
 
         pages.copy_data_to_page(data);
 
@@ -266,9 +263,14 @@
         length: usize,
         access_rights: MemoryAccessRights,
     ) -> Result<VirtualAddress, XlatError> {
-        let mut pages = self.page_pool.allocate_pages(length).map_err(|e| {
-            XlatError::AllocationError(format!("Cannot allocate pages for {length} bytes ({e:?})"))
-        })?;
+        let mut pages = self
+            .page_pool
+            .allocate_pages(length, Some(self.granule as usize))
+            .map_err(|e| {
+                XlatError::AllocationError(format!(
+                    "Cannot allocate pages for {length} bytes ({e:?})"
+                ))
+            })?;
 
         pages.zero_init();
 
@@ -387,7 +389,43 @@
     /// references. After activation the caller must ensure that there are no
     /// active references when unmapping memory.
     pub unsafe fn activate(&self) {
-        let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.descriptors.as_ptr() as u64);
+        // Select translation granule
+        let is_tg0 = match &self.regime {
+            TranslationRegime::EL1_0(RegimeVaRange::Lower, _)
+            | TranslationRegime::EL2
+            | TranslationRegime::EL3 => true,
+            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => false,
+            #[cfg(target_feature = "vh")]
+            TranslationRegime::EL2_0(RegimeVaRange::Lower, _) => true,
+            #[cfg(target_feature = "vh")]
+            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => false,
+        };
+
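+        // Note: TG0 (TCR bits [15:14]) and TG1 (TCR bits [31:30]) use
+        // different encodings for the same granule sizes.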
+        #[cfg(target_arch = "aarch64")]
+        if is_tg0 {
+            self.modify_tcr(|tcr| {
+                let tg0 = match self.granule {
+                    TranslationGranule::Granule4k => 0b00,
+                    TranslationGranule::Granule16k => 0b10,
+                    TranslationGranule::Granule64k => 0b01,
+                };
+
+                (tcr & !(3 << 14)) | (tg0 << 14)
+            });
+        } else {
+            self.modify_tcr(|tcr| {
+                let tg1 = match self.granule {
+                    TranslationGranule::Granule4k => 0b10,
+                    TranslationGranule::Granule16k => 0b01,
+                    TranslationGranule::Granule64k => 0b11,
+                };
+
+                (tcr & !(3 << 30)) | (tg1 << 30)
+            });
+        }
+
+        // Set translation table
+        let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.get_pa().0 as u64);
 
         #[cfg(target_arch = "aarch64")]
         match &self.regime {
@@ -420,13 +458,66 @@
         }
     }
 
+    /// Modifies the TCR register of the selected regime of the instance.
+    #[cfg(target_arch = "aarch64")]
+    unsafe fn modify_tcr<F>(&self, f: F)
+    where
+        F: Fn(u64) -> u64,
+    {
+        let mut tcr: u64;
+
+        match &self.regime {
+            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
+                    "mrs {0}, tcr_el1
+                    isb",
+                    out(reg) tcr),
+            #[cfg(target_feature = "vh")]
+            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
+                    "mrs {0}, tcr_el2
+                    isb",
+                    out(reg) tcr),
+            TranslationRegime::EL2 => core::arch::asm!(
+                    "mrs {0}, tcr_el2
+                    isb",
+                    out(reg) tcr),
+            TranslationRegime::EL3 => core::arch::asm!(
+                    "mrs {0}, tcr_el3
+                    isb",
+                    out(reg) tcr),
+        }
+
+        tcr = f(tcr);
+
+        match &self.regime {
+            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
+                    "msr tcr_el1, {0}
+                    isb",
+                    in(reg) tcr),
+            #[cfg(target_feature = "vh")]
+            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
+                    "msr tcr_el2, {0}
+                    isb",
+                    in(reg) tcr),
+            TranslationRegime::EL2 => core::arch::asm!(
+                    "msr tcr_el2, {0}
+                    isb",
+                    in(reg) tcr),
+            TranslationRegime::EL3 => core::arch::asm!(
+                    "msr tcr_el3, {0}
+                    isb",
+                    in(reg) tcr),
+        }
+    }
+
     /// Prints the translation tables to debug console recursively
     pub fn print(&self) {
-        debug!(
-            "Xlat table -> {:#010x}",
-            self.base_table.descriptors.as_ptr() as u64
+        debug!("Xlat table -> {:#010x}", self.base_table.get_pa().0 as u64);
+        Self::print_table(
+            self.granule.initial_lookup_level(),
+            0,
+            unsafe { self.base_table.get_as_slice() },
+            self.granule,
         );
-        Self::print_table(1, 0, &self.base_table.descriptors);
     }
 
     /// Prints a single translation table to the debug console
@@ -434,30 +525,36 @@
     /// * level: Level of the translation table
     /// * va: Base virtual address of the table
     /// * table: Table entries
-    pub fn print_table(level: usize, va: usize, table: &[Descriptor]) {
+    pub fn print_table(
+        level: isize,
+        va: usize,
+        table: &[Descriptor],
+        granule: TranslationGranule<VA_BITS>,
+    ) {
         let level_prefix = match level {
             0 | 1 => "|-",
             2 => "|  |-",
             _ => "|  |  |-",
         };
 
-        for (descriptor, va) in zip(table, (va..).step_by(Self::GRANULE_SIZES[level])) {
+        for (descriptor, va) in zip(table, (va..).step_by(granule.block_size_at_level(level))) {
             match descriptor.get_descriptor_type(level) {
                 DescriptorType::Block => debug!(
                     "{} {:#010x} Block -> {:#010x}",
                     level_prefix,
                     va,
-                    descriptor.get_block_output_address(level).0
+                    descriptor.get_block_output_address(granule, level).0
                 ),
                 DescriptorType::Table => {
-                    let next_level_table = unsafe { descriptor.get_next_level_table(level) };
+                    let next_level_table =
+                        unsafe { descriptor.get_next_level_table(granule, level) };
                     debug!(
                         "{} {:#010x} Table -> {:#010x}",
                         level_prefix,
                         va,
                         next_level_table.as_ptr() as usize
                     );
-                    Self::print_table(level + 1, va, next_level_table);
+                    Self::print_table(level + 1, va, next_level_table, granule);
                 }
                 _ => {}
             }
@@ -475,7 +572,12 @@
         region: VirtualRegion,
         attributes: Attributes,
     ) -> Result<VirtualAddress, XlatError> {
-        let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
+        let blocks = Self::split_region_to_blocks(
+            region.get_pa(),
+            region.base(),
+            region.length(),
+            self.granule,
+        )?;
         for block in blocks {
             self.map_block(block, attributes.clone());
         }
@@ -488,7 +590,12 @@
     /// # Arguments
     /// * region: Memory region object
     fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
-        let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
+        let blocks = Self::split_region_to_blocks(
+            region.get_pa(),
+            region.base(),
+            region.length(),
+            self.granule,
+        )?;
         for block in blocks {
             self.unmap_block(block);
         }
@@ -510,14 +617,16 @@
     /// * pa: Physical address
     /// * va: Virtual address
     /// * length: Region size in bytes
+    /// * granule: Translation granule
     /// # Return value
     /// * Vector of granule sized blocks
     fn split_region_to_blocks(
         mut pa: PhysicalAddress,
         mut va: VirtualAddress,
         mut length: usize,
+        granule: TranslationGranule<VA_BITS>,
     ) -> Result<Vec<Block>, XlatError> {
-        let min_granule_mask = Self::GRANULE_SIZES.last().unwrap() - 1;
+        let min_granule_mask = granule.block_size_at_level(3) - 1;
 
         if length == 0 {
             return Err(XlatError::InvalidParameterError(
@@ -527,21 +636,25 @@
 
         if (pa.0 | va.0 | length) & min_granule_mask != 0 {
             return Err(XlatError::InvalidParameterError(format!(
-                "Addresses and length must be aligned {:#08x} {:#08x} {:#x}",
-                pa.0, va.0, length
+                "Addresses and length must be aligned {:#08x} {:#08x} {:#x} {:#x}",
+                pa.0, va.0, length, min_granule_mask
             )));
         }
 
         let mut pages = Vec::new();
 
         while length > 0 {
-            for granule in &Self::GRANULE_SIZES {
-                if (pa.0 | va.0) & (*granule - 1) == 0 && length >= *granule {
-                    pages.push(Block::new(pa, va, *granule));
-                    pa = pa.add_offset(*granule).ok_or(XlatError::Overflow)?;
-                    va = va.add_offset(*granule).ok_or(XlatError::Overflow)?;
+            let initial_lookup_level = granule.initial_lookup_level();
 
-                    length -= *granule;
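+            // Iterate from the largest to the smallest block size, so the
+            // largest suitably aligned block that fits is selected first.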
+            for block_size in
+                (initial_lookup_level..=3).map(|level| granule.block_size_at_level(level))
+            {
+                if (pa.0 | va.0) & (block_size - 1) == 0 && length >= block_size {
+                    pages.push(Block::new(pa, va, block_size));
+                    pa = pa.add_offset(block_size).ok_or(XlatError::Overflow)?;
+                    va = va.add_offset(block_size).ok_or(XlatError::Overflow)?;
+
+                    length -= block_size;
                     break;
                 }
             }
@@ -559,11 +672,12 @@
             attributes,
             block.pa,
             block.va,
-            block.granule,
-            1,
-            self.base_table.descriptors.as_mut_slice(),
+            block.size,
+            self.granule.initial_lookup_level(),
+            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
             &self.page_pool,
             &self.regime,
+            self.granule,
         );
     }
 
@@ -573,99 +687,122 @@
-    /// * attributes: Memory block's permssions, flags
+    /// * attributes: Memory block's permissions, flags
     /// * pa: Physical address
     /// * va: Virtual address
-    /// * granule: Translation granule in bytes
+    /// * block_size: The block size in bytes
     /// * level: Translation table level
     /// * table: Translation table on the given level
     /// * page_pool: Page pool where the function can allocate pages for the translation tables
+    /// * regime: Translation regime
+    /// * granule: Translation granule
     #[allow(clippy::too_many_arguments)]
     fn set_block_descriptor_recursively(
         attributes: Attributes,
         pa: PhysicalAddress,
         va: VirtualAddress,
-        granule: usize,
-        level: usize,
+        block_size: usize,
+        level: isize,
         table: &mut [Descriptor],
         page_pool: &PagePool,
         regime: &TranslationRegime,
+        granule: TranslationGranule<VA_BITS>,
     ) {
         // Get descriptor of the current level
-        let descriptor = &mut table[va.get_level_index(level)];
+        let descriptor = &mut table[va.get_level_index(granule, level)];
 
-        // We reached the required granule level
+        // We reached the required level with the matching block size
-        if Self::GRANULE_SIZES[level] == granule {
+        if granule.block_size_at_level(level) == block_size {
             // Follow break-before-make sequence
             descriptor.set_block_or_invalid_descriptor_to_invalid(level);
             Self::invalidate(regime, Some(va));
-            descriptor.set_block_descriptor(level, pa, attributes);
+            descriptor.set_block_descriptor(granule, level, pa, attributes);
             return;
         }
 
         // Need to iterate forward
         match descriptor.get_descriptor_type(level) {
             DescriptorType::Invalid => {
-                let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
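+                // The next level table's size and alignment depend on the
+                // granule and the level, so it can no longer be assumed to
+                // be a single 4KB page.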
+                let mut page = page_pool
+                    .allocate_pages(
+                        granule.table_size::<Descriptor>(level + 1),
+                        Some(granule.table_alignment::<Descriptor>(level + 1)),
+                    )
+                    .unwrap();
                 unsafe {
-                    let next_table = page.get_as_slice();
+                    let next_table = page.get_as_mut_slice();
                     descriptor.set_table_descriptor(level, next_table, None);
                 }
                 Self::set_block_descriptor_recursively(
                     attributes,
                     pa,
-                    va.mask_for_level(level),
-                    granule,
+                    va.mask_for_level(granule, level),
+                    block_size,
                     level + 1,
-                    unsafe { descriptor.get_next_level_table_mut(level) },
+                    unsafe { descriptor.get_next_level_table_mut(granule, level) },
                     page_pool,
                     regime,
+                    granule,
                 )
             }
             DescriptorType::Block => {
                 // Saving current descriptor details
-                let current_va = va.mask_for_level(level);
-                let current_pa = descriptor.get_block_output_address(level);
+                let current_va = va.mask_for_level(granule, level);
+                let current_pa = descriptor.get_block_output_address(granule, level);
                 let current_attributes = descriptor.get_block_attributes(level);
 
                 // Replace block descriptor by table descriptor
-                let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
+
+                // Follow break-before-make sequence
+                descriptor.set_block_or_invalid_descriptor_to_invalid(level);
+                Self::invalidate(regime, Some(current_va));
+
+                let mut page = page_pool
+                    .allocate_pages(
+                        granule.table_size::<Descriptor>(level + 1),
+                        Some(granule.table_alignment::<Descriptor>(level + 1)),
+                    )
+                    .unwrap();
                 unsafe {
-                    let next_table = page.get_as_slice();
+                    let next_table = page.get_as_mut_slice();
                     descriptor.set_table_descriptor(level, next_table, None);
                 }
 
                 // Explode block descriptor to table entries
                 for exploded_va in VirtualAddressRange::new(
                     current_va,
-                    current_va.add_offset(Self::GRANULE_SIZES[level]).unwrap(),
+                    current_va
+                        .add_offset(granule.block_size_at_level(level))
+                        .unwrap(),
                 )
-                .step_by(Self::GRANULE_SIZES[level + 1])
+                .step_by(granule.block_size_at_level(level + 1))
                 {
                     let offset = exploded_va.diff(current_va).unwrap();
                     Self::set_block_descriptor_recursively(
                         current_attributes.clone(),
                         current_pa.add_offset(offset).unwrap(),
-                        exploded_va.mask_for_level(level),
-                        Self::GRANULE_SIZES[level + 1],
+                        exploded_va.mask_for_level(granule, level),
+                        granule.block_size_at_level(level + 1),
                         level + 1,
-                        unsafe { descriptor.get_next_level_table_mut(level) },
+                        unsafe { descriptor.get_next_level_table_mut(granule, level) },
                         page_pool,
                         regime,
+                        granule,
                     )
                 }
 
                 // Invoke self to continue recursion on the newly created level
                 Self::set_block_descriptor_recursively(
-                    attributes, pa, va, granule, level, table, page_pool, regime,
+                    attributes, pa, va, block_size, level, table, page_pool, regime, granule,
                 );
             }
             DescriptorType::Table => Self::set_block_descriptor_recursively(
                 attributes,
                 pa,
-                va.mask_for_level(level),
-                granule,
+                va.mask_for_level(granule, level),
+                block_size,
                 level + 1,
-                unsafe { descriptor.get_next_level_table_mut(level) },
+                unsafe { descriptor.get_next_level_table_mut(granule, level) },
                 page_pool,
                 regime,
+                granule,
             ),
         }
     }
@@ -676,11 +813,12 @@
     fn unmap_block(&mut self, block: Block) {
         Self::remove_block_descriptor_recursively(
             block.va,
-            block.granule,
-            1,
-            self.base_table.descriptors.as_mut_slice(),
+            block.size,
+            self.granule.initial_lookup_level(),
+            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
             &self.page_pool,
             &self.regime,
+            self.granule,
         );
     }
 
@@ -688,23 +826,26 @@
     /// become empty during the removal process.
     /// # Arguments
     /// * va: Virtual address
-    /// * granule: Translation granule in bytes
+    /// * block_size: Translation block size in bytes
     /// * level: Translation table level
     /// * table: Translation table on the given level
     /// * page_pool: Page pool where the function can release the pages of empty tables
+    /// * regime: Translation regime
+    /// * granule: Translation granule
     fn remove_block_descriptor_recursively(
         va: VirtualAddress,
-        granule: usize,
-        level: usize,
+        block_size: usize,
+        level: isize,
         table: &mut [Descriptor],
         page_pool: &PagePool,
         regime: &TranslationRegime,
+        granule: TranslationGranule<VA_BITS>,
     ) {
         // Get descriptor of the current level
-        let descriptor = &mut table[va.get_level_index(level)];
+        let descriptor = &mut table[va.get_level_index(granule, level)];
 
-        // We reached the required granule level
-        if Self::GRANULE_SIZES[level] == granule {
+        // We reached the required level with the matching block size
+        if granule.block_size_at_level(level) == block_size {
             descriptor.set_block_descriptor_to_invalid(level);
             Self::invalidate(regime, Some(va));
             return;
@@ -716,23 +857,27 @@
                 panic!("Cannot remove block from non-existing table");
             }
             DescriptorType::Block => {
-                panic!("Cannot remove block with different granule");
+                panic!("Cannot remove block with different block size");
             }
             DescriptorType::Table => {
-                let next_level_table = unsafe { descriptor.get_next_level_table_mut(level) };
+                let next_level_table =
+                    unsafe { descriptor.get_next_level_table_mut(granule, level) };
                 Self::remove_block_descriptor_recursively(
-                    va.mask_for_level(level),
-                    granule,
+                    va.mask_for_level(granule, level),
+                    block_size,
                     level + 1,
                     next_level_table,
                     page_pool,
                     regime,
+                    granule,
                 );
 
                 if next_level_table.iter().all(|d| !d.is_valid()) {
                     // Empty table
                     let mut page = unsafe {
-                        Pages::from_slice(descriptor.set_table_descriptor_to_invalid(level))
+                        Pages::from_slice(
+                            descriptor.set_table_descriptor_to_invalid(granule, level),
+                        )
                     };
                     page.zero_init();
                     page_pool.release_pages(page).unwrap();
@@ -741,20 +886,27 @@
         }
     }
 
-    fn get_descriptor(&mut self, va: VirtualAddress, granule: usize) -> &mut Descriptor {
-        Self::walk_descriptors(va, granule, 0, &mut self.base_table.descriptors)
+    fn get_descriptor(&mut self, va: VirtualAddress, block_size: usize) -> &mut Descriptor {
+        Self::walk_descriptors(
+            va,
+            block_size,
+            self.granule.initial_lookup_level(),
+            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
+            self.granule,
+        )
     }
 
     fn walk_descriptors(
         va: VirtualAddress,
-        granule: usize,
-        level: usize,
+        block_size: usize,
+        level: isize,
         table: &mut [Descriptor],
+        granule: TranslationGranule<VA_BITS>,
     ) -> &mut Descriptor {
         // Get descriptor of the current level
-        let descriptor = &mut table[va.get_level_index(level)];
+        let descriptor = &mut table[va.get_level_index(granule, level)];
 
-        if Self::GRANULE_SIZES[level] == granule {
+        if granule.block_size_at_level(level) == block_size {
             return descriptor;
         }
 
@@ -766,11 +918,13 @@
             DescriptorType::Block => {
                 panic!("Cannot split existing block descriptor to table");
             }
-            DescriptorType::Table => {
-                Self::walk_descriptors(va.mask_for_level(level), granule, level + 1, unsafe {
-                    descriptor.get_next_level_table_mut(level)
-                })
-            }
+            DescriptorType::Table => Self::walk_descriptors(
+                va.mask_for_level(granule, level),
+                block_size,
+                level + 1,
+                unsafe { descriptor.get_next_level_table_mut(granule, level) },
+                granule,
+            ),
         }
     }
 
@@ -842,16 +996,19 @@
 mod tests {
     use super::*;
 
-    fn make_block(pa: usize, va: usize, granule: usize) -> Block {
-        Block::new(PhysicalAddress(pa), VirtualAddress(va), granule)
+    type TestXlat = Xlat<36>;
+
+    fn make_block(pa: usize, va: usize, size: usize) -> Block {
+        Block::new(PhysicalAddress(pa), VirtualAddress(va), size)
     }
 
     #[test]
     fn test_split_to_pages() {
-        let pages = Xlat::split_region_to_blocks(
+        let pages = TestXlat::split_region_to_blocks(
             PhysicalAddress(0x3fff_c000),
             VirtualAddress(0x3fff_c000),
             0x4020_5000,
+            TranslationGranule::Granule4k,
         )
         .unwrap();
         assert_eq!(make_block(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
@@ -865,10 +1022,11 @@
 
     #[test]
     fn test_split_to_pages_unaligned() {
-        let pages = Xlat::split_region_to_blocks(
+        let pages = TestXlat::split_region_to_blocks(
             PhysicalAddress(0x3fff_c000),
             VirtualAddress(0x3f20_0000),
             0x200000,
+            TranslationGranule::Granule4k,
         )
         .unwrap();
         for (i, block) in pages.iter().enumerate().take(512) {
diff --git a/src/page_pool.rs b/src/page_pool.rs
index 2d966d9..e0b6dc4 100644
--- a/src/page_pool.rs
+++ b/src/page_pool.rs
@@ -58,7 +58,7 @@
     /// Zero init pages
     pub fn zero_init(&mut self) {
         unsafe {
-            self.get_as_slice::<u8>().fill(0);
+            self.get_as_mut_slice::<u8>().fill(0);
         }
     }
 
@@ -67,11 +67,24 @@
         PhysicalAddress(self.pa)
     }
 
+    /// Get as slice
+    ///
+    /// **Unsafe**: The returned slice is created from the address and length stored in the
+    /// object. The caller has to ensure that no other references to the pages are in use.
+    pub unsafe fn get_as_slice<T>(&self) -> &[T] {
+        assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);
+
+        core::slice::from_raw_parts(
+            KernelSpace::pa_to_kernel(self.pa as u64) as *const T,
+            self.length / core::mem::size_of::<T>(),
+        )
+    }
+
     /// Get as mutable slice
     ///
     /// **Unsafe**: The returned slice is created from its address and length which is stored in the
     /// object. The caller has to ensure that no other references are being used of the pages.
-    pub unsafe fn get_as_slice<T>(&mut self) -> &mut [T] {
+    pub unsafe fn get_as_mut_slice<T>(&mut self) -> &mut [T] {
         assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);
 
         core::slice::from_raw_parts_mut(
@@ -200,10 +213,18 @@
     }
 
     /// Allocate pages for given length
-    pub fn allocate_pages(&self, length: usize) -> Result<Pages, PagePoolError> {
-        self.pages
-            .lock()
-            .allocate(Self::round_up_to_page_size(length), (), None)
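+    /// # Arguments
+    /// * length: Requested length in bytes
+    /// * alignment: Optional alignment in bytes; the length is rounded up to
+    ///   a multiple of the alignment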
+    pub fn allocate_pages(
+        &self,
+        length: usize,
+        alignment: Option<usize>,
+    ) -> Result<Pages, PagePoolError> {
+        let aligned_length = if let Some(alignment) = alignment {
+            length.next_multiple_of(alignment)
+        } else {
+            length
+        };
+
+        self.pages.lock().allocate(aligned_length, (), alignment)
     }
 
     /// Release pages
@@ -239,7 +260,7 @@
         pages.zero_init();
         assert_eq!([0, 0, 0, 0, 0, 0, 0, 0], area[0..8]);
 
-        let s = unsafe { pages.get_as_slice() };
+        let s = unsafe { pages.get_as_mut_slice() };
         for (i, e) in s.iter_mut().enumerate().take(8) {
             *e = i as u8;
         }
diff --git a/src/region.rs b/src/region.rs
index 09958e8..d98afa5 100644
--- a/src/region.rs
+++ b/src/region.rs
@@ -723,7 +723,7 @@
     fn test_virtual_region_drop() {
         static PAGE_POOL_AREA: PagePoolArea<8192> = PagePoolArea::new();
         let page_pool = PagePool::new(&PAGE_POOL_AREA);
-        let page = page_pool.allocate_pages(4096).unwrap();
+        let page = page_pool.allocate_pages(4096, None).unwrap();
 
         let physical_region = PhysicalRegion::Allocated(page_pool, page);