Add physical and virtual address types

Create wrapper types for physical and virtual addresses to limit the
operations available on addresses and to make it possible to require
explicit address types for given function parameters.
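
The address module itself is not part of this diff (only the
"pub mod address;" declaration is visible here), so the following is a
minimal sketch of how the wrapper types could look. It is inferred from
the call sites below (tuple structs with a public field, checked offset
arithmetic, per-level index/mask helpers and an iterable range); any
detail beyond those call sites is an assumption, not the actual module.

    // Hypothetical sketch of src/address.rs, inferred from its use in
    // src/lib.rs; the real module may differ.

    /// Mirrors Xlat::GRANULE_SIZES in lib.rs.
    const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];

    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
    pub struct PhysicalAddress(pub usize);

    impl PhysicalAddress {
        /// Checked addition of a byte offset, None on overflow.
        pub fn add_offset(self, offset: usize) -> Option<Self> {
            self.0.checked_add(offset).map(Self)
        }
    }

    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
    pub struct VirtualAddress(pub usize);

    impl VirtualAddress {
        /// Checked addition of a byte offset, None on overflow.
        pub fn add_offset(self, offset: usize) -> Option<Self> {
            self.0.checked_add(offset).map(Self)
        }

        /// Byte distance to a lower address, None if `other` is higher.
        pub fn diff(self, other: Self) -> Option<usize> {
            self.0.checked_sub(other.0)
        }

        /// Descriptor index on the given translation table level; same
        /// expression as the former `va / GRANULE_SIZES[level]`.
        pub fn get_level_index(self, level: usize) -> usize {
            self.0 / GRANULE_SIZES[level]
        }

        /// Keep only the offset bits below the given level's granule; same
        /// expression as the former `va & (GRANULE_SIZES[level] - 1)`.
        pub fn mask_for_level(self, level: usize) -> Self {
            Self(self.0 & (GRANULE_SIZES[level] - 1))
        }
    }

    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub struct VirtualAddressRange {
        pub start: VirtualAddress,
        pub end: VirtualAddress,
    }

    impl VirtualAddressRange {
        pub fn new(start: VirtualAddress, end: VirtualAddress) -> Self {
            Self { start, end }
        }

        /// Length of the range in bytes, None if the range is inverted.
        pub fn len(&self) -> Option<usize> {
            self.end.diff(self.start)
        }

        /// Walk the range in `step` sized increments.
        pub fn step_by(self, step: usize) -> impl Iterator<Item = VirtualAddress> {
            (self.start.0..self.end.0).step_by(step).map(VirtualAddress)
        }
    }

With types like these, callers have to name the address kind explicitly,
e.g. xlat.map_physical_address_range(None, PhysicalAddress(0x8000_0000),
0x1000, access_rights), which is the point of the change.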

Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: Iaef5ab1af24fc153d959d79404b3827d9c85bf53
diff --git a/src/lib.rs b/src/lib.rs
index 7fc5839..884f8e0 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -9,9 +9,9 @@
 
 use core::arch::asm;
 use core::iter::zip;
-use core::ops::Range;
 use core::{fmt, panic};
 
+use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
 use alloc::boxed::Box;
 use alloc::format;
 use alloc::string::{String, ToString};
@@ -29,6 +29,7 @@
 use self::region::{PhysicalRegion, VirtualRegion};
 use self::region_pool::{Region, RegionPool, RegionPoolError};
 
+pub mod address;
 mod descriptor;
 pub mod kernel_space;
 pub mod page_pool;
@@ -126,13 +127,13 @@
 
 #[derive(PartialEq)]
 struct Block {
-    pa: usize,
-    va: usize,
+    pa: PhysicalAddress,
+    va: VirtualAddress,
     granule: usize,
 }
 
 impl Block {
-    fn new(pa: usize, va: usize, granule: usize) -> Self {
+    fn new(pa: PhysicalAddress, va: VirtualAddress, granule: usize) -> Self {
         assert!(Xlat::GRANULE_SIZES.contains(&granule));
         Self { pa, va, granule }
     }
@@ -141,8 +142,8 @@
 impl fmt::Debug for Block {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("Block")
-            .field("pa", &format_args!("{:#010x}", self.pa))
-            .field("va", &format_args!("{:#010x}", self.va))
+            .field("pa", &format_args!("{:#010x}", self.pa.0))
+            .field("va", &format_args!("{:#010x}", self.va.0))
             .field("granule", &format_args!("{:#010x}", self.granule))
             .finish()
     }
@@ -189,10 +190,10 @@
 impl Xlat {
     pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];
 
-    pub fn new(page_pool: PagePool, va_range: Range<usize>) -> Self {
+    pub fn new(page_pool: PagePool, va_range: VirtualAddressRange) -> Self {
         let mut regions = RegionPool::new();
         regions
-            .add(VirtualRegion::new(va_range.start, va_range.len()))
+            .add(VirtualRegion::new(va_range.start, va_range.len().unwrap()))
             .unwrap();
         Self {
             base_table: Box::new(BaseTable::new()),
@@ -211,10 +212,10 @@
     /// * Virtual address of the mapped memory
     pub fn allocate_initalized_range(
         &mut self,
-        va: Option<usize>,
+        va: Option<VirtualAddress>,
         data: &[u8],
         access_rights: MemoryAccessRights,
-    ) -> Result<usize, XlatError> {
+    ) -> Result<VirtualAddress, XlatError> {
         let mut pages = self.page_pool.allocate_pages(data.len()).map_err(|e| {
             XlatError::AllocationError(format!(
                 "Cannot allocate pages for {} bytes ({:?})",
@@ -247,10 +248,10 @@
     /// * Virtual address of the mapped memory
     pub fn allocate_zero_init_range(
         &mut self,
-        va: Option<usize>,
+        va: Option<VirtualAddress>,
         length: usize,
         access_rights: MemoryAccessRights,
-    ) -> Result<usize, XlatError> {
+    ) -> Result<VirtualAddress, XlatError> {
         let mut pages = self.page_pool.allocate_pages(length).map_err(|e| {
             XlatError::AllocationError(format!("Cannot allocate pages for {length} bytes ({e:?})"))
         })?;
@@ -280,11 +281,11 @@
     /// * Virtual address of the mapped memory
     pub fn map_physical_address_range(
         &mut self,
-        va: Option<usize>,
-        pa: usize,
+        va: Option<VirtualAddress>,
+        pa: PhysicalAddress,
         length: usize,
         access_rights: MemoryAccessRights,
-    ) -> Result<usize, XlatError> {
+    ) -> Result<VirtualAddress, XlatError> {
         let resource = PhysicalRegion::PhysicalAddress(pa);
         let region = if let Some(required_va) = va {
             self.regions.acquire(required_va, length, resource)
@@ -302,7 +303,7 @@
     /// * length: Length of the memory area in bytes
     pub fn unmap_virtual_address_range(
         &mut self,
-        va: usize,
+        va: VirtualAddress,
         length: usize,
     ) -> Result<(), XlatError> {
         let pa = self.get_pa_by_va(va, length)?;
@@ -323,7 +324,11 @@
     /// * length: Length of the memory area in bytes
     /// # Return value
     /// * Physical address of the mapped memory
-    pub fn get_pa_by_va(&self, va: usize, length: usize) -> Result<usize, XlatError> {
+    pub fn get_pa_by_va(
+        &self,
+        va: VirtualAddress,
+        length: usize,
+    ) -> Result<PhysicalAddress, XlatError> {
         let containing_region = self
             .find_containing_region(va, length)
             .ok_or(XlatError::NotFound)?;
@@ -342,7 +347,7 @@
     /// * access_rights: New memory access rights of the area
     pub fn set_access_rights(
         &mut self,
-        va: usize,
+        va: VirtualAddress,
         length: usize,
         access_rights: MemoryAccessRights,
     ) -> Result<(), XlatError> {
@@ -415,7 +420,7 @@
                     "{} {:#010x} Block -> {:#010x}",
                     level_prefix,
                     va,
-                    descriptor.get_block_output_address(level)
+                    descriptor.get_block_output_address(level).0
                 ),
                 DescriptorType::Table => {
                     let next_level_table = unsafe { descriptor.get_next_level_table(level) };
@@ -442,7 +447,7 @@
         &mut self,
         region: VirtualRegion,
         attributes: Attributes,
-    ) -> Result<usize, XlatError> {
+    ) -> Result<VirtualAddress, XlatError> {
         let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
         for block in blocks {
             self.map_block(block, attributes.clone());
@@ -469,7 +474,7 @@
     /// * region: Virtual address to look for
     /// # Return value
     /// * Reference to virtual region if found
-    fn find_containing_region(&self, va: usize, length: usize) -> Option<&VirtualRegion> {
+    fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
         self.regions.find_containing_region(va, length).ok()
     }
 
@@ -481,8 +486,8 @@
     /// # Return value
     /// * Vector of granule sized blocks
     fn split_region_to_blocks(
-        mut pa: usize,
-        mut va: usize,
+        mut pa: PhysicalAddress,
+        mut va: VirtualAddress,
         mut length: usize,
     ) -> Result<Vec<Block>, XlatError> {
         let min_granule_mask = Self::GRANULE_SIZES.last().unwrap() - 1;
@@ -493,12 +498,10 @@
             ));
         }
 
-        if pa & min_granule_mask != 0
-            || va & min_granule_mask != 0
-            || length & min_granule_mask != 0
-        {
+        if (pa.0 | va.0 | length) & min_granule_mask != 0 {
             return Err(XlatError::InvalidParameterError(format!(
-                "Addresses and length must be aligned {pa:#010x} {va:#010x} {length:#x}"
+                "Addresses and length must be aligned {:#08x} {:#08x} {:#x}",
+                pa.0, va.0, length
             )));
         }
 
@@ -506,10 +509,10 @@
 
         while length > 0 {
             for granule in &Self::GRANULE_SIZES {
-                if (pa | va) & (*granule - 1) == 0 && length >= *granule {
+                if (pa.0 | va.0) & (*granule - 1) == 0 && length >= *granule {
                     pages.push(Block::new(pa, va, *granule));
-                    pa += *granule;
-                    va = va.checked_add(*granule).ok_or(XlatError::Overflow)?;
+                    pa = pa.add_offset(*granule).ok_or(XlatError::Overflow)?;
+                    va = va.add_offset(*granule).ok_or(XlatError::Overflow)?;
 
                     length -= *granule;
                     break;
@@ -548,15 +551,15 @@
     /// * page_pool: Page pool where the function can allocate pages for the translation tables
     fn set_block_descriptor_recursively(
         attributes: Attributes,
-        pa: usize,
-        va: usize,
+        pa: PhysicalAddress,
+        va: VirtualAddress,
         granule: usize,
         level: usize,
         table: &mut [Descriptor],
         page_pool: &PagePool,
     ) {
         // Get descriptor of the current level
-        let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];
+        let descriptor = &mut table[va.get_level_index(level)];
 
         // We reached the required granule level
         if Self::GRANULE_SIZES[level] == granule {
@@ -575,7 +578,7 @@
                 Self::set_block_descriptor_recursively(
                     attributes,
                     pa,
-                    va & (Self::GRANULE_SIZES[level] - 1),
+                    va.mask_for_level(level),
                     granule,
                     level + 1,
                     unsafe { descriptor.get_next_level_table_mut(level) },
@@ -584,7 +587,7 @@
             }
             DescriptorType::Block => {
                 // Saving current descriptor details
-                let current_va = va & !(Self::GRANULE_SIZES[level] - 1);
+                let current_va = va.mask_for_level(level);
                 let current_pa = descriptor.get_block_output_address(level);
                 let current_attributes = descriptor.get_block_attributes(level);
 
@@ -596,14 +599,17 @@
                 }
 
                 // Explode block descriptor to table entries
-                for exploded_va in (current_va..(current_va + Self::GRANULE_SIZES[level]))
-                    .step_by(Self::GRANULE_SIZES[level + 1])
+                for exploded_va in VirtualAddressRange::new(
+                    current_va,
+                    current_va.add_offset(Self::GRANULE_SIZES[level]).unwrap(),
+                )
+                .step_by(Self::GRANULE_SIZES[level + 1])
                 {
-                    let offset = exploded_va - current_va;
+                    let offset = exploded_va.diff(current_va).unwrap();
                     Self::set_block_descriptor_recursively(
                         current_attributes.clone(),
-                        current_pa + offset,
-                        exploded_va & (Self::GRANULE_SIZES[level] - 1),
+                        current_pa.add_offset(offset).unwrap(),
+                        exploded_va.mask_for_level(level),
                         Self::GRANULE_SIZES[level + 1],
                         level + 1,
                         unsafe { descriptor.get_next_level_table_mut(level) },
@@ -619,7 +625,7 @@
             DescriptorType::Table => Self::set_block_descriptor_recursively(
                 attributes,
                 pa,
-                va & (Self::GRANULE_SIZES[level] - 1),
+                va.mask_for_level(level),
                 granule,
                 level + 1,
                 unsafe { descriptor.get_next_level_table_mut(level) },
@@ -650,14 +656,14 @@
     /// * table: Translation table on the given level
     /// * page_pool: Page pool where the function can release the pages of empty tables
     fn remove_block_descriptor_recursively(
-        va: usize,
+        va: VirtualAddress,
         granule: usize,
         level: usize,
         table: &mut [Descriptor],
         page_pool: &PagePool,
     ) {
         // Get descriptor of the current level
-        let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];
+        let descriptor = &mut table[va.get_level_index(level)];
 
         // We reached the required granule level
         if Self::GRANULE_SIZES[level] == granule {
@@ -676,7 +682,7 @@
             DescriptorType::Table => {
                 let next_level_table = unsafe { descriptor.get_next_level_table_mut(level) };
                 Self::remove_block_descriptor_recursively(
-                    va & (Self::GRANULE_SIZES[level] - 1),
+                    va.mask_for_level(level),
                     granule,
                     level + 1,
                     next_level_table,
@@ -695,18 +701,18 @@
         }
     }
 
-    fn get_descriptor(&mut self, va: usize, granule: usize) -> &mut Descriptor {
+    fn get_descriptor(&mut self, va: VirtualAddress, granule: usize) -> &mut Descriptor {
         Self::walk_descriptors(va, granule, 0, &mut self.base_table.descriptors)
     }
 
     fn walk_descriptors(
-        va: usize,
+        va: VirtualAddress,
         granule: usize,
         level: usize,
         table: &mut [Descriptor],
     ) -> &mut Descriptor {
         // Get descriptor of the current level
-        let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];
+        let descriptor = &mut table[va.get_level_index(level)];
 
         if Self::GRANULE_SIZES[level] == granule {
             return descriptor;
@@ -720,12 +726,11 @@
             DescriptorType::Block => {
                 panic!("Cannot split existing block descriptor to table");
             }
-            DescriptorType::Table => Self::walk_descriptors(
-                va & (Self::GRANULE_SIZES[level] - 1),
-                granule,
-                level + 1,
-                unsafe { descriptor.get_next_level_table_mut(level) },
-            ),
+            DescriptorType::Table => {
+                Self::walk_descriptors(va.mask_for_level(level), granule, level + 1, unsafe {
+                    descriptor.get_next_level_table_mut(level)
+                })
+            }
         }
     }
 }
@@ -734,24 +739,38 @@
 mod tests {
     use super::*;
 
+    fn make_block(pa: usize, va: usize, granule: usize) -> Block {
+        Block::new(PhysicalAddress(pa), VirtualAddress(va), granule)
+    }
+
     #[test]
     fn test_split_to_pages() {
-        let pages = Xlat::split_region_to_blocks(0x3fff_c000, 0x3fff_c000, 0x4020_5000).unwrap();
-        assert_eq!(Block::new(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
-        assert_eq!(Block::new(0x3fff_d000, 0x3fff_d000, 0x1000), pages[1]);
-        assert_eq!(Block::new(0x3fff_e000, 0x3fff_e000, 0x1000), pages[2]);
-        assert_eq!(Block::new(0x3fff_f000, 0x3fff_f000, 0x1000), pages[3]);
-        assert_eq!(Block::new(0x4000_0000, 0x4000_0000, 0x4000_0000), pages[4]);
-        assert_eq!(Block::new(0x8000_0000, 0x8000_0000, 0x0020_0000), pages[5]);
-        assert_eq!(Block::new(0x8020_0000, 0x8020_0000, 0x1000), pages[6]);
+        let pages = Xlat::split_region_to_blocks(
+            PhysicalAddress(0x3fff_c000),
+            VirtualAddress(0x3fff_c000),
+            0x4020_5000,
+        )
+        .unwrap();
+        assert_eq!(make_block(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
+        assert_eq!(make_block(0x3fff_d000, 0x3fff_d000, 0x1000), pages[1]);
+        assert_eq!(make_block(0x3fff_e000, 0x3fff_e000, 0x1000), pages[2]);
+        assert_eq!(make_block(0x3fff_f000, 0x3fff_f000, 0x1000), pages[3]);
+        assert_eq!(make_block(0x4000_0000, 0x4000_0000, 0x4000_0000), pages[4]);
+        assert_eq!(make_block(0x8000_0000, 0x8000_0000, 0x0020_0000), pages[5]);
+        assert_eq!(make_block(0x8020_0000, 0x8020_0000, 0x1000), pages[6]);
     }
 
     #[test]
     fn test_split_to_pages_unaligned() {
-        let pages = Xlat::split_region_to_blocks(0x3fff_c000, 0x3f20_0000, 0x200000).unwrap();
+        let pages = Xlat::split_region_to_blocks(
+            PhysicalAddress(0x3fff_c000),
+            VirtualAddress(0x3f20_0000),
+            0x200000,
+        )
+        .unwrap();
         for (i, block) in pages.iter().enumerate().take(512) {
             assert_eq!(
-                Block::new(0x3fff_c000 + (i << 12), 0x3f20_0000 + (i << 12), 0x1000),
+                make_block(0x3fff_c000 + (i << 12), 0x3f20_0000 + (i << 12), 0x1000),
                 *block
             );
         }