Implement dynamic kernel memory mapping

Extend KernelSpace to build the kernel address space memory mapping at
runtime.
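
The intended call sequence from the main core's init code is sketched
below (illustrative only: the page_pool value and the address ranges
are example inputs, not part of this patch):

    KernelSpace::create_instance(page_pool);
    KernelSpace::init(
        0x0400_0000..0x0410_0000, // code segment (RX), example range
        0x0410_0000..0x0420_0000, // data segment (RW), example range
    )?;
    KernelSpace::activate();

    // Map a device region and access it through the returned kernel VA
    let va = KernelSpace::map_memory(
        0x0900_0000, // example device PA
        0x1000,
        MemoryAccessRights::RW | MemoryAccessRights::DEVICE,
    )?;
    KernelSpace::unmap_memory(va, 0x1000)?;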

Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: I7a27540f58a032ff49ba35f5f43afb5a8c9e5be8
diff --git a/src/kernel_space.rs b/src/kernel_space.rs
index 320d16e..66a65c1 100644
--- a/src/kernel_space.rs
+++ b/src/kernel_space.rs
@@ -3,28 +3,161 @@
 
-//! Module for converting addresses between kernel virtual address space to physical address space
+//! Module for building the kernel address space mapping and for converting addresses between the
+//! kernel virtual address space and the physical address space
 
-pub struct KernelSpace {}
+use core::ops::Range;
 
-#[cfg(not(test))]
+use alloc::string::String;
+use spin::Mutex;
+
+use super::{
+    page_pool::{Page, PagePool},
+    MemoryAccessRights, TTBR, Xlat, XlatError,
+};
+
+/// Singleton kernel space instance, set once by [`KernelSpace::create_instance`] before any other
+/// `KernelSpace` function is called.
+static mut KERNEL_SPACE_INSTANCE: Option<KernelSpace> = None;
+
+/// Kernel space memory mapping
+///
+/// This object handles the translation tables of the kernel address space. The main goal is to
+/// limit the kernel's access to memory by mapping only the ranges that are necessary for
+/// operation.
+/// The current implementation uses identity mapping into the upper virtual address range, e.g.
+/// PA = 0x0000_0001_2345_0000 -> VA = 0xffff_fff1_2345_0000.
+pub struct KernelSpace {
+    xlat: Mutex<Xlat>,
+}
+
 impl KernelSpace {
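+    /// Size of a kernel space page, taken from the page pool's page type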
+    pub const PAGE_SIZE: usize = Page::SIZE;
+
+    /// Creates the kernel memory mapping instance. This should be called from the main core's init
+    /// code.
+    /// # Arguments
+    /// * page_pool: Page pool for allocating kernel translation tables
+    pub fn create_instance(page_pool: PagePool) {
+        unsafe {
+            assert!(KERNEL_SPACE_INSTANCE.is_none());
+
+            KERNEL_SPACE_INSTANCE = Some(Self {
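+                // Lower 64 GiB VA range, matching the 36 bit physical address mask used by
+                // kernel_to_pa/pa_to_kernel below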
+                xlat: Mutex::new(Xlat::new(page_pool, 0x0000_0000..0x10_0000_0000)),
+            });
+        }
+    }
+
+    /// Maps the code (RX) and data (RW) segments of the SPMC itself.
+    /// # Arguments
+    /// * code_range: address range (start..end) of the code segment
+    /// * data_range: address range (start..end) of the data segment
+    /// # Return value
+    /// * The result of the operation
+    pub fn init(code_range: Range<usize>, data_range: Range<usize>) -> Result<(), XlatError> {
+        if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } {
+            let mut xlat = kernel_space.xlat.lock();
+
+            xlat.map_physical_address_range(
+                Some(code_range.start),
+                code_range.start,
+                code_range.len(),
+                MemoryAccessRights::RX | MemoryAccessRights::GLOBAL,
+            )?;
+
+            xlat.map_physical_address_range(
+                Some(data_range.start),
+                data_range.start,
+                data_range.len(),
+                MemoryAccessRights::RW | MemoryAccessRights::GLOBAL,
+            )?;
+
+            Ok(())
+        } else {
+            Err(XlatError::InvalidOperation(String::from(
+                "KernelSpace is not initialized",
+            )))
+        }
+    }
+
+    /// Maps a memory range into the kernel address space
+    /// # Arguments
+    /// * pa: Physical address of the memory
+    /// * length: Length of the range in bytes
+    /// * access_rights: Memory access rights
+    /// # Return value
+    /// * Virtual address of the mapped memory or error
+    pub fn map_memory(
+        pa: usize,
+        length: usize,
+        access_rights: MemoryAccessRights,
+    ) -> Result<usize, XlatError> {
+        if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } {
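+            // Kernel mappings are always GLOBAL, i.e. valid for every ASID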
+            let lower_va = kernel_space.xlat.lock().map_physical_address_range(
+                Some(pa),
+                pa,
+                length,
+                access_rights | MemoryAccessRights::GLOBAL,
+            )?;
+
+            Ok(Self::pa_to_kernel(lower_va as u64) as usize)
+        } else {
+            Err(XlatError::InvalidOperation(String::from(
+                "KernelSpace is not initialized",
+            )))
+        }
+    }
+
+    /// Unmaps a memory range from the kernel address space
+    /// # Arguments
+    /// * va: Virtual address of the memory
+    /// * length: Length of the range in bytes
+    /// # Return value
+    /// * The result of the operation
+    pub fn unmap_memory(va: usize, length: usize) -> Result<(), XlatError> {
+        if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } {
+            kernel_space
+                .xlat
+                .lock()
+                .unmap_virtual_address_range(Self::kernel_to_pa(va as u64) as usize, length)
+        } else {
+            Err(XlatError::InvalidOperation(String::from(
+                "KernelSpace is not initialized",
+            )))
+        }
+    }
+
+    /// Activates the kernel address space mapping by programming TTBR1_EL1. Does nothing if the
+    /// instance has not been created yet.
+    pub fn activate() {
+        if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } {
+            kernel_space.xlat.lock().activate(0, TTBR::TTBR1_EL1);
+        }
+    }
+
+    /// Rounds a value down to a kernel space page boundary
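+    /// (e.g. with 4 KiB pages, `round_down_to_page_size(0x1234)` returns `0x1000`)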
+    pub const fn round_down_to_page_size(size: usize) -> usize {
+        size & !(Self::PAGE_SIZE - 1)
+    }
+
+    /// Rounds a value up to a kernel space page boundary
+    pub const fn round_up_to_page_size(size: usize) -> usize {
+        (size + Self::PAGE_SIZE - 1) & !(Self::PAGE_SIZE - 1)
+    }
+
     /// Kernel virtual address to physical address
+    #[cfg(not(test))]
     pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
         kernel_address & 0x0000_000f_ffff_ffff
     }
-
     /// Physical address to kernel virtual address
+    #[cfg(not(test))]
     pub const fn pa_to_kernel(pa: u64) -> u64 {
         // TODO: make this consts assert_eq!(pa & 0xffff_fff0_0000_0000, 0);
         pa | 0xffff_fff0_0000_0000
     }
-}
 
-#[cfg(test)]
-impl KernelSpace {
+    // No mapping is applied in test builds; addresses are returned unchanged
+    #[cfg(test)]
     pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
         kernel_address
     }
 
+    #[cfg(test)]
     pub const fn pa_to_kernel(pa: u64) -> u64 {
         pa
     }
diff --git a/src/lib.rs b/src/lib.rs
index 4986ceb..c70824e 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -9,6 +9,7 @@
 
 use core::arch::asm;
 use core::iter::zip;
+use core::ops::Range;
 use core::{fmt, panic};
 
 use alloc::boxed::Box;
@@ -85,6 +86,7 @@
 
         const USER = 0b00010000;
         const DEVICE = 0b00100000;
+        const GLOBAL = 0b01000000;
     }
 }
 
@@ -112,7 +114,7 @@
             pxn: !access_rights.contains(MemoryAccessRights::X)
                 || access_rights.contains(MemoryAccessRights::USER),
             contiguous: false,
-            not_global: true,
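+            // GLOBAL mappings clear the nG bit, so they match regardless of the current ASID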
+            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
             access_flag: true,
             shareability: Shareability::NonShareable,
             data_access_permissions,
@@ -146,6 +148,12 @@
     }
 }
 
+/// Enum for selecting TTBR0_EL1 or TTBR1_EL1
+#[allow(non_camel_case_types)]
+pub enum TTBR {
+    TTBR0_EL1,
+    TTBR1_EL1,
+}
+
 pub struct Xlat {
     base_table: Box<BaseTable>,
     page_pool: PagePool,
@@ -178,16 +186,12 @@
 /// * unmap block
 /// * set access rights of block
 impl Xlat {
-    const BASE_VA: usize = 0x4000_0000;
     pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];
 
-    pub fn new(page_pool: PagePool) -> Self {
+    pub fn new(page_pool: PagePool, va_range: Range<usize>) -> Self {
         let mut regions = RegionPool::new();
         regions
-            .add(VirtualRegion::new(
-                Self::BASE_VA,
-                0x1_0000_0000 - Self::BASE_VA,
-            ))
+            .add(VirtualRegion::new(va_range.start, va_range.len()))
             .unwrap();
         Self {
             base_table: Box::new(BaseTable::new()),
@@ -358,16 +362,29 @@
     /// Activate memory mapping represented by the object
     /// # Arguments
     /// * asid: ASID of the table base address
-    pub fn activate(&self, asid: u8) {
+    /// * ttbr: Selects TTBR0_EL1/TTBR1_EL1
+    pub fn activate(&self, asid: u8, ttbr: TTBR) {
         let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.descriptors.as_ptr() as u64);
-        let ttbr = ((asid as u64) << 48) | base_table_pa;
-        unsafe {
-            #[cfg(target_arch = "aarch64")]
-            asm!(
-                "msr ttbr0_el1, {0}
-                 isb",
-                in(reg) ttbr)
-        };
+        let ttbr_value = ((asid as u64) << 48) | base_table_pa;
+        #[cfg(target_arch = "aarch64")]
+        match ttbr {
+            TTBR::TTBR0_EL1 => unsafe {
+                // Install the tables for the lower VA range
+                asm!(
+                    "msr ttbr0_el1, {0}
+                     isb",
+                    in(reg) ttbr_value)
+            },
+            TTBR::TTBR1_EL1 => unsafe {
+                // Install the tables for the upper (kernel) VA range, then invalidate the
+                // complete TLB so no stale translations remain in use
+                asm!(
+                    "msr ttbr1_el1, {0}
+                     isb
+
+                     tlbi vmalle1
+                     dsb sy
+                     isb",
+                    in(reg) ttbr_value)
+            },
+        }
     }
 
     /// Prints the translation tables to debug console recursively