Remove global KernelSpace instance

Propagate a thread-safe instance of KernelSpace to each place where it
is used.
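
A minimal caller-side sketch of the intended usage (page_pool and the
range variables are illustrative placeholders, not taken from a
specific caller; the calling function is assumed to return a Result):

    // Before: the instance lived in a global static and was reached
    // through associated functions.
    //     KernelSpace::create_instance(page_pool);
    //     KernelSpace::init(code_range, data_range)?;
    //     KernelSpace::activate();
    //
    // After: the owner constructs the instance and hands out clones.
    // Cloning only bumps the inner Arc<Mutex<Xlat>> refcount, so every
    // clone shares the same translation tables.
    let kernel_space = KernelSpace::new(page_pool);
    kernel_space.init(code_range, data_range)?;
    kernel_space.activate();
    let for_other_component = kernel_space.clone();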

Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: Ic6946776e16dc4d6c5a1065bd4a60506ea4c245b
diff --git a/src/kernel_space.rs b/src/kernel_space.rs
index 7b013cb..4aab460 100644
--- a/src/kernel_space.rs
+++ b/src/kernel_space.rs
@@ -5,7 +5,7 @@
 
 use core::ops::Range;
 
-use alloc::string::String;
+use alloc::sync::Arc;
 use spin::Mutex;
 
 use super::{
@@ -15,8 +15,9 @@
 
 static mut KERNEL_SPACE_INSTANCE: Option<KernelSpace> = None;
 
+#[derive(Clone)]
 pub struct KernelSpace {
-    xlat: Mutex<Xlat>,
+    xlat: Arc<Mutex<Xlat>>,
 }
 
 /// Kernel space memory mapping
@@ -33,13 +34,12 @@
     /// code.
     /// # Arguments
     /// * page_pool: Page pool for allocating kernel translation tables
-    pub fn create_instance(page_pool: PagePool) {
-        unsafe {
-            assert!(KERNEL_SPACE_INSTANCE.is_none());
-
-            KERNEL_SPACE_INSTANCE = Some(Self {
-                xlat: Mutex::new(Xlat::new(page_pool, 0x0000_0000..0x10_0000_0000)),
-            });
+    pub fn new(page_pool: PagePool) -> Self {
+        Self {
+            xlat: Arc::new(Mutex::new(Xlat::new(
+                page_pool,
+                0x0000_0000..0x10_0000_0000,
+            ))),
         }
     }
 
@@ -49,30 +49,28 @@
     /// * data_range: (start, end) addresses of the data segment
     /// # Return value
     /// * The result of the operation
-    pub fn init(code_range: Range<usize>, data_range: Range<usize>) -> Result<(), XlatError> {
-        if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } {
-            let mut xlat = kernel_space.xlat.lock();
+    pub fn init(
+        &self,
+        code_range: Range<usize>,
+        data_range: Range<usize>,
+    ) -> Result<(), XlatError> {
+        let mut xlat = self.xlat.lock();
 
-            xlat.map_physical_address_range(
-                Some(code_range.start),
-                code_range.start,
-                code_range.len(),
-                MemoryAccessRights::RX | MemoryAccessRights::GLOBAL,
-            )?;
+        xlat.map_physical_address_range(
+            Some(code_range.start),
+            code_range.start,
+            code_range.len(),
+            MemoryAccessRights::RX | MemoryAccessRights::GLOBAL,
+        )?;
 
-            xlat.map_physical_address_range(
-                Some(data_range.start),
-                data_range.start,
-                data_range.len(),
-                MemoryAccessRights::RW | MemoryAccessRights::GLOBAL,
-            )?;
+        xlat.map_physical_address_range(
+            Some(data_range.start),
+            data_range.start,
+            data_range.len(),
+            MemoryAccessRights::RW | MemoryAccessRights::GLOBAL,
+        )?;
 
-            Ok(())
-        } else {
-            Err(XlatError::InvalidOperation(String::from(
-                "KernelSpace is not initialized",
-            )))
-        }
+        Ok(())
     }
 
     /// Map memory range into the kernel address space
@@ -83,24 +81,19 @@
     /// # Return value
     /// * Virtual address of the mapped memory or error
     pub fn map_memory(
+        &self,
         pa: usize,
         length: usize,
         access_rights: MemoryAccessRights,
     ) -> Result<usize, XlatError> {
-        if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } {
-            let lower_va = kernel_space.xlat.lock().map_physical_address_range(
-                Some(pa),
-                pa,
-                length,
-                access_rights | MemoryAccessRights::GLOBAL,
-            )?;
+        let lower_va = self.xlat.lock().map_physical_address_range(
+            Some(pa),
+            pa,
+            length,
+            access_rights | MemoryAccessRights::GLOBAL,
+        )?;
 
-            Ok(Self::pa_to_kernel(lower_va as u64) as usize)
-        } else {
-            Err(XlatError::InvalidOperation(String::from(
-                "KernelSpace is not initialized",
-            )))
-        }
+        Ok(Self::pa_to_kernel(lower_va as u64) as usize)
     }
 
     /// Unmap memory range from the kernel address space
@@ -109,24 +102,15 @@
     /// * length: Length of the range in bytes
     /// # Return value
     /// The result of the operation
-    pub fn unmap_memory(va: usize, length: usize) -> Result<(), XlatError> {
-        if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } {
-            kernel_space
-                .xlat
-                .lock()
-                .unmap_virtual_address_range(Self::kernel_to_pa(va as u64) as usize, length)
-        } else {
-            Err(XlatError::InvalidOperation(String::from(
-                "KernelSpace is not initialized",
-            )))
-        }
+    pub fn unmap_memory(&self, va: usize, length: usize) -> Result<(), XlatError> {
+        self.xlat
+            .lock()
+            .unmap_virtual_address_range(Self::kernel_to_pa(va as u64) as usize, length)
     }
 
     /// Activate kernel address space mapping
-    pub fn activate() {
-        if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } {
-            kernel_space.xlat.lock().activate(0, super::TTBR::TTBR1_EL1);
-        }
+    pub fn activate(&self) {
+        self.xlat.lock().activate(0, super::TTBR::TTBR1_EL1);
     }
 
     /// Rounds a value down to a kernel space page boundary
diff --git a/src/lib.rs b/src/lib.rs
index 9592c04..ecaa6b5 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -149,6 +149,7 @@
 }
 
 /// Enum for selecting TTBR0_EL1 or TTBR1_EL1
+#[allow(clippy::upper_case_acronyms)]
 pub enum TTBR {
     TTBR0_EL1,
     TTBR1_EL1,