Implement KernelMapper

Objects in the physical address space can be wrapped into a KernelMapper
which maps the object to the virtual kernel address space and provides
the same access to it via the Deref trait. When the mapper is dropped it
unmaps the mapped object from the virtual kernel space.

Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: I4f878b69ba04ac9da34926544b7a5f62a1de3526
diff --git a/src/kernel_space.rs b/src/kernel_space.rs
index 4aab460..d0a52a2 100644
--- a/src/kernel_space.rs
+++ b/src/kernel_space.rs
@@ -3,7 +3,7 @@
 
 //! Module for converting addresses between kernel virtual address space to physical address space
 
-use core::ops::Range;
+use core::ops::{Deref, DerefMut, Range};
 
 use alloc::sync::Arc;
 use spin::Mutex;
@@ -20,7 +20,7 @@
     xlat: Arc<Mutex<Xlat>>,
 }
 
-/// Kernel space memory mapping
+/// # Kernel space memory mapping
 ///
 /// This object handles the translation tables of the kernel address space. The main goal is to
 /// limit the kernel's access to the memory and only map the ranges which are necessary for
@@ -151,3 +151,164 @@
         pa
     }
 }
+
/// # Kernel mapping wrapper
///
/// Objects in the physical address space can be wrapped into a `KernelMapper` which maps the
/// object to the virtual kernel address space and provides the same access to it via the
/// `Deref` trait. When the mapper is dropped it unmaps the mapped object from the virtual
/// kernel space.
pub struct KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    // Owns the physical-address-space instance so it stays alive while mapped.
    physical_instance: T,
    // Kernel virtual address returned by `map_memory`; valid until `drop` unmaps it.
    va: *const T::Target,
    // Handle used to create the mapping in `new` and remove it in `drop`.
    kernel_space: KernelSpace,
}
+
+impl<T> KernelMapper<T>
+where
+    T: Deref,
+    T::Target: Sized,
+{
+    /// Create new mapped object
+    /// The access_rights parameter must contain read access
+    pub fn new(
+        physical_instance: T,
+        kernel_space: KernelSpace,
+        access_rights: MemoryAccessRights,
+    ) -> Self {
+        assert!(access_rights.contains(MemoryAccessRights::R));
+
+        let pa = physical_instance.deref() as *const _ as usize;
+        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
+
+        let va = kernel_space
+            .map_memory(pa, length, access_rights)
+            .expect("Failed to map area");
+
+        Self {
+            physical_instance,
+            va: va as *const T::Target,
+            kernel_space,
+        }
+    }
+}
+
impl<T> Deref for KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    type Target = T::Target;

    /// Access the wrapped object through its kernel virtual address.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        // SAFETY: `va` was produced by `map_memory` in `new` from the object's
        // physical address and the mapping is only removed in `drop`, so the
        // pointer is valid and readable for the lifetime of `self`. The mapped
        // length is the page-rounded size of `T::Target`, covering the object.
        unsafe { &*self.va }
    }
}
+
impl<T> Drop for KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    /// Unmap the wrapped object from the kernel virtual address space.
    fn drop(&mut self) {
        // Recompute the mapped length exactly as `new` did, so the unmap
        // request covers the same page-rounded range.
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
        // NOTE(review): `expect` in `drop` aborts the program if it fires
        // during an unwind — confirm a failed unmap is truly unrecoverable.
        self.kernel_space
            .unmap_memory(self.va as usize, length)
            .expect("Failed to unmap area");
    }
}
+
// SAFETY: the type is only auto-!Send because of the raw `va` pointer. The
// mapper exclusively owns both the mapping and the wrapped instance, and
// `T: Send` allows the instance to move between threads. This assumes the
// kernel address space (and thus `va`) is shared by all cores/threads that
// may receive the mapper — TODO confirm against KernelSpace's guarantees.
unsafe impl<T> Send for KernelMapper<T>
where
    T: Deref + Send,
    T::Target: Sized,
{
}
+
/// # Mutable version of kernel mapping wrapper
///
/// Same as [`KernelMapper`] but requires read-write access rights and also
/// exposes the mapped object mutably via `DerefMut`.
pub struct KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    // Owns the physical-address-space instance so it stays alive while mapped.
    physical_instance: T,
    // Kernel virtual address returned by `map_memory`; valid until `drop` unmaps it.
    va: *mut T::Target,
    // Handle used to create the mapping in `new` and remove it in `drop`.
    kernel_space: KernelSpace,
}
+
+impl<T> KernelMapperMut<T>
+where
+    T: DerefMut,
+    T::Target: Sized,
+{
+    /// Create new mapped object
+    /// The access_rights parameter must contain read and write access
+    pub fn new(
+        physical_instance: T,
+        kernel_space: KernelSpace,
+        access_rights: MemoryAccessRights,
+    ) -> Self {
+        assert!(access_rights.contains(MemoryAccessRights::RW));
+
+        let pa = physical_instance.deref() as *const _ as usize;
+        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
+
+        let va = kernel_space
+            .map_memory(pa, length, access_rights)
+            .expect("Failed to map area");
+
+        Self {
+            physical_instance,
+            va: va as *mut T::Target,
+            kernel_space,
+        }
+    }
+}
+
impl<T> Deref for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    type Target = T::Target;

    /// Access the wrapped object through its kernel virtual address.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        // SAFETY: `va` was produced by `map_memory` in `new` and the mapping is
        // only removed in `drop`, so the pointer is valid and readable for the
        // lifetime of `self`.
        unsafe { &*self.va }
    }
}
+
impl<T> DerefMut for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    /// Mutably access the wrapped object through its kernel virtual address.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: `va` is valid for the lifetime of `self` (see `Deref`), the
        // mapping was created with write access (asserted in `new`), and
        // `&mut self` guarantees exclusive access to the mapped object.
        unsafe { &mut *self.va }
    }
}
+
impl<T> Drop for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    /// Unmap the wrapped object from the kernel virtual address space.
    fn drop(&mut self) {
        // Recompute the mapped length exactly as `new` did, so the unmap
        // request covers the same page-rounded range.
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
        // NOTE(review): `expect` in `drop` aborts the program if it fires
        // during an unwind — confirm a failed unmap is truly unrecoverable.
        self.kernel_space
            .unmap_memory(self.va as usize, length)
            .expect("Failed to unmap area");
    }
}
+
// SAFETY: the type is only auto-!Send because of the raw `va` pointer. The
// mapper exclusively owns both the mapping and the wrapped instance, and
// `T: Send` allows the instance to move between threads. This assumes the
// kernel address space (and thus `va`) is shared by all cores/threads that
// may receive the mapper — TODO confirm against KernelSpace's guarantees.
unsafe impl<T> Send for KernelMapperMut<T>
where
    T: DerefMut + Send,
    T::Target: Sized,
{
}