Handle top bits in upper VA ranges

Set and enforce the top bits of virtual addresses when the upper VA
range is selected for EL1/0 or EL2/0 (VHE). The top bits of a virtual
address must then match the selected VA range, and Xlat will also
return virtual addresses whose top bits match.
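
On AArch64, an upper-range (TTBR1-style) virtual address has its top
bits set to one. A minimal sketch of the rule this patch enforces via
set_upper_bits::<36> (to_upper_range is a hypothetical helper, not the
crate's API; the 36 addressable bits match the mask used below):

    // Sketch only: with 36 addressable bits, bits [63:36] of an
    // upper-range VA must be all ones (TTBR1-style addresses).
    const ADDR_BITS: u32 = 36;

    // Hypothetical helper mirroring what set_upper_bits::<36> does.
    fn to_upper_range(va: u64) -> u64 {
        va | !((1u64 << ADDR_BITS) - 1)
    }

    fn main() {
        // e.g. 0x1_0000_0000 lands at 0xffff_fff1_0000_0000
        assert_eq!(to_upper_range(0x1_0000_0000), 0xffff_fff1_0000_0000);
    }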

Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: I44c2a326a9d3fdd4d82ec01e8f95d1c8f7d305b1
diff --git a/src/kernel_space.rs b/src/kernel_space.rs
index f9e1d68..cc04541 100644
--- a/src/kernel_space.rs
+++ b/src/kernel_space.rs
@@ -37,7 +37,9 @@
         Self {
             xlat: Arc::new(Mutex::new(Xlat::new(
                 page_pool,
-                unsafe { VirtualAddressRange::from_range(0x0000_0000..0x10_0000_0000) },
+                unsafe {
+                    VirtualAddressRange::from_range(0xffff_fff0_0000_0000..0xffff_ffff_ffff_ffff)
+                },
                 TranslationRegime::EL1_0(RegimeVaRange::Upper, 0),
                 TranslationGranule::Granule4k,
             ))),
@@ -57,18 +59,26 @@
     ) -> Result<(), XlatError> {
         let mut xlat = self.xlat.lock();
 
-        let code_pa = PhysicalAddress(code_range.start);
-        let data_pa = PhysicalAddress(data_range.start);
+        let code_pa = PhysicalAddress(code_range.start & 0x0000_000f_ffff_ffff);
+        let data_pa = PhysicalAddress(data_range.start & 0x0000_000f_ffff_ffff);
 
         xlat.map_physical_address_range(
-            Some(code_pa.identity_va()),
+            Some(
+                code_pa
+                    .identity_va()
+                    .set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
+            ),
             code_pa,
             code_range.len(),
             MemoryAccessRights::RX | MemoryAccessRights::GLOBAL,
         )?;
 
         xlat.map_physical_address_range(
-            Some(data_pa.identity_va()),
+            Some(
+                data_pa
+                    .identity_va()
+                    .set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
+            ),
             data_pa,
             data_range.len(),
             MemoryAccessRights::RW | MemoryAccessRights::GLOBAL,
@@ -92,14 +102,17 @@
     ) -> Result<usize, XlatError> {
         let pa = PhysicalAddress(pa);
 
-        let lower_va = self.xlat.lock().map_physical_address_range(
-            Some(pa.identity_va()),
+        let va = self.xlat.lock().map_physical_address_range(
+            Some(
+                pa.identity_va()
+                    .set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
+            ),
             pa,
             length,
             access_rights | MemoryAccessRights::GLOBAL,
         )?;
 
-        Ok(Self::pa_to_kernel(lower_va.0 as u64) as usize)
+        Ok(va.0)
     }
 
     /// Unmap memory range from the kernel address space
@@ -109,10 +122,9 @@
     /// # Return value
     /// The result of the operation
     pub fn unmap_memory(&self, va: usize, length: usize) -> Result<(), XlatError> {
-        self.xlat.lock().unmap_virtual_address_range(
-            VirtualAddress(Self::kernel_to_pa(va as u64) as usize),
-            length,
-        )
+        self.xlat
+            .lock()
+            .unmap_virtual_address_range(VirtualAddress(va), length)
     }
 
     /// Activate kernel address space mapping
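
With this change the mapping function returns the upper-range virtual
address produced by Xlat directly, and unmap_memory consumes that
address unchanged, so no pa_to_kernel/kernel_to_pa round trip remains.
A hypothetical caller-side sketch (the map_memory name, kernel_space,
pa, and the 0x1000 length are assumptions, not taken from the crate):

    // Assumed usage on a 64-bit target; `kernel_space: KernelSpace`
    // and `pa` (a valid physical address) are hypothetical.
    let va = kernel_space.map_memory(pa, 0x1000, MemoryAccessRights::RW)?;
    assert!(va >= 0xffff_fff0_0000_0000); // VA lies in the upper range
    kernel_space.unmap_memory(va, 0x1000)?;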