Introduce KernelAddressTranslator trait
Add KernelAddressTranslator as a generic parameter of Xlat in order to
decouple Xlat from KernelSpace. The trait translates between physical
addresses and the virtual addresses of the running kernel context, and
Xlat uses it to access the translation tables.
Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: Iaf4189429f21fced9d40e34fb309388165127124
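For reference, a downstream implementation of the trait could look like
the sketch below. The IdentityTranslator name and the crate path in the
import are illustrative assumptions; the tuple-field access mirrors how
lib.rs itself constructs and reads the address types.

    // Hypothetical downstream code; adjust the crate path as needed.
    use xlat::{KernelAddressTranslator, PhysicalAddress, VirtualAddress, Xlat};

    /// Translator for a kernel running with a flat (identity) mapping,
    /// where kernel virtual addresses equal physical addresses.
    struct IdentityTranslator;

    impl KernelAddressTranslator for IdentityTranslator {
        fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress {
            // Assumes the address types expose their raw value as a
            // public tuple field, as used inside lib.rs.
            PhysicalAddress(va.0)
        }

        fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress {
            VirtualAddress(pa.0)
        }
    }

    // The translator is then carried as a type parameter, e.g. with
    // 48-bit virtual addresses:
    type KernelXlat = Xlat<IdentityTranslator, 48>;

With this shape, each environment selects its own translation scheme at
the type level, and Xlat itself stays free of environment-specific
address arithmetic.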
diff --git a/src/lib.rs b/src/lib.rs
index c8abbdd..63f1e4b 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -8,6 +8,7 @@
use core::fmt;
use core::iter::zip;
+use core::marker::PhantomData;
use core::panic;
use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
@@ -20,7 +21,6 @@
use self::descriptor::DescriptorType;
use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
-use self::kernel_space::KernelSpace;
use self::page_pool::{PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};
@@ -142,12 +142,20 @@
pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;
-pub struct Xlat<const VA_BITS: usize> {
+/// Trait for converting between the virtual address space of the running kernel
+/// environment and the physical address space.
+pub trait KernelAddressTranslator {
+ fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress;
+ fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress;
+}
+
+pub struct Xlat<K: KernelAddressTranslator, const VA_BITS: usize> {
base_table: Pages,
page_pool: PagePool,
regions: RegionPool<VirtualRegion>,
regime: TranslationRegime,
granule: TranslationGranule<VA_BITS>,
+ _kernel_address_translator: PhantomData<K>,
}
/// Memory translation table handling
@@ -175,7 +183,7 @@
/// * map block
/// * unmap block
/// * set access rights of block
-impl<const VA_BITS: usize> Xlat<VA_BITS> {
+impl<K: KernelAddressTranslator, const VA_BITS: usize> Xlat<K, VA_BITS> {
pub fn new(
page_pool: PagePool,
address: VirtualAddressRange,
@@ -210,6 +218,7 @@
regions,
regime,
granule,
+ _kernel_address_translator: PhantomData,
}
}
@@ -232,7 +241,7 @@
.allocate_pages(data.len(), Some(self.granule as usize))
.map_err(|e| XlatError::PageAllocationError(e, data.len()))?;
- pages.copy_data_to_page(data);
+ pages.copy_data_to_page::<K>(data);
let pages_length = pages.length();
let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
@@ -265,7 +274,7 @@
.allocate_pages(length, Some(self.granule as usize))
.map_err(|e| XlatError::PageAllocationError(e, length))?;
- pages.zero_init();
+ pages.zero_init::<K>();
let pages_length = pages.length();
let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
@@ -418,7 +427,7 @@
}
// Set translation table
- let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.get_pa().0 as u64);
+ let base_table_pa = self.base_table.get_pa().0 as u64;
match &self.regime {
TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
@@ -616,7 +625,7 @@
block.va,
block.size,
self.granule.initial_lookup_level(),
- unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
+ unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() },
&self.page_pool,
&self.regime,
self.granule,
@@ -675,7 +684,7 @@
)
})?;
- let next_table = unsafe { page.get_as_mut_slice() };
+ let next_table = unsafe { page.get_as_mut_slice::<K, Descriptor>() };
// Fill next level table
let result = Self::set_block_descriptor_recursively(
@@ -692,9 +701,8 @@
if result.is_ok() {
// Set table descriptor if the table is configured properly
- let next_table_pa = PhysicalAddress(KernelSpace::kernel_to_pa(
- next_table.as_ptr() as u64,
- ) as usize);
+ let next_table_pa =
+ K::kernel_to_pa(VirtualAddress(next_table.as_ptr() as usize));
descriptor.set_table_descriptor(level, next_table_pa, None);
} else {
// Release next level table on error and keep invalid descriptor on current level
@@ -724,7 +732,7 @@
)
})?;
- let next_table = unsafe { page.get_as_mut_slice() };
+ let next_table = unsafe { page.get_as_mut_slice::<K, Descriptor>() };
// Explode existing block descriptor into table entries
for exploded_va in VirtualAddressRange::new(
@@ -766,9 +774,8 @@
);
if result.is_ok() {
- let next_table_pa = PhysicalAddress(KernelSpace::kernel_to_pa(
- next_table.as_ptr() as u64,
- ) as usize);
+ let next_table_pa =
+ K::kernel_to_pa(VirtualAddress(next_table.as_ptr() as usize));
// Follow break-before-make sequence
descriptor.set_block_or_invalid_descriptor_to_invalid(level);
@@ -815,7 +822,7 @@
block.va,
block.size,
self.granule.initial_lookup_level(),
- unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
+ unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() },
&self.page_pool,
&self.regime,
self.granule,
@@ -883,10 +890,10 @@
let mut page = unsafe {
let table_pa = descriptor.set_table_descriptor_to_invalid(level);
let next_table = Self::get_table_from_pa_mut(table_pa, granule, level + 1);
- Pages::from_slice(next_table)
+ Pages::from_slice::<K, Descriptor>(next_table)
};
- page.zero_init();
+ page.zero_init::<K>();
page_pool.release_pages(page).unwrap();
}
}
@@ -898,7 +905,7 @@
va,
block_size,
self.granule.initial_lookup_level(),
- unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
+ unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() },
self.granule,
)
}
@@ -955,10 +962,10 @@
granule: TranslationGranule<VA_BITS>,
level: isize,
) -> &'a [Descriptor] {
- let table_va = KernelSpace::pa_to_kernel(pa.0 as u64);
+ let table_va = K::pa_to_kernel(pa);
unsafe {
core::slice::from_raw_parts(
- table_va as *const Descriptor,
+ table_va.0 as *const Descriptor,
granule.entry_count_at_level(level),
)
}
@@ -974,10 +981,10 @@
granule: TranslationGranule<VA_BITS>,
level: isize,
) -> &'a mut [Descriptor] {
- let table_va = KernelSpace::pa_to_kernel(pa.0 as u64);
+ let table_va = K::pa_to_kernel(pa);
unsafe {
core::slice::from_raw_parts_mut(
- table_va as *mut Descriptor,
+ table_va.0 as *mut Descriptor,
granule.entry_count_at_level(level),
)
}
@@ -1050,7 +1057,7 @@
fn invalidate(_regime: &TranslationRegime, _va: Option<VirtualAddress>) {}
}
-impl<const VA_BITS: usize> fmt::Debug for Xlat<VA_BITS> {
+impl<K: KernelAddressTranslator, const VA_BITS: usize> fmt::Debug for Xlat<K, VA_BITS> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("Xlat")
.field("regime", &self.regime)
@@ -1063,7 +1070,7 @@
f,
self.granule.initial_lookup_level(),
0,
- unsafe { self.base_table.get_as_slice() },
+ unsafe { self.base_table.get_as_slice::<K, Descriptor>() },
self.granule,
)?;