Remove KernelSpace usage from Descriptor
Descriptor now simply returns or sets PhysicalAddress instead of
using &[Descriptor]. Converting the physical address to a kernel
space virtual address is now handled by Xlat.
Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: I955c3493a8da424649599889962192a3955f4a5b
diff --git a/src/descriptor.rs b/src/descriptor.rs
index 627799c..b5db7cc 100644
--- a/src/descriptor.rs
+++ b/src/descriptor.rs
@@ -8,7 +8,6 @@
use core::cell::UnsafeCell;
use core::ptr;
-use crate::kernel_space::KernelSpace;
use crate::MemoryAttributesIndex;
use super::address::PhysicalAddress;
@@ -294,19 +293,16 @@
}
/// Set table descriptor
- ///
- /// Safety: The caller has to ensure that the passed next level table has the same life as
- /// the descriptor.
- pub unsafe fn set_table_descriptor(
+ pub fn set_table_descriptor(
&mut self,
level: isize,
- next_level_table: &mut [Descriptor],
+ next_level_table: PhysicalAddress,
next_level_attributes: Option<NextLevelAttributes>,
) {
assert!(level <= 2);
assert!(self.get_descriptor_type(level) != DescriptorType::Table);
- let table_addr = KernelSpace::kernel_to_pa(next_level_table.as_ptr() as u64);
+ let table_addr = next_level_table.0 as u64;
assert_eq!(0, table_addr & !Self::TA_MASK);
let mut raw_desc_value = Self::VALID_BIT | Self::TABLE_BIT | table_addr;
@@ -321,37 +317,11 @@
}
/// Get next level table
- ///
- /// Safety: The returned next level table is based on the address read from the descriptor.
- /// The caller has to ensure that no other references are being used of the table.
- pub unsafe fn get_next_level_table<const VA_BITS: usize>(
- &self,
- granule: TranslationGranule<VA_BITS>,
- level: isize,
- ) -> &[Descriptor] {
+ pub fn get_next_level_table(&self, level: isize) -> PhysicalAddress {
assert!(level <= 2);
assert_eq!(DescriptorType::Table, self.get_descriptor_type(level));
- let table_address =
- KernelSpace::pa_to_kernel(self.get() & Self::TA_MASK) as *const Descriptor;
- core::slice::from_raw_parts(table_address, granule.entry_count_at_level(level + 1))
- }
-
- /// Get mutable next level table
- ///
- /// Safety: The returned next level table is based on the address read from the descriptor.
- /// The caller has to ensure that no other references are being used of the table.
- pub unsafe fn get_next_level_table_mut<const VA_BITS: usize>(
- &mut self,
- granule: TranslationGranule<VA_BITS>,
- level: isize,
- ) -> &mut [Descriptor] {
- assert!(level <= 2);
- assert_eq!(DescriptorType::Table, self.get_descriptor_type(level));
-
- let table_address =
- KernelSpace::pa_to_kernel(self.get() & Self::TA_MASK) as *mut Descriptor;
- core::slice::from_raw_parts_mut(table_address, granule.entry_count_at_level(level + 1))
+ PhysicalAddress((self.get() & Self::TA_MASK) as usize)
}
/// Get next level attributes
@@ -363,21 +333,13 @@
}
/// Set table descriptor to invalid
- ///
- /// **Unsafe:** The returned descriptor reference must be released by the caller, i.e. release
- /// to `PagePool`
- pub unsafe fn set_table_descriptor_to_invalid<const VA_BITS: usize>(
- &mut self,
- granule: TranslationGranule<VA_BITS>,
- level: isize,
- ) -> &mut [Descriptor] {
+ pub fn set_table_descriptor_to_invalid(&mut self, level: isize) -> PhysicalAddress {
assert!(level <= 2);
assert_eq!(DescriptorType::Table, self.get_descriptor_type(level));
- let table_address =
- KernelSpace::pa_to_kernel(self.get() & Self::TA_MASK) as *mut Descriptor;
+ let pa = PhysicalAddress((self.get() & Self::TA_MASK) as usize);
self.set(Self::INVALID_DESCRIPTOR_VALUE);
- core::slice::from_raw_parts_mut(table_address, granule.entry_count_at_level(level + 1))
+ pa
}
/// Get raw descriptor value
@@ -691,94 +653,62 @@
#[test]
#[should_panic]
fn test_descriptor_level3_to_table() {
- let mut next_level_table = [Descriptor {
- cell: UnsafeCell::new(0),
- }];
let mut descriptor = Descriptor {
cell: UnsafeCell::new(0),
};
- unsafe {
- descriptor.set_table_descriptor(3, &mut next_level_table, None);
- }
+ descriptor.set_table_descriptor(3, PhysicalAddress(0), None);
}
#[test]
fn test_descriptor_block_to_table() {
- let next_level_table =
- unsafe { core::slice::from_raw_parts_mut(0x1000 as *mut Descriptor, 512) };
+ let next_level_table = PhysicalAddress(0x1000);
let mut descriptor = Descriptor {
cell: UnsafeCell::new(1),
};
- unsafe {
- descriptor.set_table_descriptor(0, next_level_table, None);
- }
+ descriptor.set_table_descriptor(0, next_level_table, None);
assert_eq!(0x1003, descriptor.get());
}
#[test]
#[should_panic]
- fn test_descriptor_table_invalid_count() {
- let next_level_table =
- unsafe { core::slice::from_raw_parts_mut(0x800 as *mut Descriptor, 511) };
- let mut descriptor = Descriptor {
- cell: UnsafeCell::new(0),
- };
-
- unsafe {
- descriptor.set_table_descriptor(0, next_level_table, None);
- }
- }
-
- #[test]
- #[should_panic]
fn test_descriptor_table_non_aligned() {
- let next_level_table =
- unsafe { core::slice::from_raw_parts_mut(0x800 as *mut Descriptor, 512) };
+ let next_level_table = PhysicalAddress(0x800);
let mut descriptor = Descriptor {
cell: UnsafeCell::new(0),
};
- unsafe {
- descriptor.set_table_descriptor(0, next_level_table, None);
- }
+ descriptor.set_table_descriptor(0, next_level_table, None);
}
#[test]
fn test_descriptor_table() {
- let next_level_table = unsafe {
- core::slice::from_raw_parts_mut(0x0000_000c_ba98_7000 as *mut Descriptor, 512)
- };
+ let next_level_table = PhysicalAddress(0x0000_000c_ba98_7000);
let mut descriptor = Descriptor {
cell: UnsafeCell::new(0),
};
- unsafe {
- descriptor.set_table_descriptor(0, next_level_table, None);
- }
+ descriptor.set_table_descriptor(0, next_level_table, None);
assert_eq!(0x0000_000c_ba98_7003, descriptor.get());
}
#[test]
fn test_descriptor_table_next_level_attr() {
const NEXT_LEVEL_ADDR: u64 = 0x0000_000c_ba98_7000;
- let next_level_table =
- unsafe { core::slice::from_raw_parts_mut(NEXT_LEVEL_ADDR as *mut Descriptor, 512) };
+ let next_level_table = PhysicalAddress(0x0000_000c_ba98_7000);
let mut descriptor = Descriptor {
cell: UnsafeCell::new(0),
};
- unsafe {
- descriptor.set_table_descriptor(
- 0,
- next_level_table,
- Some(NextLevelAttributes {
- ns_table: true,
- ..Default::default()
- }),
- );
- }
+ descriptor.set_table_descriptor(
+ 0,
+ next_level_table,
+ Some(NextLevelAttributes {
+ ns_table: true,
+ ..Default::default()
+ }),
+ );
assert_eq!(NEXT_LEVEL_ADDR | 0x8000_0000_0000_0003, descriptor.get());
}
@@ -788,24 +718,7 @@
let descriptor = Descriptor {
cell: UnsafeCell::new(NEXT_LEVEL_ADDR | 0x8000_0000_0000_0003),
};
- assert_eq!(KernelSpace::pa_to_kernel(NEXT_LEVEL_ADDR), unsafe {
- descriptor
- .get_next_level_table::<36>(TranslationGranule::Granule4k, 0)
- .as_ptr() as u64
- });
- }
-
- #[test]
- fn test_descriptor_table_get_next_level_table_mut() {
- const NEXT_LEVEL_ADDR: u64 = 0x0000_000c_ba98_7000;
- let mut descriptor = Descriptor {
- cell: UnsafeCell::new(NEXT_LEVEL_ADDR | 0x8000_0000_0000_0003),
- };
- assert_eq!(KernelSpace::pa_to_kernel(NEXT_LEVEL_ADDR), unsafe {
- descriptor
- .get_next_level_table_mut::<36>(TranslationGranule::Granule4k, 0)
- .as_ptr() as *mut Descriptor as u64
- });
+ assert_eq!(NEXT_LEVEL_ADDR, descriptor.get_next_level_table(0).0 as u64);
}
#[test]
@@ -829,11 +742,10 @@
let mut descriptor = Descriptor {
cell: UnsafeCell::new(NEXT_LEVEL_ADDR | 0x8000_0000_0000_0003),
};
- assert_eq!(KernelSpace::pa_to_kernel(NEXT_LEVEL_ADDR), unsafe {
- descriptor
- .set_table_descriptor_to_invalid::<36>(TranslationGranule::Granule4k, 0)
- .as_ptr() as *mut Descriptor as u64
- });
+ assert_eq!(
+ NEXT_LEVEL_ADDR,
+ descriptor.set_table_descriptor_to_invalid(0).0 as u64
+ );
assert_eq!(0, descriptor.get());
}
diff --git a/src/lib.rs b/src/lib.rs
index 202776d..b5f95d6 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -516,15 +516,15 @@
)?;
}
DescriptorType::Table => {
- let next_level_table =
- unsafe { descriptor.get_next_level_table(granule, level) };
+ let table_pa = descriptor.get_next_level_table(level);
writeln!(
f,
"{} {:#010x} Table -> {:#010x}",
- level_prefix,
- va,
- next_level_table.as_ptr() as usize
+ level_prefix, va, table_pa.0
)?;
+
+ let next_level_table =
+ unsafe { Self::get_table_from_pa(table_pa, granule, level + 1) };
Self::dump_table(f, level + 1, va, next_level_table, granule)?;
}
_ => {}
@@ -672,7 +672,10 @@
if result.is_ok() {
// Set table descriptor if the table is configured properly
- unsafe { descriptor.set_table_descriptor(level, next_table, None) };
+ let next_table_pa = PhysicalAddress(KernelSpace::kernel_to_pa(
+ next_table.as_ptr() as u64,
+ ) as usize);
+ descriptor.set_table_descriptor(level, next_table_pa, None);
} else {
// Release next level table on error and keep invalid descriptor on current level
page_pool.release_pages(page).unwrap();
@@ -743,12 +746,16 @@
);
if result.is_ok() {
+ let next_table_pa = PhysicalAddress(KernelSpace::kernel_to_pa(
+ next_table.as_ptr() as u64,
+ ) as usize);
+
// Follow break-before-make sequence
descriptor.set_block_or_invalid_descriptor_to_invalid(level);
Self::invalidate(regime, Some(current_va));
// Set table descriptor if the table is configured properly
- unsafe { descriptor.set_table_descriptor(level, next_table, None) };
+ descriptor.set_table_descriptor(level, next_table_pa, None);
} else {
// Release next level table on error and keep invalid descriptor on current level
page_pool.release_pages(page).unwrap();
@@ -756,17 +763,27 @@
result
}
- DescriptorType::Table => Self::set_block_descriptor_recursively(
- attributes,
- pa,
- va.mask_for_level(granule, level),
- block_size,
- level + 1,
- unsafe { descriptor.get_next_level_table_mut(granule, level) },
- page_pool,
- regime,
- granule,
- ),
+ DescriptorType::Table => {
+ let next_level_table = unsafe {
+ Self::get_table_from_pa_mut(
+ descriptor.get_next_level_table(level),
+ granule,
+ level + 1,
+ )
+ };
+
+ Self::set_block_descriptor_recursively(
+ attributes,
+ pa,
+ va.mask_for_level(granule, level),
+ block_size,
+ level + 1,
+ next_level_table,
+ page_pool,
+ regime,
+ granule,
+ )
+ }
}
}
@@ -823,8 +840,14 @@
panic!("Cannot remove block with different block size");
}
DescriptorType::Table => {
- let next_level_table =
- unsafe { descriptor.get_next_level_table_mut(granule, level) };
+ let next_level_table = unsafe {
+ Self::get_table_from_pa_mut(
+ descriptor.get_next_level_table(level),
+ granule,
+ level + 1,
+ )
+ };
+
Self::remove_block_descriptor_recursively(
va.mask_for_level(granule, level),
block_size,
@@ -838,10 +861,11 @@
if next_level_table.iter().all(|d| !d.is_valid()) {
// Empty table
let mut page = unsafe {
- Pages::from_slice(
- descriptor.set_table_descriptor_to_invalid(granule, level),
- )
+ let table_pa = descriptor.set_table_descriptor_to_invalid(level);
+ let next_table = Self::get_table_from_pa_mut(table_pa, granule, level + 1);
+ Pages::from_slice(next_table)
};
+
page.zero_init();
page_pool.release_pages(page).unwrap();
}
@@ -881,13 +905,61 @@
DescriptorType::Block => {
panic!("Cannot split existing block descriptor to table");
}
- DescriptorType::Table => Self::walk_descriptors(
- va.mask_for_level(granule, level),
- block_size,
- level + 1,
- unsafe { descriptor.get_next_level_table_mut(granule, level) },
- granule,
- ),
+ DescriptorType::Table => {
+ let next_level_table = unsafe {
+ Self::get_table_from_pa_mut(
+ descriptor.get_next_level_table(level),
+ granule,
+ level + 1,
+ )
+ };
+
+ Self::walk_descriptors(
+ va.mask_for_level(granule, level),
+ block_size,
+ level + 1,
+ next_level_table,
+ granule,
+ )
+ }
+ }
+ }
+
+ /// Create a translation table descriptor slice from a physical address.
+ ///
+ /// # Safety
+ /// The caller must ensure that the physical address points to a valid translation table and
+ /// that it is mapped into the virtual address space of the running kernel context.
+ unsafe fn get_table_from_pa<'a>(
+ pa: PhysicalAddress,
+ granule: TranslationGranule<VA_BITS>,
+ level: isize,
+ ) -> &'a [Descriptor] {
+ let table_va = KernelSpace::pa_to_kernel(pa.0 as u64);
+ unsafe {
+ core::slice::from_raw_parts(
+ table_va as *const Descriptor,
+ granule.entry_count_at_level(level),
+ )
+ }
+ }
+
+ /// Create a mutable translation table descriptor slice from a physical address.
+ ///
+ /// # Safety
+ /// The caller must ensure that the physical address points to a valid translation table and
+ /// that it is mapped into the virtual address space of the running kernel context.
+ unsafe fn get_table_from_pa_mut<'a>(
+ pa: PhysicalAddress,
+ granule: TranslationGranule<VA_BITS>,
+ level: isize,
+ ) -> &'a mut [Descriptor] {
+ let table_va = KernelSpace::pa_to_kernel(pa.0 as u64);
+ unsafe {
+ core::slice::from_raw_parts_mut(
+ table_va as *mut Descriptor,
+ granule.entry_count_at_level(level),
+ )
}
}