Handle 16k and 64k translation granules
Enable Xlat to handle 16k and 64k translation granules along with
different VA bit counts.
Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: Iab4fe066e813d5b75a5a6d45ba8498867cc5c541
diff --git a/src/lib.rs b/src/lib.rs
index f5b1962..fec87cc 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -11,7 +11,6 @@
use core::{fmt, panic};
use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
-use alloc::boxed::Box;
use alloc::format;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
@@ -24,7 +23,7 @@
use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
use self::kernel_space::KernelSpace;
-use self::page_pool::{Page, PagePool, Pages};
+use self::page_pool::{PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};
@@ -36,20 +35,6 @@
mod region;
mod region_pool;
-/// The first level of memory descriptors table which
-#[repr(C, align(512))]
-pub struct BaseTable {
- pub descriptors: [Descriptor; 64],
-}
-
-impl BaseTable {
- pub fn new() -> Self {
- BaseTable {
- descriptors: core::array::from_fn(|_| Descriptor::default()),
- }
- }
-}
-
/// Translation table error type
#[derive(Debug)]
pub enum XlatError {
@@ -129,13 +114,12 @@
struct Block {
pa: PhysicalAddress,
va: VirtualAddress,
- granule: usize,
+ size: usize,
}
impl Block {
- fn new(pa: PhysicalAddress, va: VirtualAddress, granule: usize) -> Self {
- assert!(Xlat::GRANULE_SIZES.contains(&granule));
- Self { pa, va, granule }
+ fn new(pa: PhysicalAddress, va: VirtualAddress, size: usize) -> Self {
+ Self { pa, va, size }
}
}
@@ -144,7 +128,7 @@
f.debug_struct("Block")
.field("pa", &format_args!("{:#010x}", self.pa.0))
.field("va", &format_args!("{:#010x}", self.va.0))
- .field("granule", &format_args!("{:#010x}", self.granule))
+ .field("size", &format_args!("{:#010x}", self.size))
.finish()
}
}
@@ -164,11 +148,12 @@
pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;
-pub struct Xlat {
- base_table: Box<BaseTable>,
+pub struct Xlat<const VA_BITS: usize> {
+ base_table: Pages,
page_pool: PagePool,
regions: RegionPool<VirtualRegion>,
regime: TranslationRegime,
+ granule: TranslationGranule<VA_BITS>,
}
/// Memory translation table handling
@@ -196,23 +181,32 @@
/// * map block
/// * unmap block
/// * set access rights of block
-impl Xlat {
- pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];
-
+impl<const VA_BITS: usize> Xlat<VA_BITS> {
pub fn new(
page_pool: PagePool,
address: VirtualAddressRange,
regime: TranslationRegime,
+ granule: TranslationGranule<VA_BITS>,
) -> Self {
+ let initial_lookup_level = granule.initial_lookup_level();
+
+ let base_table = page_pool
+ .allocate_pages(
+ granule.table_size::<Descriptor>(initial_lookup_level),
+ Some(granule.table_alignment::<Descriptor>(initial_lookup_level)),
+ )
+ .unwrap();
+
let mut regions = RegionPool::new();
regions
.add(VirtualRegion::new(address.start, address.len().unwrap()))
.unwrap();
Self {
- base_table: Box::new(BaseTable::new()),
+ base_table,
page_pool,
regions,
regime,
+ granule,
}
}
@@ -230,13 +224,16 @@
data: &[u8],
access_rights: MemoryAccessRights,
) -> Result<VirtualAddress, XlatError> {
- let mut pages = self.page_pool.allocate_pages(data.len()).map_err(|e| {
- XlatError::AllocationError(format!(
- "Cannot allocate pages for {} bytes ({:?})",
- data.len(),
- e
- ))
- })?;
+ let mut pages = self
+ .page_pool
+ .allocate_pages(data.len(), Some(self.granule as usize))
+ .map_err(|e| {
+ XlatError::AllocationError(format!(
+ "Cannot allocate pages for {} bytes ({:?})",
+ data.len(),
+ e
+ ))
+ })?;
pages.copy_data_to_page(data);
@@ -266,9 +263,14 @@
length: usize,
access_rights: MemoryAccessRights,
) -> Result<VirtualAddress, XlatError> {
- let mut pages = self.page_pool.allocate_pages(length).map_err(|e| {
- XlatError::AllocationError(format!("Cannot allocate pages for {length} bytes ({e:?})"))
- })?;
+ let mut pages = self
+ .page_pool
+ .allocate_pages(length, Some(self.granule as usize))
+ .map_err(|e| {
+ XlatError::AllocationError(format!(
+ "Cannot allocate pages for {length} bytes ({e:?})"
+ ))
+ })?;
pages.zero_init();
@@ -387,7 +389,43 @@
/// references. After activation the caller must ensure that there are no
/// active references when unmapping memory.
pub unsafe fn activate(&self) {
- let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.descriptors.as_ptr() as u64);
+ // Select translation granule
+ let is_tg0 = match &self.regime {
+ TranslationRegime::EL1_0(RegimeVaRange::Lower, _)
+ | TranslationRegime::EL2
+ | TranslationRegime::EL3 => true,
+ TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => false,
+ #[cfg(target_feature = "vh")]
+ TranslationRegime::EL2_0(RegimeVaRange::Lower, _) => true,
+ #[cfg(target_feature = "vh")]
+ TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => false,
+ };
+
+ #[cfg(target_arch = "aarch64")]
+ if is_tg0 {
+ self.modify_tcr(|tcr| {
+ let tg0 = match self.granule {
+ TranslationGranule::Granule4k => 0b00,
+ TranslationGranule::Granule16k => 0b10,
+ TranslationGranule::Granule64k => 0b01,
+ };
+
+ (tcr & !(3 << 14)) | (tg0 << 14)
+ });
+ } else {
+ self.modify_tcr(|tcr| {
+ let tg1 = match self.granule {
+ TranslationGranule::Granule4k => 0b10,
+ TranslationGranule::Granule16k => 0b01,
+ TranslationGranule::Granule64k => 0b11,
+ };
+
+ (tcr & !(3 << 30)) | (tg1 << 30)
+ });
+ }
+
+ // Set translation table
+ let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.get_pa().0 as u64);
#[cfg(target_arch = "aarch64")]
match &self.regime {
@@ -420,13 +458,66 @@
}
}
+ /// Modifies the TCR register of the selected regime of the instance.
+ #[cfg(target_arch = "aarch64")]
+ unsafe fn modify_tcr<F>(&self, f: F)
+ where
+ F: Fn(u64) -> u64,
+ {
+ let mut tcr: u64;
+
+ match &self.regime {
+ TranslationRegime::EL1_0(_, _) => core::arch::asm!(
+ "mrs {0}, tcr_el1
+ isb",
+ out(reg) tcr),
+ #[cfg(target_feature = "vh")]
+ TranslationRegime::EL2_0(_, _) => core::arch::asm!(
+ "mrs {0}, tcr_el2
+ isb",
+ out(reg) tcr),
+ TranslationRegime::EL2 => core::arch::asm!(
+ "mrs {0}, tcr_el2
+ isb",
+ out(reg) tcr),
+ TranslationRegime::EL3 => core::arch::asm!(
+ "mrs {0}, tcr_el3
+ isb",
+ out(reg) tcr),
+ }
+
+ tcr = f(tcr);
+
+ match &self.regime {
+ TranslationRegime::EL1_0(_, _) => core::arch::asm!(
+ "msr tcr_el1, {0}
+ isb",
+ in(reg) tcr),
+ #[cfg(target_feature = "vh")]
+ TranslationRegime::EL2_0(_, _) => core::arch::asm!(
+ "msr tcr_el2, {0}
+ isb",
+ in(reg) tcr),
+ TranslationRegime::EL2 => core::arch::asm!(
+ "msr tcr_el2, {0}
+ isb",
+ in(reg) tcr),
+ TranslationRegime::EL3 => core::arch::asm!(
+ "msr tcr_el3, {0}
+ isb",
+ in(reg) tcr),
+ }
+ }
+
/// Prints the translation tables to debug console recursively
pub fn print(&self) {
- debug!(
- "Xlat table -> {:#010x}",
- self.base_table.descriptors.as_ptr() as u64
+ debug!("Xlat table -> {:#010x}", self.base_table.get_pa().0 as u64);
+ Self::print_table(
+ self.granule.initial_lookup_level(),
+ 0,
+ unsafe { self.base_table.get_as_slice() },
+ self.granule,
);
- Self::print_table(1, 0, &self.base_table.descriptors);
}
/// Prints a single translation table to the debug console
@@ -434,30 +525,36 @@
/// * level: Level of the translation table
/// * va: Base virtual address of the table
/// * table: Table entries
- pub fn print_table(level: usize, va: usize, table: &[Descriptor]) {
+ pub fn print_table(
+ level: isize,
+ va: usize,
+ table: &[Descriptor],
+ granule: TranslationGranule<VA_BITS>,
+ ) {
let level_prefix = match level {
0 | 1 => "|-",
2 => "| |-",
_ => "| | |-",
};
- for (descriptor, va) in zip(table, (va..).step_by(Self::GRANULE_SIZES[level])) {
+ for (descriptor, va) in zip(table, (va..).step_by(granule.block_size_at_level(level))) {
match descriptor.get_descriptor_type(level) {
DescriptorType::Block => debug!(
"{} {:#010x} Block -> {:#010x}",
level_prefix,
va,
- descriptor.get_block_output_address(level).0
+ descriptor.get_block_output_address(granule, level).0
),
DescriptorType::Table => {
- let next_level_table = unsafe { descriptor.get_next_level_table(level) };
+ let next_level_table =
+ unsafe { descriptor.get_next_level_table(granule, level) };
debug!(
"{} {:#010x} Table -> {:#010x}",
level_prefix,
va,
next_level_table.as_ptr() as usize
);
- Self::print_table(level + 1, va, next_level_table);
+ Self::print_table(level + 1, va, next_level_table, granule);
}
_ => {}
}
@@ -475,7 +572,12 @@
region: VirtualRegion,
attributes: Attributes,
) -> Result<VirtualAddress, XlatError> {
- let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
+ let blocks = Self::split_region_to_blocks(
+ region.get_pa(),
+ region.base(),
+ region.length(),
+ self.granule,
+ )?;
for block in blocks {
self.map_block(block, attributes.clone());
}
@@ -488,7 +590,12 @@
/// # Arguments
/// * region: Memory region object
fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
- let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
+ let blocks = Self::split_region_to_blocks(
+ region.get_pa(),
+ region.base(),
+ region.length(),
+ self.granule,
+ )?;
for block in blocks {
self.unmap_block(block);
}
@@ -510,14 +617,16 @@
/// * pa: Physical address
/// * va: Virtual address
/// * length: Region size in bytes
+ /// * granule: Translation granule
/// # Return value
/// * Vector of granule sized blocks
fn split_region_to_blocks(
mut pa: PhysicalAddress,
mut va: VirtualAddress,
mut length: usize,
+ granule: TranslationGranule<VA_BITS>,
) -> Result<Vec<Block>, XlatError> {
- let min_granule_mask = Self::GRANULE_SIZES.last().unwrap() - 1;
+ let min_granule_mask = granule.block_size_at_level(3) - 1;
if length == 0 {
return Err(XlatError::InvalidParameterError(
@@ -527,21 +636,25 @@
if (pa.0 | va.0 | length) & min_granule_mask != 0 {
return Err(XlatError::InvalidParameterError(format!(
- "Addresses and length must be aligned {:#08x} {:#08x} {:#x}",
- pa.0, va.0, length
+ "Addresses and length must be aligned {:#08x} {:#08x} {:#x} {:#x}",
+ pa.0, va.0, length, min_granule_mask
)));
}
let mut pages = Vec::new();
while length > 0 {
- for granule in &Self::GRANULE_SIZES {
- if (pa.0 | va.0) & (*granule - 1) == 0 && length >= *granule {
- pages.push(Block::new(pa, va, *granule));
- pa = pa.add_offset(*granule).ok_or(XlatError::Overflow)?;
- va = va.add_offset(*granule).ok_or(XlatError::Overflow)?;
+ let initial_lookup_level = granule.initial_lookup_level();
- length -= *granule;
+ for block_size in
+ (initial_lookup_level..=3).map(|level| granule.block_size_at_level(level))
+ {
+ if (pa.0 | va.0) & (block_size - 1) == 0 && length >= block_size {
+ pages.push(Block::new(pa, va, block_size));
+ pa = pa.add_offset(block_size).ok_or(XlatError::Overflow)?;
+ va = va.add_offset(block_size).ok_or(XlatError::Overflow)?;
+
+ length -= block_size;
break;
}
}
@@ -559,11 +672,12 @@
attributes,
block.pa,
block.va,
- block.granule,
- 1,
- self.base_table.descriptors.as_mut_slice(),
+ block.size,
+ self.granule.initial_lookup_level(),
+ unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
&self.page_pool,
&self.regime,
+ self.granule,
);
}
@@ -573,99 +687,122 @@
/// * attributes: Memory block's permssions, flags
/// * pa: Physical address
/// * va: Virtual address
- /// * granule: Translation granule in bytes
+ /// * block_size: The block size in bytes
/// * level: Translation table level
/// * table: Translation table on the given level
/// * page_pool: Page pool where the function can allocate pages for the translation tables
+ /// * regime: Translation regime
+ /// * granule: Translation granule
#[allow(clippy::too_many_arguments)]
fn set_block_descriptor_recursively(
attributes: Attributes,
pa: PhysicalAddress,
va: VirtualAddress,
- granule: usize,
- level: usize,
+ block_size: usize,
+ level: isize,
table: &mut [Descriptor],
page_pool: &PagePool,
regime: &TranslationRegime,
+ granule: TranslationGranule<VA_BITS>,
) {
// Get descriptor of the current level
- let descriptor = &mut table[va.get_level_index(level)];
+ let descriptor = &mut table[va.get_level_index(granule, level)];
// We reached the required granule level
- if Self::GRANULE_SIZES[level] == granule {
+ if granule.block_size_at_level(level) == block_size {
// Follow break-before-make sequence
descriptor.set_block_or_invalid_descriptor_to_invalid(level);
Self::invalidate(regime, Some(va));
- descriptor.set_block_descriptor(level, pa, attributes);
+ descriptor.set_block_descriptor(granule, level, pa, attributes);
return;
}
// Need to iterate forward
match descriptor.get_descriptor_type(level) {
DescriptorType::Invalid => {
- let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
+ let mut page = page_pool
+ .allocate_pages(
+ granule.table_size::<Descriptor>(level + 1),
+ Some(granule.table_alignment::<Descriptor>(level + 1)),
+ )
+ .unwrap();
unsafe {
- let next_table = page.get_as_slice();
+ let next_table = page.get_as_mut_slice();
descriptor.set_table_descriptor(level, next_table, None);
}
Self::set_block_descriptor_recursively(
attributes,
pa,
- va.mask_for_level(level),
- granule,
+ va.mask_for_level(granule, level),
+ block_size,
level + 1,
- unsafe { descriptor.get_next_level_table_mut(level) },
+ unsafe { descriptor.get_next_level_table_mut(granule, level) },
page_pool,
regime,
+ granule,
)
}
DescriptorType::Block => {
// Saving current descriptor details
- let current_va = va.mask_for_level(level);
- let current_pa = descriptor.get_block_output_address(level);
+ let current_va = va.mask_for_level(granule, level);
+ let current_pa = descriptor.get_block_output_address(granule, level);
let current_attributes = descriptor.get_block_attributes(level);
// Replace block descriptor by table descriptor
- let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
+
+ // Follow break-before-make sequence
+ descriptor.set_block_or_invalid_descriptor_to_invalid(level);
+ Self::invalidate(regime, Some(current_va));
+
+ let mut page = page_pool
+ .allocate_pages(
+ granule.table_size::<Descriptor>(level + 1),
+ Some(granule.table_alignment::<Descriptor>(level + 1)),
+ )
+ .unwrap();
unsafe {
- let next_table = page.get_as_slice();
+ let next_table = page.get_as_mut_slice();
descriptor.set_table_descriptor(level, next_table, None);
}
// Explode block descriptor to table entries
for exploded_va in VirtualAddressRange::new(
current_va,
- current_va.add_offset(Self::GRANULE_SIZES[level]).unwrap(),
+ current_va
+ .add_offset(granule.block_size_at_level(level))
+ .unwrap(),
)
- .step_by(Self::GRANULE_SIZES[level + 1])
+ .step_by(granule.block_size_at_level(level + 1))
{
let offset = exploded_va.diff(current_va).unwrap();
Self::set_block_descriptor_recursively(
current_attributes.clone(),
current_pa.add_offset(offset).unwrap(),
- exploded_va.mask_for_level(level),
- Self::GRANULE_SIZES[level + 1],
+ exploded_va.mask_for_level(granule, level),
+ granule.block_size_at_level(level + 1),
level + 1,
- unsafe { descriptor.get_next_level_table_mut(level) },
+ unsafe { descriptor.get_next_level_table_mut(granule, level) },
page_pool,
regime,
+ granule,
)
}
// Invoke self to continue recursion on the newly created level
Self::set_block_descriptor_recursively(
- attributes, pa, va, granule, level, table, page_pool, regime,
+ attributes, pa, va, block_size, level, table, page_pool, regime, granule,
);
}
DescriptorType::Table => Self::set_block_descriptor_recursively(
attributes,
pa,
- va.mask_for_level(level),
- granule,
+ va.mask_for_level(granule, level),
+ block_size,
level + 1,
- unsafe { descriptor.get_next_level_table_mut(level) },
+ unsafe { descriptor.get_next_level_table_mut(granule, level) },
page_pool,
regime,
+ granule,
),
}
}
@@ -676,11 +813,12 @@
fn unmap_block(&mut self, block: Block) {
Self::remove_block_descriptor_recursively(
block.va,
- block.granule,
- 1,
- self.base_table.descriptors.as_mut_slice(),
+ block.size,
+ self.granule.initial_lookup_level(),
+ unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
&self.page_pool,
&self.regime,
+ self.granule,
);
}
@@ -688,23 +826,26 @@
/// become empty during the removal process.
/// # Arguments
/// * va: Virtual address
- /// * granule: Translation granule in bytes
+ /// * block_size: Translation block size in bytes
/// * level: Translation table level
/// * table: Translation table on the given level
/// * page_pool: Page pool where the function can release the pages of empty tables
+ /// * regime: Translation regime
+ /// * granule: Translation granule
fn remove_block_descriptor_recursively(
va: VirtualAddress,
- granule: usize,
- level: usize,
+ block_size: usize,
+ level: isize,
table: &mut [Descriptor],
page_pool: &PagePool,
regime: &TranslationRegime,
+ granule: TranslationGranule<VA_BITS>,
) {
// Get descriptor of the current level
- let descriptor = &mut table[va.get_level_index(level)];
+ let descriptor = &mut table[va.get_level_index(granule, level)];
- // We reached the required granule level
- if Self::GRANULE_SIZES[level] == granule {
+ // We reached the required level with the matching block size
+ if granule.block_size_at_level(level) == block_size {
descriptor.set_block_descriptor_to_invalid(level);
Self::invalidate(regime, Some(va));
return;
@@ -716,23 +857,27 @@
panic!("Cannot remove block from non-existing table");
}
DescriptorType::Block => {
- panic!("Cannot remove block with different granule");
+ panic!("Cannot remove block with different block size");
}
DescriptorType::Table => {
- let next_level_table = unsafe { descriptor.get_next_level_table_mut(level) };
+ let next_level_table =
+ unsafe { descriptor.get_next_level_table_mut(granule, level) };
Self::remove_block_descriptor_recursively(
- va.mask_for_level(level),
- granule,
+ va.mask_for_level(granule, level),
+ block_size,
level + 1,
next_level_table,
page_pool,
regime,
+ granule,
);
if next_level_table.iter().all(|d| !d.is_valid()) {
// Empty table
let mut page = unsafe {
- Pages::from_slice(descriptor.set_table_descriptor_to_invalid(level))
+ Pages::from_slice(
+ descriptor.set_table_descriptor_to_invalid(granule, level),
+ )
};
page.zero_init();
page_pool.release_pages(page).unwrap();
@@ -741,20 +886,27 @@
}
}
- fn get_descriptor(&mut self, va: VirtualAddress, granule: usize) -> &mut Descriptor {
- Self::walk_descriptors(va, granule, 0, &mut self.base_table.descriptors)
+ fn get_descriptor(&mut self, va: VirtualAddress, block_size: usize) -> &mut Descriptor {
+ Self::walk_descriptors(
+ va,
+ block_size,
+ self.granule.initial_lookup_level(),
+ unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
+ self.granule,
+ )
}
fn walk_descriptors(
va: VirtualAddress,
- granule: usize,
- level: usize,
+ block_size: usize,
+ level: isize,
table: &mut [Descriptor],
+ granule: TranslationGranule<VA_BITS>,
) -> &mut Descriptor {
// Get descriptor of the current level
- let descriptor = &mut table[va.get_level_index(level)];
+ let descriptor = &mut table[va.get_level_index(granule, level)];
- if Self::GRANULE_SIZES[level] == granule {
+ if granule.block_size_at_level(level) == block_size {
return descriptor;
}
@@ -766,11 +918,13 @@
DescriptorType::Block => {
panic!("Cannot split existing block descriptor to table");
}
- DescriptorType::Table => {
- Self::walk_descriptors(va.mask_for_level(level), granule, level + 1, unsafe {
- descriptor.get_next_level_table_mut(level)
- })
- }
+ DescriptorType::Table => Self::walk_descriptors(
+ va.mask_for_level(granule, level),
+ block_size,
+ level + 1,
+ unsafe { descriptor.get_next_level_table_mut(granule, level) },
+ granule,
+ ),
}
}
@@ -842,16 +996,19 @@
mod tests {
use super::*;
- fn make_block(pa: usize, va: usize, granule: usize) -> Block {
- Block::new(PhysicalAddress(pa), VirtualAddress(va), granule)
+ type TestXlat = Xlat<36>;
+
+ fn make_block(pa: usize, va: usize, size: usize) -> Block {
+ Block::new(PhysicalAddress(pa), VirtualAddress(va), size)
}
#[test]
fn test_split_to_pages() {
- let pages = Xlat::split_region_to_blocks(
+ let pages = TestXlat::split_region_to_blocks(
PhysicalAddress(0x3fff_c000),
VirtualAddress(0x3fff_c000),
0x4020_5000,
+ TranslationGranule::Granule4k,
)
.unwrap();
assert_eq!(make_block(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
@@ -865,10 +1022,11 @@
#[test]
fn test_split_to_pages_unaligned() {
- let pages = Xlat::split_region_to_blocks(
+ let pages = TestXlat::split_region_to_blocks(
PhysicalAddress(0x3fff_c000),
VirtualAddress(0x3f20_0000),
0x200000,
+ TranslationGranule::Granule4k,
)
.unwrap();
for (i, block) in pages.iter().enumerate().take(512) {