Add translation table library
Add the AArch64 MMU translation table handler component.
Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: Ief463cb783e1b8f825d8be37bb42988992879e68
diff --git a/src/lib.rs b/src/lib.rs
index 4e11d95..4986ceb 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,4 +1,736 @@
// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0
-#![cfg_attr(not(test), no_std)]
\ No newline at end of file
+#![allow(dead_code)]
+#![allow(non_camel_case_types)]
+#![cfg_attr(not(test), no_std)]
+
+extern crate alloc;
+
+use core::arch::asm;
+use core::iter::zip;
+use core::{fmt, panic};
+
+use alloc::boxed::Box;
+use alloc::format;
+use alloc::string::{String, ToString};
+use alloc::vec::Vec;
+use log::debug;
+
+use bitflags::bitflags;
+use packed_struct::prelude::*;
+
+use self::descriptor::DescriptorType;
+
+use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
+use self::kernel_space::KernelSpace;
+use self::page_pool::{Page, PagePool, Pages};
+use self::region::{PhysicalRegion, VirtualRegion};
+use self::region_pool::{Region, RegionPool, RegionPoolError};
+
+mod descriptor;
+pub mod kernel_space;
+pub mod page_pool;
+mod region;
+mod region_pool;
+
+/// The first level of the memory descriptor tables, i.e. the base table of the translation
+#[repr(C, align(512))]
+pub struct BaseTable {
+ pub descriptors: [Descriptor; 64],
+}
+
+impl BaseTable {
+ pub fn new() -> Self {
+ BaseTable {
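+            // A zeroed descriptor is of the invalid type, so an all-zero u64 array can be
+            // reinterpreted as a table of 64 invalid entries.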
+ descriptors: unsafe { core::mem::transmute([0u64; 64]) },
+ }
+ }
+}
+
+/// Translation table error type
+#[derive(Debug)]
+pub enum XlatError {
+ InvalidParameterError(String),
+ AllocationError(String),
+ AlignmentError(String),
+ Overflow,
+ InvalidOperation(String),
+ Overlap,
+ NotFound,
+ RegionPoolError(RegionPoolError),
+}
+
+/// Memory attribute indices
+///
+/// MAIR_EL1 must be configured with matching attribute encodings in startup.s.
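+///
+/// For illustration only (the authoritative values live in startup.s): index 0 as Device-nGnRnE
+/// corresponds to the MAIR attribute encoding 0x00 and index 1 as Normal Inner/Outer Write-Back
+/// Write-Allocate to 0xFF, i.e. `MAIR_EL1 = 0x0000_0000_0000_ff00`.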
+#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
+pub enum MemoryAttributesIndex {
+ #[default]
+ Device_nGnRnE = 0x00,
+ Normal_IWBWA_OWBWA = 0x01,
+}
+
+bitflags! {
+ #[derive(Debug, Clone, Copy)]
+ pub struct MemoryAccessRights : u32 {
+ const R = 0b00000001;
+ const W = 0b00000010;
+ const X = 0b00000100;
+ const NS = 0b00001000;
+
+ const RW = Self::R.bits() | Self::W.bits();
+ const RX = Self::R.bits() | Self::X.bits();
+ const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();
+
+ const USER = 0b00010000;
+ const DEVICE = 0b00100000;
+ }
+}
+
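+/// Converts memory access rights into translation table descriptor attributes.
+///
+/// For example (derived from the mapping below), `MemoryAccessRights::RW | MemoryAccessRights::USER`
+/// yields `DataAccessPermissions::ReadWrite_ReadWrite` with both `uxn` and `pxn` set, i.e. a
+/// user-accessible read-write mapping that is not executable at any exception level.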
+impl From<MemoryAccessRights> for Attributes {
+ fn from(access_rights: MemoryAccessRights) -> Self {
+ let data_access_permissions = match (
+ access_rights.contains(MemoryAccessRights::USER),
+ access_rights.contains(MemoryAccessRights::W),
+ ) {
+ (false, false) => DataAccessPermissions::ReadOnly_None,
+ (false, true) => DataAccessPermissions::ReadWrite_None,
+ (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
+ (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
+ };
+
+ let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
+ MemoryAttributesIndex::Device_nGnRnE
+ } else {
+ MemoryAttributesIndex::Normal_IWBWA_OWBWA
+ };
+
+ Attributes {
+ uxn: !access_rights.contains(MemoryAccessRights::X)
+ || !access_rights.contains(MemoryAccessRights::USER),
+ pxn: !access_rights.contains(MemoryAccessRights::X)
+ || access_rights.contains(MemoryAccessRights::USER),
+ contiguous: false,
+ not_global: true,
+ access_flag: true,
+ shareability: Shareability::NonShareable,
+ data_access_permissions,
+ non_secure: access_rights.contains(MemoryAccessRights::NS),
+ mem_attr_index,
+ }
+ }
+}
+
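+/// A memory block that is aligned to and sized as one of the supported granules, i.e. it can be
+/// mapped by a single translation table entry.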
+#[derive(PartialEq)]
+struct Block {
+ pa: usize,
+ va: usize,
+ granule: usize,
+}
+
+impl Block {
+ fn new(pa: usize, va: usize, granule: usize) -> Self {
+ assert!(Xlat::GRANULE_SIZES.contains(&granule));
+ Self { pa, va, granule }
+ }
+}
+
+impl fmt::Debug for Block {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Block")
+ .field("pa", &format_args!("{:#010x}", self.pa))
+ .field("va", &format_args!("{:#010x}", self.va))
+ .field("granule", &format_args!("{:#010x}", self.granule))
+ .finish()
+ }
+}
+
+pub struct Xlat {
+ base_table: Box<BaseTable>,
+ page_pool: PagePool,
+ regions: RegionPool<VirtualRegion>,
+}
+
+/// Memory translation table handling
+/// # High level interface
+/// * allocate and map zero initialized region (with or without VA)
+/// * allocate and map memory region and load contents (with or without VA)
+/// * map memory region by PA (with or without VA)
+/// * unmap memory region by PA
+/// * query PA by VA
+/// * set access rights of mapped memory areas
+/// * activate mapping (see the example below)
+///
+/// # Debug features
+/// * print translation table details
+///
+/// # Region level interface
+/// * map regions
+/// * unmap region
+/// * find a mapped region which contains a given address range
+/// * find empty area for region
+/// * set access rights for a region
+/// * create blocks by region
+///
+/// # Block level interface
+/// * map block
+/// * unmap block
+/// * set access rights of block
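+///
+/// # Example
+/// A minimal usage sketch (assuming a `PagePool` instance constructed elsewhere; the snippet is
+/// illustrative and not compiled as a doc test):
+/// ```ignore
+/// let mut xlat = Xlat::new(page_pool);
+/// let va = xlat
+///     .allocate_zero_init_range(None, 0x2000, MemoryAccessRights::RW)
+///     .unwrap();
+/// xlat.activate(0);
+/// xlat.unmap_virtual_address_range(va, 0x2000).unwrap();
+/// ```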
+impl Xlat {
+ const BASE_VA: usize = 0x4000_0000;
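+    /// Block sizes of the translation table levels with a 4 KiB granule: the level 0 entry is a
+    /// placeholder, while levels 1-3 map 1 GiB, 2 MiB and 4 KiB blocks respectively.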
+ pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];
+
+ pub fn new(page_pool: PagePool) -> Self {
+ let mut regions = RegionPool::new();
+ regions
+ .add(VirtualRegion::new(
+ Self::BASE_VA,
+ 0x1_0000_0000 - Self::BASE_VA,
+ ))
+ .unwrap();
+ Self {
+ base_table: Box::new(BaseTable::new()),
+ page_pool,
+ regions,
+ }
+ }
+
+    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
+    /// the initial data
+ /// # Arguments
+ /// * va: Virtual address of the memory area
+ /// * data: Data to be loaded to the memory area
+ /// * access_rights: Memory access rights of the area
+ /// # Return value
+ /// * Virtual address of the mapped memory
+ pub fn allocate_initalized_range(
+ &mut self,
+ va: Option<usize>,
+ data: &[u8],
+ access_rights: MemoryAccessRights,
+ ) -> Result<usize, XlatError> {
+ let mut pages = self.page_pool.allocate_pages(data.len()).map_err(|e| {
+ XlatError::AllocationError(format!(
+ "Cannot allocate pages for {} bytes ({:?})",
+ data.len(),
+ e
+ ))
+ })?;
+
+ pages.copy_data_to_page(data);
+
+ let pages_length = pages.length();
+ let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
+ let region = if let Some(required_va) = va {
+ self.regions
+ .acquire(required_va, pages_length, physical_region)
+ } else {
+ self.regions.allocate(pages_length, physical_region)
+ }
+ .map_err(XlatError::RegionPoolError)?;
+
+ self.map_region(region, access_rights.into())
+ }
+
+    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with zeros
+ /// # Arguments
+ /// * va: Virtual address of the memory area
+ /// * length: Length of the memory area in bytes
+ /// * access_rights: Memory access rights of the area
+ /// # Return value
+ /// * Virtual address of the mapped memory
+ pub fn allocate_zero_init_range(
+ &mut self,
+ va: Option<usize>,
+ length: usize,
+ access_rights: MemoryAccessRights,
+ ) -> Result<usize, XlatError> {
+ let mut pages = self.page_pool.allocate_pages(length).map_err(|e| {
+ XlatError::AllocationError(format!("Cannot allocate pages for {length} bytes ({e:?})"))
+ })?;
+
+ pages.zero_init();
+
+ let pages_length = pages.length();
+ let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
+ let region = if let Some(required_va) = va {
+ self.regions
+ .acquire(required_va, pages_length, physical_region)
+ } else {
+ self.regions.allocate(pages_length, physical_region)
+ }
+ .map_err(XlatError::RegionPoolError)?;
+
+ self.map_region(region, access_rights.into())
+ }
+
+ /// Map memory area by physical address
+ /// # Arguments
+ /// * va: Virtual address of the memory area
+ /// * pa: Physical address of the memory area
+ /// * length: Length of the memory area in bytes
+ /// * access_rights: Memory access rights of the area
+ /// # Return value
+ /// * Virtual address of the mapped memory
+ pub fn map_physical_address_range(
+ &mut self,
+ va: Option<usize>,
+ pa: usize,
+ length: usize,
+ access_rights: MemoryAccessRights,
+ ) -> Result<usize, XlatError> {
+ let resource = PhysicalRegion::PhysicalAddress(pa);
+ let region = if let Some(required_va) = va {
+ self.regions.acquire(required_va, length, resource)
+ } else {
+ self.regions.allocate(length, resource)
+ }
+ .map_err(XlatError::RegionPoolError)?;
+
+ self.map_region(region, access_rights.into())
+ }
+
+ /// Unmap memory area by virtual address
+ /// # Arguments
+ /// * va: Virtual address
+ /// * length: Length of the memory area in bytes
+ pub fn unmap_virtual_address_range(
+ &mut self,
+ va: usize,
+ length: usize,
+ ) -> Result<(), XlatError> {
+ let pa = self.get_pa_by_va(va, length)?;
+
+ let region_to_release = VirtualRegion::new_with_pa(pa, va, length);
+
+ self.unmap_region(®ion_to_release)?;
+
+ self.regions
+ .release(region_to_release)
+ .map_err(XlatError::RegionPoolError)
+ }
+
+    /// Queries the physical address by virtual address range. Only returns a value if the memory
+    /// area is mapped as a contiguous range.
+ /// # Arguments
+ /// * va: Virtual address of the memory area
+ /// * length: Length of the memory area in bytes
+ /// # Return value
+ /// * Physical address of the mapped memory
+ pub fn get_pa_by_va(&self, va: usize, length: usize) -> Result<usize, XlatError> {
+ let containing_region = self
+ .find_containing_region(va, length)
+ .ok_or(XlatError::NotFound)?;
+
+ if !containing_region.used() {
+ return Err(XlatError::NotFound);
+ }
+
+ Ok(containing_region.get_pa_for_va(va))
+ }
+
+    /// Sets the memory access rights of a memory area
+ /// # Arguments
+ /// * va: Virtual address of the memory area
+ /// * length: Length of the memory area in bytes
+ /// * access_rights: New memory access rights of the area
+ pub fn set_access_rights(
+ &mut self,
+ va: usize,
+ length: usize,
+ access_rights: MemoryAccessRights,
+ ) -> Result<(), XlatError> {
+ let containing_region = self
+ .find_containing_region(va, length)
+ .ok_or(XlatError::NotFound)?;
+
+ if !containing_region.used() {
+ return Err(XlatError::NotFound);
+ }
+
+ let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
+ self.map_region(region, access_rights.into())?;
+
+ Ok(())
+ }
+
+    /// Activates the memory mapping represented by the object
+    /// # Arguments
+    /// * asid: ASID to be set in TTBR0_EL1 along with the translation table base address
+ pub fn activate(&self, asid: u8) {
+ let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.descriptors.as_ptr() as u64);
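+        // TTBR0_EL1 layout: ASID in bits [63:48] (when TCR_EL1.A1 == 0) and the translation
+        // table base address in the lower bits.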
+ let ttbr = ((asid as u64) << 48) | base_table_pa;
+ unsafe {
+ #[cfg(target_arch = "aarch64")]
+ asm!(
+ "msr ttbr0_el1, {0}
+ isb",
+ in(reg) ttbr)
+ };
+ }
+
+    /// Prints the translation tables recursively to the debug console
+ pub fn print(&self) {
+ debug!(
+ "Xlat table -> {:#010x}",
+ self.base_table.descriptors.as_ptr() as u64
+ );
+ Self::print_table(1, 0, &self.base_table.descriptors);
+ }
+
+ /// Prints a single translation table to the debug console
+ /// # Arguments
+ /// * level: Level of the translation table
+ /// * va: Base virtual address of the table
+ /// * table: Table entries
+ pub fn print_table(level: usize, va: usize, table: &[Descriptor]) {
+ let level_prefix = match level {
+ 0 | 1 => "|-",
+ 2 => "| |-",
+ _ => "| | |-",
+ };
+
+ for (descriptor, va) in zip(table, (va..).step_by(Self::GRANULE_SIZES[level])) {
+ match descriptor.get_descriptor_type(level) {
+ DescriptorType::Block => debug!(
+ "{} {:#010x} Block -> {:#010x}",
+ level_prefix,
+ va,
+ descriptor.get_block_output_address(level)
+ ),
+ DescriptorType::Table => {
+ let next_level_table = unsafe { descriptor.get_next_level_table(level) };
+ debug!(
+ "{} {:#010x} Table -> {:#010x}",
+ level_prefix,
+ va,
+ next_level_table.as_ptr() as usize
+ );
+ Self::print_table(level + 1, va, next_level_table);
+ }
+ _ => {}
+ }
+ }
+ }
+
+    /// Adds a memory region to the translation table. The function splits the region into blocks
+    /// and uses the block level functions to do the mapping.
+ /// # Arguments
+ /// * region: Memory region object
+ /// # Return value
+ /// * Virtual address of the mapped memory
+ fn map_region(
+ &mut self,
+ region: VirtualRegion,
+ attributes: Attributes,
+ ) -> Result<usize, XlatError> {
+ let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
+ for block in blocks {
+ self.map_block(block, attributes.clone());
+ }
+
+ Ok(region.base())
+ }
+
+    /// Removes a memory region from the translation table. The function splits the region into
+    /// blocks and uses the block level functions to do the unmapping.
+ /// # Arguments
+ /// * region: Memory region object
+ fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
+ let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
+ for block in blocks {
+ self.unmap_block(block);
+ }
+
+ Ok(())
+ }
+
+    /// Finds the mapped region that contains the whole given area
+    /// # Arguments
+    /// * va: Virtual address of the area to look for
+    /// * length: Length of the area in bytes
+ /// # Return value
+ /// * Reference to virtual region if found
+ fn find_containing_region(&self, va: usize, length: usize) -> Option<&VirtualRegion> {
+ self.regions.find_containing_region(va, length).ok()
+ }
+
+    /// Splits a memory region into blocks that match the granule sizes of the translation table.
+ /// # Arguments
+ /// * pa: Physical address
+ /// * va: Virtual address
+ /// * length: Region size in bytes
+ /// # Return value
+ /// * Vector of granule sized blocks
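+    /// # Example
+    /// Splitting PA/VA `0x3fff_c000` with length `0x4020_5000` yields four 4 KiB blocks, one
+    /// 1 GiB block, one 2 MiB block and a final 4 KiB block (see `test_split_to_pages` below).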
+ fn split_region_to_blocks(
+ mut pa: usize,
+ mut va: usize,
+ mut length: usize,
+ ) -> Result<Vec<Block>, XlatError> {
+ let min_granule_mask = Self::GRANULE_SIZES.last().unwrap() - 1;
+
+ if length == 0 {
+ return Err(XlatError::InvalidParameterError(
+ "Length cannot be 0".to_string(),
+ ));
+ }
+
+ if pa & min_granule_mask != 0
+ || va & min_granule_mask != 0
+ || length & min_granule_mask != 0
+ {
+ return Err(XlatError::InvalidParameterError(format!(
+ "Addresses and length must be aligned {pa:#010x} {va:#010x} {length:#x}"
+ )));
+ }
+
+ let mut pages = Vec::new();
+
+ while length > 0 {
+            // Skip the unused zero placeholder at index 0 to avoid an underflowing mask
+            for granule in &Self::GRANULE_SIZES[1..] {
+ if (pa | va) & (*granule - 1) == 0 && length >= *granule {
+ pages.push(Block::new(pa, va, *granule));
+ pa += *granule;
+ va = va.checked_add(*granule).ok_or(XlatError::Overflow)?;
+
+ length -= *granule;
+ break;
+ }
+ }
+ }
+
+ Ok(pages)
+ }
+
+    /// Adds a block to the memory mapping
+ /// # Arguments
+ /// * block: Memory block that can be represented by a single translation table entry
+ /// * attributes: Memory block's permissions, flags
+ fn map_block(&mut self, block: Block, attributes: Attributes) {
+ Self::set_block_descriptor_recursively(
+ attributes,
+ block.pa,
+ block.va,
+ block.granule,
+ 1,
+ self.base_table.descriptors.as_mut_slice(),
+ &self.page_pool,
+ );
+ }
+
+    /// Adds the block descriptor to the translation table, walking through (and creating where
+    /// needed) the intermediate tables required to reach the given granule.
+ /// # Arguments
+    /// * attributes: Memory block's permissions, flags
+ /// * pa: Physical address
+ /// * va: Virtual address
+ /// * granule: Translation granule in bytes
+ /// * level: Translation table level
+ /// * table: Translation table on the given level
+ /// * page_pool: Page pool where the function can allocate pages for the translation tables
+ fn set_block_descriptor_recursively(
+ attributes: Attributes,
+ pa: usize,
+ va: usize,
+ granule: usize,
+ level: usize,
+ table: &mut [Descriptor],
+ page_pool: &PagePool,
+ ) {
+ // Get descriptor of the current level
+ let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];
+
+ // We reached the required granule level
+ if Self::GRANULE_SIZES[level] == granule {
+ descriptor.set_block_descriptor(level, pa, attributes);
+ return;
+ }
+
+ // Need to iterate forward
+ match descriptor.get_descriptor_type(level) {
+ DescriptorType::Invalid => {
+ let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
+ unsafe {
+ let next_table = page.get_as_slice();
+ descriptor.set_table_descriptor(level, next_table, None);
+ }
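+                // Recurse with the VA offset within this level's entry; the next level indexes
+                // the newly created table with its own granule size.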
+ Self::set_block_descriptor_recursively(
+ attributes,
+ pa,
+ va & (Self::GRANULE_SIZES[level] - 1),
+ granule,
+ level + 1,
+ unsafe { descriptor.get_next_level_table_mut(level) },
+ page_pool,
+ )
+ }
+ DescriptorType::Block => {
+ // Saving current descriptor details
+ let current_va = va & !(Self::GRANULE_SIZES[level] - 1);
+ let current_pa = descriptor.get_block_output_address(level);
+ let current_attributes = descriptor.get_block_attributes(level);
+
+ // Replace block descriptor by table descriptor
+ let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
+ unsafe {
+ let next_table = page.get_as_slice();
+ descriptor.set_table_descriptor(level, next_table, None);
+ }
+
+ // Explode block descriptor to table entries
+ for exploded_va in (current_va..(current_va + Self::GRANULE_SIZES[level]))
+ .step_by(Self::GRANULE_SIZES[level + 1])
+ {
+ let offset = exploded_va - current_va;
+ Self::set_block_descriptor_recursively(
+ current_attributes.clone(),
+ current_pa + offset,
+ exploded_va & (Self::GRANULE_SIZES[level] - 1),
+ Self::GRANULE_SIZES[level + 1],
+ level + 1,
+ unsafe { descriptor.get_next_level_table_mut(level) },
+ page_pool,
+ )
+ }
+
+ // Invoke self to continue recursion on the newly created level
+ Self::set_block_descriptor_recursively(
+ attributes, pa, va, granule, level, table, page_pool,
+ );
+ }
+ DescriptorType::Table => Self::set_block_descriptor_recursively(
+ attributes,
+ pa,
+ va & (Self::GRANULE_SIZES[level] - 1),
+ granule,
+ level + 1,
+ unsafe { descriptor.get_next_level_table_mut(level) },
+ page_pool,
+ ),
+ }
+ }
+
+    /// Removes a block from the memory mapping
+    /// # Arguments
+    /// * block: Memory block that can be represented by a single translation table entry
+ fn unmap_block(&mut self, block: Block) {
+ Self::remove_block_descriptor_recursively(
+ block.va,
+ block.granule,
+ 1,
+ self.base_table.descriptors.as_mut_slice(),
+ &self.page_pool,
+ );
+ }
+
+    /// Removes the block descriptor from the translation table and releases all the intermediate
+    /// tables which become empty during the removal process.
+ /// # Arguments
+ /// * va: Virtual address
+ /// * granule: Translation granule in bytes
+ /// * level: Translation table level
+ /// * table: Translation table on the given level
+ /// * page_pool: Page pool where the function can release the pages of empty tables
+ fn remove_block_descriptor_recursively(
+ va: usize,
+ granule: usize,
+ level: usize,
+ table: &mut [Descriptor],
+ page_pool: &PagePool,
+ ) {
+ // Get descriptor of the current level
+ let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];
+
+ // We reached the required granule level
+ if Self::GRANULE_SIZES[level] == granule {
+ descriptor.set_block_descriptor_to_invalid(level);
+ return;
+ }
+
+ // Need to iterate forward
+ match descriptor.get_descriptor_type(level) {
+ DescriptorType::Invalid => {
+ panic!("Cannot remove block from non-existing table");
+ }
+ DescriptorType::Block => {
+ panic!("Cannot remove block with different granule");
+ }
+ DescriptorType::Table => {
+ let next_level_table = unsafe { descriptor.get_next_level_table_mut(level) };
+ Self::remove_block_descriptor_recursively(
+ va & (Self::GRANULE_SIZES[level] - 1),
+ granule,
+ level + 1,
+ next_level_table,
+ page_pool,
+ );
+
+ if next_level_table.iter().all(|d| !d.is_valid()) {
+ // Empty table
+ let mut page = unsafe {
+ Pages::from_slice(descriptor.set_table_descriptor_to_invalid(level))
+ };
+ page.zero_init();
+ page_pool.release_pages(page).unwrap();
+ }
+ }
+ }
+ }
+
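+    /// Returns a mutable reference to the descriptor that maps `va` with the given granule,
+    /// walking the tables from the base table.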
+ fn get_descriptor(&mut self, va: usize, granule: usize) -> &mut Descriptor {
+        Self::walk_descriptors(va, granule, 1, &mut self.base_table.descriptors)
+ }
+
+ fn walk_descriptors(
+ va: usize,
+ granule: usize,
+ level: usize,
+ table: &mut [Descriptor],
+ ) -> &mut Descriptor {
+ // Get descriptor of the current level
+ let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];
+
+ if Self::GRANULE_SIZES[level] == granule {
+ return descriptor;
+ }
+
+ // Need to iterate forward
+ match descriptor.get_descriptor_type(level) {
+ DescriptorType::Invalid => {
+ panic!("Invalid descriptor");
+ }
+ DescriptorType::Block => {
+ panic!("Cannot split existing block descriptor to table");
+ }
+ DescriptorType::Table => Self::walk_descriptors(
+ va & (Self::GRANULE_SIZES[level] - 1),
+ granule,
+ level + 1,
+ unsafe { descriptor.get_next_level_table_mut(level) },
+ ),
+ }
+ }
+}
+
+#[test]
+fn test_split_to_pages() {
+ let pages = Xlat::split_region_to_blocks(0x3fff_c000, 0x3fff_c000, 0x4020_5000).unwrap();
+ assert_eq!(Block::new(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
+ assert_eq!(Block::new(0x3fff_d000, 0x3fff_d000, 0x1000), pages[1]);
+ assert_eq!(Block::new(0x3fff_e000, 0x3fff_e000, 0x1000), pages[2]);
+ assert_eq!(Block::new(0x3fff_f000, 0x3fff_f000, 0x1000), pages[3]);
+ assert_eq!(Block::new(0x4000_0000, 0x4000_0000, 0x4000_0000), pages[4]);
+ assert_eq!(Block::new(0x8000_0000, 0x8000_0000, 0x0020_0000), pages[5]);
+ assert_eq!(Block::new(0x8020_0000, 0x8020_0000, 0x1000), pages[6]);
+}
+
+#[test]
+fn test_split_to_pages_unaligned() {
+ let pages = Xlat::split_region_to_blocks(0x3fff_c000, 0x3f20_0000, 0x200000).unwrap();
+ for (i, block) in pages.iter().enumerate().take(512) {
+ assert_eq!(
+ Block::new(0x3fff_c000 + (i << 12), 0x3f20_0000 + (i << 12), 0x1000),
+ *block
+ );
+ }
+}