// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![cfg_attr(not(test), no_std)]

extern crate alloc;

use core::arch::asm;
use core::iter::zip;
use core::ops::Range;
use core::{fmt, panic};

use alloc::boxed::Box;
use alloc::format;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use log::debug;

use bitflags::bitflags;
use packed_struct::prelude::*;

use self::descriptor::DescriptorType;

use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
use self::kernel_space::KernelSpace;
use self::page_pool::{Page, PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

mod descriptor;
pub mod kernel_space;
pub mod page_pool;
mod region;
mod region_pool;

/// The first level translation table which contains the top level memory descriptors
#[repr(C, align(512))]
pub struct BaseTable {
    pub descriptors: [Descriptor; 64],
}

impl BaseTable {
    pub fn new() -> Self {
        BaseTable {
            descriptors: core::array::from_fn(|_| Descriptor::default()),
        }
    }
}

/// Translation table error type
#[derive(Debug)]
pub enum XlatError {
    InvalidParameterError(String),
    AllocationError(String),
    AlignmentError(String),
    Overflow,
    InvalidOperation(String),
    Overlap,
    NotFound,
    RegionPoolError(RegionPoolError),
}

/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
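///
/// For reference only (an assumption; the authoritative values live in startup.s):
/// a MAIR_EL1 configuration matching these indices would carry Attr0 = 0x00
/// (Device-nGnRnE) and Attr1 = 0xff (Normal, Inner/Outer Write-Back Write-Allocate),
/// i.e. MAIR_EL1 = 0x0000_0000_0000_ff00.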
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}

bitflags! {
    #[derive(Debug, Clone, Copy)]
    pub struct MemoryAccessRights : u32 {
        const R = 0b00000001;
        const W = 0b00000010;
        const X = 0b00000100;
        const NS = 0b00001000;

        const RW = Self::R.bits() | Self::W.bits();
        const RX = Self::R.bits() | Self::X.bits();
        const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();

        const USER = 0b00010000;
        const DEVICE = 0b00100000;
        const GLOBAL = 0b01000000;
    }
}

impl From<MemoryAccessRights> for Attributes {
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

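        // Execute-never policy: a mapping is executable at EL0 only if it has both
        // X and USER, and executable at EL1 only if it has X and is not user
        // accessible (PXN is set for any user accessible mapping).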
        Attributes {
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

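/// Memory block that can be represented by a single translation table entry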
#[derive(PartialEq)]
struct Block {
    pa: usize,
    va: usize,
    granule: usize,
}

impl Block {
    fn new(pa: usize, va: usize, granule: usize) -> Self {
        assert!(Xlat::GRANULE_SIZES.contains(&granule));
        Self { pa, va, granule }
    }
}

impl fmt::Debug for Block {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Block")
            .field("pa", &format_args!("{:#010x}", self.pa))
            .field("va", &format_args!("{:#010x}", self.va))
            .field("granule", &format_args!("{:#010x}", self.granule))
            .finish()
    }
}

/// Enum for selecting TTBR0_EL1 or TTBR1_EL1
pub enum TTBR {
    TTBR0_EL1,
    TTBR1_EL1,
}

pub struct Xlat {
    base_table: Box<BaseTable>,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
}

/// Memory translation table handling
/// # High level interface
/// * allocate and map zero initialized region (with or without VA)
/// * allocate and map memory region and load contents (with or without VA)
/// * map memory region by PA (with or without VA)
/// * unmap memory region by PA
/// * query PA by VA
/// * set access rights of mapped memory areas
/// * activate mapping
///
/// # Debug features
/// * print translation table details
///
/// # Region level interface
/// * map regions
/// * unmap region
/// * find a mapped region which contains an address range
/// * find empty area for region
/// * set access rights for a region
/// * create blocks by region
///
/// # Block level interface
/// * map block
/// * unmap block
/// * set access rights of block
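///
/// # Example
///
/// Illustrative usage sketch (not compiled here); it assumes a `PagePool`
/// instance set up elsewhere and a platform specific device base address.
///
/// ```ignore
/// let mut xlat = Xlat::new(page_pool, 0x0000_0000..0x1000_0000);
///
/// // Identity map a device region as read-write device memory.
/// let device_va = xlat
///     .map_physical_address_range(
///         Some(0x0900_0000),
///         0x0900_0000,
///         0x1000,
///         MemoryAccessRights::RW | MemoryAccessRights::DEVICE | MemoryAccessRights::GLOBAL,
///     )
///     .unwrap();
///
/// // Allocate and map a zero initialized buffer at a free VA chosen by the pool.
/// let buffer_va = xlat
///     .allocate_zero_init_range(None, 0x4000, MemoryAccessRights::RW)
///     .unwrap();
///
/// // Load the translation table into TTBR0_EL1 with ASID 0.
/// xlat.activate(0, TTBR::TTBR0_EL1);
/// ```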
impl Xlat {
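    // Block/page size mapped by a descriptor at each translation table level,
    // assuming a 4 KiB granule: index 0 is unused, level 1 maps 1 GiB, level 2
    // maps 2 MiB and level 3 maps 4 KiB.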
    pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];

    pub fn new(page_pool: PagePool, va_range: Range<usize>) -> Self {
        let mut regions = RegionPool::new();
        regions
            .add(VirtualRegion::new(va_range.start, va_range.len()))
            .unwrap();
        Self {
            base_table: Box::new(BaseTable::new()),
            page_pool,
            regions,
        }
    }

    /// Allocate memory pages from the page pool, map them to the given VA and fill them with the
    /// initial data
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<usize>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<usize, XlatError> {
        let mut pages = self.page_pool.allocate_pages(data.len()).map_err(|e| {
            XlatError::AllocationError(format!(
                "Cannot allocate pages for {} bytes ({:?})",
                data.len(),
                e
            ))
        })?;

        pages.copy_data_to_page(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocate memory pages from the page pool, map them to the given VA and fill them with zeros
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<usize>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<usize, XlatError> {
        let mut pages = self.page_pool.allocate_pages(length).map_err(|e| {
            XlatError::AllocationError(format!("Cannot allocate pages for {length} bytes ({e:?})"))
        })?;

        pages.zero_init();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Map memory area by physical address
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn map_physical_address_range(
        &mut self,
        va: Option<usize>,
        pa: usize,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<usize, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmap memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
    pub fn unmap_virtual_address_range(
        &mut self,
        va: usize,
        length: usize,
    ) -> Result<(), XlatError> {
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(XlatError::RegionPoolError)
    }

    /// Query physical address by virtual address range. Only returns a value if the memory area
    /// is mapped as a contiguous area.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
    pub fn get_pa_by_va(&self, va: usize, length: usize) -> Result<usize, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
    pub fn set_access_rights(
        &mut self,
        va: usize,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Activate memory mapping represented by the object
    /// # Arguments
    /// * asid: ASID of the table base address
    /// * ttbr: Selects TTBR0_EL1/TTBR1_EL1
    pub fn activate(&self, asid: u8, ttbr: TTBR) {
        let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.descriptors.as_ptr() as u64);
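        // TTBRn_EL1 layout: the ASID goes into bits [63:48] and the translation
        // table base address into the lower bits.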
        let ttbr_value = ((asid as u64) << 48) | base_table_pa;
        #[cfg(target_arch = "aarch64")]
        match ttbr {
            TTBR::TTBR0_EL1 => unsafe {
                asm!(
                    "msr ttbr0_el1, {0}
                    isb",
                    in(reg) ttbr_value)
            },
            TTBR::TTBR1_EL1 => unsafe {
                asm!(
                    "msr ttbr1_el1, {0}
                    isb

                    tlbi vmalle1
                    dsb sy
                    isb",
                    in(reg) ttbr_value)
            },
        }
    }

    /// Prints the translation tables to debug console recursively
    pub fn print(&self) {
        debug!(
            "Xlat table -> {:#010x}",
            self.base_table.descriptors.as_ptr() as u64
        );
        Self::print_table(1, 0, &self.base_table.descriptors);
    }

    /// Prints a single translation table to the debug console
    /// # Arguments
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    pub fn print_table(level: usize, va: usize, table: &[Descriptor]) {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(Self::GRANULE_SIZES[level])) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => debug!(
                    "{} {:#010x} Block -> {:#010x}",
                    level_prefix,
                    va,
                    descriptor.get_block_output_address(level)
                ),
                DescriptorType::Table => {
                    let next_level_table = unsafe { descriptor.get_next_level_table(level) };
                    debug!(
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix,
                        va,
                        next_level_table.as_ptr() as usize
                    );
                    Self::print_table(level + 1, va, next_level_table);
                }
                _ => {}
            }
        }
    }

    /// Adds a memory region to the translation table. The function splits the region into blocks
    /// and uses the block level functions to do the mapping.
    /// # Arguments
    /// * region: Memory region object
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<usize, XlatError> {
        let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
        for block in blocks {
            self.map_block(block, attributes.clone());
        }

        Ok(region.base())
    }

    /// Removes a memory region from the translation table. The function splits the region into
    /// blocks and uses the block level functions to do the unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Find a mapped region that contains the whole given area
    /// # Arguments
    /// * va: Virtual address of the area to look for
    /// * length: Length of the area in bytes
    /// # Return value
    /// * Reference to the virtual region if found
    fn find_containing_region(&self, va: usize, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }

    /// Splits a memory region into blocks that match the granule sizes of the translation table.
    /// # Arguments
    /// * pa: Physical address
    /// * va: Virtual address
    /// * length: Region size in bytes
    /// # Return value
    /// * Vector of granule sized blocks
    fn split_region_to_blocks(
        mut pa: usize,
        mut va: usize,
        mut length: usize,
    ) -> Result<Vec<Block>, XlatError> {
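        // The smallest supported granule (4 KiB) determines the required alignment
        // of the physical address, the virtual address and the length.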
        let min_granule_mask = Self::GRANULE_SIZES.last().unwrap() - 1;

        if length == 0 {
            return Err(XlatError::InvalidParameterError(
                "Length cannot be 0".to_string(),
            ));
        }

        if pa & min_granule_mask != 0
            || va & min_granule_mask != 0
            || length & min_granule_mask != 0
        {
            return Err(XlatError::InvalidParameterError(format!(
                "Addresses and length must be aligned {pa:#010x} {va:#010x} {length:#x}"
            )));
        }

        let mut pages = Vec::new();

        while length > 0 {
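            // Greedily pick the largest granule that both fits the remaining length
            // and matches the current PA/VA alignment.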
            for granule in &Self::GRANULE_SIZES {
                if (pa | va) & (*granule - 1) == 0 && length >= *granule {
                    pages.push(Block::new(pa, va, *granule));
                    pa += *granule;
                    va = va.checked_add(*granule).ok_or(XlatError::Overflow)?;

                    length -= *granule;
                    break;
                }
            }
        }

        Ok(pages)
    }

    /// Add block to memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    /// * attributes: Memory block's permissions, flags
    fn map_block(&mut self, block: Block, attributes: Attributes) {
        Self::set_block_descriptor_recursively(
            attributes,
            block.pa,
            block.va,
            block.granule,
            1,
            self.base_table.descriptors.as_mut_slice(),
            &self.page_pool,
        );
    }

    /// Adds the block descriptor to the translation table along with all the intermediate tables
    /// needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * granule: Translation granule in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: usize,
        va: usize,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];

        // We reached the required granule level
        if Self::GRANULE_SIZES[level] == granule {
            descriptor.set_block_descriptor(level, pa, attributes);
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
                unsafe {
                    let next_table = page.get_as_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }
                Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va & (Self::GRANULE_SIZES[level] - 1),
                    granule,
                    level + 1,
                    unsafe { descriptor.get_next_level_table_mut(level) },
                    page_pool,
                )
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va & !(Self::GRANULE_SIZES[level] - 1);
                let current_pa = descriptor.get_block_output_address(level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor
                let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
                unsafe {
                    let next_table = page.get_as_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }

                // Explode block descriptor to table entries
                for exploded_va in (current_va..(current_va + Self::GRANULE_SIZES[level]))
                    .step_by(Self::GRANULE_SIZES[level + 1])
                {
                    let offset = exploded_va - current_va;
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa + offset,
                        exploded_va & (Self::GRANULE_SIZES[level] - 1),
                        Self::GRANULE_SIZES[level + 1],
                        level + 1,
                        unsafe { descriptor.get_next_level_table_mut(level) },
                        page_pool,
                    )
                }

                // Invoke self to continue recursion on the newly created level
                Self::set_block_descriptor_recursively(
                    attributes, pa, va, granule, level, table, page_pool,
                );
            }
            DescriptorType::Table => Self::set_block_descriptor_recursively(
                attributes,
                pa,
                va & (Self::GRANULE_SIZES[level] - 1),
                granule,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(level) },
                page_pool,
            ),
        }
    }

    /// Remove block from memory mapping
    /// # Arguments
    /// * block: memory block that can be represented by a single translation entry
    fn unmap_block(&mut self, block: Block) {
        Self::remove_block_descriptor_recursively(
            block.va,
            block.granule,
            1,
            self.base_table.descriptors.as_mut_slice(),
            &self.page_pool,
        );
    }

    /// Removes the block descriptor from the translation table along with all the intermediate
    /// tables which become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * granule: Translation granule in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    fn remove_block_descriptor_recursively(
        va: usize,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];

        // We reached the required granule level
        if Self::GRANULE_SIZES[level] == granule {
            descriptor.set_block_descriptor_to_invalid(level);
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Cannot remove block from non-existing table");
            }
            DescriptorType::Block => {
                panic!("Cannot remove block with different granule");
            }
            DescriptorType::Table => {
                let next_level_table = unsafe { descriptor.get_next_level_table_mut(level) };
                Self::remove_block_descriptor_recursively(
                    va & (Self::GRANULE_SIZES[level] - 1),
                    granule,
                    level + 1,
                    next_level_table,
                    page_pool,
                );

                if next_level_table.iter().all(|d| !d.is_valid()) {
                    // Empty table
                    let mut page = unsafe {
                        Pages::from_slice(descriptor.set_table_descriptor_to_invalid(level))
                    };
                    page.zero_init();
                    page_pool.release_pages(page).unwrap();
                }
            }
        }
    }

    fn get_descriptor(&mut self, va: usize, granule: usize) -> &mut Descriptor {
        Self::walk_descriptors(va, granule, 1, &mut self.base_table.descriptors)
    }

    fn walk_descriptors(
        va: usize,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
    ) -> &mut Descriptor {
        // Get descriptor of the current level
        let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];

        if Self::GRANULE_SIZES[level] == granule {
            return descriptor;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Invalid descriptor");
            }
            DescriptorType::Block => {
                panic!("Cannot split existing block descriptor to table");
            }
            DescriptorType::Table => Self::walk_descriptors(
                va & (Self::GRANULE_SIZES[level] - 1),
                granule,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(level) },
            ),
        }
    }
}

#[test]
fn test_split_to_pages() {
    let pages = Xlat::split_region_to_blocks(0x3fff_c000, 0x3fff_c000, 0x4020_5000).unwrap();
    assert_eq!(Block::new(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
    assert_eq!(Block::new(0x3fff_d000, 0x3fff_d000, 0x1000), pages[1]);
    assert_eq!(Block::new(0x3fff_e000, 0x3fff_e000, 0x1000), pages[2]);
    assert_eq!(Block::new(0x3fff_f000, 0x3fff_f000, 0x1000), pages[3]);
    assert_eq!(Block::new(0x4000_0000, 0x4000_0000, 0x4000_0000), pages[4]);
    assert_eq!(Block::new(0x8000_0000, 0x8000_0000, 0x0020_0000), pages[5]);
    assert_eq!(Block::new(0x8020_0000, 0x8020_0000, 0x1000), pages[6]);
}

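#[test]
fn test_split_to_pages_invalid_input() {
    // Illustrative additional checks (an assumption based on the validation in
    // split_region_to_blocks, not part of the original test set): zero-length
    // regions and inputs that are not aligned to the smallest granule (4 KiB)
    // are expected to be rejected.
    assert!(Xlat::split_region_to_blocks(0x4000_0000, 0x4000_0000, 0).is_err());
    assert!(Xlat::split_region_to_blocks(0x4000_0100, 0x4000_0000, 0x1000).is_err());
    assert!(Xlat::split_region_to_blocks(0x4000_0000, 0x4000_0100, 0x1000).is_err());
    assert!(Xlat::split_region_to_blocks(0x4000_0000, 0x4000_0000, 0x1100).is_err());
}
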
#[test]
fn test_split_to_pages_unaligned() {
    let pages = Xlat::split_region_to_blocks(0x3fff_c000, 0x3f20_0000, 0x200000).unwrap();
    for (i, block) in pages.iter().enumerate().take(512) {
        assert_eq!(
            Block::new(0x3fff_c000 + (i << 12), 0x3f20_0000 + (i << 12), 0x1000),
            *block
        );
    }
753}