// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![cfg_attr(not(test), no_std)]

extern crate alloc;

use core::iter::zip;
use core::{fmt, panic};

use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
use alloc::boxed::Box;
use alloc::format;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use log::debug;

use bitflags::bitflags;
use packed_struct::prelude::*;

use self::descriptor::DescriptorType;

use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
use self::kernel_space::KernelSpace;
use self::page_pool::{Page, PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

pub mod address;
mod descriptor;
pub mod kernel_space;
pub mod page_pool;
mod region;
mod region_pool;

/// The first level of the memory descriptor tables (the base translation table)
#[repr(C, align(512))]
pub struct BaseTable {
    pub descriptors: [Descriptor; 64],
}

impl BaseTable {
    pub fn new() -> Self {
        BaseTable {
            descriptors: core::array::from_fn(|_| Descriptor::default()),
        }
    }
}

/// Translation table error type
#[derive(Debug)]
pub enum XlatError {
    InvalidParameterError(String),
    AllocationError(String),
    AlignmentError(String),
    Overflow,
    InvalidOperation(String),
    Overlap,
    NotFound,
    RegionPoolError(RegionPoolError),
}

/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}

bitflags! {
    #[derive(Debug, Clone, Copy)]
    pub struct MemoryAccessRights : u32 {
        const R = 0b00000001;
        const W = 0b00000010;
        const X = 0b00000100;
        const NS = 0b00001000;

        const RW = Self::R.bits() | Self::W.bits();
        const RX = Self::R.bits() | Self::X.bits();
        const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();

        const USER = 0b00010000;
        const DEVICE = 0b00100000;
        const GLOBAL = 0b01000000;
    }
}

impl From<MemoryAccessRights> for Attributes {
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

        Attributes {
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

#[derive(PartialEq)]
struct Block {
    pa: PhysicalAddress,
    va: VirtualAddress,
    granule: usize,
}

impl Block {
    fn new(pa: PhysicalAddress, va: VirtualAddress, granule: usize) -> Self {
        assert!(Xlat::GRANULE_SIZES.contains(&granule));
        Self { pa, va, granule }
    }
}

impl fmt::Debug for Block {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Block")
            .field("pa", &format_args!("{:#010x}", self.pa.0))
            .field("va", &format_args!("{:#010x}", self.va.0))
            .field("granule", &format_args!("{:#010x}", self.granule))
            .finish()
    }
}

pub enum RegimeVaRange {
    Lower,
    Upper,
}

pub enum TranslationRegime {
    EL1_0(RegimeVaRange, u8), // EL1 and EL0 stage 1, TTBRx_EL1
    #[cfg(target_feature = "vh")]
    EL2_0(RegimeVaRange, u8), // EL2 and EL0 with VHE
    EL2,                      // EL2
    EL3,                      // EL3, TTBR0_EL3
}

pub struct Xlat {
    base_table: Box<BaseTable>,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
    regime: TranslationRegime,
}

/// Memory translation table handling
/// # High level interface
/// * allocate and map zero initialized region (with or without VA)
/// * allocate and map memory region and load contents (with or without VA)
/// * map memory region by PA (with or without VA)
/// * unmap memory region by PA
/// * query PA by VA
/// * set access rights of mapped memory areas
/// * activate mapping
///
/// # Debug features
/// * print translation table details
///
/// # Region level interface
/// * map regions
/// * unmap region
/// * find a mapped region which contains a given virtual address range
/// * find empty area for region
/// * set access rights for a region
/// * create blocks by region
///
/// # Block level interface
/// * map block
/// * unmap block
/// * set access rights of block
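///
/// # Example
/// A minimal usage sketch; the `page_pool` value, the virtual address range and the translation
/// regime below are assumptions chosen only for illustration:
/// ```ignore
/// let mut xlat = Xlat::new(
///     page_pool,
///     VirtualAddressRange::new(VirtualAddress(0x0), VirtualAddress(0x4000_0000)),
///     TranslationRegime::EL1_0(RegimeVaRange::Lower, 0),
/// );
/// let va = xlat.allocate_zero_init_range(None, 0x1000, MemoryAccessRights::RW)?;
/// // Safety: the new mapping must not break any existing references.
/// unsafe { xlat.activate() };
/// ```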
196impl Xlat {
Imre Kis703482d2023-11-30 15:51:26 +0100197 pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];
198
Imre Kisb5146b52024-10-31 14:03:06 +0100199 pub fn new(
200 page_pool: PagePool,
201 address: VirtualAddressRange,
202 regime: TranslationRegime,
203 ) -> Self {
Imre Kis703482d2023-11-30 15:51:26 +0100204 let mut regions = RegionPool::new();
205 regions
Imre Kisb5146b52024-10-31 14:03:06 +0100206 .add(VirtualRegion::new(address.start, address.len().unwrap()))
Imre Kis703482d2023-11-30 15:51:26 +0100207 .unwrap();
208 Self {
209 base_table: Box::new(BaseTable::new()),
210 page_pool,
211 regions,
Imre Kisb5146b52024-10-31 14:03:06 +0100212 regime,
Imre Kis703482d2023-11-30 15:51:26 +0100213 }
214 }
215
    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
    /// the initial data
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<VirtualAddress>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self.page_pool.allocate_pages(data.len()).map_err(|e| {
            XlatError::AllocationError(format!(
                "Cannot allocate pages for {} bytes ({:?})",
                data.len(),
                e
            ))
        })?;

        pages.copy_data_to_page(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with zeros
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<VirtualAddress>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self.page_pool.allocate_pages(length).map_err(|e| {
            XlatError::AllocationError(format!("Cannot allocate pages for {length} bytes ({e:?})"))
        })?;

        pages.zero_init();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Map memory area by physical address
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
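    /// # Example
    /// A sketch of mapping one device MMIO page; the physical address and the surrounding `xlat`
    /// instance are assumptions used only for illustration:
    /// ```ignore
    /// let va = xlat.map_physical_address_range(
    ///     None,
    ///     PhysicalAddress(0x0900_0000),
    ///     0x1000,
    ///     MemoryAccessRights::RW | MemoryAccessRights::DEVICE,
    /// )?;
    /// ```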
    pub fn map_physical_address_range(
        &mut self,
        va: Option<VirtualAddress>,
        pa: PhysicalAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmap memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
    pub fn unmap_virtual_address_range(
        &mut self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<(), XlatError> {
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(XlatError::RegionPoolError)
    }

    /// Query physical address by virtual address range. Only returns a value if the memory area
    /// is mapped as a contiguous area.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
    pub fn get_pa_by_va(
        &self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<PhysicalAddress, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
    pub fn set_access_rights(
        &mut self,
        va: VirtualAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Activate memory mapping represented by the object
    ///
    /// # Safety
    /// When activating memory mapping for the running exception level, the
    /// caller must ensure that the new mapping will not break any existing
    /// references. After activation the caller must ensure that there are no
    /// active references when unmapping memory.
    pub unsafe fn activate(&self) {
        let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.descriptors.as_ptr() as u64);

        #[cfg(target_arch = "aarch64")]
        match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL1_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) base_table_pa),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr ttbr0_el3, {0}
                isb",
                in(reg) base_table_pa),
        }
    }

    /// Prints the translation tables to the debug console recursively
    pub fn print(&self) {
        debug!(
            "Xlat table -> {:#010x}",
            self.base_table.descriptors.as_ptr() as u64
        );
        Self::print_table(1, 0, &self.base_table.descriptors);
    }

    /// Prints a single translation table to the debug console
    /// # Arguments
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    pub fn print_table(level: usize, va: usize, table: &[Descriptor]) {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(Self::GRANULE_SIZES[level])) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => debug!(
                    "{} {:#010x} Block -> {:#010x}",
                    level_prefix,
                    va,
                    descriptor.get_block_output_address(level).0
                ),
                DescriptorType::Table => {
                    let next_level_table = unsafe { descriptor.get_next_level_table(level) };
                    debug!(
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix,
                        va,
                        next_level_table.as_ptr() as usize
                    );
                    Self::print_table(level + 1, va, next_level_table);
                }
                _ => {}
            }
        }
    }

    /// Adds a memory region to the translation table. The function splits the region into blocks
    /// and uses the block level functions to do the mapping.
    /// # Arguments
    /// * region: Memory region object
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<VirtualAddress, XlatError> {
        let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
        for block in blocks {
            self.map_block(block, attributes.clone());
        }

        Ok(region.base())
    }

    /// Removes a memory region from the translation table. The function splits the region into blocks
    /// and uses the block level functions to do the unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Find the mapped region that contains the whole given virtual address range
    /// # Arguments
    /// * va: Virtual address to look for
    /// * length: Length of the area in bytes
    /// # Return value
    /// * Reference to virtual region if found
    fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }

    /// Splits a memory region into blocks that match the granule sizes of the translation table.
    /// # Arguments
    /// * pa: Physical address
    /// * va: Virtual address
    /// * length: Region size in bytes
    /// # Return value
    /// * Vector of granule sized blocks
    fn split_region_to_blocks(
        mut pa: PhysicalAddress,
        mut va: VirtualAddress,
        mut length: usize,
    ) -> Result<Vec<Block>, XlatError> {
        let min_granule_mask = Self::GRANULE_SIZES.last().unwrap() - 1;

        if length == 0 {
            return Err(XlatError::InvalidParameterError(
                "Length cannot be 0".to_string(),
            ));
        }

        if (pa.0 | va.0 | length) & min_granule_mask != 0 {
            return Err(XlatError::InvalidParameterError(format!(
                "Addresses and length must be aligned {:#08x} {:#08x} {:#x}",
                pa.0, va.0, length
            )));
        }

        let mut pages = Vec::new();

        while length > 0 {
            for granule in &Self::GRANULE_SIZES {
                if (pa.0 | va.0) & (*granule - 1) == 0 && length >= *granule {
                    pages.push(Block::new(pa, va, *granule));
                    pa = pa.add_offset(*granule).ok_or(XlatError::Overflow)?;
                    va = va.add_offset(*granule).ok_or(XlatError::Overflow)?;

                    length -= *granule;
                    break;
                }
            }
        }

        Ok(pages)
    }

    /// Add block to memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    /// * attributes: Memory block's permissions, flags
    fn map_block(&mut self, block: Block, attributes: Attributes) {
        Self::set_block_descriptor_recursively(
            attributes,
            block.pa,
            block.va,
            block.granule,
            1,
            self.base_table.descriptors.as_mut_slice(),
            &self.page_pool,
        );
    }

    /// Adds the block descriptor to the translation table, creating all the intermediate tables
    /// needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * granule: Translation granule in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: PhysicalAddress,
        va: VirtualAddress,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(level)];

        // We reached the required granule level
        if Self::GRANULE_SIZES[level] == granule {
            descriptor.set_block_descriptor(level, pa, attributes);
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
                unsafe {
                    let next_table = page.get_as_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }
                Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(level),
                    granule,
                    level + 1,
                    unsafe { descriptor.get_next_level_table_mut(level) },
                    page_pool,
                )
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va.mask_for_level(level);
                let current_pa = descriptor.get_block_output_address(level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor
                let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
                unsafe {
                    let next_table = page.get_as_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }

                // Explode block descriptor to table entries
                for exploded_va in VirtualAddressRange::new(
                    current_va,
                    current_va.add_offset(Self::GRANULE_SIZES[level]).unwrap(),
                )
                .step_by(Self::GRANULE_SIZES[level + 1])
                {
                    let offset = exploded_va.diff(current_va).unwrap();
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa.add_offset(offset).unwrap(),
                        exploded_va.mask_for_level(level),
                        Self::GRANULE_SIZES[level + 1],
                        level + 1,
                        unsafe { descriptor.get_next_level_table_mut(level) },
                        page_pool,
                    )
                }

                // Invoke self to continue recursion on the newly created level
                Self::set_block_descriptor_recursively(
                    attributes, pa, va, granule, level, table, page_pool,
                );
            }
            DescriptorType::Table => Self::set_block_descriptor_recursively(
                attributes,
                pa,
                va.mask_for_level(level),
                granule,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(level) },
                page_pool,
            ),
        }
    }

    /// Remove block from memory mapping
    /// # Arguments
    /// * block: memory block that can be represented by a single translation entry
    fn unmap_block(&mut self, block: Block) {
        Self::remove_block_descriptor_recursively(
            block.va,
            block.granule,
            1,
            self.base_table.descriptors.as_mut_slice(),
            &self.page_pool,
        );
    }

    /// Removes the block descriptor from the translation table, along with all the intermediate
    /// tables which become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * granule: Translation granule in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    fn remove_block_descriptor_recursively(
        va: VirtualAddress,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(level)];

        // We reached the required granule level
        if Self::GRANULE_SIZES[level] == granule {
            descriptor.set_block_descriptor_to_invalid(level);
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Cannot remove block from non-existing table");
            }
            DescriptorType::Block => {
                panic!("Cannot remove block with different granule");
            }
            DescriptorType::Table => {
                let next_level_table = unsafe { descriptor.get_next_level_table_mut(level) };
                Self::remove_block_descriptor_recursively(
                    va.mask_for_level(level),
                    granule,
                    level + 1,
                    next_level_table,
                    page_pool,
                );

                if next_level_table.iter().all(|d| !d.is_valid()) {
                    // Empty table
                    let mut page = unsafe {
                        Pages::from_slice(descriptor.set_table_descriptor_to_invalid(level))
                    };
                    page.zero_init();
                    page_pool.release_pages(page).unwrap();
                }
            }
        }
    }

    fn get_descriptor(&mut self, va: VirtualAddress, granule: usize) -> &mut Descriptor {
        Self::walk_descriptors(va, granule, 0, &mut self.base_table.descriptors)
    }

    fn walk_descriptors(
        va: VirtualAddress,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
    ) -> &mut Descriptor {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(level)];

        if Self::GRANULE_SIZES[level] == granule {
            return descriptor;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Invalid descriptor");
            }
            DescriptorType::Block => {
                panic!("Cannot split existing block descriptor to table");
            }
            DescriptorType::Table => {
                Self::walk_descriptors(va.mask_for_level(level), granule, level + 1, unsafe {
                    descriptor.get_next_level_table_mut(level)
                })
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn make_block(pa: usize, va: usize, granule: usize) -> Block {
        Block::new(PhysicalAddress(pa), VirtualAddress(va), granule)
    }

    #[test]
    fn test_split_to_pages() {
        let pages = Xlat::split_region_to_blocks(
            PhysicalAddress(0x3fff_c000),
            VirtualAddress(0x3fff_c000),
            0x4020_5000,
        )
        .unwrap();
        assert_eq!(make_block(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
        assert_eq!(make_block(0x3fff_d000, 0x3fff_d000, 0x1000), pages[1]);
        assert_eq!(make_block(0x3fff_e000, 0x3fff_e000, 0x1000), pages[2]);
        assert_eq!(make_block(0x3fff_f000, 0x3fff_f000, 0x1000), pages[3]);
        assert_eq!(make_block(0x4000_0000, 0x4000_0000, 0x4000_0000), pages[4]);
        assert_eq!(make_block(0x8000_0000, 0x8000_0000, 0x0020_0000), pages[5]);
        assert_eq!(make_block(0x8020_0000, 0x8020_0000, 0x1000), pages[6]);
    }
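
    // A sketch of the error paths of split_region_to_blocks: a zero length or an input that is
    // not aligned to the smallest granule is rejected with InvalidParameterError. The addresses
    // below are arbitrary example values.
    #[test]
    fn test_split_to_pages_invalid_input() {
        // Zero length is rejected
        assert!(matches!(
            Xlat::split_region_to_blocks(
                PhysicalAddress(0x4000_0000),
                VirtualAddress(0x4000_0000),
                0
            ),
            Err(XlatError::InvalidParameterError(_))
        ));

        // A physical address that is not aligned to the 4 KiB granule is rejected
        assert!(matches!(
            Xlat::split_region_to_blocks(
                PhysicalAddress(0x4000_0100),
                VirtualAddress(0x4000_0000),
                0x1000
            ),
            Err(XlatError::InvalidParameterError(_))
        ));
    }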

    #[test]
    fn test_split_to_pages_unaligned() {
        let pages = Xlat::split_region_to_blocks(
            PhysicalAddress(0x3fff_c000),
            VirtualAddress(0x3f20_0000),
            0x200000,
        )
        .unwrap();
        for (i, block) in pages.iter().enumerate().take(512) {
            assert_eq!(
                make_block(0x3fff_c000 + (i << 12), 0x3f20_0000 + (i << 12), 0x1000),
                *block
            );
        }
    }
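
    // A sketch of the MemoryAccessRights -> Attributes conversion defined above; it assumes the
    // boolean fields and mem_attr_index of Attributes remain accessible from this test module.
    #[test]
    fn test_access_rights_to_attributes() {
        // Privileged read-write normal memory: no execution at any level, secure, not global
        let attr: Attributes = MemoryAccessRights::RW.into();
        assert!(attr.uxn);
        assert!(attr.pxn);
        assert!(attr.access_flag);
        assert!(!attr.non_secure);
        assert!(attr.not_global);

        // User executable mapping: EL0 may execute, the privileged level may not
        let attr: Attributes = (MemoryAccessRights::RX | MemoryAccessRights::USER).into();
        assert!(!attr.uxn);
        assert!(attr.pxn);

        // Device mapping selects the device memory attribute index
        let attr: Attributes = (MemoryAccessRights::RW | MemoryAccessRights::DEVICE).into();
        assert_eq!(MemoryAttributesIndex::Device_nGnRnE, attr.mem_attr_index);
    }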
}