// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![cfg_attr(not(test), no_std)]

extern crate alloc;

use core::arch::asm;
use core::iter::zip;
use core::{fmt, panic};

use address::{PhysicalAddress, VirtualAddressRange, VirtualAddress};
use alloc::boxed::Box;
use alloc::format;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use log::debug;

use bitflags::bitflags;
use packed_struct::prelude::*;

use self::descriptor::DescriptorType;

use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
use self::kernel_space::KernelSpace;
use self::page_pool::{Page, PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

pub mod address;
mod descriptor;
pub mod kernel_space;
pub mod page_pool;
mod region;
mod region_pool;

/// The first level memory descriptor table, which is the root of the translation table hierarchy.
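///
/// It holds 64 level 1 descriptors (one 64-bit word each), so the table occupies exactly
/// 512 bytes; the `align(512)` attribute keeps the base address aligned to the table size,
/// as required for a translation table base address.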
#[repr(C, align(512))]
pub struct BaseTable {
    pub descriptors: [Descriptor; 64],
}

impl BaseTable {
    pub fn new() -> Self {
        BaseTable {
            descriptors: core::array::from_fn(|_| Descriptor::default()),
        }
    }
}

/// Translation table error type
#[derive(Debug)]
pub enum XlatError {
    InvalidParameterError(String),
    AllocationError(String),
    AlignmentError(String),
    Overflow,
    InvalidOperation(String),
    Overlap,
    NotFound,
    RegionPoolError(RegionPoolError),
}

/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}

bitflags! {
    #[derive(Debug, Clone, Copy)]
    pub struct MemoryAccessRights : u32 {
        const R = 0b00000001;
        const W = 0b00000010;
        const X = 0b00000100;
        const NS = 0b00001000;

        const RW = Self::R.bits() | Self::W.bits();
        const RX = Self::R.bits() | Self::X.bits();
        const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();

        const USER = 0b00010000;
        const DEVICE = 0b00100000;
        const GLOBAL = 0b01000000;
    }
}

impl From<MemoryAccessRights> for Attributes {
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

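        // Execution permission is encoded via the two "execute never" bits: user executable
        // mappings clear UXN but keep PXN, privileged executable mappings clear PXN but keep
        // UXN, and mappings without X set both.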
        Attributes {
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

#[derive(PartialEq)]
struct Block {
    pa: PhysicalAddress,
    va: VirtualAddress,
    granule: usize,
}

impl Block {
    fn new(pa: PhysicalAddress, va: VirtualAddress, granule: usize) -> Self {
        assert!(Xlat::GRANULE_SIZES.contains(&granule));
        Self { pa, va, granule }
    }
}

impl fmt::Debug for Block {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Block")
            .field("pa", &format_args!("{:#010x}", self.pa.0))
            .field("va", &format_args!("{:#010x}", self.va.0))
            .field("granule", &format_args!("{:#010x}", self.granule))
            .finish()
    }
}

/// Enum for selecting TTBR0_EL1 or TTBR1_EL1
#[allow(clippy::upper_case_acronyms)]
pub enum TTBR {
    TTBR0_EL1,
    TTBR1_EL1,
}

pub struct Xlat {
    base_table: Box<BaseTable>,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
}

/// Memory translation table handling
/// # High level interface
/// * allocate and map zero initialized region (with or without VA)
/// * allocate and map memory region and load contents (with or without VA)
/// * map memory region by PA (with or without VA)
/// * unmap memory region by VA
/// * query PA by VA
/// * set access rights of mapped memory areas
/// * activate mapping
///
/// # Debug features
/// * print translation table details
///
/// # Region level interface
/// * map regions
/// * unmap region
/// * find a mapped region which contains a given virtual address range
/// * find empty area for region
/// * set access rights for a region
/// * create blocks by region
///
/// # Block level interface
/// * map block
/// * unmap block
/// * set access rights of block
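///
/// # Example
///
/// A minimal sketch of the high level interface; it assumes the caller has already set up a
/// `PagePool` instance (`page_pool`) and picks an arbitrary VA range and length just for
/// illustration:
///
/// ```ignore
/// let mut xlat = Xlat::new(
///     page_pool,
///     VirtualAddressRange::new(VirtualAddress(0x4000_0000), VirtualAddress(0x8000_0000)),
/// );
///
/// // Allocate and map a zero initialized region at an arbitrary free VA.
/// let va = xlat
///     .allocate_zero_init_range(None, 0x1_0000, MemoryAccessRights::RW)
///     .unwrap();
///
/// // Restrict the area to read-only, then install the tables into TTBR0_EL1 with ASID 0.
/// xlat.set_access_rights(va, 0x1_0000, MemoryAccessRights::R).unwrap();
/// xlat.activate(0, TTBR::TTBR0_EL1);
/// ```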
impl Xlat {
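    /// Block sizes that a single descriptor can map on each translation table level, assuming a
    /// 4 KiB translation granule: level 0 is not used (0), level 1 maps 1 GiB, level 2 maps
    /// 2 MiB and level 3 maps 4 KiB.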
    pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];

    pub fn new(page_pool: PagePool, va_range: VirtualAddressRange) -> Self {
        let mut regions = RegionPool::new();
        regions
            .add(VirtualRegion::new(va_range.start, va_range.len().unwrap()))
            .unwrap();
        Self {
            base_table: Box::new(BaseTable::new()),
            page_pool,
            regions,
        }
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
    /// the initial data
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<VirtualAddress>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self.page_pool.allocate_pages(data.len()).map_err(|e| {
            XlatError::AllocationError(format!(
                "Cannot allocate pages for {} bytes ({:?})",
                data.len(),
                e
            ))
        })?;

        pages.copy_data_to_page(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
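        // If the caller requested a specific VA, acquire exactly that range from the region
        // pool, otherwise let the pool pick a suitable free area for the allocated pages.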
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with zeros
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<VirtualAddress>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self.page_pool.allocate_pages(length).map_err(|e| {
            XlatError::AllocationError(format!("Cannot allocate pages for {length} bytes ({e:?})"))
        })?;

        pages.zero_init();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Map memory area by physical address
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn map_physical_address_range(
        &mut self,
        va: Option<VirtualAddress>,
        pa: PhysicalAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmap memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
    pub fn unmap_virtual_address_range(
        &mut self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<(), XlatError> {
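        // Resolve the backing PA first; the PA/VA/length triple is needed both for clearing the
        // descriptors and for releasing the region back to the region pool.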
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(XlatError::RegionPoolError)
    }

    /// Query physical address by virtual address range. Only returns a value if the memory area
    /// is mapped as a contiguous area.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
    pub fn get_pa_by_va(
        &self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<PhysicalAddress, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
    pub fn set_access_rights(
        &mut self,
        va: VirtualAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Activate memory mapping represented by the object
    /// # Arguments
    /// * asid: ASID of the table base address
    /// * ttbr: Selects TTBR0_EL1/TTBR1_EL1
    pub fn activate(&self, asid: u8, ttbr: TTBR) {
        let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.descriptors.as_ptr() as u64);
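        // TTBRn_EL1 layout: the ASID goes into the upper field starting at bit 48, the physical
        // address of the level 1 table into the lower bits.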
        let ttbr_value = ((asid as u64) << 48) | base_table_pa;
        #[cfg(target_arch = "aarch64")]
        match ttbr {
            TTBR::TTBR0_EL1 => unsafe {
                asm!(
                    "msr ttbr0_el1, {0}
                    isb",
                    in(reg) ttbr_value)
            },
            TTBR::TTBR1_EL1 => unsafe {
                asm!(
                    "msr ttbr1_el1, {0}
                    isb

                    tlbi vmalle1
                    dsb sy
                    isb",
                    in(reg) ttbr_value)
            },
        }
    }

    /// Prints the translation tables to debug console recursively
    pub fn print(&self) {
        debug!(
            "Xlat table -> {:#010x}",
            self.base_table.descriptors.as_ptr() as u64
        );
        Self::print_table(1, 0, &self.base_table.descriptors);
    }

    /// Prints a single translation table to the debug console
    /// # Arguments
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    pub fn print_table(level: usize, va: usize, table: &[Descriptor]) {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(Self::GRANULE_SIZES[level])) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => debug!(
                    "{} {:#010x} Block -> {:#010x}",
                    level_prefix,
                    va,
                    descriptor.get_block_output_address(level).0
                ),
                DescriptorType::Table => {
                    let next_level_table = unsafe { descriptor.get_next_level_table(level) };
                    debug!(
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix,
                        va,
                        next_level_table.as_ptr() as usize
                    );
                    Self::print_table(level + 1, va, next_level_table);
                }
                _ => {}
            }
        }
    }

    /// Adds a memory region to the translation table. The function splits the region into blocks
    /// and uses the block level functions to do the mapping.
    /// # Arguments
    /// * region: Memory region object
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<VirtualAddress, XlatError> {
        let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
        for block in blocks {
            self.map_block(block, attributes.clone());
        }

        Ok(region.base())
    }

    /// Removes a memory region from the translation table. The function splits the region into
    /// blocks and uses the block level functions to do the unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Finds the mapped region that contains the whole virtual address range
    /// # Arguments
    /// * va: Virtual address to look for
    /// * length: Length of the area in bytes
    /// # Return value
    /// * Reference to the virtual region if found
    fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }

    /// Splits a memory region into blocks that match the granule sizes of the translation table.
    /// # Arguments
    /// * pa: Physical address
    /// * va: Virtual address
    /// * length: Region size in bytes
    /// # Return value
    /// * Vector of granule sized blocks
    fn split_region_to_blocks(
        mut pa: PhysicalAddress,
        mut va: VirtualAddress,
        mut length: usize,
    ) -> Result<Vec<Block>, XlatError> {
        let min_granule_mask = Self::GRANULE_SIZES.last().unwrap() - 1;

        if length == 0 {
            return Err(XlatError::InvalidParameterError(
                "Length cannot be 0".to_string(),
            ));
        }

        if (pa.0 | va.0 | length) & min_granule_mask != 0 {
            return Err(XlatError::InvalidParameterError(format!(
                "Addresses and length must be aligned {:#08x} {:#08x} {:#x}",
                pa.0, va.0, length
            )));
        }

        let mut pages = Vec::new();

        while length > 0 {
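            // Greedily pick the largest granule that both the current PA and VA are aligned to
            // and that still fits into the remaining length.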
            for granule in &Self::GRANULE_SIZES {
                if (pa.0 | va.0) & (*granule - 1) == 0 && length >= *granule {
                    pages.push(Block::new(pa, va, *granule));
                    pa = pa.add_offset(*granule).ok_or(XlatError::Overflow)?;
                    va = va.add_offset(*granule).ok_or(XlatError::Overflow)?;

                    length -= *granule;
                    break;
                }
            }
        }

        Ok(pages)
    }

    /// Add block to memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    /// * attributes: Memory block's permissions, flags
    fn map_block(&mut self, block: Block, attributes: Attributes) {
        Self::set_block_descriptor_recursively(
            attributes,
            block.pa,
            block.va,
            block.granule,
            1,
            self.base_table.descriptors.as_mut_slice(),
            &self.page_pool,
        );
    }

    /// Adds the block descriptor to the translation table along with all the intermediate tables
    /// needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * granule: Translation granule in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: PhysicalAddress,
        va: VirtualAddress,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(level)];

        // We reached the required granule level
        if Self::GRANULE_SIZES[level] == granule {
            descriptor.set_block_descriptor(level, pa, attributes);
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
                unsafe {
                    let next_table = page.get_as_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }
                Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(level),
                    granule,
                    level + 1,
                    unsafe { descriptor.get_next_level_table_mut(level) },
                    page_pool,
                )
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va.mask_for_level(level);
                let current_pa = descriptor.get_block_output_address(level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor
                let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
                unsafe {
                    let next_table = page.get_as_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }

                // Explode block descriptor to table entries
                for exploded_va in VirtualAddressRange::new(
                    current_va,
                    current_va.add_offset(Self::GRANULE_SIZES[level]).unwrap(),
                )
                .step_by(Self::GRANULE_SIZES[level + 1])
                {
                    let offset = exploded_va.diff(current_va).unwrap();
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa.add_offset(offset).unwrap(),
                        exploded_va.mask_for_level(level),
                        Self::GRANULE_SIZES[level + 1],
                        level + 1,
                        unsafe { descriptor.get_next_level_table_mut(level) },
                        page_pool,
                    )
                }

                // Invoke self to continue recursion on the newly created level
                Self::set_block_descriptor_recursively(
                    attributes, pa, va, granule, level, table, page_pool,
                );
            }
            DescriptorType::Table => Self::set_block_descriptor_recursively(
                attributes,
                pa,
                va.mask_for_level(level),
                granule,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(level) },
                page_pool,
            ),
        }
    }

    /// Remove block from memory mapping
    /// # Arguments
    /// * block: memory block that can be represented by a single translation entry
    fn unmap_block(&mut self, block: Block) {
        Self::remove_block_descriptor_recursively(
            block.va,
            block.granule,
            1,
            self.base_table.descriptors.as_mut_slice(),
            &self.page_pool,
        );
    }

    /// Removes block descriptor from the translation table along with all the intermediate tables
    /// which become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * granule: Translation granule in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    fn remove_block_descriptor_recursively(
        va: VirtualAddress,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(level)];

        // We reached the required granule level
        if Self::GRANULE_SIZES[level] == granule {
            descriptor.set_block_descriptor_to_invalid(level);
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Cannot remove block from non-existing table");
            }
            DescriptorType::Block => {
                panic!("Cannot remove block with different granule");
            }
            DescriptorType::Table => {
                let next_level_table = unsafe { descriptor.get_next_level_table_mut(level) };
                Self::remove_block_descriptor_recursively(
                    va.mask_for_level(level),
                    granule,
                    level + 1,
                    next_level_table,
                    page_pool,
                );

                if next_level_table.iter().all(|d| !d.is_valid()) {
                    // Empty table
                    let mut page = unsafe {
                        Pages::from_slice(descriptor.set_table_descriptor_to_invalid(level))
                    };
                    page.zero_init();
                    page_pool.release_pages(page).unwrap();
                }
            }
        }
    }

    fn get_descriptor(&mut self, va: VirtualAddress, granule: usize) -> &mut Descriptor {
        Self::walk_descriptors(va, granule, 0, &mut self.base_table.descriptors)
    }

    fn walk_descriptors(
        va: VirtualAddress,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
    ) -> &mut Descriptor {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(level)];

        if Self::GRANULE_SIZES[level] == granule {
            return descriptor;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Invalid descriptor");
            }
            DescriptorType::Block => {
                panic!("Cannot split existing block descriptor to table");
            }
            DescriptorType::Table => {
                Self::walk_descriptors(va.mask_for_level(level), granule, level + 1, unsafe {
                    descriptor.get_next_level_table_mut(level)
                })
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn make_block(pa: usize, va: usize, granule: usize) -> Block {
        Block::new(PhysicalAddress(pa), VirtualAddress(va), granule)
    }

    #[test]
    fn test_split_to_pages() {
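        // 0x3fff_c000..0x8020_1000 splits into four 4 KiB pages up to the 1 GiB boundary at
        // 0x4000_0000, then a 1 GiB block, a 2 MiB block and a trailing 4 KiB page.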
        let pages = Xlat::split_region_to_blocks(
            PhysicalAddress(0x3fff_c000),
            VirtualAddress(0x3fff_c000),
            0x4020_5000,
        )
        .unwrap();
        assert_eq!(make_block(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
        assert_eq!(make_block(0x3fff_d000, 0x3fff_d000, 0x1000), pages[1]);
        assert_eq!(make_block(0x3fff_e000, 0x3fff_e000, 0x1000), pages[2]);
        assert_eq!(make_block(0x3fff_f000, 0x3fff_f000, 0x1000), pages[3]);
        assert_eq!(make_block(0x4000_0000, 0x4000_0000, 0x4000_0000), pages[4]);
        assert_eq!(make_block(0x8000_0000, 0x8000_0000, 0x0020_0000), pages[5]);
        assert_eq!(make_block(0x8020_0000, 0x8020_0000, 0x1000), pages[6]);
    }

    #[test]
    fn test_split_to_pages_unaligned() {
        let pages = Xlat::split_region_to_blocks(
            PhysicalAddress(0x3fff_c000),
            VirtualAddress(0x3f20_0000),
            0x200000,
        )
        .unwrap();
        for (i, block) in pages.iter().enumerate().take(512) {
            assert_eq!(
                make_block(0x3fff_c000 + (i << 12), 0x3f20_0000 + (i << 12), 0x1000),
                *block
            );
        }
    }
}