// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![cfg_attr(not(test), no_std)]

extern crate alloc;

use core::iter::zip;
use core::{fmt, panic};

use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
use alloc::boxed::Box;
use alloc::format;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use log::debug;

use bitflags::bitflags;
use packed_struct::prelude::*;

use self::descriptor::DescriptorType;

use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
use self::kernel_space::KernelSpace;
use self::page_pool::{Page, PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

pub mod address;
mod descriptor;
mod granule;
pub mod kernel_space;
pub mod page_pool;
mod region;
mod region_pool;

/// The first level of the memory descriptor tables, which serves as the base translation table
#[repr(C, align(512))]
pub struct BaseTable {
    pub descriptors: [Descriptor; 64],
}

impl BaseTable {
    pub fn new() -> Self {
        BaseTable {
            descriptors: core::array::from_fn(|_| Descriptor::default()),
        }
    }
}

/// Translation table error type
#[derive(Debug)]
pub enum XlatError {
    InvalidParameterError(String),
    AllocationError(String),
    AlignmentError(String),
    Overflow,
    InvalidOperation(String),
    Overlap,
    NotFound,
    RegionPoolError(RegionPoolError),
}

/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
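///
/// For example, a MAIR_EL1 value of 0x0000_0000_0000_ff00 would match this layout (Attr0 = 0x00:
/// Device-nGnRnE, Attr1 = 0xff: Normal memory, inner/outer write-back write-allocate). The value
/// actually programmed by the startup code is outside the scope of this file and is only assumed
/// here to follow this layout.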
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}

bitflags! {
    #[derive(Debug, Clone, Copy)]
    pub struct MemoryAccessRights : u32 {
        const R = 0b00000001;
        const W = 0b00000010;
        const X = 0b00000100;
        const NS = 0b00001000;

        const RW = Self::R.bits() | Self::W.bits();
        const RX = Self::R.bits() | Self::X.bits();
        const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();

        const USER = 0b00010000;
        const DEVICE = 0b00100000;
        const GLOBAL = 0b01000000;
    }
}

impl From<MemoryAccessRights> for Attributes {
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

        Attributes {
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

#[derive(PartialEq)]
struct Block {
    pa: PhysicalAddress,
    va: VirtualAddress,
    granule: usize,
}

impl Block {
    fn new(pa: PhysicalAddress, va: VirtualAddress, granule: usize) -> Self {
        assert!(Xlat::GRANULE_SIZES.contains(&granule));
        Self { pa, va, granule }
    }
}

impl fmt::Debug for Block {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Block")
            .field("pa", &format_args!("{:#010x}", self.pa.0))
            .field("va", &format_args!("{:#010x}", self.va.0))
            .field("granule", &format_args!("{:#010x}", self.granule))
            .finish()
    }
}

pub enum RegimeVaRange {
    Lower,
    Upper,
}

pub enum TranslationRegime {
    EL1_0(RegimeVaRange, u8), // EL1 and EL0 stage 1, TTBRx_EL1
    #[cfg(target_feature = "vh")]
    EL2_0(RegimeVaRange, u8), // EL2 and EL0 with VHE
    EL2,                      // EL2
    EL3,                      // EL3, TTBR0_EL3
}

pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;

pub struct Xlat {
    base_table: Box<BaseTable>,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
    regime: TranslationRegime,
}

/// Memory translation table handling
/// # High level interface
/// * allocate and map zero initialized region (with or without VA)
/// * allocate and map memory region and load contents (with or without VA)
/// * map memory region by PA (with or without VA)
/// * unmap memory region by VA
/// * query PA by VA
/// * set access rights of mapped memory areas
/// * activate mapping
///
/// # Debug features
/// * print translation table details
///
/// # Region level interface
/// * map regions
/// * unmap region
/// * find a mapped region which contains a given address range
/// * find empty area for region
/// * set access rights for a region
/// * create blocks by region
///
/// # Block level interface
/// * map block
/// * unmap block
/// * set access rights of block
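///
/// # Example
/// A minimal usage sketch (not compiled as a doc test). It assumes that a suitable `page_pool`
/// has been created by platform specific code and that the addresses below fall into the managed
/// virtual address range; the concrete values are purely illustrative.
/// ```ignore
/// let mut xlat = Xlat::new(
///     page_pool,
///     VirtualAddressRange::new(VirtualAddress(0x4000_0000), VirtualAddress(0x8000_0000)),
///     TranslationRegime::EL1_0(RegimeVaRange::Lower, 0),
/// );
///
/// // Identity map a 2 MiB device region read-write and query the PA back.
/// let va = xlat
///     .map_physical_address_range(
///         Some(VirtualAddress(0x4000_0000)),
///         PhysicalAddress(0x4000_0000),
///         0x20_0000,
///         MemoryAccessRights::RW | MemoryAccessRights::DEVICE | MemoryAccessRights::GLOBAL,
///     )
///     .unwrap();
/// assert_eq!(0x4000_0000, xlat.get_pa_by_va(va, 0x20_0000).unwrap().0);
///
/// // SAFETY: no live references depend on the mappings that are being replaced.
/// unsafe { xlat.activate() };
/// ```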
impl Xlat {
    pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];

    pub fn new(
        page_pool: PagePool,
        address: VirtualAddressRange,
        regime: TranslationRegime,
    ) -> Self {
        let mut regions = RegionPool::new();
        regions
            .add(VirtualRegion::new(address.start, address.len().unwrap()))
            .unwrap();
        Self {
            base_table: Box::new(BaseTable::new()),
            page_pool,
            regions,
            regime,
        }
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
    /// the initial data
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<VirtualAddress>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self.page_pool.allocate_pages(data.len()).map_err(|e| {
            XlatError::AllocationError(format!(
                "Cannot allocate pages for {} bytes ({:?})",
                data.len(),
                e
            ))
        })?;

        pages.copy_data_to_page(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with zeros
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<VirtualAddress>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self.page_pool.allocate_pages(length).map_err(|e| {
            XlatError::AllocationError(format!("Cannot allocate pages for {length} bytes ({e:?})"))
        })?;

        pages.zero_init();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Map memory area by physical address
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn map_physical_address_range(
        &mut self,
        va: Option<VirtualAddress>,
        pa: PhysicalAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmap memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
    pub fn unmap_virtual_address_range(
        &mut self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<(), XlatError> {
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(XlatError::RegionPoolError)
    }

    /// Query physical address by virtual address range. Only returns a value if the memory area
    /// is mapped as a contiguous area.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
    pub fn get_pa_by_va(
        &self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<PhysicalAddress, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
    pub fn set_access_rights(
        &mut self,
        va: VirtualAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Activate memory mapping represented by the object
    ///
    /// # Safety
    /// When activating memory mapping for the running exception level, the
    /// caller must ensure that the new mapping will not break any existing
    /// references. After activation the caller must ensure that there are no
    /// active references when unmapping memory.
    pub unsafe fn activate(&self) {
        let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.descriptors.as_ptr() as u64);

        #[cfg(target_arch = "aarch64")]
        match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL1_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) base_table_pa),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr ttbr0_el3, {0}
                isb",
                in(reg) base_table_pa),
        }
    }

    /// Prints the translation tables to debug console recursively
    pub fn print(&self) {
        debug!(
            "Xlat table -> {:#010x}",
            self.base_table.descriptors.as_ptr() as u64
        );
        Self::print_table(1, 0, &self.base_table.descriptors);
    }

    /// Prints a single translation table to the debug console
    /// # Arguments
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    pub fn print_table(level: usize, va: usize, table: &[Descriptor]) {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(Self::GRANULE_SIZES[level])) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => debug!(
                    "{} {:#010x} Block -> {:#010x}",
                    level_prefix,
                    va,
                    descriptor.get_block_output_address(level).0
                ),
                DescriptorType::Table => {
                    let next_level_table = unsafe { descriptor.get_next_level_table(level) };
                    debug!(
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix,
                        va,
                        next_level_table.as_ptr() as usize
                    );
                    Self::print_table(level + 1, va, next_level_table);
                }
                _ => {}
            }
        }
    }

    /// Adds a memory region to the translation table. The function splits the region into blocks
    /// and uses the block level functions to do the mapping.
    /// # Arguments
    /// * region: Memory region object
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<VirtualAddress, XlatError> {
        let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
        for block in blocks {
            self.map_block(block, attributes.clone());
        }

        Ok(region.base())
    }

    /// Removes a memory region from the translation table. The function splits the region into
    /// blocks and uses the block level functions to do the unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Find the mapped region that contains the whole area
    /// # Arguments
    /// * va: Virtual address of the area to look for
    /// * length: Length of the area in bytes
    /// # Return value
    /// * Reference to the containing virtual region, if found
    fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }

    /// Splits a memory region into blocks that match the granule sizes of the translation table.
    /// # Arguments
    /// * pa: Physical address
    /// * va: Virtual address
    /// * length: Region size in bytes
    /// # Return value
    /// * Vector of granule sized blocks
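    ///
    /// For example, a region of 0x4020_5000 bytes starting at PA/VA 0x3fff_c000 splits into four
    /// 4 KiB blocks up to the 1 GiB boundary, one 1 GiB block, one 2 MiB block and a trailing
    /// 4 KiB block (see `test_split_to_pages` below).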
    fn split_region_to_blocks(
        mut pa: PhysicalAddress,
        mut va: VirtualAddress,
        mut length: usize,
    ) -> Result<Vec<Block>, XlatError> {
        let min_granule_mask = Self::GRANULE_SIZES.last().unwrap() - 1;

        if length == 0 {
            return Err(XlatError::InvalidParameterError(
                "Length cannot be 0".to_string(),
            ));
        }

        if (pa.0 | va.0 | length) & min_granule_mask != 0 {
            return Err(XlatError::InvalidParameterError(format!(
                "Addresses and length must be aligned {:#08x} {:#08x} {:#x}",
                pa.0, va.0, length
            )));
        }

        let mut pages = Vec::new();

        while length > 0 {
            for granule in &Self::GRANULE_SIZES {
                if (pa.0 | va.0) & (*granule - 1) == 0 && length >= *granule {
                    pages.push(Block::new(pa, va, *granule));
                    pa = pa.add_offset(*granule).ok_or(XlatError::Overflow)?;
                    va = va.add_offset(*granule).ok_or(XlatError::Overflow)?;

                    length -= *granule;
                    break;
                }
            }
        }

        Ok(pages)
    }

    /// Add block to memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    /// * attributes: Memory block's permissions, flags
    fn map_block(&mut self, block: Block, attributes: Attributes) {
        Self::set_block_descriptor_recursively(
            attributes,
            block.pa,
            block.va,
            block.granule,
            1,
            self.base_table.descriptors.as_mut_slice(),
            &self.page_pool,
            &self.regime,
        );
    }

    /// Adds the block descriptor to the translation table, creating all the intermediate tables
    /// needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * granule: Translation granule in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    /// * regime: Translation regime, used to invalidate the affected TLB entries
    #[allow(clippy::too_many_arguments)]
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: PhysicalAddress,
        va: VirtualAddress,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(level)];

        // We reached the required granule level
        if Self::GRANULE_SIZES[level] == granule {
            // Follow break-before-make sequence
            descriptor.set_block_or_invalid_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            descriptor.set_block_descriptor(level, pa, attributes);
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
                unsafe {
                    let next_table = page.get_as_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }
                Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(level),
                    granule,
                    level + 1,
                    unsafe { descriptor.get_next_level_table_mut(level) },
                    page_pool,
                    regime,
                )
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va.mask_for_level(level);
                let current_pa = descriptor.get_block_output_address(level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor
                let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
                unsafe {
                    let next_table = page.get_as_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }

                // Explode block descriptor to table entries
                for exploded_va in VirtualAddressRange::new(
                    current_va,
                    current_va.add_offset(Self::GRANULE_SIZES[level]).unwrap(),
                )
                .step_by(Self::GRANULE_SIZES[level + 1])
                {
                    let offset = exploded_va.diff(current_va).unwrap();
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa.add_offset(offset).unwrap(),
                        exploded_va.mask_for_level(level),
                        Self::GRANULE_SIZES[level + 1],
                        level + 1,
                        unsafe { descriptor.get_next_level_table_mut(level) },
                        page_pool,
                        regime,
                    )
                }

                // Invoke self to continue recursion on the newly created level
                Self::set_block_descriptor_recursively(
                    attributes, pa, va, granule, level, table, page_pool, regime,
                );
            }
            DescriptorType::Table => Self::set_block_descriptor_recursively(
                attributes,
                pa,
                va.mask_for_level(level),
                granule,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(level) },
                page_pool,
                regime,
            ),
        }
    }

    /// Remove block from memory mapping
    /// # Arguments
    /// * block: memory block that can be represented by a single translation entry
    fn unmap_block(&mut self, block: Block) {
        Self::remove_block_descriptor_recursively(
            block.va,
            block.granule,
            1,
            self.base_table.descriptors.as_mut_slice(),
            &self.page_pool,
            &self.regime,
        );
    }

    /// Removes the block descriptor from the translation table, along with all the intermediate
    /// tables that become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * granule: Translation granule in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    /// * regime: Translation regime, used to invalidate the affected TLB entries
    fn remove_block_descriptor_recursively(
        va: VirtualAddress,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(level)];

        // We reached the required granule level
        if Self::GRANULE_SIZES[level] == granule {
            descriptor.set_block_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Cannot remove block from non-existing table");
            }
            DescriptorType::Block => {
                panic!("Cannot remove block with different granule");
            }
            DescriptorType::Table => {
                let next_level_table = unsafe { descriptor.get_next_level_table_mut(level) };
                Self::remove_block_descriptor_recursively(
                    va.mask_for_level(level),
                    granule,
                    level + 1,
                    next_level_table,
                    page_pool,
                    regime,
                );

                if next_level_table.iter().all(|d| !d.is_valid()) {
                    // Empty table
                    let mut page = unsafe {
                        Pages::from_slice(descriptor.set_table_descriptor_to_invalid(level))
                    };
                    page.zero_init();
                    page_pool.release_pages(page).unwrap();
                }
            }
        }
    }

    fn get_descriptor(&mut self, va: VirtualAddress, granule: usize) -> &mut Descriptor {
        Self::walk_descriptors(va, granule, 0, &mut self.base_table.descriptors)
    }

    fn walk_descriptors(
        va: VirtualAddress,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
    ) -> &mut Descriptor {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(level)];

        if Self::GRANULE_SIZES[level] == granule {
            return descriptor;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Invalid descriptor");
            }
            DescriptorType::Block => {
                panic!("Cannot split existing block descriptor to table");
            }
            DescriptorType::Table => {
                Self::walk_descriptors(va.mask_for_level(level), granule, level + 1, unsafe {
                    descriptor.get_next_level_table_mut(level)
                })
            }
        }
    }

    fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
        // SAFETY: The assembly code invalidates the translation table entry of
        // the VA or all entries of the translation regime.
        #[cfg(target_arch = "aarch64")]
        unsafe {
            if let Some(VirtualAddress(va)) = va {
                match regime {
                    TranslationRegime::EL1_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi vae2is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi vae3is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                }
            } else {
                match regime {
                    TranslationRegime::EL1_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi alle2
                        dsb nsh
                        isb"
                    ),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi alle3
                        dsb nsh
                        isb"
                    ),
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn make_block(pa: usize, va: usize, granule: usize) -> Block {
        Block::new(PhysicalAddress(pa), VirtualAddress(va), granule)
    }

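    // Additional sanity check of the MemoryAccessRights -> Attributes conversion; the asserted
    // fields follow directly from the `From` implementation above.
    #[test]
    fn test_access_rights_to_attributes() {
        let attr = Attributes::from(MemoryAccessRights::RW);
        // Non-executable, kernel-only, global, secure mapping with the access flag set
        assert!(attr.uxn);
        assert!(attr.pxn);
        assert!(attr.not_global);
        assert!(!attr.non_secure);
        assert!(attr.access_flag);
    }
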
    #[test]
    fn test_split_to_pages() {
        let pages = Xlat::split_region_to_blocks(
            PhysicalAddress(0x3fff_c000),
            VirtualAddress(0x3fff_c000),
            0x4020_5000,
        )
        .unwrap();
        assert_eq!(make_block(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
        assert_eq!(make_block(0x3fff_d000, 0x3fff_d000, 0x1000), pages[1]);
        assert_eq!(make_block(0x3fff_e000, 0x3fff_e000, 0x1000), pages[2]);
        assert_eq!(make_block(0x3fff_f000, 0x3fff_f000, 0x1000), pages[3]);
        assert_eq!(make_block(0x4000_0000, 0x4000_0000, 0x4000_0000), pages[4]);
        assert_eq!(make_block(0x8000_0000, 0x8000_0000, 0x0020_0000), pages[5]);
        assert_eq!(make_block(0x8020_0000, 0x8020_0000, 0x1000), pages[6]);
    }

    #[test]
    fn test_split_to_pages_unaligned() {
        let pages = Xlat::split_region_to_blocks(
            PhysicalAddress(0x3fff_c000),
            VirtualAddress(0x3f20_0000),
            0x200000,
        )
        .unwrap();
        for (i, block) in pages.iter().enumerate().take(512) {
            assert_eq!(
                make_block(0x3fff_c000 + (i << 12), 0x3f20_0000 + (i << 12), 0x1000),
                *block
            );
        }
    }
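
    // Extra checks of the error paths of `split_region_to_blocks`; the expectations follow from
    // the explicit zero length and minimum granule alignment checks in the function.
    #[test]
    fn test_split_to_pages_invalid_params() {
        // Zero length is rejected
        assert!(matches!(
            Xlat::split_region_to_blocks(PhysicalAddress(0x1000), VirtualAddress(0x1000), 0),
            Err(XlatError::InvalidParameterError(_))
        ));

        // Addresses and length must be aligned to the smallest granule (4 KiB)
        assert!(matches!(
            Xlat::split_region_to_blocks(PhysicalAddress(0x1080), VirtualAddress(0x1000), 0x1000),
            Err(XlatError::InvalidParameterError(_))
        ));
    }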
}