// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![cfg_attr(not(test), no_std)]

extern crate alloc;

use core::iter::zip;
use core::panic;

use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
use alloc::format;
use alloc::string::String;
use block::{Block, BlockIterator};
use log::debug;

use bitflags::bitflags;
use packed_struct::prelude::*;

use self::descriptor::DescriptorType;

use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
use self::kernel_space::KernelSpace;
use self::page_pool::{PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

pub mod address;
mod block;
mod descriptor;
mod granule;
pub mod kernel_space;
pub mod page_pool;
mod region;
mod region_pool;

/// Translation table error type
#[derive(Debug)]
pub enum XlatError {
    InvalidParameterError(String),
    AllocationError(String),
    AlignmentError(String),
    Overflow,
    InvalidOperation(String),
    Overlap,
    NotFound,
    RegionPoolError(RegionPoolError),
}

/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
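/// (with these two indices that would be, e.g., `MAIR_EL1 = 0x0000_0000_0000_ff00`:
/// attribute 0 = `0x00` Device-nGnRnE, attribute 1 = `0xff` Normal memory,
/// inner/outer write-back write-allocate)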
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}

bitflags! {
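    /// Memory access rights of a mapped region.
    ///
    /// Flags can be combined, e.g. `MemoryAccessRights::RW | MemoryAccessRights::GLOBAL`
    /// describes a global, privileged, non-executable, read-write mapping.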
    #[derive(Debug, Clone, Copy)]
    pub struct MemoryAccessRights : u32 {
        const R = 0b00000001;
        const W = 0b00000010;
        const X = 0b00000100;
        const NS = 0b00001000;

        const RW = Self::R.bits() | Self::W.bits();
        const RX = Self::R.bits() | Self::X.bits();
        const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();

        const USER = 0b00010000;
        const DEVICE = 0b00100000;
        const GLOBAL = 0b01000000;
    }
}

impl From<MemoryAccessRights> for Attributes {
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

        Attributes {
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

pub enum RegimeVaRange {
    Lower,
    Upper,
}

pub enum TranslationRegime {
    EL1_0(RegimeVaRange, u8), // EL1 and EL0 stage 1, TTBRx_EL1
    #[cfg(target_feature = "vh")]
    EL2_0(RegimeVaRange, u8), // EL2 and EL0 with VHE
    EL2,                      // EL2
    EL3,                      // EL3, TTBR0_EL3
}

pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;

pub struct Xlat<const VA_BITS: usize> {
    base_table: Pages,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
    regime: TranslationRegime,
    granule: TranslationGranule<VA_BITS>,
}

137/// Memory translation table handling
138/// # High level interface
139/// * allocate and map zero initialized region (with or without VA)
140/// * allocate and map memory region and load contents (with or without VA)
141/// * map memory region by PA (with or without VA)
142/// * unmap memory region by PA
143/// * query PA by VA
144/// * set access rights of mapped memory areas
145/// * active mapping
146///
147/// # Debug features
148/// * print translation table details
149///
150/// # Region level interface
151/// * map regions
152/// * unmap region
153/// * find a mapped region which contains
154/// * find empty area for region
155/// * set access rights for a region
156/// * create blocks by region
157///
158/// # Block level interface
159/// * map block
160/// * unmap block
161/// * set access rights of block
impl<const VA_BITS: usize> Xlat<VA_BITS> {
    pub fn new(
        page_pool: PagePool,
        address: VirtualAddressRange,
        regime: TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Self {
        let initial_lookup_level = granule.initial_lookup_level();

        let base_table = page_pool
            .allocate_pages(
                granule.table_size::<Descriptor>(initial_lookup_level),
                Some(granule.table_alignment::<Descriptor>(initial_lookup_level)),
            )
            .unwrap();

        let mut regions = RegionPool::new();
        regions
            .add(VirtualRegion::new(address.start, address.len().unwrap()))
            .unwrap();
        Self {
            base_table,
            page_pool,
            regions,
            regime,
            granule,
        }
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
    /// the initial data
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<VirtualAddress>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(data.len(), Some(self.granule as usize))
            .map_err(|e| {
                XlatError::AllocationError(format!(
                    "Cannot allocate pages for {} bytes ({:?})",
                    data.len(),
                    e
                ))
            })?;

        pages.copy_data_to_page(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
    /// zeros
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<VirtualAddress>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(length, Some(self.granule as usize))
            .map_err(|e| {
                XlatError::AllocationError(format!(
                    "Cannot allocate pages for {length} bytes ({e:?})"
                ))
            })?;

        pages.zero_init();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Map memory area by physical address
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn map_physical_address_range(
        &mut self,
        va: Option<VirtualAddress>,
        pa: PhysicalAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmap memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
    pub fn unmap_virtual_address_range(
        &mut self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<(), XlatError> {
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(XlatError::RegionPoolError)
    }

    /// Query physical address by virtual address range. Only returns a value if the memory area
    /// is mapped as a contiguous area.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
    pub fn get_pa_by_va(
        &self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<PhysicalAddress, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
    pub fn set_access_rights(
        &mut self,
        va: VirtualAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Activate memory mapping represented by the object
    ///
    /// # Safety
    /// When activating memory mapping for the running exception level, the
    /// caller must ensure that the new mapping will not break any existing
    /// references. After activation the caller must ensure that there are no
    /// active references when unmapping memory.
    pub unsafe fn activate(&self) {
        // Select translation granule
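        // TG0 (TCR_ELx bits [15:14]) applies to TTBR0 based walks, TG1
        // (TCR_EL1 bits [31:30]) to TTBR1 based walks; the two fields encode
        // the same granule sizes differently.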
        let is_tg0 = match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, _)
            | TranslationRegime::EL2
            | TranslationRegime::EL3 => true,
            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => false,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, _) => true,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => false,
        };

        #[cfg(target_arch = "aarch64")]
        if is_tg0 {
            self.modify_tcr(|tcr| {
                let tg0 = match self.granule {
                    TranslationGranule::Granule4k => 0b00,
                    TranslationGranule::Granule16k => 0b10,
                    TranslationGranule::Granule64k => 0b01,
                };

                (tcr & !(3 << 14)) | (tg0 << 14)
            });
        } else {
            self.modify_tcr(|tcr| {
                let tg1 = match self.granule {
                    TranslationGranule::Granule4k => 0b10,
                    TranslationGranule::Granule16k => 0b01,
                    TranslationGranule::Granule64k => 0b11,
                };

                (tcr & !(3 << 30)) | (tg1 << 30)
            });
        }

        // Set translation table
        let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.get_pa().0 as u64);

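        // For the EL1&0 (and EL2&0) regimes, TTBRx bits [63:48] carry the ASID.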
        #[cfg(target_arch = "aarch64")]
        match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL1_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) base_table_pa),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr ttbr0_el3, {0}
                isb",
                in(reg) base_table_pa),
        }
    }

    /// Modifies the TCR register of the selected regime of the instance.
    #[cfg(target_arch = "aarch64")]
    unsafe fn modify_tcr<F>(&self, f: F)
    where
        F: Fn(u64) -> u64,
    {
        let mut tcr: u64;

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el1
                isb",
                out(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "mrs {0}, tcr_el3
                isb",
                out(reg) tcr),
        }

        tcr = f(tcr);

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "msr tcr_el1, {0}
                isb",
                in(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr tcr_el3, {0}
                isb",
                in(reg) tcr),
        }
    }

    /// Prints the translation tables to debug console recursively
    pub fn print(&self) {
        debug!("Xlat table -> {:#010x}", self.base_table.get_pa().0 as u64);
        Self::print_table(
            self.granule.initial_lookup_level(),
            0,
            unsafe { self.base_table.get_as_slice() },
            self.granule,
        );
    }

    /// Prints a single translation table to the debug console
    /// # Arguments
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    /// * granule: Translation granule
    pub fn print_table(
        level: isize,
        va: usize,
        table: &[Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(granule.block_size_at_level(level))) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => debug!(
                    "{} {:#010x} Block -> {:#010x}",
                    level_prefix,
                    va,
                    descriptor.get_block_output_address(granule, level).0
                ),
                DescriptorType::Table => {
                    let next_level_table =
                        unsafe { descriptor.get_next_level_table(granule, level) };
                    debug!(
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix,
                        va,
                        next_level_table.as_ptr() as usize
                    );
                    Self::print_table(level + 1, va, next_level_table, granule);
                }
                _ => {}
            }
        }
    }

    /// Adds a memory region to the translation table. The function splits the region into blocks
    /// and uses the block level functions to do the mapping.
    /// # Arguments
    /// * region: Memory region object
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<VirtualAddress, XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.map_block(block, attributes.clone());
        }

        Ok(region.base())
    }

    /// Removes a memory region from the translation table. The function splits the region into
    /// blocks and uses the block level functions to do the unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Finds the mapped region that contains the whole given area
    /// # Arguments
    /// * va: Virtual address to look for
    /// * length: Length of the area in bytes
    /// # Return value
    /// * Reference to the virtual region if found
    fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }

    /// Add block to memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    /// * attributes: Memory block's permissions, flags
    fn map_block(&mut self, block: Block, attributes: Attributes) {
        Self::set_block_descriptor_recursively(
            attributes,
            block.pa,
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        );
    }

    /// Adds the block descriptor to the translation table, creating all the intermediate tables
    /// needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * block_size: The block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    #[allow(clippy::too_many_arguments)]
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: PhysicalAddress,
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required granule level
        if granule.block_size_at_level(level) == block_size {
            // Follow break-before-make sequence
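            // (invalidate the old descriptor and flush it from the TLBs before
            // writing the new one)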
            descriptor.set_block_or_invalid_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            descriptor.set_block_descriptor(granule, level, pa, attributes);
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .unwrap();
                unsafe {
                    let next_table = page.get_as_mut_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }
                Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    unsafe { descriptor.get_next_level_table_mut(granule, level) },
                    page_pool,
                    regime,
                    granule,
                )
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va.mask_for_level(granule, level);
                let current_pa = descriptor.get_block_output_address(granule, level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor

                // Follow break-before-make sequence
                descriptor.set_block_or_invalid_descriptor_to_invalid(level);
                Self::invalidate(regime, Some(current_va));

                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .unwrap();
                unsafe {
                    let next_table = page.get_as_mut_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }

                // Explode block descriptor to table entries
                for exploded_va in VirtualAddressRange::new(
                    current_va,
                    current_va
                        .add_offset(granule.block_size_at_level(level))
                        .unwrap(),
                )
                .step_by(granule.block_size_at_level(level + 1))
                {
                    let offset = exploded_va.diff(current_va).unwrap();
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa.add_offset(offset).unwrap(),
                        exploded_va.mask_for_level(granule, level),
                        granule.block_size_at_level(level + 1),
                        level + 1,
                        unsafe { descriptor.get_next_level_table_mut(granule, level) },
                        page_pool,
                        regime,
                        granule,
                    )
                }

                // Invoke self to continue recursion on the newly created level
                Self::set_block_descriptor_recursively(
                    attributes, pa, va, block_size, level, table, page_pool, regime, granule,
                );
            }
            DescriptorType::Table => Self::set_block_descriptor_recursively(
                attributes,
                pa,
                va.mask_for_level(granule, level),
                block_size,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(granule, level) },
                page_pool,
                regime,
                granule,
            ),
        }
    }

    /// Remove block from memory mapping
    /// # Arguments
    /// * block: memory block that can be represented by a single translation entry
    fn unmap_block(&mut self, block: Block) {
        Self::remove_block_descriptor_recursively(
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        );
    }

    /// Removes the block descriptor from the translation table along with all the intermediate
    /// tables which become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * block_size: Translation block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    fn remove_block_descriptor_recursively(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required level with the matching block size
        if granule.block_size_at_level(level) == block_size {
            descriptor.set_block_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Cannot remove block from non-existing table");
            }
            DescriptorType::Block => {
                panic!("Cannot remove block with different block size");
            }
            DescriptorType::Table => {
                let next_level_table =
                    unsafe { descriptor.get_next_level_table_mut(granule, level) };
                Self::remove_block_descriptor_recursively(
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    page_pool,
                    regime,
                    granule,
                );

                if next_level_table.iter().all(|d| !d.is_valid()) {
                    // Empty table
                    let mut page = unsafe {
                        Pages::from_slice(
                            descriptor.set_table_descriptor_to_invalid(granule, level),
                        )
                    };
                    page.zero_init();
                    page_pool.release_pages(page).unwrap();
                }
            }
        }
    }

    fn get_descriptor(&mut self, va: VirtualAddress, block_size: usize) -> &mut Descriptor {
        Self::walk_descriptors(
            va,
            block_size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
            self.granule,
        )
    }

    fn walk_descriptors(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> &mut Descriptor {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        if granule.block_size_at_level(level) == block_size {
            return descriptor;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Invalid descriptor");
            }
            DescriptorType::Block => {
                panic!("Cannot split existing block descriptor to table");
            }
            DescriptorType::Table => Self::walk_descriptors(
                va.mask_for_level(granule, level),
                block_size,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(granule, level) },
                granule,
            ),
        }
    }

    fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
        // SAFETY: The assembly code invalidates the translation table entry of
        // the VA or all entries of the translation regime.
        #[cfg(target_arch = "aarch64")]
        unsafe {
            if let Some(VirtualAddress(va)) = va {
                match regime {
                    TranslationRegime::EL1_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi vae2is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi vae3is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                }
            } else {
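                // No VA given: invalidate all entries of the regime (by ASID
                // where the regime is ASID tagged)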
                match regime {
                    TranslationRegime::EL1_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi alle2
                        dsb nsh
                        isb"
                    ),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi alle3
                        dsb nsh
                        isb"
                    ),
                }
            }
        }
    }
}