// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![cfg_attr(not(test), no_std)]

extern crate alloc;

use core::fmt;
use core::iter::zip;
use core::panic;

use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
use alloc::format;
use alloc::string::String;
use block::{Block, BlockIterator};

use bitflags::bitflags;
use packed_struct::prelude::*;

use self::descriptor::DescriptorType;

use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
use self::kernel_space::KernelSpace;
use self::page_pool::{PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

pub mod address;
mod block;
mod descriptor;
mod granule;
pub mod kernel_space;
pub mod page_pool;
mod region;
mod region_pool;

/// Translation table error type
#[derive(Debug)]
pub enum XlatError {
    InvalidParameterError(String),
    AllocationError(String),
    AlignmentError(String),
    Overflow,
    InvalidOperation(String),
    Overlap,
    NotFound,
    RegionPoolError(RegionPoolError),
}

/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
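///
/// An illustrative sketch (assumed values, not part of this crate) of a
/// MAIR_EL1 value matching these attribute indices:
/// ```ignore
/// // attribute index 0: Device-nGnRnE -> 0x00
/// // attribute index 1: Normal, Inner/Outer Write-Back Write-Allocate -> 0xff
/// const MAIR_EL1_VALUE: u64 = 0x00 | (0xff << 8);
/// ```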
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}

bitflags! {
    #[derive(Debug, Clone, Copy)]
    pub struct MemoryAccessRights : u32 {
        const R = 0b00000001;
        const W = 0b00000010;
        const X = 0b00000100;
        const NS = 0b00001000;

        const RW = Self::R.bits() | Self::W.bits();
        const RX = Self::R.bits() | Self::X.bits();
        const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();

        const USER = 0b00010000;
        const DEVICE = 0b00100000;
        const GLOBAL = 0b01000000;
    }
}

impl From<MemoryAccessRights> for Attributes {
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

        Attributes {
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

#[derive(Debug)]
pub enum RegimeVaRange {
    Lower,
    Upper,
}

#[derive(Debug)]
pub enum TranslationRegime {
    EL1_0(RegimeVaRange, u8), // EL1 and EL0 stage 1, TTBRx_EL1
    #[cfg(target_feature = "vh")]
    EL2_0(RegimeVaRange, u8), // EL2 and EL0 with VHE
    EL2,                      // EL2
    EL3,                      // EL3, TTBR0_EL3
}

pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;

pub struct Xlat<const VA_BITS: usize> {
    base_table: Pages,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
    regime: TranslationRegime,
    granule: TranslationGranule<VA_BITS>,
}

/// Memory translation table handling
/// # High level interface
/// * allocate and map zero-initialized region (with or without VA)
/// * allocate and map memory region and load contents (with or without VA)
/// * map memory region by PA (with or without VA)
/// * unmap memory region by VA
/// * query PA by VA
/// * set access rights of mapped memory areas
/// * activate mapping
///
/// # Debug features
/// * print translation table details
///
/// # Region level interface
/// * map regions
/// * unmap region
/// * find a mapped region which contains an address range
/// * find empty area for region
/// * set access rights for a region
/// * create blocks by region
///
/// # Block level interface
/// * map block
/// * unmap block
/// * set access rights of block
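///
/// # Example
/// A minimal usage sketch, assuming a `PagePool` prepared elsewhere; the VA
/// bits, ASID and addresses below are illustrative:
/// ```ignore
/// let mut xlat = Xlat::<36>::new(
///     page_pool,
///     VirtualAddressRange::new(VirtualAddress(0x0), VirtualAddress(1 << 36)),
///     TranslationRegime::EL1_0(RegimeVaRange::Lower, 0),
///     TranslationGranule::Granule4k,
/// );
/// let uart_va = xlat.map_physical_address_range(
///     None,
///     PhysicalAddress(0x0900_0000),
///     0x1000,
///     MemoryAccessRights::RW | MemoryAccessRights::DEVICE,
/// )?;
/// // SAFETY: nothing references the mappings being replaced.
/// unsafe { xlat.activate() };
/// ```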
impl<const VA_BITS: usize> Xlat<VA_BITS> {
    pub fn new(
        page_pool: PagePool,
        address: VirtualAddressRange,
        regime: TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Self {
        let initial_lookup_level = granule.initial_lookup_level();

        let base_table = page_pool
            .allocate_pages(
                granule.table_size::<Descriptor>(initial_lookup_level),
                Some(granule.table_alignment::<Descriptor>(initial_lookup_level)),
            )
            .unwrap();

        let mut regions = RegionPool::new();
        regions
            .add(VirtualRegion::new(address.start, address.len().unwrap()))
            .unwrap();
        Self {
            base_table,
            page_pool,
            regions,
            regime,
            granule,
        }
    }

    /// Allocate memory pages from the page pool, map them to the given VA and fill them with the
    /// initial data
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
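    /// # Example
    /// A sketch; `code_blob` and the access rights are illustrative:
    /// ```ignore
    /// let code_va =
    ///     xlat.allocate_initalized_range(None, &code_blob, MemoryAccessRights::RX)?;
    /// ```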
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<VirtualAddress>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(data.len(), Some(self.granule as usize))
            .map_err(|e| {
                XlatError::AllocationError(format!(
                    "Cannot allocate pages for {} bytes ({:?})",
                    data.len(),
                    e
                ))
            })?;

        pages.copy_data_to_page(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocate memory pages from the page pool, map them to the given VA and fill them with
    /// zeros
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
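    /// # Example
    /// A sketch allocating one zero-filled, read-write page (the size is illustrative):
    /// ```ignore
    /// let buf_va = xlat.allocate_zero_init_range(None, 0x1000, MemoryAccessRights::RW)?;
    /// ```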
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<VirtualAddress>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(length, Some(self.granule as usize))
            .map_err(|e| {
                XlatError::AllocationError(format!(
                    "Cannot allocate pages for {length} bytes ({e:?})"
                ))
            })?;

        pages.zero_init();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Map memory area by physical address
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
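    /// # Example
    /// A sketch mapping a device MMIO page (the addresses are illustrative):
    /// ```ignore
    /// let uart_va = xlat.map_physical_address_range(
    ///     None,
    ///     PhysicalAddress(0x0900_0000),
    ///     0x1000,
    ///     MemoryAccessRights::RW | MemoryAccessRights::DEVICE,
    /// )?;
    /// ```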
    pub fn map_physical_address_range(
        &mut self,
        va: Option<VirtualAddress>,
        pa: PhysicalAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmap memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
    pub fn unmap_virtual_address_range(
        &mut self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<(), XlatError> {
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(XlatError::RegionPoolError)
    }

    /// Query physical address by virtual address range. Only returns a value if the memory area
    /// is mapped as a contiguous area.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
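    /// # Example
    /// ```ignore
    /// // Sketch: look up the PA backing an already mapped area (illustrative VA and length).
    /// let pa = xlat.get_pa_by_va(buf_va, 0x1000)?;
    /// ```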
    pub fn get_pa_by_va(
        &self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<PhysicalAddress, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
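    /// # Example
    /// ```ignore
    /// // Sketch: downgrade a previously writable area to read-only (illustrative VA).
    /// xlat.set_access_rights(buf_va, 0x1000, MemoryAccessRights::R)?;
    /// ```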
    pub fn set_access_rights(
        &mut self,
        va: VirtualAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Activate memory mapping represented by the object
    ///
    /// # Safety
    /// When activating memory mapping for the running exception level, the
    /// caller must ensure that the new mapping will not break any existing
    /// references. After activation the caller must ensure that there are no
    /// active references when unmapping memory.
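    /// # Example
    /// ```ignore
    /// // Sketch: install the tables once the static regions are mapped.
    /// // SAFETY: no live references depend on the mapping being replaced.
    /// unsafe { xlat.activate() };
    /// ```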
    pub unsafe fn activate(&self) {
        // Select translation granule
        let is_tg0 = match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, _)
            | TranslationRegime::EL2
            | TranslationRegime::EL3 => true,
            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => false,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, _) => true,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => false,
        };

        #[cfg(target_arch = "aarch64")]
        if is_tg0 {
            self.modify_tcr(|tcr| {
                let tg0 = match self.granule {
                    TranslationGranule::Granule4k => 0b00,
                    TranslationGranule::Granule16k => 0b10,
                    TranslationGranule::Granule64k => 0b01,
                };

                (tcr & !(3 << 14)) | (tg0 << 14)
            });
        } else {
            self.modify_tcr(|tcr| {
                let tg1 = match self.granule {
                    TranslationGranule::Granule4k => 0b10,
                    TranslationGranule::Granule16k => 0b01,
                    TranslationGranule::Granule64k => 0b11,
                };

                (tcr & !(3 << 30)) | (tg1 << 30)
            });
        }

        // Set translation table
        let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.get_pa().0 as u64);

        #[cfg(target_arch = "aarch64")]
        match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL1_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) base_table_pa),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr ttbr0_el3, {0}
                isb",
                in(reg) base_table_pa),
        }
    }

    /// Modifies the TCR register of the selected regime of the instance.
    #[cfg(target_arch = "aarch64")]
    unsafe fn modify_tcr<F>(&self, f: F)
    where
        F: Fn(u64) -> u64,
    {
        let mut tcr: u64;

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el1
                isb",
                out(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "mrs {0}, tcr_el3
                isb",
                out(reg) tcr),
        }

        tcr = f(tcr);

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "msr tcr_el1, {0}
                isb",
                in(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr tcr_el3, {0}
                isb",
                in(reg) tcr),
        }
    }

    /// Prints a single translation table to the debug console
    /// # Arguments
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    fn dump_table(
        f: &mut fmt::Formatter<'_>,
        level: isize,
        va: usize,
        table: &[Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> fmt::Result {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(granule.block_size_at_level(level))) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => {
                    writeln!(
                        f,
                        "{} {:#010x} Block -> {:#010x}",
                        level_prefix,
                        va,
                        descriptor.get_block_output_address(granule, level).0
                    )?;
                }
                DescriptorType::Table => {
                    let next_level_table =
                        unsafe { descriptor.get_next_level_table(granule, level) };
                    writeln!(
                        f,
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix,
                        va,
                        next_level_table.as_ptr() as usize
                    )?;
                    Self::dump_table(f, level + 1, va, next_level_table, granule)?;
                }
                _ => {}
            }
        }

        Ok(())
    }

    /// Adds memory region to the translation table. The function splits the region into blocks
    /// and uses the block level functions to do the mapping.
    /// # Arguments
    /// * region: Memory region object
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<VirtualAddress, XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.map_block(block, attributes.clone());
        }

        Ok(region.base())
    }

    /// Remove memory region from the translation table. The function splits the region into
    /// blocks and uses the block level functions to do the unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Find mapped region that contains the whole region
    /// # Arguments
    /// * va: Virtual address to look for
    /// * length: Length of the area in bytes
    /// # Return value
    /// * Reference to virtual region if found
    fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }

    /// Add block to memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    /// * attributes: Memory block's permissions, flags
    fn map_block(&mut self, block: Block, attributes: Attributes) {
        Self::set_block_descriptor_recursively(
            attributes,
            block.pa,
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        );
    }

    /// Adds the block descriptor to the translation table, creating all the intermediate tables
    /// needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * block_size: The block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    #[allow(clippy::too_many_arguments)]
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: PhysicalAddress,
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required granule level
        if granule.block_size_at_level(level) == block_size {
            // Follow break-before-make sequence
            descriptor.set_block_or_invalid_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            descriptor.set_block_descriptor(granule, level, pa, attributes);
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .unwrap();
                unsafe {
                    let next_table = page.get_as_mut_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }
                Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    unsafe { descriptor.get_next_level_table_mut(granule, level) },
                    page_pool,
                    regime,
                    granule,
                )
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va.mask_for_level(granule, level);
                let current_pa = descriptor.get_block_output_address(granule, level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor

                // Follow break-before-make sequence
                descriptor.set_block_or_invalid_descriptor_to_invalid(level);
                Self::invalidate(regime, Some(current_va));

                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .unwrap();
                unsafe {
                    let next_table = page.get_as_mut_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }

                // Explode block descriptor to table entries
                for exploded_va in VirtualAddressRange::new(
                    current_va,
                    current_va
                        .add_offset(granule.block_size_at_level(level))
                        .unwrap(),
                )
                .step_by(granule.block_size_at_level(level + 1))
                {
                    let offset = exploded_va.diff(current_va).unwrap();
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa.add_offset(offset).unwrap(),
                        exploded_va.mask_for_level(granule, level),
                        granule.block_size_at_level(level + 1),
                        level + 1,
                        unsafe { descriptor.get_next_level_table_mut(granule, level) },
                        page_pool,
                        regime,
                        granule,
                    )
                }

                // Invoke self to continue recursion on the newly created level
                Self::set_block_descriptor_recursively(
                    attributes, pa, va, block_size, level, table, page_pool, regime, granule,
                );
            }
            DescriptorType::Table => Self::set_block_descriptor_recursively(
                attributes,
                pa,
                va.mask_for_level(granule, level),
                block_size,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(granule, level) },
                page_pool,
                regime,
                granule,
            ),
        }
    }

    /// Remove block from memory mapping
    /// # Arguments
    /// * block: memory block that can be represented by a single translation entry
    fn unmap_block(&mut self, block: Block) {
        Self::remove_block_descriptor_recursively(
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        );
    }

    /// Removes block descriptor from the translation table, along with all the intermediate
    /// tables which become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * block_size: Translation block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    fn remove_block_descriptor_recursively(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required level with the matching block size
        if granule.block_size_at_level(level) == block_size {
            descriptor.set_block_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Cannot remove block from non-existing table");
            }
            DescriptorType::Block => {
                panic!("Cannot remove block with different block size");
            }
            DescriptorType::Table => {
                let next_level_table =
                    unsafe { descriptor.get_next_level_table_mut(granule, level) };
                Self::remove_block_descriptor_recursively(
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    page_pool,
                    regime,
                    granule,
                );

                if next_level_table.iter().all(|d| !d.is_valid()) {
                    // Empty table
                    let mut page = unsafe {
                        Pages::from_slice(
                            descriptor.set_table_descriptor_to_invalid(granule, level),
                        )
                    };
                    page.zero_init();
                    page_pool.release_pages(page).unwrap();
                }
            }
        }
    }

    fn get_descriptor(&mut self, va: VirtualAddress, block_size: usize) -> &mut Descriptor {
        Self::walk_descriptors(
            va,
            block_size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
            self.granule,
        )
    }

    fn walk_descriptors(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> &mut Descriptor {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        if granule.block_size_at_level(level) == block_size {
            return descriptor;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Invalid descriptor");
            }
            DescriptorType::Block => {
                panic!("Cannot split existing block descriptor to table");
            }
            DescriptorType::Table => Self::walk_descriptors(
                va.mask_for_level(granule, level),
                block_size,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(granule, level) },
                granule,
            ),
        }
    }

    fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
        // SAFETY: The assembly code invalidates the translation table entry of
        // the VA or all entries of the translation regime.
        #[cfg(target_arch = "aarch64")]
        unsafe {
            if let Some(VirtualAddress(va)) = va {
                match regime {
                    TranslationRegime::EL1_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi vae2is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi vae3is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                }
            } else {
                match regime {
                    TranslationRegime::EL1_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi alle2
                        dsb nsh
                        isb"
                    ),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi alle3
                        dsb nsh
                        isb"
                    ),
                }
            }
        }
    }
}

impl<const VA_BITS: usize> fmt::Debug for Xlat<VA_BITS> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("Xlat")
            .field("regime", &self.regime)
            .field("granule", &self.granule)
            .field("VA_BITS", &VA_BITS)
            .field("base_table", &self.base_table.get_pa())
            .finish()?;

        Self::dump_table(
            f,
            self.granule.initial_lookup_level(),
            0,
            unsafe { self.base_table.get_as_slice() },
            self.granule,
        )?;

        Ok(())
    }
}