// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![cfg_attr(not(test), no_std)]

extern crate alloc;

use core::fmt;
use core::iter::zip;
use core::marker::PhantomData;
use core::panic;

use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
use block::{Block, BlockIterator};

use bitflags::bitflags;
use packed_struct::prelude::*;
use thiserror::Error;

use self::descriptor::{
    Attributes, DataAccessPermissions, Descriptor, DescriptorType, Shareability,
};
use self::page_pool::{PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

pub mod address;
mod block;
mod descriptor;
mod granule;
pub mod kernel_space;
pub mod page_pool;
mod region;
mod region_pool;

/// Translation table error type
#[derive(Debug, Error)]
pub enum XlatError {
    #[error("Invalid parameter: {0}")]
    InvalidParameterError(&'static str),
    #[error("Cannot allocate {1}: {0:?}")]
    PageAllocationError(RegionPoolError, usize),
    #[error("Alignment error: {0:?} {1:?} length={2:#x} granule={3:#x}")]
    AlignmentError(PhysicalAddress, VirtualAddress, usize, usize),
    #[error("Entry not found for {0:?}")]
    VaNotFound(VirtualAddress),
    #[error("Cannot allocate virtual address {0:?}")]
    VaAllocationError(RegionPoolError),
    #[error("Cannot release virtual address {1:?}: {0:?}")]
    VaReleaseError(RegionPoolError, VirtualAddress),
}

/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
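///
/// For reference, a MAIR_EL1 value matching these indices would be
/// `0x0000_0000_0000_ff00` (attr0 = 0x00 for Device-nGnRnE, attr1 = 0xff for
/// Normal inner/outer write-back write-allocate). This is a sketch of the
/// expected configuration; verify it against the actual startup code.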
#[allow(non_camel_case_types)]
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}

bitflags! {
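    /// Memory access rights of a mapping
    ///
    /// # Example
    /// A hedged sketch of composing rights for a user-accessible, read-write,
    /// non-executable mapping and converting them to descriptor attributes:
    /// ```ignore
    /// let rights = MemoryAccessRights::RW | MemoryAccessRights::USER;
    /// let attributes: Attributes = rights.into();
    /// ```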
    #[derive(Debug, Clone, Copy)]
    pub struct MemoryAccessRights : u32 {
        const R = 0b00000001;
        const W = 0b00000010;
        const X = 0b00000100;
        const NS = 0b00001000;

        const RW = Self::R.bits() | Self::W.bits();
        const RX = Self::R.bits() | Self::X.bits();
        const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();

        const USER = 0b00010000;
        const DEVICE = 0b00100000;
        const GLOBAL = 0b01000000;
    }
}

impl From<MemoryAccessRights> for Attributes {
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

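        // UXN/PXN: execution is only enabled at the intended exception level.
        // A mapping is user-executable only if it is both executable and a
        // user mapping, and privileged-executable only if it is executable
        // and not a user mapping.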
        Attributes {
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub enum RegimeVaRange {
    Lower,
    Upper,
}

#[derive(Debug, Clone, Copy)]
pub enum TranslationRegime {
    EL1_0(RegimeVaRange, u8), // EL1 and EL0 stage 1, TTBRx_EL1
    #[cfg(target_feature = "vh")]
    EL2_0(RegimeVaRange, u8), // EL2 and EL0 with VHE
    EL2,                      // EL2
    EL3,                      // EL3, TTBR0_EL3
}

impl TranslationRegime {
    fn is_upper_va_range(&self) -> bool {
        match self {
            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => true,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => true,
            _ => false,
        }
    }
}

pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;

/// Trait for converting between the virtual address space of the running
/// kernel environment and the physical address space.
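///
/// # Example
/// A minimal identity-mapping sketch; `IdentityTranslator` is an assumption
/// for illustration, not part of this module:
/// ```ignore
/// struct IdentityTranslator;
///
/// impl KernelAddressTranslator for IdentityTranslator {
///     fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress {
///         PhysicalAddress(va.0)
///     }
///
///     fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress {
///         VirtualAddress(pa.0)
///     }
/// }
/// ```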
pub trait KernelAddressTranslator {
    fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress;
    fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress;
}

pub struct Xlat<K: KernelAddressTranslator, const VA_BITS: usize> {
    base_table: Pages,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
    regime: TranslationRegime,
    granule: TranslationGranule<VA_BITS>,
    _kernel_address_translator: PhantomData<K>,
}

/// Memory translation table handling
/// # High level interface
/// * allocate and map zero initialized region (with or without VA)
/// * allocate and map memory region and load contents (with or without VA)
/// * map memory region by PA (with or without VA)
/// * unmap memory region by VA
/// * query PA by VA
/// * set access rights of mapped memory areas
/// * activate mapping
///
/// # Debug features
/// * print translation table details
///
/// # Region level interface
/// * map regions
/// * unmap region
/// * find a mapped region which contains an address
/// * find empty area for region
/// * set access rights for a region
/// * create blocks by region
///
/// # Block level interface
/// * map block
/// * unmap block
/// * set access rights of block
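///
/// # Example
/// A hedged usage sketch; `IdentityTranslator`, `pool` and the address range
/// are assumptions for illustration:
/// ```ignore
/// let mut xlat = Xlat::<IdentityTranslator, 36>::new(
///     pool,
///     VirtualAddressRange::new(VirtualAddress(0x4000_0000), VirtualAddress(0x8000_0000)),
///     TranslationRegime::EL1_0(RegimeVaRange::Lower, 0),
///     TranslationGranule::Granule4k,
/// );
///
/// let va = xlat.allocate_zero_init_range(None, 0x1000, MemoryAccessRights::RW)?;
/// unsafe { xlat.activate() };
/// ```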
impl<K: KernelAddressTranslator, const VA_BITS: usize> Xlat<K, VA_BITS> {
    pub fn new(
        page_pool: PagePool,
        address: VirtualAddressRange,
        regime: TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Self {
        let initial_lookup_level = granule.initial_lookup_level();

        if !address.start.is_valid_in_regime::<VA_BITS>(regime)
            || !address.end.is_valid_in_regime::<VA_BITS>(regime)
        {
            panic!(
                "Invalid address range {:?} for regime {:?}",
                address, regime
            );
        }

        let base_table = page_pool
            .allocate_pages(
                granule.table_size::<Descriptor>(initial_lookup_level),
                Some(granule.table_alignment::<Descriptor>(initial_lookup_level)),
            )
            .unwrap();

        let mut regions = RegionPool::new();
        regions
            .add(VirtualRegion::new(address.start, address.len().unwrap()))
            .unwrap();
        Self {
            base_table,
            page_pool,
            regions,
            regime,
            granule,
            _kernel_address_translator: PhantomData,
        }
    }

    /// Allocate memory pages from the page pool, map them to the given VA and
    /// fill them with the initial data
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
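    ///
    /// # Example
    /// A hedged sketch; `xlat` and `IMAGE` are assumptions for illustration:
    /// ```ignore
    /// // Load an image into read-only executable pages at an allocator-chosen VA.
    /// let code_va = xlat.allocate_initalized_range(None, IMAGE, MemoryAccessRights::RX)?;
    /// ```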
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<VirtualAddress>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(data.len(), Some(self.granule as usize))
            .map_err(|e| XlatError::PageAllocationError(e, data.len()))?;

        pages.copy_data_to_page::<K>(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocate memory pages from the page pool, map them to the given VA and
    /// fill them with zeros
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<VirtualAddress>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(length, Some(self.granule as usize))
            .map_err(|e| XlatError::PageAllocationError(e, length))?;

        pages.zero_init::<K>();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Map memory area by physical address
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
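    ///
    /// # Example
    /// A hedged sketch of mapping a device region; the addresses are
    /// assumptions for illustration:
    /// ```ignore
    /// let uart_va = xlat.map_physical_address_range(
    ///     Some(VirtualAddress(0x1000_0000)),
    ///     PhysicalAddress(0x0900_0000),
    ///     0x1000,
    ///     MemoryAccessRights::RW | MemoryAccessRights::DEVICE,
    /// )?;
    /// ```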
    pub fn map_physical_address_range(
        &mut self,
        va: Option<VirtualAddress>,
        pa: PhysicalAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmap memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
    pub fn unmap_virtual_address_range(
        &mut self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<(), XlatError> {
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(|e| XlatError::VaReleaseError(e, va))
    }

    /// Query physical address by virtual address range. Only returns a value
    /// if the whole memory area is mapped as a contiguous region.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
    pub fn get_pa_by_va(
        &self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<PhysicalAddress, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::VaNotFound(va))?;

        if !containing_region.used() {
            return Err(XlatError::VaNotFound(va));
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
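    ///
    /// # Example
    /// A hedged sketch; `data_va` is an assumption for illustration:
    /// ```ignore
    /// // Make a previously writable data region read-only.
    /// xlat.set_access_rights(data_va, 0x1000, MemoryAccessRights::R)?;
    /// ```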
    pub fn set_access_rights(
        &mut self,
        va: VirtualAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::VaNotFound(va))?;

        if !containing_region.used() {
            return Err(XlatError::VaNotFound(va));
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Activate memory mapping represented by the object
    ///
    /// # Safety
    /// When activating memory mapping for the running exception level, the
    /// caller must ensure that the new mapping will not break any existing
    /// references. After activation the caller must ensure that there are no
    /// active references when unmapping memory.
    #[cfg(target_arch = "aarch64")]
    pub unsafe fn activate(&self) {
        // Select translation granule
        let is_tg0 = match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, _)
            | TranslationRegime::EL2
            | TranslationRegime::EL3 => true,
            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => false,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, _) => true,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => false,
        };

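        // Note: TG0 and TG1 encode the same granule sizes differently, e.g.
        // the 4k granule is 0b00 in TCR_ELx.TG0 but 0b10 in TCR_EL1.TG1.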
        if is_tg0 {
            self.modify_tcr(|tcr| {
                let tg0 = match self.granule {
                    TranslationGranule::Granule4k => 0b00,
                    TranslationGranule::Granule16k => 0b10,
                    TranslationGranule::Granule64k => 0b01,
                };

                (tcr & !(3 << 14)) | (tg0 << 14)
            });
        } else {
            self.modify_tcr(|tcr| {
                let tg1 = match self.granule {
                    TranslationGranule::Granule4k => 0b10,
                    TranslationGranule::Granule16k => 0b01,
                    TranslationGranule::Granule64k => 0b11,
                };

                (tcr & !(3 << 30)) | (tg1 << 30)
            });
        }

        // Set translation table
        let base_table_pa = self.base_table.get_pa().0 as u64;

        match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL1_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) base_table_pa),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr ttbr0_el3, {0}
                isb",
                in(reg) base_table_pa),
        }
    }

    /// # Safety
    /// Dummy function for test builds
    #[cfg(not(target_arch = "aarch64"))]
    pub unsafe fn activate(&self) {}

    /// Modifies the TCR register of the selected regime of the instance.
    #[cfg(target_arch = "aarch64")]
    unsafe fn modify_tcr<F>(&self, f: F)
    where
        F: Fn(u64) -> u64,
    {
        let mut tcr: u64;

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el1
                isb",
                out(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "mrs {0}, tcr_el3
                isb",
                out(reg) tcr),
        }

        tcr = f(tcr);

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "msr tcr_el1, {0}
                isb",
                in(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr tcr_el3, {0}
                isb",
                in(reg) tcr),
        }
    }

    /// Prints a single translation table to the given formatter
    /// # Arguments
    /// * f: Destination formatter
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    /// * granule: Translation granule
    fn dump_table(
        f: &mut fmt::Formatter<'_>,
        level: isize,
        va: usize,
        table: &[Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> fmt::Result {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(granule.block_size_at_level(level))) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => {
                    writeln!(
                        f,
                        "{} {:#010x} Block -> {:#010x}",
                        level_prefix,
                        va,
                        descriptor.get_block_output_address(granule, level).0
                    )?;
                }
                DescriptorType::Table => {
                    let table_pa = descriptor.get_next_level_table(level);
                    writeln!(
                        f,
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix, va, table_pa.0
                    )?;

                    let next_level_table =
                        unsafe { Self::get_table_from_pa(table_pa, granule, level + 1) };
                    Self::dump_table(f, level + 1, va, next_level_table, granule)?;
                }
                _ => {}
            }
        }

        Ok(())
    }

    /// Add memory region to the translation table. The function splits the
    /// region into blocks and uses the block level functions to do the
    /// mapping.
    /// # Arguments
    /// * region: Memory region object
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<VirtualAddress, XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base().remove_upper_bits::<VA_BITS>(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.map_block(block, attributes.clone())?;
        }

        Ok(region.base())
    }

    /// Remove memory region from the translation table. The function splits
    /// the region into blocks and uses the block level functions to do the
    /// unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base().remove_upper_bits::<VA_BITS>(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Find mapped region that contains the whole given area
    /// # Arguments
    /// * va: Virtual address of the area to look for
    /// * length: Length of the area in bytes
    /// # Return value
    /// * Reference to virtual region if found
    fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }

    /// Add block to memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    /// * attributes: Memory block's permissions, flags
    fn map_block(&mut self, block: Block, attributes: Attributes) -> Result<(), XlatError> {
        Self::set_block_descriptor_recursively(
            attributes,
            block.pa,
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        )
    }

    /// Adds the block descriptor to the translation table, creating all the
    /// intermediate tables needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * block_size: The block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    #[allow(clippy::too_many_arguments)]
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: PhysicalAddress,
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Result<(), XlatError> {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required granule level
        if granule.block_size_at_level(level) == block_size {
            // Follow break-before-make sequence
            descriptor.set_block_or_invalid_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            descriptor.set_block_descriptor(granule, level, pa, attributes);
            return Ok(());
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                // Allocate page for next level table
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .map_err(|e| {
                        XlatError::PageAllocationError(
                            e,
                            granule.table_size::<Descriptor>(level + 1),
                        )
                    })?;

                let next_table = unsafe { page.get_as_mut_slice::<K, Descriptor>() };

                // Fill next level table
                let result = Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_table,
                    page_pool,
                    regime,
                    granule,
                );

                if result.is_ok() {
                    // Set table descriptor if the table is configured properly
                    let next_table_pa =
                        K::kernel_to_pa(VirtualAddress(next_table.as_ptr() as usize));
                    descriptor.set_table_descriptor(level, next_table_pa, None);
                } else {
                    // Release next level table on error and keep invalid descriptor on current level
                    page_pool.release_pages(page).unwrap();
                }

                result
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va.mask_for_level(granule, level);
                let current_pa = descriptor.get_block_output_address(granule, level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor

                // Allocate page for next level table
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .map_err(|e| {
                        XlatError::PageAllocationError(
                            e,
                            granule.table_size::<Descriptor>(level + 1),
                        )
                    })?;

                let next_table = unsafe { page.get_as_mut_slice::<K, Descriptor>() };

                // Explode existing block descriptor into table entries
                for exploded_va in VirtualAddressRange::new(
                    current_va,
                    current_va
                        .add_offset(granule.block_size_at_level(level))
                        .unwrap(),
                )
                .step_by(granule.block_size_at_level(level + 1))
                {
                    let offset = exploded_va.diff(current_va).unwrap();

                    // This call sets a single block descriptor and it should not fail
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa.add_offset(offset).unwrap(),
                        exploded_va.mask_for_level(granule, level),
                        granule.block_size_at_level(level + 1),
                        level + 1,
                        next_table,
                        page_pool,
                        regime,
                        granule,
                    )
                    .unwrap();
                }

                // Invoke self to continue recursion on the newly created level
                let result = Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level + 1),
                    block_size,
                    level + 1,
                    next_table,
                    page_pool,
                    regime,
                    granule,
                );

                if result.is_ok() {
                    let next_table_pa =
                        K::kernel_to_pa(VirtualAddress(next_table.as_ptr() as usize));

                    // Follow break-before-make sequence
                    descriptor.set_block_or_invalid_descriptor_to_invalid(level);
                    Self::invalidate(regime, Some(current_va));

                    // Set table descriptor if the table is configured properly
                    descriptor.set_table_descriptor(level, next_table_pa, None);
                } else {
                    // Release next level table on error and keep invalid descriptor on current level
                    page_pool.release_pages(page).unwrap();
                }

                result
            }
            DescriptorType::Table => {
                let next_level_table = unsafe {
                    Self::get_table_from_pa_mut(
                        descriptor.get_next_level_table(level),
                        granule,
                        level + 1,
                    )
                };

                Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    page_pool,
                    regime,
                    granule,
                )
            }
        }
    }

    /// Remove block from memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation entry
    fn unmap_block(&mut self, block: Block) {
        Self::remove_block_descriptor_recursively(
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        )
    }

    /// Removes the block descriptor from the translation table, releasing any
    /// intermediate tables which become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * block_size: Translation block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    fn remove_block_descriptor_recursively(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required level with the matching block size
        if granule.block_size_at_level(level) == block_size {
            descriptor.set_block_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Cannot remove block from non-existing table");
            }
            DescriptorType::Block => {
                panic!("Cannot remove block with different block size");
            }
            DescriptorType::Table => {
                let next_level_table = unsafe {
                    Self::get_table_from_pa_mut(
                        descriptor.get_next_level_table(level),
                        granule,
                        level + 1,
                    )
                };

                Self::remove_block_descriptor_recursively(
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    page_pool,
                    regime,
                    granule,
                );

                if next_level_table.iter().all(|d| !d.is_valid()) {
                    // Empty table
                    let mut page = unsafe {
                        let table_pa = descriptor.set_table_descriptor_to_invalid(level);
                        let next_table = Self::get_table_from_pa_mut(table_pa, granule, level + 1);
                        Pages::from_slice::<K, Descriptor>(next_table)
                    };

                    page.zero_init::<K>();
                    page_pool.release_pages(page).unwrap();
                }
            }
        }
    }

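    /// Walks the translation tables and returns a mutable reference to the
    /// descriptor that maps `va` with exactly the given block size.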
    fn get_descriptor(&mut self, va: VirtualAddress, block_size: usize) -> &mut Descriptor {
        Self::walk_descriptors(
            va,
            block_size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() },
            self.granule,
        )
    }

    fn walk_descriptors(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> &mut Descriptor {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        if granule.block_size_at_level(level) == block_size {
            return descriptor;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Invalid descriptor");
            }
            DescriptorType::Block => {
                panic!("Cannot split existing block descriptor to table");
            }
            DescriptorType::Table => {
                let next_level_table = unsafe {
                    Self::get_table_from_pa_mut(
                        descriptor.get_next_level_table(level),
                        granule,
                        level + 1,
                    )
                };

                Self::walk_descriptors(
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    granule,
                )
            }
        }
    }

    /// Create a translation table descriptor slice from a physical address.
    ///
    /// # Safety
    /// The caller must ensure that the physical address points to a valid translation table and
    /// it is mapped into the virtual address space of the running kernel context.
    unsafe fn get_table_from_pa<'a>(
        pa: PhysicalAddress,
        granule: TranslationGranule<VA_BITS>,
        level: isize,
    ) -> &'a [Descriptor] {
        let table_va = K::pa_to_kernel(pa);
        unsafe {
            core::slice::from_raw_parts(
                table_va.0 as *const Descriptor,
                granule.entry_count_at_level(level),
            )
        }
    }

    /// Create a mutable translation table descriptor slice from a physical address.
    ///
    /// # Safety
    /// The caller must ensure that the physical address points to a valid translation table and
    /// it is mapped into the virtual address space of the running kernel context.
    unsafe fn get_table_from_pa_mut<'a>(
        pa: PhysicalAddress,
        granule: TranslationGranule<VA_BITS>,
        level: isize,
    ) -> &'a mut [Descriptor] {
        let table_va = K::pa_to_kernel(pa);
        unsafe {
            core::slice::from_raw_parts_mut(
                table_va.0 as *mut Descriptor,
                granule.entry_count_at_level(level),
            )
        }
    }

    #[cfg(target_arch = "aarch64")]
    fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
        // SAFETY: The assembly code invalidates the translation table entry of
        // the VA or all entries of the translation regime.
        unsafe {
            if let Some(VirtualAddress(va)) = va {
                match regime {
                    TranslationRegime::EL1_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi vae2is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi vae3is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                }
            } else {
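                // The ASID is placed in bits [63:48] of the TLBI operand.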
                match regime {
                    TranslationRegime::EL1_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi alle2
                        dsb nsh
                        isb"
                    ),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi alle3
                        dsb nsh
                        isb"
                    ),
                }
            }
        }
    }

    #[cfg(not(target_arch = "aarch64"))]
    fn invalidate(_regime: &TranslationRegime, _va: Option<VirtualAddress>) {}
}

impl<K: KernelAddressTranslator, const VA_BITS: usize> fmt::Debug for Xlat<K, VA_BITS> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Xlat")
            .field("regime", &self.regime)
            .field("granule", &self.granule)
            .field("VA_BITS", &VA_BITS)
            .field("base_table", &self.base_table.get_pa())
            .finish()?;

        Self::dump_table(
            f,
            self.granule.initial_lookup_level(),
            0,
            unsafe { self.base_table.get_as_slice::<K, Descriptor>() },
            self.granule,
        )?;

        Ok(())
    }
}