// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![cfg_attr(not(test), no_std)]

extern crate alloc;

use core::iter::zip;
use core::{fmt, panic};

use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
use alloc::format;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use log::debug;

use bitflags::bitflags;
use packed_struct::prelude::*;

use self::descriptor::DescriptorType;

use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
use self::kernel_space::KernelSpace;
use self::page_pool::{PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

pub mod address;
mod descriptor;
mod granule;
pub mod kernel_space;
pub mod page_pool;
mod region;
mod region_pool;

/// Translation table error type
#[derive(Debug)]
pub enum XlatError {
    InvalidParameterError(String),
    AllocationError(String),
    AlignmentError(String),
    Overflow,
    InvalidOperation(String),
    Overlap,
    NotFound,
    RegionPoolError(RegionPoolError),
}

/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}

bitflags! {
    #[derive(Debug, Clone, Copy)]
    pub struct MemoryAccessRights : u32 {
        const R = 0b00000001;
        const W = 0b00000010;
        const X = 0b00000100;
        const NS = 0b00001000;

        const RW = Self::R.bits() | Self::W.bits();
        const RX = Self::R.bits() | Self::X.bits();
        const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();

        const USER = 0b00010000;
        const DEVICE = 0b00100000;
        const GLOBAL = 0b01000000;
    }
}
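// The flags combine with bitwise OR, as usual for `bitflags` types; e.g. a
// privileged, read-write, non-executable device mapping (a usage sketch):
// `MemoryAccessRights::RW | MemoryAccessRights::DEVICE | MemoryAccessRights::GLOBAL`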

impl From<MemoryAccessRights> for Attributes {
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

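        // Execution is only permitted at the privilege level selected by USER:
        // UXN is clear only for user-executable mappings (X && USER) and PXN is
        // clear only for privileged-executable mappings (X && !USER).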
        Attributes {
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

#[derive(PartialEq)]
struct Block {
    pa: PhysicalAddress,
    va: VirtualAddress,
    size: usize,
}

impl Block {
    fn new(pa: PhysicalAddress, va: VirtualAddress, size: usize) -> Self {
        Self { pa, va, size }
    }
}

impl fmt::Debug for Block {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Block")
            .field("pa", &format_args!("{:#010x}", self.pa.0))
            .field("va", &format_args!("{:#010x}", self.va.0))
            .field("size", &format_args!("{:#010x}", self.size))
            .finish()
    }
}

pub enum RegimeVaRange {
    Lower,
    Upper,
}

pub enum TranslationRegime {
    EL1_0(RegimeVaRange, u8), // EL1 and EL0 stage 1, TTBRx_EL1
    #[cfg(target_feature = "vh")]
    EL2_0(RegimeVaRange, u8), // EL2 and EL0 with VHE
    EL2,                      // EL2
    EL3,                      // EL3, TTBR0_EL3
}

pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;

pub struct Xlat<const VA_BITS: usize> {
    base_table: Pages,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
    regime: TranslationRegime,
    granule: TranslationGranule<VA_BITS>,
}

159/// Memory translation table handling
160/// # High level interface
161/// * allocate and map zero initialized region (with or without VA)
162/// * allocate and map memory region and load contents (with or without VA)
163/// * map memory region by PA (with or without VA)
164/// * unmap memory region by PA
165/// * query PA by VA
166/// * set access rights of mapped memory areas
167/// * active mapping
168///
169/// # Debug features
170/// * print translation table details
171///
172/// # Region level interface
173/// * map regions
174/// * unmap region
175/// * find a mapped region which contains
176/// * find empty area for region
177/// * set access rights for a region
178/// * create blocks by region
179///
180/// # Block level interface
181/// * map block
182/// * unmap block
183/// * set access rights of block
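///
/// # Example
///
/// A minimal usage sketch, not a runnable doctest: it needs AArch64 hardware
/// and a `PagePool` constructed elsewhere, and the device address below is a
/// hypothetical placeholder.
///
/// ```ignore
/// fn map_uart(page_pool: PagePool) -> Result<(), XlatError> {
///     // 36-bit VA space, EL1&0 lower range (TTBR0_EL1), ASID 0, 4k granule
///     let mut xlat = Xlat::<36>::new(
///         page_pool,
///         VirtualAddressRange::new(VirtualAddress(0x0000_0000), VirtualAddress(0x4000_0000)),
///         TranslationRegime::EL1_0(RegimeVaRange::Lower, 0),
///         TranslationGranule::Granule4k,
///     );
///
///     // Identity map a device page with read-write, non-executable rights
///     xlat.map_physical_address_range(
///         Some(VirtualAddress(0x0900_0000)),
///         PhysicalAddress(0x0900_0000),
///         0x1000,
///         MemoryAccessRights::RW | MemoryAccessRights::DEVICE | MemoryAccessRights::GLOBAL,
///     )?;
///
///     // SAFETY: activating the mapping must not break any existing reference
///     unsafe { xlat.activate() };
///     Ok(())
/// }
/// ```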
impl<const VA_BITS: usize> Xlat<VA_BITS> {
    pub fn new(
        page_pool: PagePool,
        address: VirtualAddressRange,
        regime: TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Self {
        let initial_lookup_level = granule.initial_lookup_level();

        let base_table = page_pool
            .allocate_pages(
                granule.table_size::<Descriptor>(initial_lookup_level),
                Some(granule.table_alignment::<Descriptor>(initial_lookup_level)),
            )
            .unwrap();

        let mut regions = RegionPool::new();
        regions
            .add(VirtualRegion::new(address.start, address.len().unwrap()))
            .unwrap();
        Self {
            base_table,
            page_pool,
            regions,
            regime,
            granule,
        }
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
    /// the initial data
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<VirtualAddress>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(data.len(), Some(self.granule as usize))
            .map_err(|e| {
                XlatError::AllocationError(format!(
                    "Cannot allocate pages for {} bytes ({:?})",
                    data.len(),
                    e
                ))
            })?;

        pages.copy_data_to_page(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
    /// zeros
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<VirtualAddress>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(length, Some(self.granule as usize))
            .map_err(|e| {
                XlatError::AllocationError(format!(
                    "Cannot allocate pages for {length} bytes ({e:?})"
                ))
            })?;

        pages.zero_init();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Map memory area by physical address
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn map_physical_address_range(
        &mut self,
        va: Option<VirtualAddress>,
        pa: PhysicalAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmap memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
    pub fn unmap_virtual_address_range(
        &mut self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<(), XlatError> {
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(XlatError::RegionPoolError)
    }

    /// Query physical address by virtual address range. Only returns a value if the memory area
    /// is mapped as a contiguous area.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
    pub fn get_pa_by_va(
        &self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<PhysicalAddress, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
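    ///
    /// # Example
    ///
    /// A sketch assuming `xlat` and `code_va` exist in the caller's context:
    ///
    /// ```ignore
    /// // Make a loaded code page read-only and executable
    /// xlat.set_access_rights(code_va, 0x1000, MemoryAccessRights::RX | MemoryAccessRights::GLOBAL)?;
    /// ```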
    pub fn set_access_rights(
        &mut self,
        va: VirtualAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Activate memory mapping represented by the object
    ///
    /// # Safety
    /// When activating memory mapping for the running exception level, the
    /// caller must ensure that the new mapping will not break any existing
    /// references. After activation the caller must ensure that there are no
    /// active references when unmapping memory.
    pub unsafe fn activate(&self) {
        // Select translation granule
        let is_tg0 = match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, _)
            | TranslationRegime::EL2
            | TranslationRegime::EL3 => true,
            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => false,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, _) => true,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => false,
        };

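        // TG0 (TCR bits 15:14) and TG1 (TCR bits 31:30) encode the same granule
        // sizes with different values, hence the two separate match tables below.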
        #[cfg(target_arch = "aarch64")]
        if is_tg0 {
            self.modify_tcr(|tcr| {
                let tg0 = match self.granule {
                    TranslationGranule::Granule4k => 0b00,
                    TranslationGranule::Granule16k => 0b10,
                    TranslationGranule::Granule64k => 0b01,
                };

                (tcr & !(3 << 14)) | (tg0 << 14)
            });
        } else {
            self.modify_tcr(|tcr| {
                let tg1 = match self.granule {
                    TranslationGranule::Granule4k => 0b10,
                    TranslationGranule::Granule16k => 0b01,
                    TranslationGranule::Granule64k => 0b11,
                };

                (tcr & !(3 << 30)) | (tg1 << 30)
            });
        }

        // Set translation table
        let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.get_pa().0 as u64);

        #[cfg(target_arch = "aarch64")]
        match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL1_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) base_table_pa),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr ttbr0_el3, {0}
                isb",
                in(reg) base_table_pa),
        }
    }

    /// Modifies the TCR register of the selected regime of the instance.
    #[cfg(target_arch = "aarch64")]
    unsafe fn modify_tcr<F>(&self, f: F)
    where
        F: Fn(u64) -> u64,
    {
        let mut tcr: u64;

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el1
                isb",
                out(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "mrs {0}, tcr_el3
                isb",
                out(reg) tcr),
        }

        tcr = f(tcr);

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "msr tcr_el1, {0}
                isb",
                in(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr tcr_el3, {0}
                isb",
                in(reg) tcr),
        }
    }

    /// Prints the translation tables to debug console recursively
    pub fn print(&self) {
        debug!("Xlat table -> {:#010x}", self.base_table.get_pa().0 as u64);
        Self::print_table(
            self.granule.initial_lookup_level(),
            0,
            unsafe { self.base_table.get_as_slice() },
            self.granule,
        );
    }

    /// Prints a single translation table to the debug console
    /// # Arguments
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    /// * granule: Translation granule
    pub fn print_table(
        level: isize,
        va: usize,
        table: &[Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(granule.block_size_at_level(level))) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => debug!(
                    "{} {:#010x} Block -> {:#010x}",
                    level_prefix,
                    va,
                    descriptor.get_block_output_address(granule, level).0
                ),
                DescriptorType::Table => {
                    let next_level_table =
                        unsafe { descriptor.get_next_level_table(granule, level) };
                    debug!(
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix,
                        va,
                        next_level_table.as_ptr() as usize
                    );
                    Self::print_table(level + 1, va, next_level_table, granule);
                }
                _ => {}
            }
        }
    }

    /// Adds a memory region to the translation table. The function splits the region into blocks
    /// and uses the block level functions to do the mapping.
    /// # Arguments
    /// * region: Memory region object
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<VirtualAddress, XlatError> {
        let blocks = Self::split_region_to_blocks(
            region.get_pa(),
            region.base(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.map_block(block, attributes.clone());
        }

        Ok(region.base())
    }

    /// Removes a memory region from the translation table. The function splits the region into
    /// blocks and uses the block level functions to do the unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = Self::split_region_to_blocks(
            region.get_pa(),
            region.base(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Find the mapped region that contains the whole given area
    /// # Arguments
    /// * va: Virtual address to look for
    /// * length: Length of the area in bytes
    /// # Return value
    /// * Reference to the virtual region if found
    fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }

    /// Splits a memory region into blocks that match the granule size of the translation table.
    /// # Arguments
    /// * pa: Physical address
    /// * va: Virtual address
    /// * length: Region size in bytes
    /// * granule: Translation granule
    /// # Return value
    /// * Vector of granule sized blocks
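    ///
    /// # Example
    ///
    /// A sketch of how a region splits into the largest naturally aligned
    /// blocks (values taken from `test_split_to_pages` below); not a doctest
    /// since the function is private:
    ///
    /// ```ignore
    /// let blocks = Xlat::<36>::split_region_to_blocks(
    ///     PhysicalAddress(0x3fff_c000),
    ///     VirtualAddress(0x3fff_c000),
    ///     0x4020_5000,
    ///     TranslationGranule::Granule4k,
    /// )?;
    /// // -> 4 x 4 KiB pages, a 1 GiB block, a 2 MiB block and a final 4 KiB page
    /// ```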
    fn split_region_to_blocks(
        mut pa: PhysicalAddress,
        mut va: VirtualAddress,
        mut length: usize,
        granule: TranslationGranule<VA_BITS>,
    ) -> Result<Vec<Block>, XlatError> {
        let min_granule_mask = granule.block_size_at_level(3) - 1;

        if length == 0 {
            return Err(XlatError::InvalidParameterError(
                "Length cannot be 0".to_string(),
            ));
        }

        if (pa.0 | va.0 | length) & min_granule_mask != 0 {
            return Err(XlatError::InvalidParameterError(format!(
                "Addresses and length must be aligned {:#08x} {:#08x} {:#x} {:#x}",
                pa.0, va.0, length, min_granule_mask
            )));
        }

        let mut pages = Vec::new();

        while length > 0 {
            let initial_lookup_level = granule.initial_lookup_level();

            for block_size in
                (initial_lookup_level..=3).map(|level| granule.block_size_at_level(level))
            {
                if (pa.0 | va.0) & (block_size - 1) == 0 && length >= block_size {
                    pages.push(Block::new(pa, va, block_size));
                    pa = pa.add_offset(block_size).ok_or(XlatError::Overflow)?;
                    va = va.add_offset(block_size).ok_or(XlatError::Overflow)?;

                    length -= block_size;
                    break;
                }
            }
        }

        Ok(pages)
    }

    /// Add block to memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    /// * attributes: Memory block's permissions, flags
    fn map_block(&mut self, block: Block, attributes: Attributes) {
        Self::set_block_descriptor_recursively(
            attributes,
            block.pa,
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        );
    }

    /// Adds the block descriptor to the translation table, creating all the intermediate tables
    /// needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * block_size: The block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    #[allow(clippy::too_many_arguments)]
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: PhysicalAddress,
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required granule level
        if granule.block_size_at_level(level) == block_size {
            // Follow break-before-make sequence
            descriptor.set_block_or_invalid_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            descriptor.set_block_descriptor(granule, level, pa, attributes);
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .unwrap();
                unsafe {
                    let next_table = page.get_as_mut_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }
                Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    unsafe { descriptor.get_next_level_table_mut(granule, level) },
                    page_pool,
                    regime,
                    granule,
                )
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va.mask_for_level(granule, level);
                let current_pa = descriptor.get_block_output_address(granule, level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor

                // Follow break-before-make sequence
                descriptor.set_block_or_invalid_descriptor_to_invalid(level);
                Self::invalidate(regime, Some(current_va));

                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .unwrap();
                unsafe {
                    let next_table = page.get_as_mut_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }

                // Explode block descriptor to table entries
                for exploded_va in VirtualAddressRange::new(
                    current_va,
                    current_va
                        .add_offset(granule.block_size_at_level(level))
                        .unwrap(),
                )
                .step_by(granule.block_size_at_level(level + 1))
                {
                    let offset = exploded_va.diff(current_va).unwrap();
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa.add_offset(offset).unwrap(),
                        exploded_va.mask_for_level(granule, level),
                        granule.block_size_at_level(level + 1),
                        level + 1,
                        unsafe { descriptor.get_next_level_table_mut(granule, level) },
                        page_pool,
                        regime,
                        granule,
                    )
                }

                // Invoke self to continue recursion on the newly created level
                Self::set_block_descriptor_recursively(
                    attributes, pa, va, block_size, level, table, page_pool, regime, granule,
                );
            }
            DescriptorType::Table => Self::set_block_descriptor_recursively(
                attributes,
                pa,
                va.mask_for_level(granule, level),
                block_size,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(granule, level) },
                page_pool,
                regime,
                granule,
            ),
        }
    }

    /// Remove block from memory mapping
    /// # Arguments
    /// * block: memory block that can be represented by a single translation entry
    fn unmap_block(&mut self, block: Block) {
        Self::remove_block_descriptor_recursively(
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        );
    }

    /// Removes the block descriptor from the translation table, and releases all the intermediate
    /// tables that become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * block_size: Translation block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    fn remove_block_descriptor_recursively(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required level with the matching block size
        if granule.block_size_at_level(level) == block_size {
            descriptor.set_block_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Cannot remove block from non-existing table");
            }
            DescriptorType::Block => {
                panic!("Cannot remove block with different block size");
            }
            DescriptorType::Table => {
                let next_level_table =
                    unsafe { descriptor.get_next_level_table_mut(granule, level) };
                Self::remove_block_descriptor_recursively(
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    page_pool,
                    regime,
                    granule,
                );

                if next_level_table.iter().all(|d| !d.is_valid()) {
                    // Empty table
                    let mut page = unsafe {
                        Pages::from_slice(
                            descriptor.set_table_descriptor_to_invalid(granule, level),
                        )
                    };
                    page.zero_init();
                    page_pool.release_pages(page).unwrap();
                }
            }
        }
    }

    fn get_descriptor(&mut self, va: VirtualAddress, block_size: usize) -> &mut Descriptor {
        Self::walk_descriptors(
            va,
            block_size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
            self.granule,
        )
    }

    fn walk_descriptors(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> &mut Descriptor {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        if granule.block_size_at_level(level) == block_size {
            return descriptor;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Invalid descriptor");
            }
            DescriptorType::Block => {
                panic!("Cannot split existing block descriptor to table");
            }
            DescriptorType::Table => Self::walk_descriptors(
                va.mask_for_level(granule, level),
                block_size,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(granule, level) },
                granule,
            ),
        }
    }

    fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
        // SAFETY: The assembly code invalidates the translation table entry of
        // the VA or all entries of the translation regime.
        #[cfg(target_arch = "aarch64")]
        unsafe {
            if let Some(VirtualAddress(va)) = va {
                match regime {
                    TranslationRegime::EL1_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi vae2is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi vae3is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                }
            } else {
                match regime {
                    TranslationRegime::EL1_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi alle2
                        dsb nsh
                        isb"
                    ),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi alle3
                        dsb nsh
                        isb"
                    ),
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    type TestXlat = Xlat<36>;

    fn make_block(pa: usize, va: usize, size: usize) -> Block {
        Block::new(PhysicalAddress(pa), VirtualAddress(va), size)
    }

    #[test]
    fn test_split_to_pages() {
        let pages = TestXlat::split_region_to_blocks(
            PhysicalAddress(0x3fff_c000),
            VirtualAddress(0x3fff_c000),
            0x4020_5000,
            TranslationGranule::Granule4k,
        )
        .unwrap();
        assert_eq!(make_block(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
        assert_eq!(make_block(0x3fff_d000, 0x3fff_d000, 0x1000), pages[1]);
        assert_eq!(make_block(0x3fff_e000, 0x3fff_e000, 0x1000), pages[2]);
        assert_eq!(make_block(0x3fff_f000, 0x3fff_f000, 0x1000), pages[3]);
        assert_eq!(make_block(0x4000_0000, 0x4000_0000, 0x4000_0000), pages[4]);
        assert_eq!(make_block(0x8000_0000, 0x8000_0000, 0x0020_0000), pages[5]);
        assert_eq!(make_block(0x8020_0000, 0x8020_0000, 0x1000), pages[6]);
    }

    #[test]
    fn test_split_to_pages_unaligned() {
        let pages = TestXlat::split_region_to_blocks(
            PhysicalAddress(0x3fff_c000),
            VirtualAddress(0x3f20_0000),
            0x200000,
            TranslationGranule::Granule4k,
        )
        .unwrap();
        for (i, block) in pages.iter().enumerate().take(512) {
            assert_eq!(
                make_block(0x3fff_c000 + (i << 12), 0x3f20_0000 + (i << 12), 0x1000),
                *block
            );
        }
    }
}
1039}