Add physical and virtual address types
Create wrapper types for physical and virtual addresses to limit the
operations available on addresses and to make it possible to require an
explicit address type for a given function parameter.
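
As an illustration, a minimal usage sketch of the new types
(hypothetical caller code, not part of this patch):

    use crate::address::{PhysicalAddress, VirtualAddress};

    // Constructing an address is unsafe, the caller guarantees that
    // the raw value is a valid address of the given kind.
    let pa = unsafe { PhysicalAddress::new(0x8000_0000) };

    // Arithmetic is checked and returns None on overflow.
    let next_page = pa.add_offset(0x1000).unwrap();

    // Converting between the two kinds has to be explicit.
    let va: VirtualAddress = pa.identity_va();
    let raw: usize = va.into();
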
Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: Iaef5ab1af24fc153d959d79404b3827d9c85bf53
diff --git a/src/address.rs b/src/address.rs
new file mode 100644
index 0000000..12bbd8e
--- /dev/null
+++ b/src/address.rs
@@ -0,0 +1,148 @@
+// SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
+// SPDX-License-Identifier: MIT OR Apache-2.0
+
+use core::ops::Range;
+
+use super::Xlat;
+
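+/// Address in the physical address space.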
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
+pub struct PhysicalAddress(pub(super) usize);
+
+impl PhysicalAddress {
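+ /// Create a physical address from a raw value.
+ ///
+ /// # Safety
+ /// The caller is responsible for ensuring that the raw value is a valid
+ /// physical address.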
+ pub const unsafe fn new(address: usize) -> Self {
+ Self(address)
+ }
+
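+ /// Add an offset to the address, returning `None` on overflow.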
+ pub const fn add_offset(self, offset: usize) -> Option<Self> {
+ if let Some(address) = self.0.checked_add(offset) {
+ Some(Self(address))
+ } else {
+ None
+ }
+ }
+
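+ /// Get the identity mapped virtual address, i.e. the virtual address
+ /// with the same numeric value.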
+ pub const fn identity_va(self) -> VirtualAddress {
+ VirtualAddress(self.0)
+ }
+
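+ /// Calculate the difference of the two addresses, returning `None` if
+ /// `rhs` is greater than `self`.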
+ pub const fn diff(self, rhs: Self) -> Option<usize> {
+ self.0.checked_sub(rhs.0)
+ }
+}
+
+impl From<PhysicalAddress> for usize {
+ fn from(value: PhysicalAddress) -> Self {
+ value.0
+ }
+}
+
+impl From<PhysicalAddress> for u64 {
+ fn from(value: PhysicalAddress) -> Self {
+ value.0 as u64
+ }
+}
+
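+/// Address in the virtual address space.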
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
+pub struct VirtualAddress(pub(super) usize);
+
+impl VirtualAddress {
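+ /// Create a virtual address from a raw value.
+ ///
+ /// # Safety
+ /// The caller is responsible for ensuring that the raw value is a valid
+ /// virtual address.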
+ pub const unsafe fn new(address: usize) -> Self {
+ Self(address)
+ }
+
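+ /// Add an offset to the address, returning `None` on overflow.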
+ pub const fn add_offset(self, offset: usize) -> Option<Self> {
+ if let Some(address) = self.0.checked_add(offset) {
+ Some(Self(address))
+ } else {
+ None
+ }
+ }
+
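+ /// Get the identity mapped physical address, i.e. the physical address
+ /// with the same numeric value.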
+ pub const fn identity_pa(self) -> PhysicalAddress {
+ PhysicalAddress(self.0)
+ }
+
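+ /// Mask the address to the offset within the translation granule of the
+ /// given level, i.e. keep the bits below `GRANULE_SIZES[level]`.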
+ pub const fn mask_for_level(self, level: usize) -> Self {
+ Self(self.0 & (Xlat::GRANULE_SIZES[level] - 1))
+ }
+
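+ /// Get the translation table entry index of the address at the given
+ /// level.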
+ pub const fn get_level_index(self, level: usize) -> usize {
+ self.0 / Xlat::GRANULE_SIZES[level]
+ }
+
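+ /// Mask the address with the given bit mask.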
+ pub const fn mask_bits(self, mask: usize) -> Self {
+ Self(self.0 & mask)
+ }
+
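+ /// Calculate the difference of the two addresses, returning `None` if
+ /// `rhs` is greater than `self`.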
+ pub const fn diff(self, rhs: Self) -> Option<usize> {
+ self.0.checked_sub(rhs.0)
+ }
+}
+
+impl From<VirtualAddress> for usize {
+ fn from(value: VirtualAddress) -> Self {
+ value.0
+ }
+}
+
+impl From<VirtualAddress> for u64 {
+ fn from(value: VirtualAddress) -> Self {
+ value.0 as u64
+ }
+}
+
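+/// Virtual address range with an exclusive upper bound.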
+pub struct VirtualAddressRange {
+ pub(super) start: VirtualAddress,
+ pub(super) end: VirtualAddress,
+}
+
+impl VirtualAddressRange {
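+ /// Create a new range from the given bounds.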
+ pub fn new(start: VirtualAddress, end: VirtualAddress) -> Self {
+ Self { start, end }
+ }
+
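+ /// Create a range from a `Range<usize>`.
+ ///
+ /// # Safety
+ /// The caller is responsible for ensuring that the bounds of the range
+ /// are valid virtual addresses.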
+ pub unsafe fn from_range(value: Range<usize>) -> Self {
+ Self::new(
+ VirtualAddress::new(value.start),
+ VirtualAddress::new(value.end),
+ )
+ }
+
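+ /// Get the length of the range, returning `None` if the end is below
+ /// the start.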
+ pub fn len(&self) -> Option<usize> {
+ self.end.diff(self.start)
+ }
+
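+ /// Create an iterator which steps through the range with the given
+ /// step size.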
+ pub fn step_by(self, step: usize) -> VirtualAddressIterator {
+ VirtualAddressIterator {
+ next: self.start,
+ end: self.end,
+ step,
+ }
+ }
+}
+
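+/// Iterator over a virtual address range using a fixed step size.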
+pub struct VirtualAddressIterator {
+ next: VirtualAddress,
+ end: VirtualAddress,
+ step: usize,
+}
+
+impl Iterator for VirtualAddressIterator {
+ type Item = VirtualAddress;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.next < self.end {
+ let current = self.next;
+
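+ // Saturate at `end` on overflow so that the iteration terminates
+ // instead of wrapping around the address space.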
+ self.next = if let Some(next) = self.next.add_offset(self.step) {
+ next
+ } else {
+ self.end
+ };
+
+ Some(current)
+ } else {
+ None
+ }
+ }
+}
diff --git a/src/descriptor.rs b/src/descriptor.rs
index 6f370ec..bf10380 100644
--- a/src/descriptor.rs
+++ b/src/descriptor.rs
@@ -11,6 +11,8 @@
use crate::kernel_space::KernelSpace;
use crate::MemoryAttributesIndex;
+use crate::address::PhysicalAddress;
+
/// Memory shareability
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum Shareability {
@@ -151,27 +153,27 @@
pub fn set_block_descriptor(
&mut self,
level: usize,
- output_address: usize,
+ output_address: PhysicalAddress,
attributes: Attributes,
) {
let attr: u64 = attributes.into();
assert!(level <= 3);
assert!(self.get_descriptor_type(level) != DescriptorType::Table);
- assert_eq!(0, output_address & !Self::get_oa_mask(level));
+ assert_eq!(0, output_address.0 & !Self::get_oa_mask(level));
assert_eq!(0, attr & !Self::ATTR_MASK);
let table_bit = if level < 3 { 0 } else { Self::TABLE_BIT };
- self.set(Self::VALID_BIT | table_bit | output_address as u64 | attr);
+ self.set(Self::VALID_BIT | table_bit | output_address.0 as u64 | attr);
}
/// Get output address from the block descriptor
- pub fn get_block_output_address(&self, level: usize) -> usize {
+ pub fn get_block_output_address(&self, level: usize) -> PhysicalAddress {
assert!(level <= 3);
assert_eq!(DescriptorType::Block, self.get_descriptor_type(level));
- (self.get() & Self::OA_MASK) as usize
+ PhysicalAddress((self.get() & Self::OA_MASK) as usize)
}
/// Set the attributes of the block descriptor
@@ -461,7 +463,7 @@
cell: UnsafeCell::new(1),
};
- descriptor.set_block_descriptor(1, 0, Attributes::default());
+ descriptor.set_block_descriptor(1, PhysicalAddress(0), Attributes::default());
assert_eq!(0x1, descriptor.get());
}
@@ -472,7 +474,7 @@
cell: UnsafeCell::new(0),
};
- descriptor.set_block_descriptor(1, 1 << 63, Attributes::default());
+ descriptor.set_block_descriptor(1, PhysicalAddress(1 << 63), Attributes::default());
}
#[test]
@@ -483,7 +485,7 @@
descriptor.set_block_descriptor(
1,
- 0x0000000f_c0000000,
+ PhysicalAddress(0x0000000f_c0000000),
Attributes {
uxn: true,
..Default::default()
@@ -497,7 +499,7 @@
descriptor.set_block_descriptor(
3,
- 0x0000000f_fffff000,
+ PhysicalAddress(0x0000000f_fffff000),
Attributes {
uxn: true,
..Default::default()
@@ -505,7 +507,10 @@
);
assert_eq!(0x0040000f_fffff003, descriptor.get());
- assert_eq!(0x0000000f_fffff000, descriptor.get_block_output_address(3));
+ assert_eq!(
+ PhysicalAddress(0x0000000f_fffff000),
+ descriptor.get_block_output_address(3)
+ );
assert_eq!(
Attributes {
uxn: true,
diff --git a/src/kernel_space.rs b/src/kernel_space.rs
index d0a52a2..1069c1c 100644
--- a/src/kernel_space.rs
+++ b/src/kernel_space.rs
@@ -9,6 +9,7 @@
use spin::Mutex;
use super::{
+ address::{PhysicalAddress, VirtualAddress, VirtualAddressRange},
page_pool::{Page, PagePool},
MemoryAccessRights, Xlat, XlatError,
};
@@ -36,10 +37,9 @@
/// * page_pool: Page pool for allocation kernel translation tables
pub fn new(page_pool: PagePool) -> Self {
Self {
- xlat: Arc::new(Mutex::new(Xlat::new(
- page_pool,
- 0x0000_0000..0x10_0000_0000,
- ))),
+ xlat: Arc::new(Mutex::new(Xlat::new(page_pool, unsafe {
+ VirtualAddressRange::from_range(0x0000_0000..0x10_0000_0000)
+ }))),
}
}
@@ -56,16 +56,19 @@
) -> Result<(), XlatError> {
let mut xlat = self.xlat.lock();
+ let code_pa = PhysicalAddress(code_range.start);
+ let data_pa = PhysicalAddress(data_range.start);
+
xlat.map_physical_address_range(
- Some(code_range.start),
- code_range.start,
+ Some(code_pa.identity_va()),
+ code_pa,
code_range.len(),
MemoryAccessRights::RX | MemoryAccessRights::GLOBAL,
)?;
xlat.map_physical_address_range(
- Some(data_range.start),
- data_range.start,
+ Some(data_pa.identity_va()),
+ data_pa,
data_range.len(),
MemoryAccessRights::RW | MemoryAccessRights::GLOBAL,
)?;
@@ -86,14 +89,16 @@
length: usize,
access_rights: MemoryAccessRights,
) -> Result<usize, XlatError> {
+ let pa = PhysicalAddress(pa);
+
let lower_va = self.xlat.lock().map_physical_address_range(
- Some(pa),
+ Some(pa.identity_va()),
pa,
length,
access_rights | MemoryAccessRights::GLOBAL,
)?;
- Ok(Self::pa_to_kernel(lower_va as u64) as usize)
+ Ok(Self::pa_to_kernel(lower_va.0 as u64) as usize)
}
/// Unmap memory range from the kernel address space
@@ -103,9 +108,10 @@
/// # Return value
/// The result of the operation
pub fn unmap_memory(&self, va: usize, length: usize) -> Result<(), XlatError> {
- self.xlat
- .lock()
- .unmap_virtual_address_range(Self::kernel_to_pa(va as u64) as usize, length)
+ self.xlat.lock().unmap_virtual_address_range(
+ VirtualAddress(Self::kernel_to_pa(va as u64) as usize),
+ length,
+ )
}
/// Activate kernel address space mapping
diff --git a/src/lib.rs b/src/lib.rs
index 7fc5839..884f8e0 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -9,9 +9,9 @@
use core::arch::asm;
use core::iter::zip;
-use core::ops::Range;
use core::{fmt, panic};
+use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
use alloc::boxed::Box;
use alloc::format;
use alloc::string::{String, ToString};
@@ -29,6 +29,7 @@
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};
+pub mod address;
mod descriptor;
pub mod kernel_space;
pub mod page_pool;
@@ -126,13 +127,13 @@
#[derive(PartialEq)]
struct Block {
- pa: usize,
- va: usize,
+ pa: PhysicalAddress,
+ va: VirtualAddress,
granule: usize,
}
impl Block {
- fn new(pa: usize, va: usize, granule: usize) -> Self {
+ fn new(pa: PhysicalAddress, va: VirtualAddress, granule: usize) -> Self {
assert!(Xlat::GRANULE_SIZES.contains(&granule));
Self { pa, va, granule }
}
@@ -141,8 +142,8 @@
impl fmt::Debug for Block {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Block")
- .field("pa", &format_args!("{:#010x}", self.pa))
- .field("va", &format_args!("{:#010x}", self.va))
+ .field("pa", &format_args!("{:#010x}", self.pa.0))
+ .field("va", &format_args!("{:#010x}", self.va.0))
.field("granule", &format_args!("{:#010x}", self.granule))
.finish()
}
@@ -189,10 +190,10 @@
impl Xlat {
pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];
- pub fn new(page_pool: PagePool, va_range: Range<usize>) -> Self {
+ pub fn new(page_pool: PagePool, va_range: VirtualAddressRange) -> Self {
let mut regions = RegionPool::new();
regions
- .add(VirtualRegion::new(va_range.start, va_range.len()))
+ .add(VirtualRegion::new(va_range.start, va_range.len().unwrap()))
.unwrap();
Self {
base_table: Box::new(BaseTable::new()),
@@ -211,10 +212,10 @@
/// * Virtual address of the mapped memory
pub fn allocate_initalized_range(
&mut self,
- va: Option<usize>,
+ va: Option<VirtualAddress>,
data: &[u8],
access_rights: MemoryAccessRights,
- ) -> Result<usize, XlatError> {
+ ) -> Result<VirtualAddress, XlatError> {
let mut pages = self.page_pool.allocate_pages(data.len()).map_err(|e| {
XlatError::AllocationError(format!(
"Cannot allocate pages for {} bytes ({:?})",
@@ -247,10 +248,10 @@
/// * Virtual address of the mapped memory
pub fn allocate_zero_init_range(
&mut self,
- va: Option<usize>,
+ va: Option<VirtualAddress>,
length: usize,
access_rights: MemoryAccessRights,
- ) -> Result<usize, XlatError> {
+ ) -> Result<VirtualAddress, XlatError> {
let mut pages = self.page_pool.allocate_pages(length).map_err(|e| {
XlatError::AllocationError(format!("Cannot allocate pages for {length} bytes ({e:?})"))
})?;
@@ -280,11 +281,11 @@
/// * Virtual address of the mapped memory
pub fn map_physical_address_range(
&mut self,
- va: Option<usize>,
- pa: usize,
+ va: Option<VirtualAddress>,
+ pa: PhysicalAddress,
length: usize,
access_rights: MemoryAccessRights,
- ) -> Result<usize, XlatError> {
+ ) -> Result<VirtualAddress, XlatError> {
let resource = PhysicalRegion::PhysicalAddress(pa);
let region = if let Some(required_va) = va {
self.regions.acquire(required_va, length, resource)
@@ -302,7 +303,7 @@
/// * length: Length of the memory area in bytes
pub fn unmap_virtual_address_range(
&mut self,
- va: usize,
+ va: VirtualAddress,
length: usize,
) -> Result<(), XlatError> {
let pa = self.get_pa_by_va(va, length)?;
@@ -323,7 +324,11 @@
/// * length: Length of the memory area in bytes
/// # Return value
/// * Physical address of the mapped memory
- pub fn get_pa_by_va(&self, va: usize, length: usize) -> Result<usize, XlatError> {
+ pub fn get_pa_by_va(
+ &self,
+ va: VirtualAddress,
+ length: usize,
+ ) -> Result<PhysicalAddress, XlatError> {
let containing_region = self
.find_containing_region(va, length)
.ok_or(XlatError::NotFound)?;
@@ -342,7 +347,7 @@
/// * access_rights: New memory access rights of the area
pub fn set_access_rights(
&mut self,
- va: usize,
+ va: VirtualAddress,
length: usize,
access_rights: MemoryAccessRights,
) -> Result<(), XlatError> {
@@ -415,7 +420,7 @@
"{} {:#010x} Block -> {:#010x}",
level_prefix,
va,
- descriptor.get_block_output_address(level)
+ descriptor.get_block_output_address(level).0
),
DescriptorType::Table => {
let next_level_table = unsafe { descriptor.get_next_level_table(level) };
@@ -442,7 +447,7 @@
&mut self,
region: VirtualRegion,
attributes: Attributes,
- ) -> Result<usize, XlatError> {
+ ) -> Result<VirtualAddress, XlatError> {
let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
for block in blocks {
self.map_block(block, attributes.clone());
@@ -469,7 +474,7 @@
/// * region: Virtual address to look for
/// # Return value
/// * Reference to virtual region if found
- fn find_containing_region(&self, va: usize, length: usize) -> Option<&VirtualRegion> {
+ fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
self.regions.find_containing_region(va, length).ok()
}
@@ -481,8 +486,8 @@
/// # Return value
/// * Vector of granule sized blocks
fn split_region_to_blocks(
- mut pa: usize,
- mut va: usize,
+ mut pa: PhysicalAddress,
+ mut va: VirtualAddress,
mut length: usize,
) -> Result<Vec<Block>, XlatError> {
let min_granule_mask = Self::GRANULE_SIZES.last().unwrap() - 1;
@@ -493,12 +498,10 @@
));
}
- if pa & min_granule_mask != 0
- || va & min_granule_mask != 0
- || length & min_granule_mask != 0
- {
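+ // OR-ing the addresses and the length first lets a single mask test
+ // check the alignment of all three values at once.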
+ if (pa.0 | va.0 | length) & min_granule_mask != 0 {
return Err(XlatError::InvalidParameterError(format!(
- "Addresses and length must be aligned {pa:#010x} {va:#010x} {length:#x}"
+ "Addresses and length must be aligned {:#08x} {:#08x} {:#x}",
+ pa.0, va.0, length
)));
}
@@ -506,10 +509,10 @@
while length > 0 {
for granule in &Self::GRANULE_SIZES {
- if (pa | va) & (*granule - 1) == 0 && length >= *granule {
+ if (pa.0 | va.0) & (*granule - 1) == 0 && length >= *granule {
pages.push(Block::new(pa, va, *granule));
- pa += *granule;
- va = va.checked_add(*granule).ok_or(XlatError::Overflow)?;
+ pa = pa.add_offset(*granule).ok_or(XlatError::Overflow)?;
+ va = va.add_offset(*granule).ok_or(XlatError::Overflow)?;
length -= *granule;
break;
@@ -548,15 +551,15 @@
/// * page_pool: Page pool where the function can allocate pages for the translation tables
fn set_block_descriptor_recursively(
attributes: Attributes,
- pa: usize,
- va: usize,
+ pa: PhysicalAddress,
+ va: VirtualAddress,
granule: usize,
level: usize,
table: &mut [Descriptor],
page_pool: &PagePool,
) {
// Get descriptor of the current level
- let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];
+ let descriptor = &mut table[va.get_level_index(level)];
// We reached the required granule level
if Self::GRANULE_SIZES[level] == granule {
@@ -575,7 +578,7 @@
Self::set_block_descriptor_recursively(
attributes,
pa,
- va & (Self::GRANULE_SIZES[level] - 1),
+ va.mask_for_level(level),
granule,
level + 1,
unsafe { descriptor.get_next_level_table_mut(level) },
@@ -584,7 +587,7 @@
}
DescriptorType::Block => {
// Saving current descriptor details
- let current_va = va & !(Self::GRANULE_SIZES[level] - 1);
+ // Align the address down to the base of the block. Masking with
+ // `mask_for_level` would keep the offset within the block instead of
+ // its start address.
+ let current_va = VirtualAddress(va.0 & !(Self::GRANULE_SIZES[level] - 1));
let current_pa = descriptor.get_block_output_address(level);
let current_attributes = descriptor.get_block_attributes(level);
@@ -596,14 +599,17 @@
}
// Explode block descriptor to table entries
- for exploded_va in (current_va..(current_va + Self::GRANULE_SIZES[level]))
- .step_by(Self::GRANULE_SIZES[level + 1])
+ for exploded_va in VirtualAddressRange::new(
+ current_va,
+ current_va.add_offset(Self::GRANULE_SIZES[level]).unwrap(),
+ )
+ .step_by(Self::GRANULE_SIZES[level + 1])
{
- let offset = exploded_va - current_va;
+ let offset = exploded_va.diff(current_va).unwrap();
Self::set_block_descriptor_recursively(
current_attributes.clone(),
- current_pa + offset,
- exploded_va & (Self::GRANULE_SIZES[level] - 1),
+ current_pa.add_offset(offset).unwrap(),
+ exploded_va.mask_for_level(level),
Self::GRANULE_SIZES[level + 1],
level + 1,
unsafe { descriptor.get_next_level_table_mut(level) },
@@ -619,7 +625,7 @@
DescriptorType::Table => Self::set_block_descriptor_recursively(
attributes,
pa,
- va & (Self::GRANULE_SIZES[level] - 1),
+ va.mask_for_level(level),
granule,
level + 1,
unsafe { descriptor.get_next_level_table_mut(level) },
@@ -650,14 +656,14 @@
/// * table: Translation table on the given level
/// * page_pool: Page pool where the function can release the pages of empty tables
fn remove_block_descriptor_recursively(
- va: usize,
+ va: VirtualAddress,
granule: usize,
level: usize,
table: &mut [Descriptor],
page_pool: &PagePool,
) {
// Get descriptor of the current level
- let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];
+ let descriptor = &mut table[va.get_level_index(level)];
// We reached the required granule level
if Self::GRANULE_SIZES[level] == granule {
@@ -676,7 +682,7 @@
DescriptorType::Table => {
let next_level_table = unsafe { descriptor.get_next_level_table_mut(level) };
Self::remove_block_descriptor_recursively(
- va & (Self::GRANULE_SIZES[level] - 1),
+ va.mask_for_level(level),
granule,
level + 1,
next_level_table,
@@ -695,18 +701,18 @@
}
}
- fn get_descriptor(&mut self, va: usize, granule: usize) -> &mut Descriptor {
+ fn get_descriptor(&mut self, va: VirtualAddress, granule: usize) -> &mut Descriptor {
Self::walk_descriptors(va, granule, 0, &mut self.base_table.descriptors)
}
fn walk_descriptors(
- va: usize,
+ va: VirtualAddress,
granule: usize,
level: usize,
table: &mut [Descriptor],
) -> &mut Descriptor {
// Get descriptor of the current level
- let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];
+ let descriptor = &mut table[va.get_level_index(level)];
if Self::GRANULE_SIZES[level] == granule {
return descriptor;
@@ -720,12 +726,11 @@
DescriptorType::Block => {
panic!("Cannot split existing block descriptor to table");
}
- DescriptorType::Table => Self::walk_descriptors(
- va & (Self::GRANULE_SIZES[level] - 1),
- granule,
- level + 1,
- unsafe { descriptor.get_next_level_table_mut(level) },
- ),
+ DescriptorType::Table => {
+ Self::walk_descriptors(va.mask_for_level(level), granule, level + 1, unsafe {
+ descriptor.get_next_level_table_mut(level)
+ })
+ }
}
}
}
@@ -734,24 +739,38 @@
mod tests {
use super::*;
+ fn make_block(pa: usize, va: usize, granule: usize) -> Block {
+ Block::new(PhysicalAddress(pa), VirtualAddress(va), granule)
+ }
+
#[test]
fn test_split_to_pages() {
- let pages = Xlat::split_region_to_blocks(0x3fff_c000, 0x3fff_c000, 0x4020_5000).unwrap();
- assert_eq!(Block::new(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
- assert_eq!(Block::new(0x3fff_d000, 0x3fff_d000, 0x1000), pages[1]);
- assert_eq!(Block::new(0x3fff_e000, 0x3fff_e000, 0x1000), pages[2]);
- assert_eq!(Block::new(0x3fff_f000, 0x3fff_f000, 0x1000), pages[3]);
- assert_eq!(Block::new(0x4000_0000, 0x4000_0000, 0x4000_0000), pages[4]);
- assert_eq!(Block::new(0x8000_0000, 0x8000_0000, 0x0020_0000), pages[5]);
- assert_eq!(Block::new(0x8020_0000, 0x8020_0000, 0x1000), pages[6]);
+ let pages = Xlat::split_region_to_blocks(
+ PhysicalAddress(0x3fff_c000),
+ VirtualAddress(0x3fff_c000),
+ 0x4020_5000,
+ )
+ .unwrap();
+ assert_eq!(make_block(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
+ assert_eq!(make_block(0x3fff_d000, 0x3fff_d000, 0x1000), pages[1]);
+ assert_eq!(make_block(0x3fff_e000, 0x3fff_e000, 0x1000), pages[2]);
+ assert_eq!(make_block(0x3fff_f000, 0x3fff_f000, 0x1000), pages[3]);
+ assert_eq!(make_block(0x4000_0000, 0x4000_0000, 0x4000_0000), pages[4]);
+ assert_eq!(make_block(0x8000_0000, 0x8000_0000, 0x0020_0000), pages[5]);
+ assert_eq!(make_block(0x8020_0000, 0x8020_0000, 0x1000), pages[6]);
}
#[test]
fn test_split_to_pages_unaligned() {
- let pages = Xlat::split_region_to_blocks(0x3fff_c000, 0x3f20_0000, 0x200000).unwrap();
+ let pages = Xlat::split_region_to_blocks(
+ PhysicalAddress(0x3fff_c000),
+ VirtualAddress(0x3f20_0000),
+ 0x200000,
+ )
+ .unwrap();
for (i, block) in pages.iter().enumerate().take(512) {
assert_eq!(
- Block::new(0x3fff_c000 + (i << 12), 0x3f20_0000 + (i << 12), 0x1000),
+ make_block(0x3fff_c000 + (i << 12), 0x3f20_0000 + (i << 12), 0x1000),
*block
);
}
diff --git a/src/page_pool.rs b/src/page_pool.rs
index 5161ae7..8636af7 100644
--- a/src/page_pool.rs
+++ b/src/page_pool.rs
@@ -9,6 +9,7 @@
use alloc::vec::Vec;
use spin::Mutex;
+use super::address::PhysicalAddress;
use super::kernel_space::KernelSpace;
use super::region_pool::{Region, RegionPool, RegionPoolError};
@@ -62,8 +63,8 @@
}
/// Get physical address
- pub fn get_pa(&self) -> usize {
- self.pa
+ pub fn get_pa(&self) -> PhysicalAddress {
+ PhysicalAddress(self.pa)
}
/// Get as mutable slice
@@ -210,7 +211,7 @@
assert_eq!(area.as_ptr() as usize, pages.pa);
assert_eq!(area.len(), pages.length);
assert!(pages.used);
- assert_eq!(area.as_ptr() as usize, pages.get_pa());
+ assert_eq!(PhysicalAddress(area.as_ptr() as usize), pages.get_pa());
assert_eq!(area.as_ptr() as usize, pages.base());
assert_eq!(area.len(), pages.length());
assert!(pages.used());
diff --git a/src/region.rs b/src/region.rs
index 34428a2..83c54ab 100644
--- a/src/region.rs
+++ b/src/region.rs
@@ -9,6 +9,7 @@
use log::debug;
use super::{
+ address::{PhysicalAddress, VirtualAddress},
page_pool::{PagePool, Pages},
region_pool::Region,
};
@@ -22,12 +23,12 @@
pub enum PhysicalRegion {
Unused,
Allocated(PagePool, Pages),
- PhysicalAddress(usize),
+ PhysicalAddress(PhysicalAddress),
}
impl PhysicalRegion {
/// Get physical memory address
- fn get_pa(&self) -> usize {
+ fn get_pa(&self) -> PhysicalAddress {
match self {
PhysicalRegion::Unused => panic!("Unused area has no PA"),
PhysicalRegion::Allocated(_page_pool, pages) => pages.get_pa(),
@@ -40,24 +41,24 @@
///
/// A virtual memory region has a virtual address, a length and a physical region.
pub struct VirtualRegion {
- va: usize,
+ va: VirtualAddress,
length: usize,
physical_region: PhysicalRegion,
}
impl VirtualRegion {
/// Create new virtual memory region without a physical region
- pub fn new(va: usize, length: usize) -> Self {
+ pub fn new(va: VirtualAddress, length: usize) -> Self {
Self::new_from_fields(va, length, PhysicalRegion::Unused)
}
/// Create virtual region with points to a given physical address
- pub fn new_with_pa(pa: usize, va: usize, length: usize) -> Self {
+ pub fn new_with_pa(pa: PhysicalAddress, va: VirtualAddress, length: usize) -> Self {
Self::new_from_fields(va, length, PhysicalRegion::PhysicalAddress(pa))
}
/// Create virtual region by defining all the fields of the object
- fn new_from_fields(va: usize, length: usize, physical_region: PhysicalRegion) -> Self {
+ fn new_from_fields(va: VirtualAddress, length: usize, physical_region: PhysicalRegion) -> Self {
Self {
va,
length,
@@ -66,29 +67,29 @@
}
/// Get the base address of the linked physical region
- pub fn get_pa(&self) -> usize {
+ pub fn get_pa(&self) -> PhysicalAddress {
self.physical_region.get_pa()
}
/// Get physical address for a virtual address
- pub fn get_pa_for_va(&self, va: usize) -> usize {
- let offset = va.checked_sub(self.va).unwrap();
+ pub fn get_pa_for_va(&self, va: VirtualAddress) -> PhysicalAddress {
+ let offset = va.diff(self.va).unwrap();
assert!(offset < self.length);
- self.get_pa().checked_add(offset).unwrap()
+ self.get_pa().add_offset(offset).unwrap()
}
}
impl Region for VirtualRegion {
type Resource = PhysicalRegion;
- type Base = usize;
+ type Base = VirtualAddress;
type Length = usize;
- fn base(&self) -> usize {
+ fn base(&self) -> Self::Base {
self.va
}
- fn length(&self) -> usize {
+ fn length(&self) -> Self::Length {
self.length
}
@@ -96,9 +97,9 @@
!matches!(self.physical_region, PhysicalRegion::Unused)
}
- fn contains(&self, base: usize, length: usize) -> bool {
+ fn contains(&self, base: Self::Base, length: Self::Length) -> bool {
if let (Some(end), Some(self_end)) =
- (base.checked_add(length), self.va.checked_add(self.length))
+ (base.add_offset(length), self.va.add_offset(self.length))
{
self.va <= base && end <= self_end
} else {
@@ -108,7 +109,7 @@
fn try_append(&mut self, other: &Self) -> bool {
if let (Some(self_end), Some(new_length)) = (
- self.va.checked_add(self.length),
+ self.va.add_offset(self.length),
self.length.checked_add(other.length),
) {
if self_end == other.va {
@@ -125,7 +126,7 @@
) => {
// Used ranges can be only merged if the PA doesn't overflow and it is
// consecutive
- if let Some(self_end_pa) = self_pa.checked_add(self.length) {
+ if let Some(self_end_pa) = self_pa.add_offset(self.length) {
if self_end_pa == *other_pa {
self.length = new_length;
return true;
@@ -146,8 +147,8 @@
fn create_split(
&self,
- base: usize,
- length: usize,
+ base: Self::Base,
+ length: Self::Length,
resource: Option<Self::Resource>,
) -> (Self, Vec<Self>) {
assert!(self.contains(base, length));
@@ -159,15 +160,15 @@
let mut res = Vec::new();
if self.va != base {
- res.push(Self::new(self.va, base.checked_sub(self.va).unwrap()));
+ res.push(Self::new(self.va, base.diff(self.va).unwrap()));
}
res.push(Self::new_from_fields(base, length, physical_region));
- let end = base.checked_add(length).unwrap();
- let self_end = self.va.checked_add(self.length).unwrap();
+ let end = base.add_offset(length).unwrap();
+ let self_end = self.va.add_offset(self.length).unwrap();
if end != self_end {
- res.push(Self::new(end, self_end.checked_sub(end).unwrap()));
+ res.push(Self::new(end, self_end.diff(end).unwrap()));
}
(
@@ -190,23 +191,23 @@
res.push(Self::new_from_fields(
self.va,
- base.checked_sub(self.va).unwrap(),
+ base.diff(self.va).unwrap(),
physical_region,
));
}
res.push(Self::new(base, length));
- let end = base.checked_add(length).unwrap();
- let self_end = self.va.checked_add(self.length).unwrap();
+ let end = base.add_offset(length).unwrap();
+ let self_end = self.va.add_offset(self.length).unwrap();
if end != self_end {
let physical_region = match &self.physical_region {
PhysicalRegion::Allocated(_page_pool, _pages) => {
todo!("Implement Pages::split");
}
PhysicalRegion::PhysicalAddress(pa) => {
- let offset = end.checked_sub(self.va).unwrap();
- PhysicalRegion::PhysicalAddress(pa.checked_add(offset).unwrap())
+ let offset = end.diff(self.va).unwrap();
+ PhysicalRegion::PhysicalAddress(pa.add_offset(offset).unwrap())
}
_ => {
panic!("Splitting unused region by other unused")
@@ -215,7 +216,7 @@
res.push(Self::new_from_fields(
end,
- self_end.checked_sub(end).unwrap(),
+ self_end.diff(end).unwrap(),
physical_region,
));
}
@@ -235,9 +236,9 @@
if let PhysicalRegion::Allocated(page_pool, pages) = physical_region {
debug!(
- "Dropping physical region with pages: PA={:#010x} VA={:#010x}",
- pages.get_pa(),
- self.base(),
+ "Dropping physical region with pages: PA={:#08x} VA={:#08x}",
+ pages.get_pa().0,
+ self.base().0,
);
page_pool.release_pages(pages).unwrap();
@@ -247,8 +248,8 @@
#[cfg(test)]
mod tests {
- use super::super::page_pool::PagePoolArea;
use super::*;
+ use crate::page_pool::PagePoolArea;
#[test]
#[should_panic]
@@ -259,12 +260,14 @@
#[test]
fn test_physical_region() {
- const PA: usize = 0x0123_4567_89ab_cdef;
+ const PA: PhysicalAddress = PhysicalAddress(0x0123_4567_89ab_cdef);
const LENGTH: usize = 0x8000_0000_0000;
static PAGE_POOL_AREA: PagePoolArea<16> = PagePoolArea::new();
- let region =
- PhysicalRegion::Allocated(PagePool::new(&PAGE_POOL_AREA), Pages::new(PA, LENGTH, true));
+ let region = PhysicalRegion::Allocated(
+ PagePool::new(&PAGE_POOL_AREA),
+ Pages::new(PA.0, LENGTH, true),
+ );
assert_eq!(PA, region.get_pa());
let region = PhysicalRegion::PhysicalAddress(PA);
@@ -273,8 +276,8 @@
#[test]
fn test_virtual_region() {
- const VA: usize = 0x0123_4567_89ab_cdef;
- const PA: usize = 0xfedc_ba98_7654_3210;
+ const VA: VirtualAddress = VirtualAddress(0x0123_4567_89ab_cdef);
+ const PA: PhysicalAddress = PhysicalAddress(0xfedc_ba98_7654_3210);
const LENGTH: usize = 0x8000_0000_0000;
let region = VirtualRegion::new(VA, LENGTH);
@@ -300,36 +303,45 @@
#[test]
fn test_virtual_region_get_pa_for_va() {
- let region =
- VirtualRegion::new_with_pa(0x8000_0000_0000_0000, 0x4000_0000_0000_0000, 0x1000);
- assert_eq!(
- 0x8000_0000_0000_0000,
- region.get_pa_for_va(0x4000_0000_0000_0000)
+ let region = VirtualRegion::new_with_pa(
+ PhysicalAddress(0x8000_0000_0000_0000),
+ VirtualAddress(0x4000_0000_0000_0000),
+ 0x1000,
);
assert_eq!(
- 0x8000_0000_0000_0001,
- region.get_pa_for_va(0x4000_0000_0000_0001)
+ PhysicalAddress(0x8000_0000_0000_0000),
+ region.get_pa_for_va(VirtualAddress(0x4000_0000_0000_0000))
);
assert_eq!(
- 0x8000_0000_0000_0fff,
- region.get_pa_for_va(0x4000_0000_0000_0fff)
+ PhysicalAddress(0x8000_0000_0000_0001),
+ region.get_pa_for_va(VirtualAddress(0x4000_0000_0000_0001))
+ );
+ assert_eq!(
+ PhysicalAddress(0x8000_0000_0000_0fff),
+ region.get_pa_for_va(VirtualAddress(0x4000_0000_0000_0fff))
);
}
#[test]
#[should_panic]
fn test_virtual_region_get_pa_for_va_low_va() {
- let region =
- VirtualRegion::new_with_pa(0x8000_0000_0000_0000, 0x4000_0000_0000_0000, 0x1000);
- region.get_pa_for_va(0x3fff_ffff_ffff_ffff);
+ let region = VirtualRegion::new_with_pa(
+ PhysicalAddress(0x8000_0000_0000_0000),
+ VirtualAddress(0x4000_0000_0000_0000),
+ 0x1000,
+ );
+ region.get_pa_for_va(VirtualAddress(0x3fff_ffff_ffff_ffff));
}
#[test]
#[should_panic]
fn test_virtual_region_get_pa_for_va_high_va() {
- let region =
- VirtualRegion::new_with_pa(0x8000_0000_0000_0000, 0x4000_0000_0000_0000, 0x1000);
- region.get_pa_for_va(0x4000_0000_0000_1000);
+ let region = VirtualRegion::new_with_pa(
+ PhysicalAddress(0x8000_0000_0000_0000),
+ VirtualAddress(0x4000_0000_0000_0000),
+ 0x1000,
+ );
+ region.get_pa_for_va(VirtualAddress(0x4000_0000_0000_1000));
}
#[test]
@@ -337,120 +349,151 @@
const VA: usize = 0x8000_0000_0000_0000;
const LENGTH: usize = 0x8000_0000_0000;
- let region_overflow_end = VirtualRegion::new(0x8000_0000_0000_0000, 0x8000_0000_0000_0000);
- assert!(!region_overflow_end.contains(0x8000_0000_0000_0000, 1));
+ let region_overflow_end =
+ VirtualRegion::new(VirtualAddress(0x8000_0000_0000_0000), 0x8000_0000_0000_0000);
+ assert!(!region_overflow_end.contains(VirtualAddress(0x8000_0000_0000_0000), 1));
- let region = VirtualRegion::new(0x4000_0000_0000_0000, 0x8000_0000_0000_0000);
- assert!(!region.contains(0x8000_0000_0000_0000, 0x8000_0000_0000_0000));
+ let region =
+ VirtualRegion::new(VirtualAddress(0x4000_0000_0000_0000), 0x8000_0000_0000_0000);
+ assert!(!region.contains(VirtualAddress(0x8000_0000_0000_0000), 0x8000_0000_0000_0000));
- assert!(!region.contains(0x4000_0000_0000_0000, 0x8000_0000_0000_0001));
- assert!(!region.contains(0x3fff_ffff_ffff_ffff, 0x8000_0000_0000_0000));
- assert!(region.contains(0x4000_0000_0000_0000, 0x8000_0000_0000_0000));
- assert!(region.contains(0x4000_0000_0000_0000, 0x7fff_ffff_ffff_ffff));
- assert!(region.contains(0x4000_0000_0000_0001, 0x7fff_ffff_ffff_ffff));
+ assert!(!region.contains(VirtualAddress(0x4000_0000_0000_0000), 0x8000_0000_0000_0001));
+ assert!(!region.contains(VirtualAddress(0x3fff_ffff_ffff_ffff), 0x8000_0000_0000_0000));
+ assert!(region.contains(VirtualAddress(0x4000_0000_0000_0000), 0x8000_0000_0000_0000));
+ assert!(region.contains(VirtualAddress(0x4000_0000_0000_0000), 0x7fff_ffff_ffff_ffff));
+ assert!(region.contains(VirtualAddress(0x4000_0000_0000_0001), 0x7fff_ffff_ffff_ffff));
}
#[test]
fn test_virtual_region_try_append() {
// Both unused
- let mut region_unused0 = VirtualRegion::new(0x4000_0000, 0x1000);
- let mut region_unused1 = VirtualRegion::new(0x4000_1000, 0x1000);
+ let mut region_unused0 = VirtualRegion::new(VirtualAddress(0x4000_0000), 0x1000);
+ let mut region_unused1 = VirtualRegion::new(VirtualAddress(0x4000_1000), 0x1000);
assert!(!region_unused1.try_append(®ion_unused0));
- assert_eq!(0x4000_0000, region_unused0.va);
+ assert_eq!(VirtualAddress(0x4000_0000), region_unused0.va);
assert_eq!(0x1000, region_unused0.length);
- assert_eq!(0x4000_1000, region_unused1.va);
+ assert_eq!(VirtualAddress(0x4000_1000), region_unused1.va);
assert_eq!(0x1000, region_unused1.length);
assert!(region_unused0.try_append(®ion_unused1));
- assert_eq!(0x4000_0000, region_unused0.va);
+ assert_eq!(VirtualAddress(0x4000_0000), region_unused0.va);
assert_eq!(0x2000, region_unused0.length);
- assert_eq!(0x4000_1000, region_unused1.va);
+ assert_eq!(VirtualAddress(0x4000_1000), region_unused1.va);
assert_eq!(0x1000, region_unused1.length);
// Unused and PA region
- let mut region_unused = VirtualRegion::new(0x4000_0000, 0x1000);
- let region_physical = VirtualRegion::new_with_pa(0x8000_0000, 0x4000_1000, 0x1000);
+ let mut region_unused = VirtualRegion::new(VirtualAddress(0x4000_0000), 0x1000);
+ let region_physical = VirtualRegion::new_with_pa(
+ PhysicalAddress(0x8000_0000),
+ VirtualAddress(0x4000_1000),
+ 0x1000,
+ );
assert!(!region_unused.try_append(®ion_physical));
- assert_eq!(0x4000_0000, region_unused.va);
+ assert_eq!(VirtualAddress(0x4000_0000), region_unused.va);
assert_eq!(0x1000, region_unused.length);
- assert_eq!(0x4000_1000, region_physical.va);
+ assert_eq!(VirtualAddress(0x4000_1000), region_physical.va);
assert_eq!(0x1000, region_physical.length);
// Both PA regions but non-consecutive PA ranges
- let mut region_physical0 = VirtualRegion::new_with_pa(0x8000_0000, 0x4000_0000, 0x1000);
- let region_physical1 = VirtualRegion::new_with_pa(0x9000_0000, 0x4000_1000, 0x1000);
+ let mut region_physical0 = VirtualRegion::new_with_pa(
+ PhysicalAddress(0x8000_0000),
+ VirtualAddress(0x4000_0000),
+ 0x1000,
+ );
+ let region_physical1 = VirtualRegion::new_with_pa(
+ PhysicalAddress(0x9000_0000),
+ VirtualAddress(0x4000_1000),
+ 0x1000,
+ );
assert!(!region_physical0.try_append(®ion_physical1));
- assert_eq!(0x4000_0000, region_physical0.va);
+ assert_eq!(VirtualAddress(0x4000_0000), region_physical0.va);
assert_eq!(0x1000, region_physical0.length);
- assert_eq!(0x4000_1000, region_physical1.va);
+ assert_eq!(VirtualAddress(0x4000_1000), region_physical1.va);
assert_eq!(0x1000, region_physical1.length);
// Both PA regions with consecutive PA ranges
- let mut region_physical0 = VirtualRegion::new_with_pa(0x8000_0000, 0x4000_0000, 0x1000);
- let region_physical1 = VirtualRegion::new_with_pa(0x8000_1000, 0x4000_1000, 0x1000);
+ let mut region_physical0 = VirtualRegion::new_with_pa(
+ PhysicalAddress(0x8000_0000),
+ VirtualAddress(0x4000_0000),
+ 0x1000,
+ );
+ let region_physical1 = VirtualRegion::new_with_pa(
+ PhysicalAddress(0x8000_1000),
+ VirtualAddress(0x4000_1000),
+ 0x1000,
+ );
assert!(region_physical0.try_append(®ion_physical1));
- assert_eq!(0x4000_0000, region_physical0.va);
+ assert_eq!(VirtualAddress(0x4000_0000), region_physical0.va);
assert_eq!(0x2000, region_physical0.length);
- assert_eq!(0x4000_1000, region_physical1.va);
+ assert_eq!(VirtualAddress(0x4000_1000), region_physical1.va);
assert_eq!(0x1000, region_physical1.length);
// VA overflow
- let mut region_unused0 = VirtualRegion::new(0x8000_0000_0000_0000, 0x8000_0000_0000_0000);
- let mut region_unused1 = VirtualRegion::new(0x4000_1000, 0x1000);
+ let mut region_unused0 =
+ VirtualRegion::new(VirtualAddress(0x8000_0000_0000_0000), 0x8000_0000_0000_0000);
+ let mut region_unused1 = VirtualRegion::new(VirtualAddress(0x4000_1000), 0x1000);
assert!(!region_unused0.try_append(®ion_unused1));
- assert_eq!(0x8000_0000_0000_0000, region_unused0.va);
+ assert_eq!(VirtualAddress(0x8000_0000_0000_0000), region_unused0.va);
assert_eq!(0x8000_0000_0000_0000, region_unused0.length);
- assert_eq!(0x4000_1000, region_unused1.va);
+ assert_eq!(VirtualAddress(0x4000_1000), region_unused1.va);
assert_eq!(0x1000, region_unused1.length);
assert!(!region_unused1.try_append(®ion_unused0));
- assert_eq!(0x8000_0000_0000_0000, region_unused0.va);
+ assert_eq!(VirtualAddress(0x8000_0000_0000_0000), region_unused0.va);
assert_eq!(0x8000_0000_0000_0000, region_unused0.length);
- assert_eq!(0x4000_1000, region_unused1.va);
+ assert_eq!(VirtualAddress(0x4000_1000), region_unused1.va);
assert_eq!(0x1000, region_unused1.length);
// PA overflow
- let mut region_physical0 =
- VirtualRegion::new_with_pa(0x8000_0000_0000_0000, 0x4000_0000, 0x8000_0000_0000_0000);
- let region_physical1 =
- VirtualRegion::new_with_pa(0x9000_0000, 0x8000_0000_4000_0000, 0x1000);
+ let mut region_physical0 = VirtualRegion::new_with_pa(
+ PhysicalAddress(0x8000_0000_0000_0000),
+ VirtualAddress(0x4000_0000),
+ 0x8000_0000_0000_0000,
+ );
+ let region_physical1 = VirtualRegion::new_with_pa(
+ PhysicalAddress(0x9000_0000),
+ VirtualAddress(0x8000_0000_4000_0000),
+ 0x1000,
+ );
assert!(!region_physical0.try_append(®ion_physical1));
- assert_eq!(0x4000_0000, region_physical0.va);
+ assert_eq!(VirtualAddress(0x4000_0000), region_physical0.va);
assert_eq!(0x8000_0000_0000_0000, region_physical0.length);
- assert_eq!(0x8000_0000_4000_0000, region_physical1.va);
+ assert_eq!(VirtualAddress(0x8000_0000_4000_0000), region_physical1.va);
assert_eq!(0x1000, region_physical1.length);
}
#[test]
fn test_virtual_region_create_split_by_used() {
- let region_unused = VirtualRegion::new(0x4000_0000, 0x4000);
+ let region_unused = VirtualRegion::new(VirtualAddress(0x4000_0000), 0x4000);
// New region at the start
let (new_region, splitted_regions) = region_unused.create_split(
- 0x4000_0000,
+ VirtualAddress(0x4000_0000),
0x1000,
- Some(PhysicalRegion::PhysicalAddress(0x8000_0000)),
+ Some(PhysicalRegion::PhysicalAddress(PhysicalAddress(
+ 0x8000_0000,
+ ))),
);
- assert_eq!(0x4000_0000, new_region.va);
+ assert_eq!(VirtualAddress(0x4000_0000), new_region.va);
assert_eq!(0x1000, new_region.length);
- assert_eq!(0x8000_0000, new_region.get_pa());
+ assert_eq!(PhysicalAddress(0x8000_0000), new_region.get_pa());
assert!(matches!(
new_region.physical_region,
PhysicalRegion::PhysicalAddress(_)
));
- assert_eq!(0x4000_0000, splitted_regions[0].va);
+ assert_eq!(VirtualAddress(0x4000_0000), splitted_regions[0].va);
assert_eq!(0x1000, splitted_regions[0].length);
- assert_eq!(0x8000_0000, splitted_regions[0].get_pa());
+ assert_eq!(PhysicalAddress(0x8000_0000), splitted_regions[0].get_pa());
assert!(matches!(
splitted_regions[0].physical_region,
PhysicalRegion::PhysicalAddress(_)
));
- assert_eq!(0x4000_1000, splitted_regions[1].va);
+ assert_eq!(VirtualAddress(0x4000_1000), splitted_regions[1].va);
assert_eq!(0x3000, splitted_regions[1].length);
assert!(matches!(
splitted_regions[1].physical_region,
@@ -459,35 +502,37 @@
// New region in the middle
let (new_region, splitted_regions) = region_unused.create_split(
- 0x4000_1000,
+ VirtualAddress(0x4000_1000),
0x1000,
- Some(PhysicalRegion::PhysicalAddress(0x8000_0000)),
+ Some(PhysicalRegion::PhysicalAddress(PhysicalAddress(
+ 0x8000_0000,
+ ))),
);
- assert_eq!(0x4000_1000, new_region.va);
+ assert_eq!(VirtualAddress(0x4000_1000), new_region.va);
assert_eq!(0x1000, new_region.length);
- assert_eq!(0x8000_0000, new_region.get_pa());
+ assert_eq!(PhysicalAddress(0x8000_0000), new_region.get_pa());
assert!(matches!(
new_region.physical_region,
PhysicalRegion::PhysicalAddress(_)
));
- assert_eq!(0x4000_0000, splitted_regions[0].va);
+ assert_eq!(VirtualAddress(0x4000_0000), splitted_regions[0].va);
assert_eq!(0x1000, splitted_regions[0].length);
assert!(matches!(
splitted_regions[0].physical_region,
PhysicalRegion::Unused
));
- assert_eq!(0x4000_1000, splitted_regions[1].va);
+ assert_eq!(VirtualAddress(0x4000_1000), splitted_regions[1].va);
assert_eq!(0x1000, splitted_regions[1].length);
- assert_eq!(0x8000_0000, splitted_regions[1].get_pa());
+ assert_eq!(PhysicalAddress(0x8000_0000), splitted_regions[1].get_pa());
assert!(matches!(
splitted_regions[1].physical_region,
PhysicalRegion::PhysicalAddress(_)
));
- assert_eq!(0x4000_2000, splitted_regions[2].va);
+ assert_eq!(VirtualAddress(0x4000_2000), splitted_regions[2].va);
assert_eq!(0x2000, splitted_regions[2].length);
assert!(matches!(
splitted_regions[2].physical_region,
@@ -496,29 +541,31 @@
// New region at the end
let (new_region, splitted_regions) = region_unused.create_split(
- 0x4000_3000,
+ VirtualAddress(0x4000_3000),
0x1000,
- Some(PhysicalRegion::PhysicalAddress(0x8000_0000)),
+ Some(PhysicalRegion::PhysicalAddress(PhysicalAddress(
+ 0x8000_0000,
+ ))),
);
- assert_eq!(0x4000_3000, new_region.va);
+ assert_eq!(VirtualAddress(0x4000_3000), new_region.va);
assert_eq!(0x1000, new_region.length);
- assert_eq!(0x8000_0000, new_region.get_pa());
+ assert_eq!(PhysicalAddress(0x8000_0000), new_region.get_pa());
assert!(matches!(
new_region.physical_region,
PhysicalRegion::PhysicalAddress(_)
));
- assert_eq!(0x4000_0000, splitted_regions[0].va);
+ assert_eq!(VirtualAddress(0x4000_0000), splitted_regions[0].va);
assert_eq!(0x3000, splitted_regions[0].length);
assert!(matches!(
splitted_regions[0].physical_region,
PhysicalRegion::Unused
));
- assert_eq!(0x4000_3000, splitted_regions[1].va);
+ assert_eq!(VirtualAddress(0x4000_3000), splitted_regions[1].va);
assert_eq!(0x1000, splitted_regions[1].length);
- assert_eq!(0x8000_0000, splitted_regions[1].get_pa());
+ assert_eq!(PhysicalAddress(0x8000_0000), splitted_regions[1].get_pa());
assert!(matches!(
splitted_regions[1].physical_region,
PhysicalRegion::PhysicalAddress(_)
@@ -527,76 +574,83 @@
#[test]
fn test_virtual_region_create_split_by_unused() {
- let region_unused = VirtualRegion::new_with_pa(0x8000_0000, 0x4000_0000, 0x4000);
+ let region_unused = VirtualRegion::new_with_pa(
+ PhysicalAddress(0x8000_0000),
+ VirtualAddress(0x4000_0000),
+ 0x4000,
+ );
// New region at the start
- let (new_region, splitted_regions) = region_unused.create_split(0x4000_0000, 0x1000, None);
+ let (new_region, splitted_regions) =
+ region_unused.create_split(VirtualAddress(0x4000_0000), 0x1000, None);
- assert_eq!(0x4000_0000, new_region.va);
+ assert_eq!(VirtualAddress(0x4000_0000), new_region.va);
assert_eq!(0x1000, new_region.length);
assert!(matches!(new_region.physical_region, PhysicalRegion::Unused));
- assert_eq!(0x4000_0000, splitted_regions[0].va);
+ assert_eq!(VirtualAddress(0x4000_0000), splitted_regions[0].va);
assert_eq!(0x1000, splitted_regions[0].length);
assert!(matches!(
splitted_regions[0].physical_region,
PhysicalRegion::Unused
));
- assert_eq!(0x4000_1000, splitted_regions[1].va);
+ assert_eq!(VirtualAddress(0x4000_1000), splitted_regions[1].va);
assert_eq!(0x3000, splitted_regions[1].length);
- assert_eq!(0x8000_1000, splitted_regions[1].get_pa());
+ assert_eq!(PhysicalAddress(0x8000_1000), splitted_regions[1].get_pa());
assert!(matches!(
splitted_regions[1].physical_region,
PhysicalRegion::PhysicalAddress(_)
));
// New region in the middle
- let (new_region, splitted_regions) = region_unused.create_split(0x4000_1000, 0x1000, None);
+ let (new_region, splitted_regions) =
+ region_unused.create_split(VirtualAddress(0x4000_1000), 0x1000, None);
- assert_eq!(0x4000_1000, new_region.va);
+ assert_eq!(VirtualAddress(0x4000_1000), new_region.va);
assert_eq!(0x1000, new_region.length);
assert!(matches!(new_region.physical_region, PhysicalRegion::Unused));
- assert_eq!(0x4000_0000, splitted_regions[0].va);
+ assert_eq!(VirtualAddress(0x4000_0000), splitted_regions[0].va);
assert_eq!(0x1000, splitted_regions[0].length);
- assert_eq!(0x8000_0000, splitted_regions[0].get_pa());
+ assert_eq!(PhysicalAddress(0x8000_0000), splitted_regions[0].get_pa());
assert!(matches!(
splitted_regions[0].physical_region,
PhysicalRegion::PhysicalAddress(_)
));
- assert_eq!(0x4000_1000, splitted_regions[1].va);
+ assert_eq!(VirtualAddress(0x4000_1000), splitted_regions[1].va);
assert_eq!(0x1000, splitted_regions[1].length);
assert!(matches!(
splitted_regions[1].physical_region,
PhysicalRegion::Unused
));
- assert_eq!(0x4000_2000, splitted_regions[2].va);
+ assert_eq!(VirtualAddress(0x4000_2000), splitted_regions[2].va);
assert_eq!(0x2000, splitted_regions[2].length);
- assert_eq!(0x8000_2000, splitted_regions[2].get_pa());
+ assert_eq!(PhysicalAddress(0x8000_2000), splitted_regions[2].get_pa());
assert!(matches!(
splitted_regions[2].physical_region,
PhysicalRegion::PhysicalAddress(_)
));
// New region at the end
- let (new_region, splitted_regions) = region_unused.create_split(0x4000_3000, 0x1000, None);
+ let (new_region, splitted_regions) =
+ region_unused.create_split(VirtualAddress(0x4000_3000), 0x1000, None);
- assert_eq!(0x4000_3000, new_region.va);
+ assert_eq!(VirtualAddress(0x4000_3000), new_region.va);
assert_eq!(0x1000, new_region.length);
assert!(matches!(new_region.physical_region, PhysicalRegion::Unused));
- assert_eq!(0x4000_0000, splitted_regions[0].va);
+ assert_eq!(VirtualAddress(0x4000_0000), splitted_regions[0].va);
assert_eq!(0x3000, splitted_regions[0].length);
- assert_eq!(0x8000_0000, splitted_regions[0].get_pa());
+ assert_eq!(PhysicalAddress(0x8000_0000), splitted_regions[0].get_pa());
assert!(matches!(
splitted_regions[0].physical_region,
PhysicalRegion::PhysicalAddress(_)
));
- assert_eq!(0x4000_3000, splitted_regions[1].va);
+ assert_eq!(VirtualAddress(0x4000_3000), splitted_regions[1].va);
assert_eq!(0x1000, splitted_regions[1].length);
assert!(matches!(
@@ -608,26 +662,29 @@
#[test]
#[should_panic]
fn test_virtual_region_does_not_contain() {
- let region = VirtualRegion::new(0x4000_0000, 0x1000);
+ let region = VirtualRegion::new(VirtualAddress(0x4000_0000), 0x1000);
region.create_split(
- 0x8000_0000,
+ VirtualAddress(0x8000_0000),
0x1000,
- Some(PhysicalRegion::PhysicalAddress(0xc000_0000)),
+ Some(PhysicalRegion::PhysicalAddress(PhysicalAddress(
+ 0xc000_0000,
+ ))),
);
}
#[test]
#[should_panic]
fn test_virtual_region_create_split_same_used() {
- let region = VirtualRegion::new(0x4000_0000, 0x1000);
- region.create_split(0x4000_0000, 0x1000, Some(PhysicalRegion::Unused));
+ let region = VirtualRegion::new(VirtualAddress(0x4000_0000), 0x1000);
+ region.create_split(
+ VirtualAddress(0x4000_0000),
+ 0x1000,
+ Some(PhysicalRegion::Unused),
+ );
}
#[test]
fn test_virtual_region_drop() {
- const PA: usize = 0x0123_4567_89ab_cdef;
- const LENGTH: usize = 0x8000_0000_0000;
-
static PAGE_POOL_AREA: PagePoolArea<8192> = PagePoolArea::new();
let page_pool = PagePool::new(&PAGE_POOL_AREA);
let page = page_pool.allocate_pages(4096).unwrap();
@@ -635,7 +692,8 @@
let physical_region = PhysicalRegion::Allocated(page_pool, page);
// Testing physical region drop through virtualregion
- let virtual_region = VirtualRegion::new_from_fields(0x4000_0000, 1000, physical_region);
+ let virtual_region =
+ VirtualRegion::new_from_fields(VirtualAddress(0x4000_0000), 1000, physical_region);
drop(virtual_region);
}
}