Refactor Xlat error handling
Update the XlatError variants to carry structured data and derive the
error messages with thiserror. Handle allocation errors when allocating
pages for translation tables instead of panicking, and release the
partially filled next level tables on failure.
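
With thiserror deriving Display, callers can now match on and log the
structured variants directly. A minimal sketch (the `result` binding is
illustrative only; it stands for the return value of any of the mapping
functions below, which yield Result<_, XlatError>):

    // Sketch: react to a failed page allocation during mapping
    if let Err(e @ XlatError::PageAllocationError(..)) = result {
        // The Display text comes from the #[error(...)] attribute, e.g.
        // "Cannot allocate 4096 bytes: <RegionPoolError debug output>"
        panic!("mapping failed: {}", e);
    }
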
Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: I6b039d4c6c8481e66ccb838514053e9dc8d99e1c
diff --git a/src/block.rs b/src/block.rs
index e80617b..59613a6 100644
--- a/src/block.rs
+++ b/src/block.rs
@@ -3,9 +3,6 @@
use core::fmt;
-use alloc::format;
-use alloc::string::ToString;
-
use super::{
address::{PhysicalAddress, VirtualAddress},
TranslationGranule, XlatError,
@@ -51,16 +48,11 @@
let min_granule_mask = granule.block_size_at_level(3) - 1;
if length == 0 {
- return Err(XlatError::InvalidParameterError(
- "Length cannot be 0".to_string(),
- ));
+ return Err(XlatError::InvalidParameterError("Length cannot be 0"));
}
if (pa.0 | va.0 | length) & min_granule_mask != 0 {
- return Err(XlatError::InvalidParameterError(format!(
- "Addresses and length must be aligned {:#08x} {:#08x} {:#x} {:#x}",
- pa.0, va.0, length, min_granule_mask
- )));
+ return Err(XlatError::AlignmentError(pa, va, length, min_granule_mask));
}
Ok(Self {
diff --git a/src/lib.rs b/src/lib.rs
index 846351d..4c529dc 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -12,12 +12,11 @@
use core::panic;
use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
-use alloc::format;
-use alloc::string::String;
use block::{Block, BlockIterator};
use bitflags::bitflags;
use packed_struct::prelude::*;
+use thiserror::Error;
use self::descriptor::DescriptorType;
@@ -37,16 +36,20 @@
mod region_pool;
/// Translation table error type
-#[derive(Debug)]
+#[derive(Debug, Error)]
pub enum XlatError {
- InvalidParameterError(String),
- AllocationError(String),
- AlignmentError(String),
- Overflow,
- InvalidOperation(String),
- Overlap,
- NotFound,
- RegionPoolError(RegionPoolError),
+ #[error("Invalid parameter: {0}")]
+ InvalidParameterError(&'static str),
+ #[error("Cannot allocate {1}: {0:?}")]
+ PageAllocationError(RegionPoolError, usize),
+ #[error("Alignment error: {0:?} {1:?} length={2:#x} granule={3:#x}")]
+ AlignmentError(PhysicalAddress, VirtualAddress, usize, usize),
+ #[error("Entry not found for {0:?}")]
+ VaNotFound(VirtualAddress),
+ #[error("Cannot allocate virtual address {0:?}")]
+ VaAllocationError(RegionPoolError),
+ #[error("Cannot release virtual address {1:?}: {0:?}")]
+ VaReleaseError(RegionPoolError, VirtualAddress),
}
/// Memory attributes
@@ -207,13 +210,7 @@
let mut pages = self
.page_pool
.allocate_pages(data.len(), Some(self.granule as usize))
- .map_err(|e| {
- XlatError::AllocationError(format!(
- "Cannot allocate pages for {} bytes ({:?})",
- data.len(),
- e
- ))
- })?;
+ .map_err(|e| XlatError::PageAllocationError(e, data.len()))?;
pages.copy_data_to_page(data);
@@ -225,7 +222,7 @@
} else {
self.regions.allocate(pages_length, physical_region, None)
}
- .map_err(XlatError::RegionPoolError)?;
+ .map_err(XlatError::VaAllocationError)?;
self.map_region(region, access_rights.into())
}
@@ -246,11 +243,7 @@
let mut pages = self
.page_pool
.allocate_pages(length, Some(self.granule as usize))
- .map_err(|e| {
- XlatError::AllocationError(format!(
- "Cannot allocate pages for {length} bytes ({e:?})"
- ))
- })?;
+ .map_err(|e| XlatError::PageAllocationError(e, length))?;
pages.zero_init();
@@ -262,7 +255,7 @@
} else {
self.regions.allocate(pages_length, physical_region, None)
}
- .map_err(XlatError::RegionPoolError)?;
+ .map_err(XlatError::VaAllocationError)?;
self.map_region(region, access_rights.into())
}
@@ -288,7 +281,7 @@
} else {
self.regions.allocate(length, resource, None)
}
- .map_err(XlatError::RegionPoolError)?;
+ .map_err(XlatError::VaAllocationError)?;
self.map_region(region, access_rights.into())
}
@@ -310,7 +303,7 @@
self.regions
.release(region_to_release)
- .map_err(XlatError::RegionPoolError)
+ .map_err(|e| XlatError::VaReleaseError(e, va))
}
/// Query physical address by virtual address range. Only returns a value if the memory area
@@ -327,10 +320,10 @@
) -> Result<PhysicalAddress, XlatError> {
let containing_region = self
.find_containing_region(va, length)
- .ok_or(XlatError::NotFound)?;
+ .ok_or(XlatError::VaNotFound(va))?;
if !containing_region.used() {
- return Err(XlatError::NotFound);
+ return Err(XlatError::VaNotFound(va));
}
Ok(containing_region.get_pa_for_va(va))
@@ -349,10 +342,10 @@
) -> Result<(), XlatError> {
let containing_region = self
.find_containing_region(va, length)
- .ok_or(XlatError::NotFound)?;
+ .ok_or(XlatError::VaNotFound(va))?;
if !containing_region.used() {
- return Err(XlatError::NotFound);
+ return Err(XlatError::VaNotFound(va));
}
let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
@@ -555,7 +548,7 @@
self.granule,
)?;
for block in blocks {
- self.map_block(block, attributes.clone());
+ self.map_block(block, attributes.clone())?;
}
Ok(region.base())
@@ -592,7 +585,7 @@
/// # Arguments
/// * block: Memory block that can be represented by a single translation table entry
/// * attributes: Memory block's permissions, flags
- fn map_block(&mut self, block: Block, attributes: Attributes) {
+ fn map_block(&mut self, block: Block, attributes: Attributes) -> Result<(), XlatError> {
Self::set_block_descriptor_recursively(
attributes,
block.pa,
@@ -603,7 +596,7 @@
&self.page_pool,
&self.regime,
self.granule,
- );
+ )
}
/// Adds the block descriptor to the translation table along all the intermediate tables the
@@ -629,7 +622,7 @@
page_pool: &PagePool,
regime: &TranslationRegime,
granule: TranslationGranule<VA_BITS>,
- ) {
+ ) -> Result<(), XlatError> {
// Get descriptor of the current level
let descriptor = &mut table[va.get_level_index(granule, level)];
@@ -639,33 +632,49 @@
descriptor.set_block_or_invalid_descriptor_to_invalid(level);
Self::invalidate(regime, Some(va));
descriptor.set_block_descriptor(granule, level, pa, attributes);
- return;
+ return Ok(());
}
// Need to iterate forward
match descriptor.get_descriptor_type(level) {
DescriptorType::Invalid => {
+ // Allocate page for next level table
let mut page = page_pool
.allocate_pages(
granule.table_size::<Descriptor>(level + 1),
Some(granule.table_alignment::<Descriptor>(level + 1)),
)
- .unwrap();
- unsafe {
- let next_table = page.get_as_mut_slice();
- descriptor.set_table_descriptor(level, next_table, None);
- }
- Self::set_block_descriptor_recursively(
+ .map_err(|e| {
+ XlatError::PageAllocationError(
+ e,
+ granule.table_size::<Descriptor>(level + 1),
+ )
+ })?;
+
+ let next_table = unsafe { page.get_as_mut_slice() };
+
+ // Fill next level table
+ let result = Self::set_block_descriptor_recursively(
attributes,
pa,
va.mask_for_level(granule, level),
block_size,
level + 1,
- unsafe { descriptor.get_next_level_table_mut(granule, level) },
+ next_table,
page_pool,
regime,
granule,
- )
+ );
+
+ if result.is_ok() {
+ // Set table descriptor if the table is configured properly
+ unsafe { descriptor.set_table_descriptor(level, next_table, None) };
+ } else {
+ // Release next level table on error and keep invalid descriptor on current level
+ page_pool.release_pages(page).unwrap();
+ }
+
+ result
}
DescriptorType::Block => {
// Saving current descriptor details
@@ -675,22 +684,22 @@
// Replace block descriptor by table descriptor
- // Follow break-before-make sequence
- descriptor.set_block_or_invalid_descriptor_to_invalid(level);
- Self::invalidate(regime, Some(current_va));
-
+ // Allocate page for next level table
let mut page = page_pool
.allocate_pages(
granule.table_size::<Descriptor>(level + 1),
Some(granule.table_alignment::<Descriptor>(level + 1)),
)
- .unwrap();
- unsafe {
- let next_table = page.get_as_mut_slice();
- descriptor.set_table_descriptor(level, next_table, None);
- }
+ .map_err(|e| {
+ XlatError::PageAllocationError(
+ e,
+ granule.table_size::<Descriptor>(level + 1),
+ )
+ })?;
- // Explode block descriptor to table entries
+ let next_table = unsafe { page.get_as_mut_slice() };
+
+ // Explode existing block descriptor into table entries
for exploded_va in VirtualAddressRange::new(
current_va,
current_va
@@ -700,23 +709,48 @@
.step_by(granule.block_size_at_level(level + 1))
{
let offset = exploded_va.diff(current_va).unwrap();
+
+ // This call sets a single block descriptor, so it should not fail
Self::set_block_descriptor_recursively(
current_attributes.clone(),
current_pa.add_offset(offset).unwrap(),
exploded_va.mask_for_level(granule, level),
granule.block_size_at_level(level + 1),
level + 1,
- unsafe { descriptor.get_next_level_table_mut(granule, level) },
+ next_table,
page_pool,
regime,
granule,
)
+ .unwrap();
}
// Invoke self to continue recursion on the newly created level
- Self::set_block_descriptor_recursively(
- attributes, pa, va, block_size, level, table, page_pool, regime, granule,
+ let result = Self::set_block_descriptor_recursively(
+ attributes,
+ pa,
+ va.mask_for_level(granule, level),
+ block_size,
+ level + 1,
+ next_table,
+ page_pool,
+ regime,
+ granule,
);
+
+ if result.is_ok() {
+ // Follow break-before-make sequence
+ descriptor.set_block_or_invalid_descriptor_to_invalid(level);
+ Self::invalidate(regime, Some(current_va));
+
+ // Set table descriptor if the table is configured properly
+ unsafe { descriptor.set_table_descriptor(level, next_table, None) };
+ } else {
+ // Release next level table on error and keep invalid descriptor on current level
+ page_pool.release_pages(page).unwrap();
+ }
+
+ result
}
DescriptorType::Table => Self::set_block_descriptor_recursively(
attributes,
@@ -744,7 +778,7 @@
&self.page_pool,
&self.regime,
self.granule,
- );
+ )
}
/// Removes block descriptor from the translation table along all the intermediate tables which