Fix various warnings

* Add missing and fix incorrect # Safety comments (see the sketch below)
* Remove unused functions and variables
* Refactor conditional AArch64 compilation
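The # Safety sections follow the standard rustdoc convention for unsafe
functions: the section states the contract, and callers discharge it with an
unsafe block. Caller-side sketch (the address value is a made-up example, not
code from this patch):

    // SAFETY: 0x4000_0000 is assumed to be a valid physical address on
    // the target platform, satisfying PhysicalAddress::new's contract.
    let pa = unsafe { PhysicalAddress::new(0x4000_0000) };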
Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: Ie638d1eeb2928a0c6eb0d4eb64c9d19b82f91e4d
diff --git a/src/address.rs b/src/address.rs
index 320a0d2..5068f29 100644
--- a/src/address.rs
+++ b/src/address.rs
@@ -9,6 +9,10 @@
pub struct PhysicalAddress(pub(super) usize);
impl PhysicalAddress {
+ /// Creates a new PhysicalAddress from a raw address value.
+ ///
+ /// # Safety
+ /// The address must be a valid physical address.
pub const unsafe fn new(address: usize) -> Self {
Self(address)
}
@@ -54,6 +58,10 @@
pub struct VirtualAddress(pub(super) usize);
impl VirtualAddress {
+ /// Creates a new VirtualAddress from a raw address value.
+ ///
+ /// # Safety
+ /// The address must be a valid virtual address.
pub const unsafe fn new(address: usize) -> Self {
Self(address)
}
@@ -129,6 +137,10 @@
Self { start, end }
}
+ /// Creates a new VirtualAddressRange from raw address values.
+ ///
+ /// # Safety
+ /// The addresses must be valid virtual addresses.
pub unsafe fn from_range(value: Range<usize>) -> Self {
Self::new(
VirtualAddress::new(value.start),
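For reference, a caller-side sketch of the VirtualAddressRange contract
documented above (the range bounds are made-up example values):

    // SAFETY: both bounds are assumed to be valid virtual addresses in
    // the active translation regime; from_range forwards them to
    // VirtualAddress::new.
    let va_range = unsafe { VirtualAddressRange::from_range(0x4000_0000..0x4010_0000) };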
diff --git a/src/block.rs b/src/block.rs
index 59613a6..acf8822 100644
--- a/src/block.rs
+++ b/src/block.rs
@@ -103,10 +103,6 @@
};
}
- fn make_block(pa: usize, va: usize, size: usize) -> Block {
- Block::new(PhysicalAddress(pa), VirtualAddress(va), size)
- }
-
#[test]
fn test_block_iterator() {
let mut blocks = BlockIterator::new(
diff --git a/src/descriptor.rs b/src/descriptor.rs
index 0731856..627799c 100644
--- a/src/descriptor.rs
+++ b/src/descriptor.rs
@@ -24,6 +24,7 @@
}
/// Data access permission
+#[allow(non_camel_case_types)]
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum DataAccessPermissions {
#[default]
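For context, the crate-wide #![allow(non_camel_case_types)] removed from
lib.rs below is replaced by per-item attributes like this one, so newly added
types are still linted. Minimal sketch of the pattern (the enum and variant
names are hypothetical examples, not code from this patch):

    // Only this enum is exempt from the naming lint, e.g. for variants
    // that mirror architectural spellings such as EL-suffixed names.
    #[allow(non_camel_case_types)]
    enum HypotheticalPermission {
        ReadWrite_EL0,
    }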
diff --git a/src/kernel_space.rs b/src/kernel_space.rs
index 34433ff..f9e1d68 100644
--- a/src/kernel_space.rs
+++ b/src/kernel_space.rs
@@ -121,6 +121,7 @@
/// This changes the mapping of the running execution context. The caller
/// must ensure that existing references will be mapped to the same address
/// after activation.
+ #[cfg(target_arch = "aarch64")]
pub unsafe fn activate(&self) {
self.xlat.lock().activate();
}
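The conditional-compilation refactor moves #[cfg] from statements inside
function bodies onto the function items themselves. A sketch of the pattern,
assuming a stand-in type (the Xlat::activate stub is added later in this
patch; whether KernelSpace needs its own stub depends on the test setup):

    struct Demo; // stand-in for KernelSpace/Xlat

    impl Demo {
        #[cfg(target_arch = "aarch64")]
        pub unsafe fn activate(&self) {
            // real implementation would touch system registers here
        }

        #[cfg(not(target_arch = "aarch64"))]
        pub unsafe fn activate(&self) {} // no-op stub for host/test builds
    }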
diff --git a/src/lib.rs b/src/lib.rs
index 4c529dc..202776d 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -2,7 +2,6 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
#![allow(dead_code)]
-#![allow(non_camel_case_types)]
#![cfg_attr(not(test), no_std)]
extern crate alloc;
@@ -55,6 +54,7 @@
/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
+#[allow(non_camel_case_types)]
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
#[default]
@@ -361,6 +361,7 @@
/// caller must ensure that the new mapping will not break any existing
/// references. After activation the caller must ensure that there are no
/// active references when unmapping memory.
+ #[cfg(target_arch = "aarch64")]
pub unsafe fn activate(&self) {
// Select translation granule
let is_tg0 = match &self.regime {
@@ -374,7 +375,6 @@
TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => false,
};
- #[cfg(target_arch = "aarch64")]
if is_tg0 {
self.modify_tcr(|tcr| {
let tg0 = match self.granule {
@@ -400,7 +400,6 @@
// Set translation table
let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.get_pa().0 as u64);
- #[cfg(target_arch = "aarch64")]
match &self.regime {
TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
"msr ttbr0_el1, {0}
@@ -431,6 +430,11 @@
}
}
+ /// # Safety
+ /// Dummy function for non-AArch64 test builds; it does nothing, so the caller has nothing to uphold.
+ #[cfg(not(target_arch = "aarch64"))]
+ pub unsafe fn activate(&self) {}
+
/// Modifies the TCR register of the selected regime of the instance.
#[cfg(target_arch = "aarch64")]
unsafe fn modify_tcr<F>(&self, f: F)
@@ -887,10 +891,10 @@
}
}
+ #[cfg(target_arch = "aarch64")]
fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
// SAFETY: The assembly code invalidates the translation table entry of
// the VA or all entries of the translation regime.
- #[cfg(target_arch = "aarch64")]
unsafe {
if let Some(VirtualAddress(va)) = va {
match regime {
@@ -949,6 +953,9 @@
}
}
}
+
+ #[cfg(not(target_arch = "aarch64"))]
+ fn invalidate(_regime: &TranslationRegime, _va: Option<VirtualAddress>) {}
}
impl<const VA_BITS: usize> fmt::Debug for Xlat<VA_BITS> {
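Rationale for the invalidate change, as a simplified before-sketch: with the
old statement-level #[cfg], non-AArch64 builds still compiled the function but
dropped the only uses of its parameters, producing unused-variable warnings.

    // Before (simplified): `regime` and `va` become unused when the
    // cfg'd block is compiled out on non-AArch64 targets.
    fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
        #[cfg(target_arch = "aarch64")]
        unsafe { /* TLBI instructions using regime and va */ }
    }

The replacement gates the whole function on the target arch and adds a no-op
stub whose underscore-prefixed parameters keep the signature without tripping
the lint.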
diff --git a/src/page_pool.rs b/src/page_pool.rs
index e0b6dc4..c2707c7 100644
--- a/src/page_pool.rs
+++ b/src/page_pool.rs
@@ -69,7 +69,8 @@
/// Get as slice
///
- /// **Unsafe**: The returned slice is created from its address and length which is stored in the
- /// object. The caller has to ensure that no other references are being used of the pages.
+ /// # Safety
+ /// The returned slice is created from the address and length stored in the
+ /// object. The caller must ensure that no other references to the pages are in use.
pub unsafe fn get_as_slice<T>(&self) -> &[T] {
assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);
@@ -82,7 +83,8 @@
/// Get as mutable slice
///
- /// **Unsafe**: The returned slice is created from its address and length which is stored in the
- /// object. The caller has to ensure that no other references are being used of the pages.
+ /// # Safety
+ /// The returned slice is created from the address and length stored in the
+ /// object. The caller must ensure that no other references to the pages are in use.
pub unsafe fn get_as_mut_slice<T>(&mut self) -> &mut [T] {
assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);
@@ -95,7 +97,8 @@
/// Set contents from slice
///
- /// **Unsafe:** The caller has to ensure that the passed slice is a valid page range.
+ /// # Safety
+ /// The caller must ensure that the passed slice is a valid page range.
pub unsafe fn from_slice<T>(s: &mut [T]) -> Pages {
Pages {
pa: KernelSpace::kernel_to_pa(s.as_ptr() as u64) as usize,
@@ -231,10 +234,6 @@
pub fn release_pages(&self, pages_to_release: Pages) -> Result<(), PagePoolError> {
self.pages.lock().release(pages_to_release)
}
-
- fn round_up_to_page_size(length: usize) -> usize {
- (length + Page::SIZE - 1) & !(Page::SIZE - 1)
- }
}
#[cfg(test)]
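Caller-side sketch of the reworded page_pool contracts, assuming some
`pages: Pages` already obtained from the pool (the variable names are made-up
examples):

    // SAFETY: `pages` is the only live handle to this memory, so no
    // aliasing references to the pages exist while the slice is alive.
    let words = unsafe { pages.get_as_slice::<u64>() };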