Add support for EL2 and EL3 translation regimes
* Assign translation regime to Xlat instance on creation
* Mark activate function as unsafe
* Remove half-baked TLB invalidation for TTBR1_EL1. The correct TLB
invalidation method for all translation regimes is implemented in the
next commit.
Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: Idacc85abf3df6bf7f0c6ad263d3890e1ad5dfab4
diff --git a/src/kernel_space.rs b/src/kernel_space.rs
index 1069c1c..d0be315 100644
--- a/src/kernel_space.rs
+++ b/src/kernel_space.rs
@@ -11,11 +11,9 @@
use super::{
address::{PhysicalAddress, VirtualAddress, VirtualAddressRange},
page_pool::{Page, PagePool},
- MemoryAccessRights, Xlat, XlatError,
+ MemoryAccessRights, RegimeVaRange, TranslationRegime, Xlat, XlatError,
};
-static mut KERNEL_SPACE_INSTANCE: Option<KernelSpace> = None;
-
#[derive(Clone)]
pub struct KernelSpace {
xlat: Arc<Mutex<Xlat>>,
@@ -37,9 +35,11 @@
/// * page_pool: Page pool for allocation kernel translation tables
pub fn new(page_pool: PagePool) -> Self {
Self {
- xlat: Arc::new(Mutex::new(Xlat::new(page_pool, unsafe {
- VirtualAddressRange::from_range(0x0000_0000..0x10_0000_0000)
- }))),
+ xlat: Arc::new(Mutex::new(Xlat::new(
+ page_pool,
+ unsafe { VirtualAddressRange::from_range(0x0000_0000..0x10_0000_0000) },
+ TranslationRegime::EL1_0(RegimeVaRange::Upper, 0),
+ ))),
}
}
@@ -115,8 +115,13 @@
}
/// Activate kernel address space mapping
- pub fn activate(&self) {
- self.xlat.lock().activate(0, super::TTBR::TTBR1_EL1);
+ ///
+ /// # Safety
+ /// This changes the mapping of the running execution context. The caller
+ /// must ensure that existing references will be mapped to the same address
+ /// after activation.
+ pub unsafe fn activate(&self) {
+ self.xlat.lock().activate();
}
/// Rounds a value down to a kernel space page boundary
diff --git a/src/lib.rs b/src/lib.rs
index 884f8e0..dd7de75 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -7,11 +7,10 @@
extern crate alloc;
-use core::arch::asm;
use core::iter::zip;
use core::{fmt, panic};
-use address::{PhysicalAddress, VirtualAddressRange, VirtualAddress};
+use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
use alloc::boxed::Box;
use alloc::format;
use alloc::string::{String, ToString};
@@ -149,17 +148,24 @@
}
}
-/// Enum for selecting TTBR0_EL1 or TTBR1_EL1
-#[allow(clippy::upper_case_acronyms)]
-pub enum TTBR {
- TTBR0_EL1,
- TTBR1_EL1,
+pub enum RegimeVaRange {
+ Lower,
+ Upper,
+}
+
+pub enum TranslationRegime {
+ EL1_0(RegimeVaRange, u8), // EL1 and EL0 stage 1, TTBRx_EL1
+ #[cfg(target_feature = "vh")]
+ EL2_0(RegimeVaRange, u8), // EL2 and EL0 stage 1 with VHE, TTBRx_EL2
+ EL2, // EL2 stage 1, TTBR0_EL2
+ EL3, // EL3, TTBR0_EL3
}
pub struct Xlat {
base_table: Box<BaseTable>,
page_pool: PagePool,
regions: RegionPool<VirtualRegion>,
+ regime: TranslationRegime,
}
/// Memory translation table handling
@@ -190,15 +196,20 @@
impl Xlat {
pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];
- pub fn new(page_pool: PagePool, va_range: VirtualAddressRange) -> Self {
+ pub fn new(
+ page_pool: PagePool,
+ address: VirtualAddressRange,
+ regime: TranslationRegime,
+ ) -> Self {
let mut regions = RegionPool::new();
regions
- .add(VirtualRegion::new(va_range.start, va_range.len().unwrap()))
+ .add(VirtualRegion::new(address.start, address.len().unwrap()))
.unwrap();
Self {
base_table: Box::new(BaseTable::new()),
page_pool,
regions,
+ regime,
}
}
@@ -366,30 +377,43 @@
}
/// Activate memory mapping represented by the object
- /// # Arguments
- /// * asid: ASID of the table base address
- /// * ttbr: Selects TTBR0_EL1/TTBR1_EL1
- pub fn activate(&self, asid: u8, ttbr: TTBR) {
+ ///
+ /// # Safety
+ /// When activating the memory mapping of the running exception level, the
+ /// caller must ensure that the new mapping will not break any existing
+ /// references. After activation, the caller must ensure that there are no
+ /// active references left when memory is unmapped.
+ pub unsafe fn activate(&self) {
let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.descriptors.as_ptr() as u64);
- let ttbr_value = ((asid as u64) << 48) | base_table_pa;
- #[cfg(target_arch = "aarch64")]
- match ttbr {
- TTBR::TTBR0_EL1 => unsafe {
- asm!(
- "msr ttbr0_el1, {0}
- isb",
- in(reg) ttbr_value)
- },
- TTBR::TTBR1_EL1 => unsafe {
- asm!(
- "msr ttbr1_el1, {0}
- isb
- tlbi vmalle1
- dsb sy
+ #[cfg(target_arch = "aarch64")]
+ match &self.regime {
+ TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
+ "msr ttbr0_el1, {0}
isb",
- in(reg) ttbr_value)
- },
+ in(reg) ((*asid as u64) << 48) | base_table_pa),
+ TranslationRegime::EL1_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
+ "msr ttbr1_el1, {0}
+ isb",
+ in(reg) ((*asid as u64) << 48) | base_table_pa),
+ #[cfg(target_feature = "vh")]
+ TranslationRegime::EL2_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
+ "msr ttbr0_el2, {0}
+ isb",
+ in(reg) ((*asid as u64) << 48) | base_table_pa),
+ #[cfg(target_feature = "vh")]
+ TranslationRegime::EL2_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
+ "msr ttbr1_el2, {0}
+ isb",
+ in(reg) ((*asid as u64) << 48) | base_table_pa),
+ TranslationRegime::EL2 => core::arch::asm!(
+ "msr ttbr0_el2, {0}
+ isb",
+ in(reg) base_table_pa),
+ TranslationRegime::EL3 => core::arch::asm!(
+ "msr ttbr0_el3, {0}
+ isb",
+ in(reg) base_table_pa),
}
}