Add README and function documentation

Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: Idda9f54712a309c30a8684a073769973eb6fef03
diff --git a/README.md b/README.md
index 07cc40b..e2b71b1 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,26 @@
-# AArch64 Virtual Memory Translation Table handler library
+# AArch64 Virtual Memory Translation Table Handler Library
+
+## Features
+
+* Allocate and map a data-initialized range
+* Allocate and map a zero-initialized range
+* Map a physical address range
+* Unmap a virtual address range
+* Query the physical address of a virtual address
+* Set memory access rights
+
+### Translation regimes
+
+* EL1&0 stage 1 Upper/Lower VA ranges
+* EL2&0 stage 1 Upper/Lower VA ranges (VHE)
+* EL2 stage 1
+* EL3 stage 1
+
+### Translation granules
+
+* 4KB
+* 16KB
+* 64KB
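+
+## Example
+
+The sketch below shows how a translation table handler could be created for the
+EL1&0 stage 1 lower VA range. It is illustrative only: `ExampleKernelTranslator`
+(an implementation of `KernelAddressTranslator`), `page_pool`, `va_range`,
+`granule` and the 48-bit VA size stand in for values that the integrating
+environment has to provide.
+
+```rust,ignore
+// Create the handler for the EL1&0 stage 1 translation regime, lower VA range.
+// The second parameter of `EL1_0` is passed as 0 here purely for illustration.
+let xlat = Xlat::<ExampleKernelTranslator, 48>::new(
+    page_pool, // PagePool used for allocating translation tables
+    va_range,  // VirtualAddressRange to be managed by this instance
+    TranslationRegime::EL1_0(RegimeVaRange::Lower, 0),
+    granule,   // TranslationGranule
+);
+```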
 
 --------------
 
diff --git a/src/address.rs b/src/address.rs
index c5cb6ff..2ccea8c 100644
--- a/src/address.rs
+++ b/src/address.rs
@@ -1,12 +1,15 @@
 // SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
 // SPDX-License-Identifier: MIT OR Apache-2.0
 
+//! Objects for representing physical and virtual addresses
+
 use core::{fmt, ops::Range};
 
 use crate::TranslationRegime;
 
 use super::TranslationGranule;
 
+/// Physical address object
 #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
 pub struct PhysicalAddress(pub(super) usize);
 
@@ -19,6 +22,7 @@
         Self(address)
     }
 
+    /// Add an offset to the physical address, returning `None` on overflow
     pub const fn add_offset(self, offset: usize) -> Option<Self> {
         if let Some(address) = self.0.checked_add(offset) {
             Some(Self(address))
@@ -27,10 +31,12 @@
         }
     }
 
+    /// Identity map physical address to virtual address
     pub const fn identity_va(self) -> VirtualAddress {
         VirtualAddress(self.0)
     }
 
+    /// Calculate the difference of two physical addresses, returning `None` if `rhs` is greater than `self`
     pub const fn diff(self, rhs: Self) -> Option<usize> {
         self.0.checked_sub(rhs.0)
     }
@@ -56,6 +62,7 @@
     }
 }
 
+/// Virtual address object
 #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
 pub struct VirtualAddress(pub(super) usize);
 
@@ -68,6 +75,7 @@
         Self(address)
     }
 
+    /// Add an offset to the virtual address, returning `None` on overflow
     pub const fn add_offset(self, offset: usize) -> Option<Self> {
         if let Some(address) = self.0.checked_add(offset) {
             Some(Self(address))
@@ -76,10 +84,12 @@
         }
     }
 
+    /// Identity map virtual address to physical address
     pub const fn identity_pa(self) -> PhysicalAddress {
         PhysicalAddress(self.0)
     }
 
+    /// Mask the virtual address to the offset within a block at the given granule and level
+    /// (e.g. with a 4KB granule at level 3, only the lowest 12 bits are kept)
     pub const fn mask_for_level<const VA_BITS: usize>(
         self,
         translation_granule: TranslationGranule<VA_BITS>,
@@ -88,6 +98,8 @@
         Self(self.0 & (translation_granule.block_size_at_level(level) - 1))
     }
 
+    /// Calculate the index of the virtual address in a translation table at the
+    /// given granule and level.
     pub const fn get_level_index<const VA_BITS: usize>(
         self,
         translation_granule: TranslationGranule<VA_BITS>,
@@ -96,6 +108,8 @@
         self.0 >> translation_granule.total_bits_at_level(level)
     }
 
+    /// Check if the address is valid in the translation regime, i.e. if the top bits match the
+    /// VA range.
     pub fn is_valid_in_regime<const VA_BITS: usize>(&self, regime: TranslationRegime) -> bool {
         let mask = Self::get_upper_bit_mask::<VA_BITS>();
         let required_upper_bits = if regime.is_upper_va_range() { mask } else { 0 };
@@ -103,6 +117,8 @@
         (self.0 & mask) == required_upper_bits
     }
 
+    /// Set the upper bits of the virtual address according to the translation regime:
+    /// fill them with '1' bits for the upper VA range and with '0' bits for the lower VA range.
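+    ///
+    /// For example, with `VA_BITS = 48` and an upper VA range regime, `0x0000_0000_0000_1000`
+    /// would become `0xFFFF_0000_0000_1000` (illustrative, assuming the upper-bit mask covers
+    /// bits `[63:VA_BITS]`).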
     pub fn set_upper_bits<const VA_BITS: usize>(self, regime: TranslationRegime) -> Self {
         let mask = Self::get_upper_bit_mask::<VA_BITS>();
 
@@ -113,18 +129,22 @@
         })
     }
 
+    /// Remove the upper bits, i.e. fill the top bits with zeroes.
     pub fn remove_upper_bits<const VA_BITS: usize>(self) -> Self {
         Self(self.0 & !Self::get_upper_bit_mask::<VA_BITS>())
     }
 
+    /// Mask bits in the address
     pub const fn mask_bits(self, mask: usize) -> Self {
         Self(self.0 & mask)
     }
 
+    /// Calculate the difference of two virtual addresses, returning `None` if `rhs` is greater than `self`
     pub const fn diff(self, rhs: Self) -> Option<usize> {
         self.0.checked_sub(rhs.0)
     }
 
+    /// Align the address up to the next multiple of `alignment`
     pub const fn align_up(self, alignment: usize) -> Self {
         Self(self.0.next_multiple_of(alignment))
     }
@@ -154,6 +174,7 @@
     }
 }
 
+/// Represents a virtual address range
 #[derive(Debug)]
 pub struct VirtualAddressRange {
     pub(super) start: VirtualAddress,
@@ -176,10 +197,12 @@
         )
     }
 
+    /// The length of the range in bytes, or `None` if the end is before the start
     pub fn len(&self) -> Option<usize> {
         self.end.diff(self.start)
     }
 
+    /// Create an iterator which yields the virtual addresses in the range, advancing by the
+    /// given step in bytes.
     pub fn step_by(self, step: usize) -> VirtualAddressIterator {
         VirtualAddressIterator {
             next: self.start,
@@ -189,6 +212,7 @@
     }
 }
 
+/// Iterator for walking the virtual address range using the given step.
 pub struct VirtualAddressIterator {
     next: VirtualAddress,
     end: VirtualAddress,
diff --git a/src/lib.rs b/src/lib.rs
index a294d14..c939335 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -3,6 +3,7 @@
 
 #![allow(dead_code)]
 #![cfg_attr(not(test), no_std)]
+#![doc = include_str!("../README.md")]
 
 extern crate alloc;
 
@@ -62,19 +63,30 @@
 }
 
 bitflags! {
+    /// Memory access rights
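+    ///
+    /// The rights can be combined with the usual bitwise operators. A minimal sketch
+    /// (illustrative only):
+    ///
+    /// ```ignore
+    /// let rights = MemoryAccessRights::RW | MemoryAccessRights::USER;
+    /// assert!(rights.contains(MemoryAccessRights::R));
+    /// ```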
     #[derive(Debug, Clone, Copy)]
     pub struct MemoryAccessRights : u32 {
+        /// Read
         const R  = 0b00000001;
+        /// Write
         const W  = 0b00000010;
+        /// Execute
         const X  = 0b00000100;
+        /// Non-secure
         const NS = 0b00001000;
 
+        /// Read-write
         const RW = Self::R.bits() | Self::W.bits();
+        /// Read-execute
         const RX = Self::R.bits() | Self::X.bits();
+        /// Read-write-execute
         const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();
 
+        /// User accessible
         const USER = 0b00010000;
+        /// Device region
         const DEVICE = 0b00100000;
+        /// Global (not tied to ASID)
         const GLOBAL = 0b01000000;
     }
 }
@@ -113,22 +125,31 @@
     }
 }
 
+/// Virtual address range of a translation regime; selects the `x` in `TTBRx_EL*`
 #[derive(Debug, Clone, Copy)]
 pub enum RegimeVaRange {
+    /// Lower virtual address range, selects `TTBR0_EL*`
     Lower,
+    /// Upper virtual address range, selects `TTBR1_EL*`
     Upper,
 }
 
+/// Translation regime
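+///
+/// For example, `TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)` describes the EL1&0
+/// stage 1 regime translated via `TTBR1_EL1` (the second parameter is a placeholder value
+/// in this illustration).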
 #[derive(Debug, Clone, Copy)]
 pub enum TranslationRegime {
-    EL1_0(RegimeVaRange, u8), // EL1 and EL0 stage 1, TTBRx_EL1
+    /// EL1 and EL0 stage 1, TTBRx_EL1
+    EL1_0(RegimeVaRange, u8),
     #[cfg(target_feature = "vh")]
-    EL2_0(RegimeVaRange, u8), // EL2 and EL0 with VHE
-    EL2,                      // EL2
-    EL3,                      // EL3, TTBR0_EL3
+    /// EL2 and EL0 stage 1 with VHE, TTBRx_EL2
+    EL2_0(RegimeVaRange, u8),
+    /// EL2 stage 1, TTBR0_EL2
+    EL2,
+    /// EL3 stage 1, TTBR0_EL3
+    EL3,
 }
 
 impl TranslationRegime {
+    /// Checks if the translation regime uses the upper virtual address range.
     fn is_upper_va_range(&self) -> bool {
         match self {
             TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => true,
@@ -139,12 +160,15 @@
     }
 }
 
+/// Translation granule
 pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;
 
 /// Trait for converting between virtual address space of the running kernel environment and
 /// the physical address space.
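+///
+/// A minimal sketch of an identity-mapped environment (illustrative only; `IdentityKernel`
+/// is not part of this crate):
+///
+/// ```ignore
+/// struct IdentityKernel;
+///
+/// impl KernelAddressTranslator for IdentityKernel {
+///     fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress {
+///         va.identity_pa()
+///     }
+///
+///     fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress {
+///         pa.identity_va()
+///     }
+/// }
+/// ```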
 pub trait KernelAddressTranslator {
+    /// Convert virtual address of the running kernel environment into a physical address.
     fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress;
+    /// Convert physical address into a virtual address of the running kernel environment.
     fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress;
 }
 
@@ -158,6 +182,7 @@
 }
 
 /// Memory translation table handling
+///
 /// # High level interface
 /// * allocate and map zero initialized region (with or without VA)
 /// * allocate and map memory region and load contents (with or without VA)
@@ -176,13 +201,20 @@
 /// * find a mapped region which contains
 /// * find empty area for region
 /// * set access rights for a region
-/// * create blocks by region
 ///
 /// # Block level interface
 /// * map block
 /// * unmap block
 /// * set access rights of block
 impl<K: KernelAddressTranslator, const VA_BITS: usize> Xlat<K, VA_BITS> {
+    /// Create a new Xlat instance
+    /// # Arguments
+    /// * page_pool: Page pool to allocate translation tables
+    /// * address: Virtual address range
+    /// * regime: Translation regime
+    /// * granule: Translation granule
+    /// # Return value
+    /// * Xlat instance
     pub fn new(
         page_pool: PagePool,
         address: VirtualAddressRange,
@@ -519,6 +551,7 @@
     /// * level: Level of the translation table
     /// * va: Base virtual address of the table
     /// * table: Table entries
+    /// * granule: Translation granule
     fn dump_table(
         f: &mut fmt::Formatter<'_>,
         level: isize,
@@ -566,6 +599,7 @@
     /// uses the block level functions to do the mapping.
     /// # Arguments
     /// * region: Memory region object
+    /// * attributes: Memory attributes
     /// # Return value
     /// * Virtual address of the mapped memory
     fn map_region(
@@ -606,7 +640,8 @@
 
     /// Find mapped region that contains the whole region
     /// # Arguments
-    /// * region: Virtual address to look for
+    /// * va: Virtual address to look for
+    /// * length: Length of the region
     /// # Return value
     /// * Reference to virtual region if found
     fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {