libkernel: move paging-related arch-agnostic traits out of arch mod

Any traits which are architecture agnostic should be available to be
implemented by other architectures. Move them out from the arm64 module
into a non-architecture-specific location.
This commit is contained in:
Matthew Leach
2026-04-12 20:29:03 +01:00
parent 465fcb7acb
commit ccffa50a0e
18 changed files with 249 additions and 227 deletions

View File

@@ -7,52 +7,9 @@ use tock_registers::{register_bitfields, registers::InMemoryRegister};
use crate::memory::PAGE_SHIFT;
use crate::memory::address::{PA, VA};
use crate::memory::paging::permissions::PtePermissions;
use crate::memory::paging::{PaMapper, PageTableEntry, TableMapper};
use crate::memory::region::PhysMemoryRegion;
/// Trait for common behavior across different types of page table entries.
pub trait PageTableEntry: Sized + Copy + Clone {
/// Returns `true` if the entry is valid (i.e., not an Invalid/Fault entry).
fn is_valid(self) -> bool;
/// Returns the raw value of this page descriptor.
fn as_raw(self) -> u64;
/// Returns a representation of the page descriptor from a raw value.
fn from_raw(v: u64) -> Self;
/// Return a new invalid page descriptor.
fn invalid() -> Self;
}
/// Trait for descriptors that can point to a next-level table.
pub trait TableMapper: PageTableEntry {
/// Returns the physical address of the next-level table, if this descriptor
/// is a table descriptor.
fn next_table_address(self) -> Option<PA>;
/// Creates a new descriptor that points to the given next-level table.
fn new_next_table(pa: PA) -> Self;
}
/// A descriptor that maps a physical address (L1, L2 blocks and L3 page).
pub trait PaMapper: PageTableEntry {
/// A type that encodes different types of memory for this architecture.
type MemoryType;
/// Constructs a new valid page descriptor that maps a physical address.
fn new_map_pa(page_address: PA, memory_type: MemoryType, perms: PtePermissions) -> Self;
/// Return how many bytes this descriptor type maps.
fn map_shift() -> usize;
/// Whether a subsection of the region could be mapped via this type of
/// page.
fn could_map(region: PhysMemoryRegion, va: VA) -> bool;
/// Return the mapped physical address.
fn mapped_address(self) -> Option<PA>;
}
#[derive(Clone, Copy)]
struct TableAddr(PA);
@@ -95,10 +52,12 @@ macro_rules! define_descriptor {
pub struct $name(u64);
impl PageTableEntry for $name {
type RawDescriptor = u64;
const INVALID: u64 = 0;
fn is_valid(self) -> bool { (self.0 & 0b11) != 0 }
fn as_raw(self) -> u64 { self.0 }
fn from_raw(v: u64) -> Self { Self(v) }
fn invalid() -> Self { Self(0) }
fn as_raw(self) -> Self::RawDescriptor { self.0 }
fn from_raw(v: Self::RawDescriptor) -> Self { Self(v) }
}
$(
@@ -204,8 +163,7 @@ macro_rules! define_descriptor {
impl PaMapper for $name {
type MemoryType = MemoryType;
fn map_shift() -> usize { $tbl_shift }
const MAP_SHIFT: usize = $tbl_shift;
fn could_map(region: PhysMemoryRegion, va: VA) -> bool {
let is_aligned = |addr: usize| (addr & ((1 << $tbl_shift) - 1)) == 0;

View File

@@ -1,77 +1,19 @@
//! AArch64 page table structures, levels, and mapping logic.
use core::marker::PhantomData;
use super::{
pg_descriptors::{
L0Descriptor, L1Descriptor, L2Descriptor, L3Descriptor, MemoryType, PaMapper,
PageTableEntry, TableMapper,
},
tlb::TLBInvalidator,
};
use super::pg_descriptors::{L0Descriptor, L1Descriptor, L2Descriptor, L3Descriptor, MemoryType};
use crate::{
error::{MapError, Result},
memory::{
PAGE_SIZE,
address::{TPA, TVA, VA},
paging::permissions::PtePermissions,
paging::{
PaMapper, PageAllocator, PageTableEntry, PageTableMapper, PgTable, PgTableArray,
TLBInvalidator, TableMapper, permissions::PtePermissions,
},
region::{PhysMemoryRegion, VirtMemoryRegion},
},
};
/// Number of page table descriptors that fit in a single 4 KiB page.
pub const DESCRIPTORS_PER_PAGE: usize = PAGE_SIZE / core::mem::size_of::<u64>();
/// Bitmask used to extract the page table index from a shifted virtual address.
pub const LEVEL_MASK: usize = DESCRIPTORS_PER_PAGE - 1;
/// Trait representing a single level of the page table hierarchy.
///
/// Each implementor corresponds to a specific page table level (L0, L1, L2,
/// L3), characterized by its `SHIFT` value which determines the bits of the
/// virtual address used to index into the table.
///
/// # Associated Types
/// - `Descriptor`: The type representing an individual page table entry (PTE) at this level.
///
/// # Constants
/// - `SHIFT`: The bit position to shift the virtual address to obtain the index for this level.
///
/// # Provided Methods
/// - `pg_index(va: VA) -> usize`: Calculate the index into the page table for the given virtual address.
///
/// # Required Methods
/// - `get_desc(&self, va: VA) -> Self::Descriptor`: Retrieve the descriptor
/// (PTE) for the given virtual address.
/// - `get_desc_mut(&mut self, va: VA) -> &mut Self::Descriptor`: Get a mutable
/// reference to the descriptor, allowing updates.
pub trait PgTable: Clone + Copy {
/// Bit shift used to extract the index for this page table level.
const SHIFT: usize;
/// The descriptor (page table entry) type for this level.
type Descriptor: PageTableEntry;
/// Constructs this table handle from a typed virtual pointer to its backing array.
fn from_ptr(ptr: TVA<PgTableArray<Self>>) -> Self;
/// Returns the raw mutable pointer to the underlying descriptor array.
fn to_raw_ptr(self) -> *mut u64;
/// Compute the index into this page table from a virtual address.
fn pg_index(va: VA) -> usize {
(va.value() >> Self::SHIFT) & LEVEL_MASK
}
/// Get the descriptor for a given virtual address.
fn get_desc(self, va: VA) -> Self::Descriptor;
/// Get the descriptor for a given index.
fn get_idx(self, idx: usize) -> Self::Descriptor;
/// Set the value of the descriptor for a particular VA.
fn set_desc(self, va: VA, desc: Self::Descriptor, invalidator: &dyn TLBInvalidator);
}
pub(super) trait TableMapperTable: PgTable<Descriptor: TableMapper> + Clone + Copy {
type NextLevel: PgTable;
@@ -87,30 +29,6 @@ pub(super) trait TableMapperTable: PgTable<Descriptor: TableMapper> + Clone + Co
}
}
/// A page-aligned array of raw page table entries for a given table level.
#[derive(Clone)]
#[repr(C, align(4096))]
pub struct PgTableArray<K: PgTable> {
pages: [u64; DESCRIPTORS_PER_PAGE],
_phantom: PhantomData<K>,
}
impl<K: PgTable> PgTableArray<K> {
/// Creates a zeroed page table array (all entries invalid).
pub const fn new() -> Self {
Self {
pages: [0; DESCRIPTORS_PER_PAGE],
_phantom: PhantomData,
}
}
}
impl<K: PgTable> Default for PgTableArray<K> {
fn default() -> Self {
Self::new()
}
}
macro_rules! impl_pgtable {
($(#[$outer:meta])* $table:ident, $shift:expr, $desc_type:ident) => {
#[derive(Clone, Copy)]
@@ -134,7 +52,7 @@ macro_rules! impl_pgtable {
}
fn get_idx(self, idx: usize) -> Self::Descriptor {
debug_assert!(idx < DESCRIPTORS_PER_PAGE);
debug_assert!(idx < Self::DESCRIPTORS_PER_PAGE);
let raw = unsafe { self.base.add(idx).read_volatile() };
Self::Descriptor::from_raw(raw)
}
@@ -175,54 +93,6 @@ impl TableMapperTable for L2Table {
impl_pgtable!(/// Level 3 page table (4 KiB per entry).
L3Table, 12, L3Descriptor);
/// Trait for temporarily mapping and modifying a page table located at a
/// physical address.
///
/// During early boot, there are multiple mechanisms for accessing page table memory:
/// - Identity mapping (idmap): active very early when VA = PA
/// - Fixmap: a small, reserved region of virtual memory used to map arbitrary
/// PAs temporarily
/// - Page-offset (linear map/logical map): when VA = PA + offset, typically
/// used after MMU init
///
/// This trait abstracts over those mechanisms by providing a unified way to
/// safely access and mutate a page table given its physical address.
///
/// # Safety
/// This function is `unsafe` because the caller must ensure:
/// - The given physical address `pa` is valid and correctly aligned for type `T`.
/// - The contents at that physical address represent a valid page table of type `T`.
pub trait PageTableMapper {
/// Map a physical address to a usable reference of the page table, run the
/// closure, and unmap.
///
/// # Safety
/// This function is `unsafe` because the caller must ensure:
/// - The given physical address `pa` is valid and correctly aligned for type `T`.
/// - The contents at that physical address represent a valid page table of type `T`.
unsafe fn with_page_table<T: PgTable, R>(
&mut self,
pa: TPA<PgTableArray<T>>,
f: impl FnOnce(TVA<PgTableArray<T>>) -> R,
) -> Result<R>;
}
/// Trait for allocating new page tables during address space setup.
///
/// The page table walker uses this allocator to request fresh page tables
/// when needed (e.g., when creating new levels in the page table hierarchy).
///
/// # Responsibilities
/// - Return a valid, zeroed (or otherwise ready) page table physical address wrapped in `TPA<T>`.
/// - Ensure the allocated page table meets the alignment and size requirements of type `T`.
pub trait PageAllocator {
/// Allocate a new page table of type `T` and return its physical address.
///
/// # Errors
/// Returns an error if allocation fails (e.g., out of memory).
fn allocate_page_table<T: PgTable>(&mut self) -> Result<TPA<PgTableArray<T>>>;
}
/// Describes the attributes of a memory range to be mapped.
pub struct MapAttributes {
/// The contiguous physical memory region to be mapped. Must be
@@ -231,8 +101,7 @@ pub struct MapAttributes {
/// The target virtual memory region. Must be page-aligned and have the same
/// size as `phys`.
pub virt: VirtMemoryRegion,
/// The memory attributes (e.g., `MemoryType::Normal`, `MemoryType::Device`)
/// for the mapping.
/// The architecture-specific memory attributes for the mapping.
pub mem_type: MemoryType,
/// The access permissions (read/write/execute, user/kernel) for the
/// mapping.
@@ -386,7 +255,7 @@ fn try_map_pa<L, PA, PM>(
ctx: &mut MappingContext<PA, PM>,
) -> Result<Option<usize>>
where
L: PgTable<Descriptor: PaMapper>,
L: PgTable<Descriptor: PaMapper<MemoryType = MemoryType>>,
PA: PageAllocator,
PM: PageTableMapper,
{
@@ -416,7 +285,7 @@ where
})?;
}
Ok(Some(1 << (L::Descriptor::map_shift() - 12)))
Ok(Some(1 << (L::Descriptor::MAP_SHIFT - 12)))
} else {
Ok(None)
}

View File

@@ -1,15 +1,13 @@
//! Utilities for tearing down and freeing page table hierarchies.
use super::pg_descriptors::{PaMapper, TableMapper};
use super::pg_tables::L0Table;
use super::{
pg_tables::{
DESCRIPTORS_PER_PAGE, L3Table, PageTableMapper, PgTable, PgTableArray, TableMapperTable,
},
pg_tables::{L3Table, TableMapperTable},
pg_walk::WalkContext,
};
use crate::error::Result;
use crate::memory::address::{PA, TPA};
use crate::memory::paging::{PaMapper, PageTableMapper, PgTable, PgTableArray, TableMapper};
trait RecursiveTeardownWalker: PgTable + Sized {
fn tear_down<F, PM>(
@@ -44,7 +42,7 @@ where
ctx.mapper.with_page_table(table_pa, |pgtable| {
let table = Self::from_ptr(pgtable);
for i in cursor..DESCRIPTORS_PER_PAGE {
for i in cursor..<T as PgTable>::DESCRIPTORS_PER_PAGE {
let desc = table.get_idx(i);
if let Some(addr) = desc.next_table_address() {
@@ -91,7 +89,7 @@ impl RecursiveTeardownWalker for L3Table {
ctx.mapper.with_page_table(table_pa, |pgtable| {
let table = L3Table::from_ptr(pgtable);
for idx in 0..DESCRIPTORS_PER_PAGE {
for idx in 0..Self::DESCRIPTORS_PER_PAGE {
let desc = table.get_idx(idx);
if let Some(addr) = desc.mapped_address() {

View File

@@ -1,15 +1,18 @@
//! Page table walking and per-entry modification.
use super::{
pg_descriptors::{L3Descriptor, PageTableEntry, TableMapper},
pg_tables::{L0Table, L3Table, PageTableMapper, PgTable, PgTableArray, TableMapperTable},
tlb::{NullTlbInvalidator, TLBInvalidator},
pg_descriptors::L3Descriptor,
pg_tables::{L0Table, L3Table, TableMapperTable},
tlb::NullTlbInvalidator,
};
use crate::{
error::{MapError, Result},
memory::{
PAGE_SIZE,
address::{TPA, VA},
paging::{
PageTableEntry, PageTableMapper, PgTable, PgTableArray, TLBInvalidator, TableMapper,
},
region::VirtMemoryRegion,
},
};
@@ -183,12 +186,13 @@ pub fn get_pte<PM: PageTableMapper>(
#[cfg(test)]
mod tests {
use super::*;
use crate::arch::arm64::memory::pg_descriptors::{L2Descriptor, MemoryType, PaMapper};
use crate::arch::arm64::memory::pg_descriptors::{L2Descriptor, MemoryType};
use crate::arch::arm64::memory::pg_tables::tests::TestHarness;
use crate::arch::arm64::memory::pg_tables::{L1Table, L2Table, map_at_level};
use crate::error::KernelError;
use crate::memory::PAGE_SIZE;
use crate::memory::address::{PA, VA};
use crate::memory::paging::PaMapper;
use crate::memory::paging::permissions::PtePermissions;
use std::sync::atomic::{AtomicUsize, Ordering};

View File

@@ -1,7 +1,6 @@
//! TLB invalidation helpers.
/// Trait for invalidating TLB entries after page table modifications.
pub trait TLBInvalidator {}
use crate::memory::paging::TLBInvalidator;
/// A no-op TLB invalidator used when invalidation is unnecessary.
pub struct NullTlbInvalidator {}

View File

@@ -1,3 +1,200 @@
//! Architecture agnostic paging-related traits and types.
use super::{
PAGE_SIZE,
address::{PA, TPA, TVA, VA},
region::PhysMemoryRegion,
};
use core::marker::PhantomData;
use permissions::PtePermissions;
pub mod permissions;
/// Trait for common behavior across different types of page table entries.
pub trait PageTableEntry: Sized + Copy + Clone {
    /// The raw pod-type used for the descriptor.
    type RawDescriptor: Sized + Copy + Clone;

    /// The raw value for an invalid (not present) descriptor.
    ///
    /// `from_raw(Self::INVALID)` must produce a descriptor for which
    /// `is_valid()` returns `false`; the provided `invalid()` relies on this.
    const INVALID: Self::RawDescriptor;

    /// Returns `true` if the entry is valid (i.e., not an Invalid/Fault entry).
    fn is_valid(self) -> bool;

    /// Returns the raw value of this page descriptor.
    fn as_raw(self) -> Self::RawDescriptor;

    /// Returns a representation of the page descriptor from a raw value.
    fn from_raw(v: Self::RawDescriptor) -> Self;

    /// Return a new invalid page descriptor (built from `Self::INVALID`).
    fn invalid() -> Self {
        Self::from_raw(Self::INVALID)
    }
}
/// Trait for descriptors that can point to a next-level table.
pub trait TableMapper: PageTableEntry {
    /// Returns the physical address of the next-level table if this descriptor
    /// is a table descriptor, or `None` otherwise.
    fn next_table_address(self) -> Option<PA>;

    /// Creates a new descriptor that points to the given next-level table.
    fn new_next_table(pa: PA) -> Self;
}
/// A descriptor that maps a physical address at the page and block level.
pub trait PaMapper: PageTableEntry {
    /// The memory attribute type for this descriptor's architecture.
    type MemoryType: Copy;

    /// Log2 of the mapping size: this descriptor type maps `1 << MAP_SHIFT`
    /// bytes of physical memory.
    const MAP_SHIFT: usize;

    /// Constructs a new valid page descriptor that maps a physical address.
    fn new_map_pa(page_address: PA, memory_type: Self::MemoryType, perms: PtePermissions) -> Self;

    /// Whether a subsection of the region, starting at `va`, could be mapped
    /// via this type of page.
    fn could_map(region: PhysMemoryRegion, va: VA) -> bool;

    /// Return the mapped physical address, or `None` if this descriptor does
    /// not map one.
    fn mapped_address(self) -> Option<PA>;
}
/// Trait representing a single level of the page table hierarchy.
///
/// Each implementor corresponds to a specific page table level, characterized
/// by its `SHIFT` value which determines the bits of the virtual address used
/// to index into the table.
///
/// # Associated Types
/// - `Descriptor`: The type representing an individual page table entry (PTE)
///   at this level.
///
/// # Constants
/// - `SHIFT`: The bit position to shift the virtual address to obtain the index
///   for this level.
/// - `DESCRIPTORS_PER_PAGE`: The number of PTEs that are present in a single
///   page.
/// - `LEVEL_MASK`: The mask that should be applied after `SHIFT` to obtain the
///   descriptor index.
///
/// # Provided Methods
/// - `pg_index(va: VA) -> usize`: Calculate the index into the page table for
///   the given virtual address.
///
/// # Required Methods
/// - `get_desc(self, va: VA) -> Self::Descriptor`: Retrieve the descriptor
///   (PTE) for the given virtual address.
/// - `get_idx(self, idx: usize) -> Self::Descriptor`: Retrieve the descriptor
///   at a given table index.
/// - `set_desc(self, va, desc, invalidator)`: Update the descriptor for a
///   virtual address, performing TLB maintenance via `invalidator`.
pub trait PgTable: Clone + Copy {
    /// Number of page table descriptors that fit in a single page
    /// (`PAGE_SIZE` divided by the raw descriptor size).
    const DESCRIPTORS_PER_PAGE: usize =
        PAGE_SIZE / core::mem::size_of::<<Self::Descriptor as PageTableEntry>::RawDescriptor>();

    /// Bitmask used to extract the page table index from a shifted virtual address.
    const LEVEL_MASK: usize = Self::DESCRIPTORS_PER_PAGE - 1;

    /// Bit shift used to extract the index for this page table level.
    const SHIFT: usize;

    /// The descriptor (page table entry) type for this level.
    type Descriptor: PageTableEntry;

    /// Constructs this table handle from a typed virtual pointer to its backing array.
    fn from_ptr(ptr: TVA<PgTableArray<Self>>) -> Self;

    /// Returns the raw mutable pointer to the underlying descriptor array.
    // NOTE(review): return type is still hard-coded to `u64` rather than
    // `<Self::Descriptor as PageTableEntry>::RawDescriptor` — confirm whether
    // architectures with non-u64 descriptors are expected to implement this.
    fn to_raw_ptr(self) -> *mut u64;

    /// Compute the index into this page table from a virtual address.
    fn pg_index(va: VA) -> usize {
        (va.value() >> Self::SHIFT) & Self::LEVEL_MASK
    }

    /// Get the descriptor for a given virtual address.
    fn get_desc(self, va: VA) -> Self::Descriptor;

    /// Get the descriptor for a given index.
    fn get_idx(self, idx: usize) -> Self::Descriptor;

    /// Set the value of the descriptor for a particular VA.
    fn set_desc(self, va: VA, desc: Self::Descriptor, invalidator: &dyn TLBInvalidator);
}
/// A page-aligned array of raw page table entries for a given table level.
///
/// `N` defaults to 512 (a 4 KiB page of 8-byte descriptors); other
/// architectures may instantiate a different entry count.
// NOTE(review): alignment is hard-coded to 4096 — verify this matches
// `PAGE_SIZE` for every architecture that instantiates this type.
#[derive(Clone)]
#[repr(C, align(4096))]
pub struct PgTableArray<K: PgTable, const N: usize = 512> {
    // Raw descriptor storage; entries are interpreted through `K::Descriptor`.
    pages: [<K::Descriptor as PageTableEntry>::RawDescriptor; N],
    // Ties the array to its table level without storing a `K`.
    _phantom: PhantomData<K>,
}

impl<K: PgTable, const N: usize> PgTableArray<K, N> {
    /// Creates a page table array with every entry set to
    /// `K::Descriptor::INVALID` (i.e., all entries invalid).
    pub const fn new() -> Self {
        Self {
            pages: [K::Descriptor::INVALID; N],
            _phantom: PhantomData,
        }
    }
}

impl<K: PgTable, const N: usize> Default for PgTableArray<K, N> {
    fn default() -> Self {
        Self::new()
    }
}
/// Trait for temporarily mapping and modifying a page table located at a
/// physical address.
///
/// During early boot, there are multiple mechanisms for accessing page table memory:
/// - Identity mapping (idmap): active very early when VA = PA
/// - Fixmap: a small, reserved region of virtual memory used to map arbitrary
///   PAs temporarily
/// - Page-offset (linear map/logical map): when VA = PA + offset, typically
///   used after MMU init
///
/// This trait abstracts over those mechanisms by providing a unified way to
/// safely access and mutate a page table given its physical address.
///
/// # Safety
/// `with_page_table` is `unsafe`: the caller must uphold the contract
/// documented on that method.
pub trait PageTableMapper {
    /// Map a physical address to a usable reference of the page table, run the
    /// closure, and unmap.
    ///
    /// # Safety
    /// This function is `unsafe` because the caller must ensure:
    /// - The given physical address `pa` is valid and correctly aligned for type `T`.
    /// - The contents at that physical address represent a valid page table of type `T`.
    unsafe fn with_page_table<T: PgTable, R>(
        &mut self,
        pa: TPA<PgTableArray<T>>,
        f: impl FnOnce(TVA<PgTableArray<T>>) -> R,
    ) -> crate::error::Result<R>;
}
/// Trait for allocating new page tables during address space setup.
///
/// The page table walker uses this allocator to request fresh page tables
/// when needed (e.g., when creating new levels in the page table hierarchy).
///
/// # Responsibilities
/// - Return a valid, zeroed (or otherwise ready) page table physical address
///   wrapped in `TPA<T>`.
/// - Ensure the allocated page table meets the alignment and size requirements
///   of type `T`.
pub trait PageAllocator {
    /// Allocate a new page table of type `T` and return its physical address.
    ///
    /// # Errors
    /// Returns an error if allocation fails (e.g., out of memory).
    fn allocate_page_table<T: PgTable>(&mut self) -> crate::error::Result<TPA<PgTableArray<T>>>;
}
/// Trait for invalidating TLB entries after page table modifications.
///
/// Currently a marker trait with no required methods; implementors are passed
/// as `&dyn TLBInvalidator` to `PgTable::set_desc` so that table updates can
/// trigger architecture-specific TLB maintenance.
pub trait TLBInvalidator {}

View File

@@ -9,14 +9,14 @@ use libkernel::{
arch::arm64::memory::{
pg_descriptors::MemoryType,
pg_tables::{
L0Table, MapAttributes, MappingContext, PageTableMapper, PgTable, PgTableArray,
L0Table, MapAttributes, MappingContext,
map_range,
},
},
error::Result,
memory::{
address::{TPA, TVA},
paging::permissions::PtePermissions,
paging::{PageTableMapper, PgTable, PgTableArray, permissions::PtePermissions},
},
};

View File

@@ -8,13 +8,13 @@ use core::ptr::NonNull;
use libkernel::{
arch::arm64::memory::{
pg_descriptors::MemoryType,
pg_tables::{L0Table, MapAttributes, MappingContext, PgTableArray, map_range},
pg_tables::{L0Table, MapAttributes, MappingContext, map_range},
},
error::{KernelError, Result},
memory::{
PAGE_SIZE,
address::{PA, TPA, VA},
paging::permissions::PtePermissions,
paging::{PgTableArray, permissions::PtePermissions},
region::{PhysMemoryRegion, VirtMemoryRegion},
},
};

View File

@@ -27,11 +27,11 @@ use aarch64_cpu::{
use core::arch::global_asm;
use libkernel::{
CpuOps,
arch::arm64::memory::pg_tables::{L0Table, PgTableArray},
arch::arm64::memory::pg_tables::L0Table,
error::Result,
memory::{
address::{PA, TPA, VA},
allocators::{phys::FrameAllocator, slab::allocator::SlabAllocator},
allocators::{phys::FrameAllocator, slab::allocator::SlabAllocator}, paging::PgTableArray,
},
sync::per_cpu::setup_percpu,
};

View File

@@ -4,12 +4,13 @@ use aarch64_cpu::asm::barrier;
use aarch64_cpu::registers::{MAIR_EL1, SCTLR_EL1, TCR_EL1, TTBR0_EL1, TTBR1_EL1};
use libkernel::arch::arm64::memory::pg_descriptors::MemoryType;
use libkernel::arch::arm64::memory::pg_tables::{
L0Table, MapAttributes, MappingContext, PageAllocator, PageTableMapper, PgTable, PgTableArray,
L0Table, MapAttributes, MappingContext,
map_range,
};
use libkernel::arch::arm64::memory::tlb::NullTlbInvalidator;
use libkernel::error::{KernelError, Result};
use libkernel::memory::address::{AddressTranslator, IdentityTranslator, PA, TPA, TVA};
use libkernel::memory::paging::{PageAllocator, PageTableMapper, PgTable, PgTableArray};
use libkernel::memory::paging::permissions::PtePermissions;
use libkernel::memory::region::PhysMemoryRegion;
use libkernel::memory::{PAGE_MASK, PAGE_SIZE};

View File

@@ -11,9 +11,9 @@ use aarch64_cpu::{
use alloc::vec::Vec;
use libkernel::{
arch::arm64::memory::{
pg_descriptors::{L3Descriptor, MemoryType, PaMapper, PageTableEntry},
pg_descriptors::{L3Descriptor, MemoryType},
pg_tables::{
L0Table, MapAttributes, MappingContext, PageAllocator, PgTableArray, map_range,
L0Table, MapAttributes, MappingContext, map_range,
},
pg_tear_down::tear_down_address_space,
pg_walk::{WalkContext, get_pte, walk_and_modify_region},
@@ -23,7 +23,7 @@ use libkernel::{
PAGE_SIZE,
address::{TPA, VA},
page::PageFrame,
paging::permissions::PtePermissions,
paging::{PaMapper, PageAllocator, PageTableEntry, PgTableArray, permissions::PtePermissions},
proc_vm::address_space::{PageInfo, UserAddressSpace},
region::{PhysMemoryRegion, VirtMemoryRegion},
},

View File

@@ -6,17 +6,14 @@ use core::{
};
use libkernel::{
arch::arm64::memory::{
pg_descriptors::{
L0Descriptor, L1Descriptor, L2Descriptor, L3Descriptor, MemoryType, PaMapper,
PageTableEntry, TableMapper,
},
pg_tables::{L0Table, L1Table, L2Table, L3Table, PgTable, PgTableArray},
pg_descriptors::{L0Descriptor, L1Descriptor, L2Descriptor, L3Descriptor, MemoryType},
pg_tables::{L0Table, L1Table, L2Table, L3Table},
},
error::{KernelError, Result},
memory::{
PAGE_SIZE,
address::{IdentityTranslator, TPA, TVA, VA},
paging::permissions::PtePermissions,
paging::{PaMapper, PageTableEntry, PgTable, PgTableArray, TableMapper, permissions::PtePermissions},
region::PhysMemoryRegion,
},
};

View File

@@ -2,14 +2,14 @@ use super::{MMIO_BASE, tlb::AllEl1TlbInvalidator};
use crate::sync::{OnceLock, SpinLock};
use libkernel::{
arch::arm64::memory::{
pg_descriptors::{MemoryType, PaMapper},
pg_tables::{L0Table, MapAttributes, MappingContext, PgTableArray, map_range},
pg_descriptors::MemoryType,
pg_tables::{L0Table, MapAttributes, MappingContext, map_range},
pg_walk::get_pte,
},
error::Result,
memory::{
address::{PA, TPA, VA},
paging::permissions::PtePermissions,
paging::{PaMapper, PgTableArray, permissions::PtePermissions},
proc_vm::address_space::KernAddressSpace,
region::{PhysMemoryRegion, VirtMemoryRegion},
},

View File

@@ -2,7 +2,7 @@ use core::marker::PhantomData;
use crate::memory::page::ClaimedPage;
use libkernel::{
arch::arm64::memory::pg_tables::{PageAllocator, PgTable, PgTableArray},
memory::paging::{PageAllocator, PgTable, PgTableArray},
error::Result,
memory::address::TPA,
};

View File

@@ -1,5 +1,5 @@
use libkernel::{
arch::arm64::memory::pg_tables::{PageTableMapper, PgTable, PgTableArray},
memory::paging::{PageTableMapper, PgTable, PgTableArray},
error::Result,
memory::address::{TPA, TVA},
};

View File

@@ -1,6 +1,6 @@
use crate::memory::PageOffsetTranslator;
use libkernel::{
arch::arm64::memory::pg_tables::{PageAllocator, PgTable, PgTableArray},
memory::paging::{PageAllocator, PgTable, PgTableArray},
error::Result,
memory::{PAGE_SIZE, address::TPA, allocators::smalloc::Smalloc},
};

View File

@@ -1,6 +1,6 @@
use core::arch::asm;
use libkernel::arch::arm64::memory::tlb::TLBInvalidator;
use libkernel::memory::paging::TLBInvalidator;
pub struct AllEl1TlbInvalidator;

View File

@@ -8,11 +8,10 @@ use cpu_ops::{local_irq_restore, local_irq_save};
use exceptions::ExceptionState;
use libkernel::{
CpuOps,
arch::arm64::memory::pg_tables::{L0Table, PgTableArray},
arch::arm64::memory::pg_tables::L0Table,
error::Result,
memory::{
address::{UA, VA},
proc_vm::address_space::VirtualMemory,
address::{UA, VA}, paging::PgTableArray, proc_vm::address_space::VirtualMemory
},
};
use memory::{