diff --git a/libkernel/src/memory/allocators/frame.rs b/libkernel/src/memory/allocators/frame.rs
index de4a9e1..3a7e5da 100644
--- a/libkernel/src/memory/allocators/frame.rs
+++ b/libkernel/src/memory/allocators/frame.rs
@@ -37,7 +37,8 @@ pub enum FrameState {
#[derive(Debug, Clone)]
pub struct Frame {
pub state: FrameState,
- pub link: LinkedListLink, // only used in free nodes.
+ // used in free nodes list (FA), Free and Partial Lists (SA).
+ pub link: LinkedListLink,
pub pfn: PageFrame,
}
diff --git a/libkernel/src/memory/allocators/phys.rs b/libkernel/src/memory/allocators/phys.rs
index 18ccd67..24ec8b6 100644
--- a/libkernel/src/memory/allocators/phys.rs
+++ b/libkernel/src/memory/allocators/phys.rs
@@ -40,17 +40,21 @@ impl FrameAllocatorInner {
pub(super) fn free_slab(&mut self, frame: UnsafeRef) {
assert!(matches!(frame.state, FrameState::Slab(_)));
- // SAFETY: The caller should guarntee exclusive ownership of this frame
- // as it's being passed back to the FA.
- let frame = unsafe { &mut *UnsafeRef::into_raw(frame) };
+ let pfn = frame.pfn;
- // Restore frame state data for slabs.
- frame.state = FrameState::AllocatedHead(AllocatedInfo {
- ref_count: 1,
- order: SLAB_FRAME_ALLOC_ORDER as _,
- });
+ {
+ // SAFETY: The caller should guarantee exclusive ownership of this frame
+ // as it's being passed back to the FA.
+ let frame = self.get_frame_mut(pfn);
- self.free_frames(PhysMemoryRegion::new(frame.pfn.pa(), SLAB_SIZE_BYTES));
+ // Restore frame state data for slabs.
+ frame.state = FrameState::AllocatedHead(AllocatedInfo {
+ ref_count: 1,
+ order: SLAB_FRAME_ALLOC_ORDER as _,
+ });
+ }
+
+ self.free_frames(PhysMemoryRegion::new(pfn.pa(), SLAB_SIZE_BYTES));
}
/// Frees a previously allocated block of frames.
diff --git a/libkernel/src/memory/allocators/slab/allocator.rs b/libkernel/src/memory/allocators/slab/allocator.rs
index 9fe3413..008cf7a 100644
--- a/libkernel/src/memory/allocators/slab/allocator.rs
+++ b/libkernel/src/memory/allocators/slab/allocator.rs
@@ -93,7 +93,7 @@ impl, T: AddressTranslator<()>> SlabManager
// SAFETY: We hold a mutable reference for self, therefore we can be
// sure that we have exclusive access to all Frames owned by this
// SlabAllocInner.
- unsafe { &mut *UnsafeRef::into_raw(x) }
+ unsafe { self.frame_list.get_frame(x.pfn).as_mut_unchecked() }
}) && let FrameState::Slab(ref mut slab) = frame.state
&& let Some(ptr) = slab.alloc_object()
{
@@ -101,7 +101,7 @@ impl, T: AddressTranslator<()>> SlabManager
// If the slab is still partial, re-insert back into the partial
// list for further allocations.
self.partial
- .push_front(unsafe { UnsafeRef::from_raw(frame as *mut _) });
+ .push_front(unsafe { UnsafeRef::from_raw(frame as *const _) });
}
return Some(ptr);
@@ -109,8 +109,10 @@ impl, T: AddressTranslator<()>> SlabManager
if let Some(frame) = self.free.pop_front().map(|x| {
// SAFETY: As above.
- unsafe { &mut *UnsafeRef::into_raw(x) }
+ unsafe { self.frame_list.get_frame(x.pfn).as_mut_unchecked() }
}) {
+ self.free_list_sz -= 1;
+
let (ptr, state) = match frame.state {
FrameState::Slab(ref mut slab) => {
let ptr = slab.alloc_object().unwrap();
@@ -184,17 +186,19 @@ impl, T: AddressTranslator<()>> SlabManager
frame_list: &FrameList,
obj_shift: usize,
) -> (*mut Frame, SlabState) {
- match (unsafe { &mut (*frame) }).state {
- FrameState::AllocatedTail(ref tail_info) => {
+ let state = unsafe { &mut (*frame).state };
+ match state {
+ FrameState::AllocatedTail(tail_info) => {
let head_frame = frame_list.get_frame(tail_info.head);
do_free_obj(head_frame, ptr, frame_list, obj_shift)
}
- FrameState::Slab(ref mut slab) => {
+ FrameState::Slab(slab) => {
if slab.obj_shift() != obj_shift {
panic!("Slab allocator: Layout mismatch on free");
}
slab.put_object(ptr);
- (frame, slab.state())
+ let state = slab.state();
+ (frame, state)
}
_ => unreachable!("Slab allocation"),
}
@@ -204,7 +208,7 @@ impl, T: AddressTranslator<()>> SlabManager
};
// SAFETY: As above
- let frame = unsafe { &mut *frame };
+ let is_linked = unsafe { (*frame).link.is_linked() };
match state {
SlabState::Free => {
@@ -224,7 +228,7 @@ impl, T: AddressTranslator<()>> SlabManager
self.free_list_sz -= num_freed;
}
- if frame.link.is_linked() {
+ if is_linked {
// The frame *must* be linked in the partial list if linked.
unsafe { self.partial.cursor_mut_from_ptr(frame as _) }.remove();
}
@@ -235,8 +239,10 @@ impl, T: AddressTranslator<()>> SlabManager
self.free_list_sz += 1;
}
SlabState::Partial => {
- if !frame.link.is_linked() {
- // We must have free'd an object on a previously full slab.
+ if !is_linked {
+ // We must have free'd an object on a previously full slab
+ // for it to have not been linked.
+ //
// Insert into the partial list.
self.partial
.push_front(unsafe { UnsafeRef::from_raw(frame as _) });
diff --git a/libkernel/src/memory/allocators/slab/heap.rs b/libkernel/src/memory/allocators/slab/heap.rs
index b345456..b6dbdfc 100644
--- a/libkernel/src/memory/allocators/slab/heap.rs
+++ b/libkernel/src/memory/allocators/slab/heap.rs
@@ -266,132 +266,132 @@ mod tests {
let _ = get_fixture();
let _ = TestSlabGetter::global_slab_alloc();
- let num_threads = 8;
- let ops_per_thread = 100_000;
- let barrier = Arc::new(Barrier::new(num_threads));
+ for _ in 0..10 {
+ let num_threads = 8;
+ let ops_per_thread = 100_000;
+ let barrier = Arc::new(Barrier::new(num_threads));
- // Track allocated memory usage to verify leak detection later
- let initial_free_pages = get_fixture().allocator.free_pages();
- println!("Initial Free Pages: {}", initial_free_pages);
+ // Track allocated memory usage to verify leak detection later
+ let initial_free_pages = get_fixture().allocator.free_pages();
+ println!("Initial Free Pages: {}", initial_free_pages);
- let mut handles = vec![];
+ let mut handles = vec![];
- for t_idx in 0..num_threads {
- let barrier = barrier.clone();
+ for t_idx in 0..num_threads {
+ let barrier = barrier.clone();
- handles.push(thread::spawn(move || {
- TestHeap::init_for_this_cpu();
+ handles.push(thread::spawn(move || {
+ TestHeap::init_for_this_cpu();
- barrier.wait();
+ barrier.wait();
- let heap = TestHeap::new();
- let mut rng = rng();
+ let heap = TestHeap::new();
+ let mut rng = rng();
- // Track allocations: (Ptr, Layout, PatternByte)
- let mut allocations: Vec<(*mut u8, core::alloc::Layout, u8)> = Vec::new();
+ // Track allocations: (Ptr, Layout, PatternByte)
+ let mut allocations: Vec<(*mut u8, core::alloc::Layout, u8)> = Vec::new();
- for _ in 0..ops_per_thread {
- // Randomly decide to Alloc (70%) or Free (30%)
- // Bias towards Alloc to build up memory pressure
- if rng.random_bool(0.6) || allocations.is_empty() {
- // Allocation path
+ for _ in 0..ops_per_thread {
+ // Randomly decide to Alloc (60%) or Free (40%)
+ // Bias towards Alloc to build up memory pressure
+ if rng.random_bool(0.6) || allocations.is_empty() {
+ // Allocation path
- // Random size: biased to small (slab), occasional huge
- let size = if rng.random_bool(0.95) {
- rng.random_range(1..=2048) // Small (Slabs)
- } else {
- rng.random_range(4096..=16384) // Large (Pages)
- };
+ // Fixed small size: always slab-backed.
+ let size = 1024;
- // Random alignment (power of 2)
- let align = 1 << rng.random_range(0..=6);
- let layout = core::alloc::Layout::from_size_align(size, align).unwrap();
+ // Fixed power-of-two alignment.
+ let align = 1024;
+ let layout = core::alloc::Layout::from_size_align(size, align).unwrap();
- unsafe {
- let ptr = heap.alloc(layout);
- assert!(!ptr.is_null(), "Allocation failed");
- assert_eq!(ptr as usize % align, 0, "Alignment violation");
+ unsafe {
+ let ptr = heap.alloc(layout);
+ assert!(!ptr.is_null(), "Allocation failed");
+ assert_eq!(ptr as usize % align, 0, "Alignment violation");
- // Write Pattern
- let pattern: u8 = rng.random();
- std::ptr::write_bytes(ptr, pattern, size);
+ // Write Pattern
+ let pattern: u8 = rng.random();
+ std::ptr::write_bytes(ptr, pattern, size);
- allocations.push((ptr, layout, pattern));
- }
- } else {
- // Free Path.
-
- // Remove a random allocation from our list
- let idx = rng.random_range(0..allocations.len());
- let (ptr, layout, pattern) = allocations.swap_remove(idx);
-
- unsafe {
- // Verify Pattern
- let slice = std::slice::from_raw_parts(ptr, layout.size());
- for (i, &byte) in slice.iter().enumerate() {
- assert_eq!(
- byte, pattern,
- "Memory Corruption detected in thread {} at byte {}",
- t_idx, i
- );
+ allocations.push((ptr, layout, pattern));
}
+ } else {
+ // Free Path.
+ // Remove a random allocation from our list
+ let idx = rng.random_range(0..allocations.len());
+ let (ptr, layout, pattern) = allocations.swap_remove(idx);
+
+ unsafe {
+ // Verify Pattern
+ let slice = std::slice::from_raw_parts(ptr, layout.size());
+ for (i, &byte) in slice.iter().enumerate() {
+ assert_eq!(
+ byte, pattern,
+ "Memory Corruption detected in thread {} at byte {}",
+ t_idx, i
+ );
+ }
+
+ heap.dealloc(ptr, layout);
+ }
+ }
+ }
+
+ // Free everything.
+ for (ptr, layout, pattern) in allocations {
+ unsafe {
+ let slice = std::slice::from_raw_parts(ptr, layout.size());
+ for &byte in slice.iter() {
+ assert_eq!(byte, pattern, "Corruption detected during cleanup");
+ }
heap.dealloc(ptr, layout);
}
}
- }
- // Free everything.
- for (ptr, layout, pattern) in allocations {
+ // Purge the per-cpu caches.
+ let slab = SLAB_ALLOCATOR.get().unwrap();
+ ThreadLocalCacheStorage::get().purge_into(&slab);
+
+ let addr = ThreadLocalCacheStorage::get().deref() as *const SlabCache;
+
+ // Return the slab cache page.
unsafe {
- let slice = std::slice::from_raw_parts(ptr, layout.size());
- for &byte in slice.iter() {
- assert_eq!(byte, pattern, "Corruption detected during cleanup");
- }
- heap.dealloc(ptr, layout);
+ FIXTURE
+ .get()
+ .unwrap()
+ .allocator
+ .alloc_from_region(PhysMemoryRegion::new(
+ PA::from_value(addr as usize),
+ PAGE_SIZE,
+ ));
}
- }
-
- // Purge the per-cpu caches.
- let slab = SLAB_ALLOCATOR.get().unwrap();
- ThreadLocalCacheStorage::get().purge_into(&slab);
-
- let addr = ThreadLocalCacheStorage::get().deref() as *const SlabCache;
-
- // Return the slab cache page.
- unsafe {
- FIXTURE
- .get()
- .unwrap()
- .allocator
- .alloc_from_region(PhysMemoryRegion::new(
- PA::from_value(addr as usize),
- PAGE_SIZE,
- ));
- }
- }))
- }
-
- // Wait for all threads
- for h in handles {
- h.join().unwrap();
- }
-
- // Purge the all slab free lsts (the partial list should be empty).
- for slab_man in SLAB_ALLOCATOR.get().unwrap().managers.iter() {
- let mut frame_alloc = FIXTURE.get().unwrap().allocator.inner.lock_save_irq();
-
- let mut slab = slab_man.lock_save_irq();
-
- assert!(slab.partial.is_empty());
-
- while let Some(slab) = slab.free.pop_front() {
- frame_alloc.free_slab(slab);
+ }))
}
+
+ // Wait for all threads
+ for h in handles {
+ h.join().unwrap();
+ }
+
+ // Purge all the slab free lists (the partial list should be empty).
+ for slab_man in SLAB_ALLOCATOR.get().unwrap().managers.iter() {
+ let mut frame_alloc = FIXTURE.get().unwrap().allocator.inner.lock_save_irq();
+
+ let mut slab = slab_man.lock_save_irq();
+
+ assert!(slab.partial.is_empty());
+
+ while let Some(slab) = slab.free.pop_front() {
+ frame_alloc.free_slab(slab);
+ }
+
+ slab.free_list_sz = 0;
+ }
+
+ let final_free = get_fixture().allocator.free_pages();
+
+ assert_eq!(initial_free_pages, final_free);
}
-
- let final_free = get_fixture().allocator.free_pages();
-
- assert_eq!(initial_free_pages, final_free);
}
}