syscalls: mprotect: implement

Implement the `mprotect` syscall, including a set of tests that verify
correct VMA behaviour — in particular splitting and merging — when
permissions are changed via `mprotect`.
This commit is contained in:
Matthew Leach
2025-11-30 16:14:15 +00:00
committed by Matthew Leach
parent baf98c452f
commit 910203e0c2
6 changed files with 294 additions and 6 deletions

View File

@@ -226,7 +226,7 @@
| 0xdf (223) | fadvise64_64 | (int fd, loff_t offset, loff_t len, int advice) | __arm64_sys_fadvise64_64 | false |
| 0xe0 (224) | swapon | (const char *specialfile, int swap_flags) | __arm64_sys_swapon | false |
| 0xe1 (225) | swapoff | (const char *specialfile) | __arm64_sys_swapoff | false |
| 0xe2 (226) | mprotect | (unsigned long start, size_t len, unsigned long prot) | __arm64_sys_mprotect | false |
| 0xe2 (226) | mprotect | (unsigned long start, size_t len, unsigned long prot) | __arm64_sys_mprotect | true |
| 0xe3 (227) | msync | (unsigned long start, size_t len, int flags) | __arm64_sys_msync | false |
| 0xe4 (228) | mlock | (unsigned long start, size_t len) | __arm64_sys_mlock | false |
| 0xe5 (229) | munlock | (unsigned long start, size_t len) | __arm64_sys_munlock | false |

View File

@@ -49,6 +49,7 @@ pub fn kern_err_to_syscall(err: KernelError) -> isize {
KernelError::NotATty => ENOTTY,
KernelError::SeekPipe => ESPIPE,
KernelError::NotSupported => ENOSYS,
KernelError::NoMemory => ENOMEM,
_ => todo!(),
}
}

View File

@@ -165,6 +165,67 @@ impl<AS: UserAddressSpace> MemoryMap<AS> {
self.unmap_region(range.align_to_page_boundary(), None)
}
/// Changes the protection of the pages covered by `protect_region` to
/// `new_perms`, splitting the containing VMA where only a sub-range is
/// affected.
///
/// # Errors
///
/// * `InvalidValue` — the region is not page aligned or has zero size.
/// * `NoMemory` — no VMA covers the start of the region, or the region
///   spans multiple VMAs (not yet supported).
/// * Any error from the address space's `protect_range`; in that case
///   the VMA tree is left unchanged.
pub fn mprotect(
    &mut self,
    protect_region: VirtMemoryRegion,
    new_perms: VMAPermissions,
) -> Result<()> {
    if !protect_region.is_page_aligned() || protect_region.size() == 0 {
        return Err(KernelError::InvalidValue);
    }

    let affected_vma_addr = self
        .find_vma(protect_region.start_address())
        .map(|x| x.region.start_address())
        .ok_or(KernelError::NoMemory)?;

    // The VMA is temporarily removed from the tree; every exit path
    // below must re-insert it (or its replacement fragments) so the
    // map never loses track of a mapping.
    let affected_vma = self
        .vmas
        .remove(&affected_vma_addr)
        .expect("Should have the same key as the start address");

    // Easy case, the entire VMA is changing.
    if affected_vma.region == protect_region {
        // Update the hardware mappings first: if this fails we restore
        // the original VMA and the tree stays consistent.
        if let Err(e) = self
            .address_space
            .protect_range(protect_region, new_perms.into())
        {
            self.insert_and_merge(affected_vma);
            return Err(e);
        }
        let mut new_vma = affected_vma;
        new_vma.permissions = new_perms;
        self.insert_and_merge(new_vma);
        return Ok(());
    }

    // Next case, a sub-region of a VMA is changing, requiring a split.
    if affected_vma.region.contains(protect_region) {
        // As above: update the page tables before mutating the tree so
        // a failure cannot leave fragments half-inserted.
        if let Err(e) = self
            .address_space
            .protect_range(protect_region, new_perms.into())
        {
            self.insert_and_merge(affected_vma);
            return Err(e);
        }

        let (left, right) = affected_vma.region.punch_hole(protect_region);
        let mut new_vma = affected_vma.clone().shrink_to(protect_region);
        new_vma.permissions = new_perms;

        if let Some(left) = left {
            self.insert_and_merge(affected_vma.shrink_to(left));
        }
        self.insert_and_merge(new_vma);
        if let Some(right) = right {
            self.insert_and_merge(affected_vma.shrink_to(right));
        }
        return Ok(());
    }

    // TODO: protecting over contiguous VMAreas.
    // Unsupported for now: restore the VMA we removed above before
    // reporting failure, otherwise the mapping would be lost.
    self.insert_and_merge(affected_vma);
    Err(KernelError::NoMemory)
}
/// Checks if a given virtual memory region is completely free.
fn is_region_free(&self, region: VirtMemoryRegion) -> bool {
// Find the VMA that might overlap with the start of our desired region.

View File

@@ -153,6 +153,37 @@ fn assert_vma_exists(pvm: &MemoryMap<MockAddressSpace>, start: usize, size: usiz
assert_eq!(vma.region.size(), size, "VMA size mismatch");
}
/// Asserts that the VMA covering address `start` carries exactly `perms`.
fn assert_vma_perms(pvm: &MemoryMap<MockAddressSpace>, start: usize, perms: VMAPermissions) {
    match pvm.find_vma(VA::from_value(start)) {
        Some(vma) => assert_eq!(
            vma.permissions(),
            perms,
            "VMA permissions mismatch at {:#x}",
            start
        ),
        None => panic!("VMA not found for permission check"),
    }
}
/// Asserts that the mock page-table operations log contains a
/// `ProtectRange` entry matching `expected_region` and `expected_perms`.
fn assert_ops_log_protect(
    pvm: &MemoryMap<MockAddressSpace>,
    expected_region: VirtMemoryRegion,
    expected_perms: VMAPermissions,
) {
    let ops = pvm.address_space.ops_log.lock().unwrap();
    let hit = ops.iter().any(|op| {
        if let MockPageTableOp::ProtectRange { region, perms } = op {
            *region == expected_region && *perms == expected_perms.into()
        } else {
            false
        }
    });
    assert!(
        hit,
        "Did not find ProtectRange op for {:?} with {:?}",
        expected_region, expected_perms
    );
}
#[test]
fn test_mmap_any_empty() {
let mut pvm: MemoryMap<MockAddressSpace> = MemoryMap::new().unwrap();
@@ -685,3 +716,179 @@ fn test_munmap_over_multiple_vmas() {
]
);
}
#[test]
fn mprotect_full_vma() {
    let mut mm: MemoryMap<MockAddressSpace> = MemoryMap::new().unwrap();
    let base = MMAP_BASE - 4 * PAGE_SIZE;
    let len = 4 * PAGE_SIZE;
    mm.insert_and_merge(create_anon_vma(base, len, VMAPermissions::rw()));

    // Downgrade the whole mapping to read-only in one call.
    let target = VirtMemoryRegion::new(VA::from_value(base), len);
    mm.mprotect(target, VMAPermissions::ro()).unwrap();

    // No split should have occurred: a single read-only VMA remains.
    assert_eq!(mm.vmas.len(), 1);
    assert_vma_exists(&mm, base, len);
    assert_vma_perms(&mm, base, VMAPermissions::ro());
    assert_ops_log_protect(&mm, target, VMAPermissions::ro());
}
#[test]
fn test_mprotect_split_middle() {
    let mut mm: MemoryMap<MockAddressSpace> = MemoryMap::new().unwrap();
    let base = 0x10000;
    let len = 3 * PAGE_SIZE; // pages at 0x10000, 0x11000, 0x12000
    mm.insert_and_merge(create_anon_vma(base, len, VMAPermissions::rw()));

    // Re-protect only the middle page; the VMA must split in three.
    let mid = base + PAGE_SIZE;
    let target = VirtMemoryRegion::new(VA::from_value(mid), PAGE_SIZE);
    mm.mprotect(target, VMAPermissions::ro()).unwrap();

    // Resulting layout: RW | RO | RW.
    assert_eq!(mm.vmas.len(), 3);
    assert_vma_exists(&mm, base, PAGE_SIZE);
    assert_vma_perms(&mm, base, VMAPermissions::rw());
    assert_vma_exists(&mm, mid, PAGE_SIZE);
    assert_vma_perms(&mm, mid, VMAPermissions::ro());
    let tail = base + 2 * PAGE_SIZE;
    assert_vma_exists(&mm, tail, PAGE_SIZE);
    assert_vma_perms(&mm, tail, VMAPermissions::rw());
    assert_ops_log_protect(&mm, target, VMAPermissions::ro());
}
#[test]
fn test_mprotect_split_start() {
    let mut mm: MemoryMap<MockAddressSpace> = MemoryMap::new().unwrap();
    let base = 0x20000;
    mm.insert_and_merge(create_anon_vma(base, 2 * PAGE_SIZE, VMAPermissions::rw()));

    // Re-protect only the first page of the two-page mapping.
    let target = VirtMemoryRegion::new(VA::from_value(base), PAGE_SIZE);
    mm.mprotect(target, VMAPermissions::ro()).unwrap();

    // Resulting layout: RO | RW.
    assert_eq!(mm.vmas.len(), 2);
    assert_vma_exists(&mm, base, PAGE_SIZE);
    assert_vma_perms(&mm, base, VMAPermissions::ro());
    assert_vma_exists(&mm, base + PAGE_SIZE, PAGE_SIZE);
    assert_vma_perms(&mm, base + PAGE_SIZE, VMAPermissions::rw());
    assert_ops_log_protect(&mm, target, VMAPermissions::ro());
}
#[test]
fn test_mprotect_split_end() {
    let mut mm: MemoryMap<MockAddressSpace> = MemoryMap::new().unwrap();
    let base = 0x30000;
    mm.insert_and_merge(create_anon_vma(base, 2 * PAGE_SIZE, VMAPermissions::rw()));

    // Re-protect only the last page of the two-page mapping.
    let second = base + PAGE_SIZE;
    let target = VirtMemoryRegion::new(VA::from_value(second), PAGE_SIZE);
    mm.mprotect(target, VMAPermissions::ro()).unwrap();

    // Resulting layout: RW | RO.
    assert_eq!(mm.vmas.len(), 2);
    assert_vma_exists(&mm, base, PAGE_SIZE);
    assert_vma_perms(&mm, base, VMAPermissions::rw());
    assert_vma_exists(&mm, second, PAGE_SIZE);
    assert_vma_perms(&mm, second, VMAPermissions::ro());
    assert_ops_log_protect(&mm, target, VMAPermissions::ro());
}
#[test]
fn test_mprotect_file_backed_split() {
    let mut mm: MemoryMap<MockAddressSpace> = MemoryMap::new().unwrap();
    let base = 0x40000;
    let len = 3 * PAGE_SIZE;
    let file_offset = 0x1000;
    let inode = Arc::new(DummyTestInode);

    // Map [0x40000, 0x43000) backed by a file starting at offset 0x1000.
    mm.insert_and_merge(create_file_vma(
        base,
        len,
        VMAPermissions::rw(),
        file_offset,
        inode.clone(),
    ));

    // Re-protect just the middle page [0x41000, 0x42000).
    let target = VirtMemoryRegion::new(VA::from_value(base + PAGE_SIZE), PAGE_SIZE);
    mm.mprotect(target, VMAPermissions::ro()).unwrap();

    // Left fragment keeps the original offset: base 0x40000, offset 0x1000.
    let left = mm.find_vma(VA::from_value(base)).unwrap();
    match &left.kind {
        VMAreaKind::File(f) => {
            assert_eq!(f.offset, 0x1000);
            assert_eq!(f.len, PAGE_SIZE as u64);
        }
        _ => panic!("Left VMA lost file backing"),
    }

    // Middle fragment is advanced one page: offset 0x2000 (0x1000 + 0x1000).
    let middle = mm.find_vma(VA::from_value(base + PAGE_SIZE)).unwrap();
    assert_eq!(middle.permissions(), VMAPermissions::ro());
    match &middle.kind {
        VMAreaKind::File(f) => {
            assert_eq!(f.offset, 0x2000);
            assert_eq!(f.len, PAGE_SIZE as u64);
        }
        _ => panic!("Middle VMA lost file backing"),
    }

    // Right fragment is advanced two pages: offset 0x3000 (0x1000 + 0x2000).
    let right = mm.find_vma(VA::from_value(base + 2 * PAGE_SIZE)).unwrap();
    match &right.kind {
        VMAreaKind::File(f) => {
            assert_eq!(f.offset, 0x3000);
            assert_eq!(f.len, PAGE_SIZE as u64);
        }
        _ => panic!("Right VMA lost file backing"),
    }

    assert_ops_log_protect(&mm, target, VMAPermissions::ro());
}
#[test]
fn test_mprotect_merge_restoration() {
    // A permissions split followed by restoring the original permissions
    // should collapse back into a single VMA.
    let mut mm: MemoryMap<MockAddressSpace> = MemoryMap::new().unwrap();
    let base = 0x50000;
    let len = 2 * PAGE_SIZE;
    mm.insert_and_merge(create_anon_vma(base, len, VMAPermissions::rw()));

    // Split: the first page goes read-only.
    let head = VirtMemoryRegion::new(VA::from_value(base), PAGE_SIZE);
    mm.mprotect(head, VMAPermissions::ro()).unwrap();
    assert_eq!(mm.vmas.len(), 2);

    // Restore the first page to read-write; the halves should merge.
    mm.mprotect(head, VMAPermissions::rw()).unwrap();
    assert_eq!(
        mm.vmas.len(),
        1,
        "VMAs failed to merge back after permissions restored"
    );
    assert_vma_exists(&mm, base, len);
    assert_vma_perms(&mm, base, VMAPermissions::rw());
}

View File

@@ -1,5 +1,6 @@
use crate::kernel::power::sys_reboot;
use crate::kernel::rand::sys_getrandom;
use crate::memory::mmap::sys_mprotect;
use crate::{
arch::{Arch, ArchImpl},
clock::{gettime::sys_clock_gettime, timeofday::sys_gettimeofday},
@@ -248,6 +249,7 @@ pub async fn handle_syscall() {
.await
}
0xde => sys_mmap(arg1, arg2, arg3, arg4, arg5.into(), arg6).await,
0xe2 => sys_mprotect(VA::from_value(arg1 as _), arg2 as _, arg3 as _),
0x104 => {
sys_wait4(
arg1.cast_signed() as _,

View File

@@ -29,6 +29,14 @@ const MAP_ANONYMOUS: u64 = 0x0020;
/// MAP_FIXED{,_NOREPLACE}.
static MMAP_MIN_ADDR: AtomicUsize = AtomicUsize::new(0x1000);
/// Translates a userspace `PROT_*` bitmask into kernel `VMAPermissions`.
fn prot_to_perms(prot: u64) -> VMAPermissions {
    let has = |bit: u64| prot & bit != 0;
    VMAPermissions {
        read: has(PROT_READ),
        write: has(PROT_WRITE),
        execute: has(PROT_EXEC),
    }
}
/// Handles the `mmap` system call.
///
/// # Arguments
@@ -74,11 +82,7 @@ pub async fn sys_mmap(
return Err(KernelError::InvalidValue);
}
let permissions = VMAPermissions {
read: (prot & PROT_READ) != 0,
write: (prot & PROT_WRITE) != 0,
execute: (prot & PROT_EXEC) != 0,
};
let permissions = prot_to_perms(prot);
let requested_len = len as usize;
@@ -137,3 +141,16 @@ pub async fn sys_munmap(addr: VA, len: usize) -> Result<usize> {
Ok(0)
}
/// Handles the `mprotect` system call.
///
/// # Arguments
///
/// * `addr` - Start address of the region whose protection is changing.
/// * `len` - Length of the region in bytes.
/// * `prot` - `PROT_*` bitmask describing the new permissions.
///
/// Returns `0` on success; validation of the region (alignment,
/// non-zero size, coverage by a VMA) is performed by `MemoryMap::mprotect`.
pub fn sys_mprotect(addr: VA, len: usize, prot: u64) -> Result<usize> {
    let region = VirtMemoryRegion::new(addr, len);
    let perms = prot_to_perms(prot);
    let task = current_task();
    let mut vm = task.vm.lock_save_irq();
    vm.mm_mut().mprotect(region, perms)?;
    Ok(0)
}