sched: eliminate CUR_TASK_PTR

Replace `CUR_TASK_PTR` with `ProcessCtx`. This allows differentiation
between functions that access process context (take in `ProcessCtx` as a
parameter) and those that don't.

When creating a new class of schedulable tasks (softirqs, kthreads),
this ensures that those functions cannot call context-sensitive
functions.
This commit is contained in:
Matthew Leach
2026-03-17 06:02:14 +00:00
parent 3d20e28c4f
commit aaac9ffd2c
90 changed files with 1101 additions and 775 deletions

View File

@@ -7,7 +7,7 @@ use crate::{
interrupts::get_interrupt_root,
ksym_pa,
memory::PAGE_ALLOC,
sched::{current::current_task, uspc_ret::dispatch_userspace_task},
sched::{syscall_ctx::ProcessCtx, uspc_ret::dispatch_userspace_task},
spawn_kernel_work,
};
use aarch64_cpu::registers::{CPACR_EL1, ReadWriteable, VBAR_EL1};
@@ -148,7 +148,11 @@ extern "C" fn el1_serror_spx(state: &mut ExceptionState) {
#[unsafe(no_mangle)]
extern "C" fn el0_sync(state_ptr: *mut ExceptionState) -> *const ExceptionState {
current_task().ctx.save_user_ctx(state_ptr);
// SAFETY: Since we've just entered from EL0, there *cannot* be another
// syscall currently running for this task, therefore exclusive access to
// `OwnedTask` is guaranteed.
let mut ctx = unsafe { ProcessCtx::from_current() };
ctx.task_mut().ctx.save_user_ctx(state_ptr);
let state = unsafe { state_ptr.as_ref().unwrap() };
@@ -158,10 +162,14 @@ extern "C" fn el0_sync(state_ptr: *mut ExceptionState) -> *const ExceptionState
match exception {
Exception::InstrAbortLowerEL(info) | Exception::DataAbortLowerEL(info) => {
handle_mem_fault(exception, info);
handle_mem_fault(&mut ctx, exception, info);
}
Exception::SVC64(_) => {
spawn_kernel_work(handle_syscall());
// SAFETY: The other `ctx` won't be polled until
// `dispatch_userspace_task` at which point this variable will have
// gone out of scope.
let mut ctx2 = unsafe { ctx.clone() };
spawn_kernel_work(&mut ctx2, handle_syscall(ctx));
}
Exception::TrappedFP(_) => {
CPACR_EL1.modify(CPACR_EL1::FPEN::TrapNothing);
@@ -178,7 +186,11 @@ extern "C" fn el0_sync(state_ptr: *mut ExceptionState) -> *const ExceptionState
#[unsafe(no_mangle)]
extern "C" fn el0_irq(state: *mut ExceptionState) -> *mut ExceptionState {
current_task().ctx.save_user_ctx(state);
// SAFETY: Since we've just entered from EL0, there *cannot* be another
// syscall currently running for this task, therefore exclusive access to
// `OwnedTask` is guaranteed.
let mut ctx = unsafe { ProcessCtx::from_current() };
ctx.task_mut().ctx.save_user_ctx(state);
match get_interrupt_root() {
Some(ref im) => im.handle_interrupt(),

View File

@@ -97,7 +97,7 @@ use crate::{
},
threading::{futex::sys_futex, sys_set_robust_list, sys_set_tid_address},
},
sched::{self, current::current_task, sched_task::state::TaskState, sys_sched_yield},
sched::{self, sched_task::state::TaskState, sys_sched_yield},
};
use alloc::boxed::Box;
use libkernel::{
@@ -105,16 +105,15 @@ use libkernel::{
memory::address::{TUA, UA, VA},
};
pub async fn handle_syscall() {
current_task().update_accounting(None);
current_task().in_syscall = true;
ptrace_stop(TracePoint::SyscallEntry).await;
use crate::sched::syscall_ctx::ProcessCtx;
pub async fn handle_syscall(mut ctx: ProcessCtx) {
ctx.task_mut().update_accounting(None);
ctx.task_mut().in_syscall = true;
ptrace_stop(&ctx, TracePoint::SyscallEntry).await;
let (nr, arg1, arg2, arg3, arg4, arg5, arg6) = {
let mut task = current_task();
let ctx = &mut task.ctx;
let state = ctx.user();
let state = ctx.task().ctx.user();
(
state.x[8] as u32,
@@ -130,6 +129,7 @@ pub async fn handle_syscall() {
let res = match nr {
0x5 => {
sys_setxattr(
&ctx,
TUA::from_value(arg1 as _),
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -140,6 +140,7 @@ pub async fn handle_syscall() {
}
0x6 => {
sys_lsetxattr(
&ctx,
TUA::from_value(arg1 as _),
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -150,6 +151,7 @@ pub async fn handle_syscall() {
}
0x7 => {
sys_fsetxattr(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -160,6 +162,7 @@ pub async fn handle_syscall() {
}
0x8 => {
sys_getxattr(
&ctx,
TUA::from_value(arg1 as _),
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -169,6 +172,7 @@ pub async fn handle_syscall() {
}
0x9 => {
sys_lgetxattr(
&ctx,
TUA::from_value(arg1 as _),
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -178,6 +182,7 @@ pub async fn handle_syscall() {
}
0xa => {
sys_fgetxattr(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -187,6 +192,7 @@ pub async fn handle_syscall() {
}
0xb => {
sys_listxattr(
&ctx,
TUA::from_value(arg1 as _),
TUA::from_value(arg2 as _),
arg3 as _,
@@ -195,26 +201,28 @@ pub async fn handle_syscall() {
}
0xc => {
sys_llistxattr(
&ctx,
TUA::from_value(arg1 as _),
TUA::from_value(arg2 as _),
arg3 as _,
)
.await
}
0xd => sys_flistxattr(arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0xe => sys_removexattr(TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0xf => sys_lremovexattr(TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0x10 => sys_fremovexattr(arg1.into(), TUA::from_value(arg2 as _)).await,
0x11 => sys_getcwd(TUA::from_value(arg1 as _), arg2 as _).await,
0x17 => sys_dup(arg1.into()),
0x18 => sys_dup3(arg1.into(), arg2.into(), arg3 as _),
0x19 => sys_fcntl(arg1.into(), arg2 as _, arg3 as _).await,
0x1d => sys_ioctl(arg1.into(), arg2 as _, arg3 as _).await,
0xd => sys_flistxattr(&ctx, arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0xe => sys_removexattr(&ctx, TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0xf => sys_lremovexattr(&ctx, TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0x10 => sys_fremovexattr(&ctx, arg1.into(), TUA::from_value(arg2 as _)).await,
0x11 => sys_getcwd(&ctx, TUA::from_value(arg1 as _), arg2 as _).await,
0x17 => sys_dup(&ctx, arg1.into()),
0x18 => sys_dup3(&ctx, arg1.into(), arg2.into(), arg3 as _),
0x19 => sys_fcntl(&ctx, arg1.into(), arg2 as _, arg3 as _).await,
0x1d => sys_ioctl(&ctx, arg1.into(), arg2 as _, arg3 as _).await,
0x20 => Ok(0), // sys_flock is a noop
0x22 => sys_mkdirat(arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x23 => sys_unlinkat(arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x22 => sys_mkdirat(&ctx, arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x23 => sys_unlinkat(&ctx, arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x24 => {
sys_symlinkat(
&ctx,
TUA::from_value(arg1 as _),
arg2.into(),
TUA::from_value(arg3 as _),
@@ -223,6 +231,7 @@ pub async fn handle_syscall() {
}
0x25 => {
sys_linkat(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3.into(),
@@ -233,6 +242,7 @@ pub async fn handle_syscall() {
}
0x26 => {
sys_renameat(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3.into(),
@@ -240,17 +250,18 @@ pub async fn handle_syscall() {
)
.await
}
0x2b => sys_statfs(TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0x2c => sys_fstatfs(arg1.into(), TUA::from_value(arg2 as _)).await,
0x2d => sys_truncate(TUA::from_value(arg1 as _), arg2 as _).await,
0x2e => sys_ftruncate(arg1.into(), arg2 as _).await,
0x30 => sys_faccessat(arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x31 => sys_chdir(TUA::from_value(arg1 as _)).await,
0x32 => sys_fchdir(arg1.into()).await,
0x33 => sys_chroot(TUA::from_value(arg1 as _)).await,
0x34 => sys_fchmod(arg1.into(), arg2 as _).await,
0x2b => sys_statfs(&ctx, TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0x2c => sys_fstatfs(&ctx, arg1.into(), TUA::from_value(arg2 as _)).await,
0x2d => sys_truncate(&ctx, TUA::from_value(arg1 as _), arg2 as _).await,
0x2e => sys_ftruncate(&ctx, arg1.into(), arg2 as _).await,
0x30 => sys_faccessat(&ctx, arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x31 => sys_chdir(&ctx, TUA::from_value(arg1 as _)).await,
0x32 => sys_fchdir(&ctx, arg1.into()).await,
0x33 => sys_chroot(&ctx, TUA::from_value(arg1 as _)).await,
0x34 => sys_fchmod(&ctx, arg1.into(), arg2 as _).await,
0x35 => {
sys_fchmodat(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3 as _,
@@ -260,6 +271,7 @@ pub async fn handle_syscall() {
}
0x36 => {
sys_fchownat(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3 as _,
@@ -268,9 +280,10 @@ pub async fn handle_syscall() {
)
.await
}
0x37 => sys_fchown(arg1.into(), arg2 as _, arg3 as _).await,
0x37 => sys_fchown(&ctx, arg1.into(), arg2 as _, arg3 as _).await,
0x38 => {
sys_openat(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3 as _,
@@ -278,16 +291,17 @@ pub async fn handle_syscall() {
)
.await
}
0x39 => sys_close(arg1.into()).await,
0x3b => sys_pipe2(TUA::from_value(arg1 as _), arg2 as _).await,
0x3d => sys_getdents64(arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x3e => sys_lseek(arg1.into(), arg2 as _, arg3 as _).await,
0x3f => sys_read(arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x40 => sys_write(arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x41 => sys_readv(arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x42 => sys_writev(arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x39 => sys_close(&ctx, arg1.into()).await,
0x3b => sys_pipe2(&ctx, TUA::from_value(arg1 as _), arg2 as _).await,
0x3d => sys_getdents64(&ctx, arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x3e => sys_lseek(&ctx, arg1.into(), arg2 as _, arg3 as _).await,
0x3f => sys_read(&ctx, arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x40 => sys_write(&ctx, arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x41 => sys_readv(&ctx, arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x42 => sys_writev(&ctx, arg1.into(), TUA::from_value(arg2 as _), arg3 as _).await,
0x43 => {
sys_pread64(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3 as _,
@@ -297,6 +311,7 @@ pub async fn handle_syscall() {
}
0x44 => {
sys_pwrite64(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3 as _,
@@ -306,6 +321,7 @@ pub async fn handle_syscall() {
}
0x45 => {
sys_preadv(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3 as _,
@@ -315,6 +331,7 @@ pub async fn handle_syscall() {
}
0x46 => {
sys_pwritev(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3 as _,
@@ -324,6 +341,7 @@ pub async fn handle_syscall() {
}
0x47 => {
sys_sendfile(
&ctx,
arg1.into(),
arg2.into(),
TUA::from_value(arg3 as _),
@@ -333,6 +351,7 @@ pub async fn handle_syscall() {
}
0x48 => {
sys_pselect6(
&ctx,
arg1 as _,
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -344,6 +363,7 @@ pub async fn handle_syscall() {
}
0x49 => {
sys_ppoll(
&ctx,
TUA::from_value(arg1 as _),
arg2 as _,
TUA::from_value(arg3 as _),
@@ -354,6 +374,7 @@ pub async fn handle_syscall() {
}
0x4e => {
sys_readlinkat(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -363,6 +384,7 @@ pub async fn handle_syscall() {
}
0x4f => {
sys_newfstatat(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -370,12 +392,13 @@ pub async fn handle_syscall() {
)
.await
}
0x50 => sys_fstat(arg1.into(), TUA::from_value(arg2 as _)).await,
0x51 => sys_sync().await,
0x52 => sys_fsync(arg1.into()).await,
0x53 => sys_fdatasync(arg1.into()).await,
0x50 => sys_fstat(&ctx, arg1.into(), TUA::from_value(arg2 as _)).await,
0x51 => sys_sync(&ctx).await,
0x52 => sys_fsync(&ctx, arg1.into()).await,
0x53 => sys_fdatasync(&ctx, arg1.into()).await,
0x58 => {
sys_utimensat(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -383,10 +406,10 @@ pub async fn handle_syscall() {
)
.await
}
0x5a => sys_capget(TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0x5b => sys_capset(TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0x5a => sys_capget(&ctx, TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0x5b => sys_capset(&ctx, TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0x5d => {
let _ = sys_exit(arg1 as _).await;
let _ = sys_exit(&mut ctx, arg1 as _).await;
debug_assert!(
sched::current_work()
@@ -399,7 +422,7 @@ pub async fn handle_syscall() {
return;
}
0x5e => {
let _ = sys_exit_group(arg1 as _).await;
let _ = sys_exit_group(&ctx, arg1 as _).await;
debug_assert!(
sched::current_work()
@@ -413,6 +436,7 @@ pub async fn handle_syscall() {
}
0x5f => {
sys_waitid(
&ctx,
arg1 as _,
arg2 as _,
TUA::from_value(arg3 as _),
@@ -421,9 +445,10 @@ pub async fn handle_syscall() {
)
.await
}
0x60 => sys_set_tid_address(TUA::from_value(arg1 as _)),
0x60 => sys_set_tid_address(&mut ctx, TUA::from_value(arg1 as _)),
0x62 => {
sys_futex(
&ctx,
TUA::from_value(arg1 as _),
arg2 as _,
arg3 as _,
@@ -433,10 +458,10 @@ pub async fn handle_syscall() {
)
.await
}
0x63 => sys_set_robust_list(TUA::from_value(arg1 as _), arg2 as _).await,
0x63 => sys_set_robust_list(&mut ctx, TUA::from_value(arg1 as _), arg2 as _).await,
0x65 => sys_nanosleep(TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0x70 => sys_clock_settime(arg1 as _, TUA::from_value(arg2 as _)).await,
0x71 => sys_clock_gettime(arg1 as _, TUA::from_value(arg2 as _)).await,
0x71 => sys_clock_gettime(&ctx, arg1 as _, TUA::from_value(arg2 as _)).await,
0x73 => {
sys_clock_nanosleep(
arg1 as _,
@@ -448,6 +473,7 @@ pub async fn handle_syscall() {
}
0x75 => {
sys_ptrace(
&ctx,
arg1 as _,
arg2 as _,
TUA::from_value(arg3 as _),
@@ -457,11 +483,12 @@ pub async fn handle_syscall() {
}
0x7b => Err(KernelError::NotSupported),
0x7c => sys_sched_yield(),
0x81 => sys_kill(arg1 as _, arg2.into()),
0x82 => sys_tkill(arg1 as _, arg2.into()),
0x84 => sys_sigaltstack(TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0x81 => sys_kill(&ctx, arg1 as _, arg2.into()),
0x82 => sys_tkill(&ctx, arg1 as _, arg2.into()),
0x84 => sys_sigaltstack(&ctx, TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0x86 => {
sys_rt_sigaction(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -471,6 +498,7 @@ pub async fn handle_syscall() {
}
0x87 => {
sys_rt_sigprocmask(
&mut ctx,
arg1 as _,
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -480,15 +508,21 @@ pub async fn handle_syscall() {
}
0x8b => {
// Special case for sys_rt_sigreturn
current_task()
//
// SAFETY: Signal work will only be polled once this kernel work has
// returned. Therefore there will be no concurrent accesses of the
// ctx.
let ctx2 = unsafe { ctx.clone() };
ctx.task_mut()
.ctx
.put_signal_work(Box::pin(ArchImpl::do_signal_return()));
.put_signal_work(Box::pin(ArchImpl::do_signal_return(ctx2)));
return;
}
0x8e => sys_reboot(arg1 as _, arg2 as _, arg3 as _, arg4 as _).await,
0x8e => sys_reboot(&ctx, arg1 as _, arg2 as _, arg3 as _, arg4 as _).await,
0x94 => {
sys_getresuid(
&ctx,
TUA::from_value(arg1 as _),
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -497,47 +531,50 @@ pub async fn handle_syscall() {
}
0x96 => {
sys_getresgid(
&ctx,
TUA::from_value(arg1 as _),
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
)
.await
}
0x97 => sys_setfsuid(arg1 as _).map_err(|e| match e {}),
0x98 => sys_setfsgid(arg1 as _).map_err(|e| match e {}),
0x9a => sys_setpgid(arg1 as _, Pgid(arg2 as _)),
0x9b => sys_getpgid(arg1 as _),
0x9c => sys_getsid().await,
0x9d => sys_setsid().await,
0x97 => sys_setfsuid(&ctx, arg1 as _).map_err(|e| match e {}),
0x98 => sys_setfsgid(&ctx, arg1 as _).map_err(|e| match e {}),
0x9a => sys_setpgid(&ctx, arg1 as _, Pgid(arg2 as _)),
0x9b => sys_getpgid(&ctx, arg1 as _),
0x9c => sys_getsid(&ctx).await,
0x9d => sys_setsid(&ctx).await,
0xa0 => sys_uname(TUA::from_value(arg1 as _)).await,
0xa1 => sys_sethostname(TUA::from_value(arg1 as _), arg2 as _).await,
0xa1 => sys_sethostname(&ctx, TUA::from_value(arg1 as _), arg2 as _).await,
0xa3 => Err(KernelError::InvalidValue),
0xa6 => sys_umask(arg1 as _).map_err(|e| match e {}),
0xa7 => sys_prctl(arg1 as _, arg2, arg3).await,
0xa6 => sys_umask(&ctx, arg1 as _).map_err(|e| match e {}),
0xa7 => sys_prctl(&ctx, arg1 as _, arg2, arg3).await,
0xa9 => sys_gettimeofday(TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0xaa => sys_settimeofday(TUA::from_value(arg1 as _), TUA::from_value(arg2 as _)).await,
0xac => sys_getpid().map_err(|e| match e {}),
0xad => sys_getppid().map_err(|e| match e {}),
0xae => sys_getuid().map_err(|e| match e {}),
0xaf => sys_geteuid().map_err(|e| match e {}),
0xb0 => sys_getgid().map_err(|e| match e {}),
0xb1 => sys_getegid().map_err(|e| match e {}),
0xb2 => sys_gettid().map_err(|e| match e {}),
0xac => sys_getpid(&ctx).map_err(|e| match e {}),
0xad => sys_getppid(&ctx).map_err(|e| match e {}),
0xae => sys_getuid(&ctx).map_err(|e| match e {}),
0xaf => sys_geteuid(&ctx).map_err(|e| match e {}),
0xb0 => sys_getgid(&ctx).map_err(|e| match e {}),
0xb1 => sys_getegid(&ctx).map_err(|e| match e {}),
0xb2 => sys_gettid(&ctx).map_err(|e| match e {}),
0xb3 => sys_sysinfo(TUA::from_value(arg1 as _)).await,
0xc6 => sys_socket(arg1 as _, arg2 as _, arg3 as _).await,
0xc8 => sys_bind(arg1.into(), UA::from_value(arg2 as _), arg3 as _).await,
0xc9 => sys_listen(arg1.into(), arg2 as _).await,
0xc6 => sys_socket(&ctx, arg1 as _, arg2 as _, arg3 as _).await,
0xc8 => sys_bind(&ctx, arg1.into(), UA::from_value(arg2 as _), arg3 as _).await,
0xc9 => sys_listen(&ctx, arg1.into(), arg2 as _).await,
0xca => {
sys_accept(
&ctx,
arg1.into(),
UA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
)
.await
}
0xcb => sys_connect(arg1.into(), UA::from_value(arg2 as _), arg3 as _).await,
0xcb => sys_connect(&ctx, arg1.into(), UA::from_value(arg2 as _), arg3 as _).await,
0xce => {
sys_sendto(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3 as _,
@@ -549,6 +586,7 @@ pub async fn handle_syscall() {
}
0xcf => {
sys_recvfrom(
&ctx,
arg1.into(),
UA::from_value(arg2 as _),
arg3 as _,
@@ -558,13 +596,14 @@ pub async fn handle_syscall() {
)
.await
}
0xd2 => sys_shutdown(arg1.into(), arg2 as _).await,
0xd6 => sys_brk(VA::from_value(arg1 as _))
0xd2 => sys_shutdown(&ctx, arg1.into(), arg2 as _).await,
0xd6 => sys_brk(&ctx, VA::from_value(arg1 as _))
.await
.map_err(|e| match e {}),
0xd7 => sys_munmap(VA::from_value(arg1 as usize), arg2 as _).await,
0xd7 => sys_munmap(&ctx, VA::from_value(arg1 as usize), arg2 as _).await,
0xdc => {
sys_clone(
&ctx,
arg1 as _,
UA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -575,19 +614,21 @@ pub async fn handle_syscall() {
}
0xdd => {
sys_execve(
&mut ctx,
TUA::from_value(arg1 as _),
TUA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
)
.await
}
0xde => sys_mmap(arg1, arg2, arg3, arg4, arg5.into(), arg6).await,
0xde => sys_mmap(&ctx, arg1, arg2, arg3, arg4, arg5.into(), arg6).await,
0xdf => Ok(0), // fadvise64_64 is a no-op
0xe2 => sys_mprotect(VA::from_value(arg1 as _), arg2 as _, arg3 as _),
0xe8 => sys_mincore(arg1, arg2 as _, TUA::from_value(arg3 as _)).await,
0xe2 => sys_mprotect(&ctx, VA::from_value(arg1 as _), arg2 as _, arg3 as _),
0xe8 => sys_mincore(&ctx, arg1, arg2 as _, TUA::from_value(arg3 as _)).await,
0xe9 => Ok(0), // sys_madvise is a no-op
0xf2 => {
sys_accept4(
&ctx,
arg1.into(),
UA::from_value(arg2 as _),
TUA::from_value(arg3 as _),
@@ -597,6 +638,7 @@ pub async fn handle_syscall() {
}
0x104 => {
sys_wait4(
&ctx,
arg1.cast_signed() as _,
TUA::from_value(arg2 as _),
arg3 as _,
@@ -606,6 +648,7 @@ pub async fn handle_syscall() {
}
0x105 => {
sys_prlimit64(
&ctx,
arg1 as _,
arg2 as _,
TUA::from_value(arg3 as _),
@@ -615,7 +658,7 @@ pub async fn handle_syscall() {
}
0x108 => sys_name_to_handle_at(),
0x109 => Err(KernelError::NotSupported),
0x10b => sys_syncfs(arg1.into()).await,
0x10b => sys_syncfs(&ctx, arg1.into()).await,
0x10e => {
sys_process_vm_readv(
arg1 as _,
@@ -629,6 +672,7 @@ pub async fn handle_syscall() {
}
0x114 => {
sys_renameat2(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3.into(),
@@ -640,6 +684,7 @@ pub async fn handle_syscall() {
0x116 => sys_getrandom(TUA::from_value(arg1 as _), arg2 as _, arg3 as _).await,
0x11d => {
sys_copy_file_range(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3.into(),
@@ -651,6 +696,7 @@ pub async fn handle_syscall() {
}
0x11e => {
sys_preadv2(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3 as _,
@@ -661,6 +707,7 @@ pub async fn handle_syscall() {
}
0x11f => {
sys_pwritev2(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3 as _,
@@ -671,6 +718,7 @@ pub async fn handle_syscall() {
}
0x123 => {
sys_statx(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3 as _,
@@ -680,9 +728,10 @@ pub async fn handle_syscall() {
.await
}
0x125 => Err(KernelError::NotSupported),
0x1b4 => sys_close_range(arg1.into(), arg2.into(), arg3 as _).await,
0x1b4 => sys_close_range(&ctx, arg1.into(), arg2.into(), arg3 as _).await,
0x1b7 => {
sys_faccessat2(
&ctx,
arg1.into(),
TUA::from_value(arg2 as _),
arg3 as _,
@@ -693,7 +742,7 @@ pub async fn handle_syscall() {
0x1b8 => Ok(0), // process_madvise is a no-op
_ => panic!(
"Unhandled syscall 0x{nr:x}, PC: 0x{:x}",
current_task().ctx.user().elr_el1
ctx.task().ctx.user().elr_el1
),
};
@@ -702,8 +751,8 @@ pub async fn handle_syscall() {
Err(e) => kern_err_to_syscall(e),
};
current_task().ctx.user_mut().x[0] = ret_val.cast_unsigned() as u64;
ptrace_stop(TracePoint::SyscallExit).await;
current_task().update_accounting(None);
current_task().in_syscall = false;
ctx.task_mut().ctx.user_mut().x[0] = ret_val.cast_unsigned() as u64;
ptrace_stop(&ctx, TracePoint::SyscallExit).await;
ctx.task_mut().update_accounting(None);
ctx.task_mut().in_syscall = false;
}

View File

@@ -10,10 +10,11 @@ use crate::{
memory::uaccess::UAccessResult,
},
memory::fault::{FaultResolution, handle_demand_fault, handle_protection_fault},
process::thread_group::signal::SigId,
sched::{current::current_task, spawn_kernel_work},
process::{ProcVM, thread_group::signal::SigId},
sched::{current_work, spawn_kernel_work, syscall_ctx::ProcessCtx},
sync::SpinLock,
};
use alloc::boxed::Box;
use alloc::{boxed::Box, sync::Arc};
use libkernel::{
UserAddressSpace,
error::Result,
@@ -37,20 +38,22 @@ impl FixupTable {
}
}
fn run_mem_fault_handler(exception: Exception, info: AbortIss) -> Result<FaultResolution> {
fn run_mem_fault_handler(
proc_vm: Arc<SpinLock<ProcVM>>,
exception: Exception,
info: AbortIss,
) -> Result<FaultResolution> {
let access_kind = determine_access_kind(exception, info);
if let Some(far) = info.far {
let fault_addr = VA::from_value(far as usize);
let task = current_task();
match info.ifsc.category() {
IfscCategory::TranslationFault => {
handle_demand_fault(task.vm.clone(), fault_addr, access_kind)
handle_demand_fault(proc_vm.clone(), fault_addr, access_kind)
}
IfscCategory::PermissionFault => {
let mut vm = task.vm.lock_save_irq();
let mut vm = proc_vm.lock_save_irq();
let pg_info = vm
.mm_mut()
@@ -68,7 +71,7 @@ fn run_mem_fault_handler(exception: Exception, info: AbortIss) -> Result<FaultRe
}
fn handle_uacess_abort(exception: Exception, info: AbortIss, state: &mut ExceptionState) {
match run_mem_fault_handler(exception, info) {
match run_mem_fault_handler(current_work().vm.clone(), exception, info) {
// We mapped in a page, the uaccess handler can proceed.
Ok(FaultResolution::Resolved) => (),
// If the fault couldn't be resolved, signal to the uaccess fixup that
@@ -117,16 +120,16 @@ pub fn handle_kernel_mem_fault(exception: Exception, info: AbortIss, state: &mut
}
}
pub fn handle_mem_fault(exception: Exception, info: AbortIss) {
match run_mem_fault_handler(exception, info) {
pub fn handle_mem_fault(ctx: &mut ProcessCtx, exception: Exception, info: AbortIss) {
match run_mem_fault_handler(ctx.shared().vm.clone(), exception, info) {
Ok(FaultResolution::Resolved) => {}
Ok(FaultResolution::Denied) => {
current_task().process.deliver_signal(SigId::SIGSEGV);
ctx.task().process.deliver_signal(SigId::SIGSEGV);
}
// If the page fault involves sleepy kernel work, we can
// spawn that work on the process, since there is no other
// kernel work happening.
Ok(FaultResolution::Deferred(fut)) => spawn_kernel_work(async {
Ok(FaultResolution::Deferred(fut)) => spawn_kernel_work(ctx, async {
if Box::into_pin(fut).await.is_err() {
panic!("Page fault defered error, SIGBUS on process");
}

View File

@@ -26,6 +26,7 @@ use crate::{
owned::OwnedTask,
thread_group::signal::{SigId, ksigaction::UserspaceSigAction},
},
sched::syscall_ctx::ProcessCtx,
sync::SpinLock,
};
@@ -101,14 +102,17 @@ impl Arch for Aarch64 {
}
fn do_signal(
ctx: ProcessCtx,
sig: SigId,
action: UserspaceSigAction,
) -> impl Future<Output = Result<<Self as Arch>::UserContext>> {
proc::signal::do_signal(sig, action)
proc::signal::do_signal(ctx, sig, action)
}
fn do_signal_return() -> impl Future<Output = Result<<Self as Arch>::UserContext>> {
proc::signal::do_signal_return()
fn do_signal_return(
ctx: ProcessCtx,
) -> impl Future<Output = Result<<Self as Arch>::UserContext>> {
proc::signal::do_signal_return(ctx)
}
fn context_switch(new: Arc<Task>) {

View File

@@ -5,7 +5,7 @@ use crate::{
process::thread_group::signal::{
SigId, ksigaction::UserspaceSigAction, sigaction::SigActionFlags,
},
sched::current::current_task,
sched::syscall_ctx::ProcessCtx,
};
use libkernel::{
error::Result,
@@ -26,8 +26,12 @@ struct RtSigFrame {
// information regarding this task's context and is made up of PoDs.
unsafe impl UserCopyable for RtSigFrame {}
pub async fn do_signal(id: SigId, sa: UserspaceSigAction) -> Result<ExceptionState> {
let task = current_task();
pub async fn do_signal(
ctx: ProcessCtx,
id: SigId,
sa: UserspaceSigAction,
) -> Result<ExceptionState> {
let task = ctx.task();
let mut signal = task.process.signals.lock_save_irq();
let saved_state = *task.ctx.user();
@@ -57,7 +61,6 @@ pub async fn do_signal(id: SigId, sa: UserspaceSigAction) -> Result<ExceptionSta
};
drop(signal);
drop(task);
copy_to_user(addr, frame).await?;
@@ -69,8 +72,8 @@ pub async fn do_signal(id: SigId, sa: UserspaceSigAction) -> Result<ExceptionSta
Ok(new_state)
}
pub async fn do_signal_return() -> Result<ExceptionState> {
let task = current_task();
pub async fn do_signal_return(ctx: ProcessCtx) -> Result<ExceptionState> {
let task = ctx.task();
let sig_frame_addr: TUA<RtSigFrame> = TUA::from_value(task.ctx.user().sp_el0 as _);

View File

@@ -15,6 +15,7 @@ use crate::{
owned::OwnedTask,
thread_group::signal::{SigId, ksigaction::UserspaceSigAction},
},
sched::syscall_ctx::ProcessCtx,
};
use alloc::string::String;
use alloc::sync::Arc;
@@ -59,12 +60,15 @@ pub trait Arch: CpuOps + VirtualMemory {
/// Call a user-specified signal handler in the current process.
fn do_signal(
ctx: ProcessCtx,
sig: SigId,
action: UserspaceSigAction,
) -> impl Future<Output = Result<<Self as Arch>::UserContext>>;
/// Return from a userspace signal handler.
fn do_signal_return() -> impl Future<Output = Result<<Self as Arch>::UserContext>>;
fn do_signal_return(
ctx: ProcessCtx,
) -> impl Future<Output = Result<<Self as Arch>::UserContext>>;
/// Copies a block of memory from userspace to the kernel.
///

View File

@@ -7,15 +7,19 @@ use libkernel::{
use super::{ClockId, realtime::date, timespec::TimeSpec};
use crate::drivers::timer::{Instant, now};
use crate::sched::current::current_task_shared;
use crate::sched::syscall_ctx::ProcessCtx;
use crate::{drivers::timer::uptime, memory::uaccess::copy_to_user};
pub async fn sys_clock_gettime(clockid: i32, time_spec: TUA<TimeSpec>) -> Result<usize> {
pub async fn sys_clock_gettime(
ctx: &ProcessCtx,
clockid: i32,
time_spec: TUA<TimeSpec>,
) -> Result<usize> {
let time = match ClockId::try_from(clockid).map_err(|_| KernelError::InvalidValue)? {
ClockId::Realtime => date(),
ClockId::Monotonic => uptime(),
ClockId::ProcessCpuTimeId => {
let task = current_task_shared();
let task = ctx.shared();
let total_time = task.process.stime.load(Ordering::Relaxed) as u64
+ task.process.utime.load(Ordering::Relaxed) as u64;
let last_update = Instant::from_user_normalized(
@@ -26,7 +30,7 @@ pub async fn sys_clock_gettime(clockid: i32, time_spec: TUA<TimeSpec>) -> Result
Duration::from(Instant::from_user_normalized(total_time)) + delta
}
ClockId::ThreadCpuTimeId => {
let task = current_task_shared();
let task = ctx.shared();
let total_time = task.stime.load(Ordering::Relaxed) as u64
+ task.utime.load(Ordering::Relaxed) as u64;
let last_update =

View File

@@ -6,7 +6,7 @@ use crate::{
fs::open_file::OpenFile,
kernel_driver,
process::fd_table::Fd,
sched::current::current_task,
sched::current_work,
};
use alloc::{string::ToString, sync::Arc};
use libkernel::{
@@ -21,11 +21,9 @@ struct TtyDev {}
impl OpenableDevice for TtyDev {
fn open(&self, _args: OpenFlags) -> Result<Arc<OpenFile>> {
let task = current_task();
// TODO: This should really open the controlling terminal of the
// session.
Ok(task
Ok(current_work()
.fd_table
.lock_save_irq()
.get(Fd(0))

View File

@@ -6,7 +6,7 @@ use crate::{
Pgid,
signal::{InterruptResult, Interruptable},
},
sched::current::current_task,
sched::current_work,
sync::SpinLock,
};
use alloc::{boxed::Box, sync::Arc};
@@ -191,7 +191,7 @@ impl FileOps for Tty {
.meta
.lock_save_irq()
.fg_pg
.unwrap_or_else(|| *current_task().process.pgid.lock_save_irq());
.unwrap_or_else(|| *current_work().process.pgid.lock_save_irq());
copy_to_user(TUA::from_value(argp), fg_pg).await?;

View File

@@ -7,7 +7,7 @@ use crate::kernel::rand::entropy_pool;
use crate::process::thread_group::Pgid;
use crate::process::thread_group::signal::SigId;
use crate::process::thread_group::signal::kill::send_signal_to_pg;
use crate::sched::current::current_task;
use crate::sched::current_work;
use crate::sync::{CondVar, SpinLock};
use alloc::{sync::Arc, vec::Vec};
use libkernel::error::Result;
@@ -80,7 +80,7 @@ impl TtyInputHandler for SpinLock<TtyInputCooker> {
let pgid: Pgid = {
let meta = this.meta.lock_save_irq();
meta.fg_pg
.unwrap_or_else(|| *current_task().process.pgid.lock_save_irq())
.unwrap_or_else(|| *current_work().process.pgid.lock_save_irq())
};
drop(this);

View File

@@ -4,7 +4,7 @@ use crate::drivers::fs::proc::meminfo::ProcMeminfoInode;
use crate::drivers::fs::proc::stat::ProcStatInode;
use crate::drivers::fs::proc::task::ProcTaskInode;
use crate::process::{TASK_LIST, TaskDescriptor, Tid};
use crate::sched::current::current_task;
use crate::sched::current_work;
use alloc::boxed::Box;
use alloc::string::ToString;
use alloc::sync::Arc;
@@ -40,13 +40,13 @@ impl Inode for ProcRootInode {
}
async fn lookup(&self, name: &str) -> error::Result<Arc<dyn Inode>> {
let current = current_work();
// Lookup a PID directory.
let desc = if name == "self" {
let current_task = current_task();
TaskDescriptor::from_tgid_tid(current_task.pgid(), Tid::from_tgid(current_task.pgid()))
TaskDescriptor::from_tgid_tid(current.pgid(), Tid::from_tgid(current.pgid()))
} else if name == "thread-self" {
let current_task = current_task();
current_task.descriptor()
current.descriptor()
} else if name == "stat" {
return Ok(Arc::new(ProcStatInode::new(
InodeId::from_fsid_and_inodeid(self.id.fs_id(), get_inode_id(&["stat"])),
@@ -102,12 +102,14 @@ impl Inode for ProcRootInode {
next_offset,
));
}
let current_task = current_task();
let current = current_work();
entries.push(Dirent::new(
"self".to_string(),
InodeId::from_fsid_and_inodeid(
PROCFS_ID,
get_inode_id(&[&current_task.descriptor().tgid().value().to_string()]),
get_inode_id(&[&current.descriptor().tgid().value().to_string()]),
),
FileType::Directory,
(entries.len() + 1) as u64,
@@ -116,7 +118,7 @@ impl Inode for ProcRootInode {
"thread-self".to_string(),
InodeId::from_fsid_and_inodeid(
PROCFS_ID,
get_inode_id(&[&current_task.descriptor().tid().value().to_string()]),
get_inode_id(&[&current.descriptor().tid().value().to_string()]),
),
FileType::Directory,
(entries.len() + 1) as u64,

View File

@@ -1,7 +1,7 @@
use crate::drivers::fs::proc::{get_inode_id, procfs};
use crate::process::fd_table::Fd;
use crate::process::{TaskDescriptor, find_task_by_descriptor};
use crate::sched::current::current_task_shared;
use crate::sched::current_work;
use alloc::borrow::ToOwned;
use alloc::boxed::Box;
use alloc::format;
@@ -55,7 +55,7 @@ impl Inode for ProcFdInode {
async fn lookup(&self, name: &str) -> Result<Arc<dyn Inode>> {
let fd: i32 = name.parse().map_err(|_| FsError::NotFound)?;
let task = current_task_shared();
let task = current_work();
let fd_table = task.fd_table.lock_save_irq();
if fd_table.get(Fd(fd)).is_none() {
return Err(FsError::NotFound.into());

View File

@@ -10,7 +10,7 @@ use libkernel::{
use ringbuf::Arc;
use crate::{
memory::uaccess::copy_to_user_slice, process::fd_table::Fd, sched::current::current_task_shared,
memory::uaccess::copy_to_user_slice, process::fd_table::Fd, sched::syscall_ctx::ProcessCtx,
};
use super::{fops::FileOps, open_file::FileCtx};
@@ -133,8 +133,8 @@ struct Dirent64Hdr {
_kind: DirentFileType,
}
pub async fn sys_getdents64(fd: Fd, mut ubuf: UA, size: u32) -> Result<usize> {
let task = current_task_shared();
pub async fn sys_getdents64(ctx: &ProcessCtx, fd: Fd, mut ubuf: UA, size: u32) -> Result<usize> {
let task = ctx.shared().clone();
let file = task
.fd_table
.lock_save_irq()

View File

@@ -6,7 +6,7 @@ use crate::{
fd_table::Fd,
thread_group::signal::{InterruptResult, Interruptable, SigId},
},
sched::current::current_task,
sched::{current_work, syscall_ctx::ProcessCtx},
sync::CondVar,
};
use alloc::{boxed::Box, sync::Arc};
@@ -180,7 +180,7 @@ impl PipeWriter {
// buffer. There's no point writing data if there's no consumer!
if gone_fut.as_mut().poll(cx).is_ready() {
// Other side of the pipe has been closed.
current_task().raise_task_signal(SigId::SIGPIPE);
current_work().process.deliver_signal(SigId::SIGPIPE);
Poll::Ready(Err(KernelError::BrokenPipe))
} else if let Poll::Ready(x) = write_fut.as_mut().poll(cx) {
Poll::Ready(x)
@@ -240,7 +240,7 @@ impl Drop for PipeWriter {
}
}
pub async fn sys_pipe2(fds: TUA<[Fd; 2]>, flags: u32) -> Result<usize> {
pub async fn sys_pipe2(ctx: &ProcessCtx, fds: TUA<[Fd; 2]>, flags: u32) -> Result<usize> {
let flags = OpenFlags::from_bits_retain(flags);
let kbuf = KPipe::new()?;
@@ -259,12 +259,10 @@ pub async fn sys_pipe2(fds: TUA<[Fd; 2]>, flags: u32) -> Result<usize> {
let (read_fd, write_fd) = {
static INODE_ID: AtomicU64 = AtomicU64::new(0);
let task = current_task();
let mut fds = task.fd_table.lock_save_irq();
let mut fds = ctx.task().fd_table.lock_save_irq();
let inode = {
let creds = task.creds.lock_save_irq();
let creds = ctx.task().creds.lock_save_irq();
Arc::new(PipeInode {
id: InodeId::from_fsid_and_inodeid(0xf, INODE_ID.fetch_add(1, Ordering::Relaxed)),
time: date(),

View File

@@ -1,7 +1,7 @@
use super::{AtFlags, resolve_at_start_node};
use crate::{
fs::syscalls::at::resolve_path_flags, memory::uaccess::cstr::UserCStr, process::fd_table::Fd,
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
use core::ffi::c_char;
use libkernel::{
@@ -10,18 +10,29 @@ use libkernel::{
memory::address::TUA,
};
pub async fn sys_faccessat(dirfd: Fd, path: TUA<c_char>, mode: i32) -> Result<usize> {
sys_faccessat2(dirfd, path, mode, 0).await
pub async fn sys_faccessat(
ctx: &ProcessCtx,
dirfd: Fd,
path: TUA<c_char>,
mode: i32,
) -> Result<usize> {
sys_faccessat2(ctx, dirfd, path, mode, 0).await
}
pub async fn sys_faccessat2(dirfd: Fd, path: TUA<c_char>, mode: i32, flags: i32) -> Result<usize> {
pub async fn sys_faccessat2(
ctx: &ProcessCtx,
dirfd: Fd,
path: TUA<c_char>,
mode: i32,
flags: i32,
) -> Result<usize> {
let mut buf = [0; 1024];
let task = current_task_shared();
let task = ctx.shared().clone();
let access_mode = AccessMode::from_bits_retain(mode);
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let at_flags = AtFlags::from_bits_retain(flags);
let start_node = resolve_at_start_node(dirfd, path, at_flags).await?;
let start_node = resolve_at_start_node(ctx, dirfd, path, at_flags).await?;
let node = resolve_path_flags(dirfd, path, start_node, &task, at_flags).await?;
// If mode is F_OK (value 0), the check is for the file's existence.

View File

@@ -12,7 +12,7 @@ use crate::{
fs::syscalls::at::{AtFlags, resolve_at_start_node, resolve_path_flags},
memory::uaccess::cstr::UserCStr,
process::{Task, fd_table::Fd},
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
pub fn can_chmod(task: Arc<Task>, uid: Uid) -> bool {
@@ -20,14 +20,20 @@ pub fn can_chmod(task: Arc<Task>, uid: Uid) -> bool {
creds.caps().is_capable(CapabilitiesFlags::CAP_FOWNER) || creds.uid() == uid
}
pub async fn sys_fchmodat(dirfd: Fd, path: TUA<c_char>, mode: u16, flags: i32) -> Result<usize> {
pub async fn sys_fchmodat(
ctx: &ProcessCtx,
dirfd: Fd,
path: TUA<c_char>,
mode: u16,
flags: i32,
) -> Result<usize> {
let flags = AtFlags::from_bits_retain(flags);
let mut buf = [0; 1024];
let task = current_task_shared();
let task = ctx.shared().clone();
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let start_node = resolve_at_start_node(dirfd, path, flags).await?;
let start_node = resolve_at_start_node(ctx, dirfd, path, flags).await?;
let mode = FilePermissions::from_bits_retain(mode);
let node = resolve_path_flags(dirfd, path, start_node, &task, flags).await?;

View File

@@ -14,10 +14,11 @@ use crate::{
fs::syscalls::at::{AtFlags, resolve_at_start_node, resolve_path_flags},
memory::uaccess::cstr::UserCStr,
process::fd_table::Fd,
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
pub async fn sys_fchownat(
ctx: &ProcessCtx,
dirfd: Fd,
path: TUA<c_char>,
owner: i32,
@@ -26,10 +27,10 @@ pub async fn sys_fchownat(
) -> Result<usize> {
let mut buf = [0; 1024];
let task = current_task_shared();
let task = ctx.shared().clone();
let flags = AtFlags::from_bits_retain(flags);
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let start_node = resolve_at_start_node(dirfd, path, flags).await?;
let start_node = resolve_at_start_node(ctx, dirfd, path, flags).await?;
let node = resolve_path_flags(dirfd, path, start_node, &task, flags).await?;
let mut attr = node.getattr().await?;

View File

@@ -14,10 +14,11 @@ use crate::{
},
memory::uaccess::cstr::UserCStr,
process::fd_table::Fd,
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
pub async fn sys_linkat(
ctx: &ProcessCtx,
old_dirfd: Fd,
old_path: TUA<c_char>,
new_dirfd: Fd,
@@ -27,7 +28,7 @@ pub async fn sys_linkat(
let mut buf = [0; 1024];
let mut buf2 = [0; 1024];
let task = current_task_shared();
let task = ctx.shared().clone();
let mut flags = AtFlags::from_bits_retain(flags);
// following symlinks is implied for any other syscall.
@@ -57,8 +58,8 @@ pub async fn sys_linkat(
.copy_from_user(&mut buf2)
.await?,
);
let old_start_node = resolve_at_start_node(old_dirfd, old_path, flags).await?;
let new_start_node = resolve_at_start_node(new_dirfd, new_path, flags).await?;
let old_start_node = resolve_at_start_node(ctx, old_dirfd, old_path, flags).await?;
let new_start_node = resolve_at_start_node(ctx, new_dirfd, new_path, flags).await?;
let target_inode =
resolve_path_flags(old_dirfd, old_path, old_start_node.clone(), &task, flags).await?;

View File

@@ -2,22 +2,23 @@ use crate::fs::VFS;
use crate::fs::syscalls::at::{AtFlags, resolve_at_start_node};
use crate::memory::uaccess::cstr::UserCStr;
use crate::process::fd_table::Fd;
use crate::sched::current::current_task_shared;
use crate::sched::syscall_ctx::ProcessCtx;
use core::ffi::c_char;
use libkernel::fs::attr::FilePermissions;
use libkernel::fs::path::Path;
use libkernel::memory::address::TUA;
pub async fn sys_mkdirat(
ctx: &ProcessCtx,
dirfd: Fd,
path: TUA<c_char>,
mode: u16,
) -> libkernel::error::Result<usize> {
let mut buf = [0; 1024];
let task = current_task_shared();
let task = ctx.shared().clone();
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let start_node = resolve_at_start_node(dirfd, path, AtFlags::empty()).await?;
let start_node = resolve_at_start_node(ctx, dirfd, path, AtFlags::empty()).await?;
let mode = FilePermissions::from_bits_retain(mode);
VFS.mkdir(path, start_node, mode, &task).await?;

View File

@@ -1,7 +1,7 @@
use crate::{
fs::{DummyInode, VFS},
process::{Task, fd_table::Fd},
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
use alloc::sync::Arc;
use libkernel::{
@@ -37,12 +37,17 @@ bitflags::bitflags! {
/// Given the paraters to one of the sys_{action}at syscalls, resolve the
/// arguments to a start node to which path should be applied.
async fn resolve_at_start_node(dirfd: Fd, path: &Path, flags: AtFlags) -> Result<Arc<dyn Inode>> {
async fn resolve_at_start_node(
ctx: &ProcessCtx,
dirfd: Fd,
path: &Path,
flags: AtFlags,
) -> Result<Arc<dyn Inode>> {
if flags.contains(AtFlags::AT_EMPTY_PATH) && path.as_str().is_empty() {
// just return a dummy, since it'll operate on dirfd anyways
return Ok(Arc::new(DummyInode {}));
}
let task = current_task_shared();
let task = ctx.shared().clone();
let start_node: Arc<dyn Inode> = if path.is_absolute() {
// Absolute path ignores dirfd.

View File

@@ -2,7 +2,7 @@ use crate::{
fs::{VFS, syscalls::at::AtFlags},
memory::uaccess::cstr::UserCStr,
process::fd_table::Fd,
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
use core::ffi::c_char;
use libkernel::{
@@ -13,13 +13,19 @@ use libkernel::{
use super::resolve_at_start_node;
pub async fn sys_openat(dirfd: Fd, path: TUA<c_char>, flags: u32, mode: u16) -> Result<usize> {
pub async fn sys_openat(
ctx: &ProcessCtx,
dirfd: Fd,
path: TUA<c_char>,
flags: u32,
mode: u16,
) -> Result<usize> {
let mut buf = [0; 1024];
let task = current_task_shared();
let task = ctx.shared().clone();
let flags = OpenFlags::from_bits_truncate(flags);
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let start_node = resolve_at_start_node(dirfd, path, AtFlags::empty()).await?;
let start_node = resolve_at_start_node(ctx, dirfd, path, AtFlags::empty()).await?;
let mode = FilePermissions::from_bits_retain(mode);
let file = VFS.open(path, flags, start_node, mode, &task).await?;

View File

@@ -5,7 +5,7 @@ use crate::{
},
memory::uaccess::{copy_to_user_slice, cstr::UserCStr},
process::fd_table::Fd,
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
use core::{cmp::min, ffi::c_char};
use libkernel::{
@@ -14,17 +14,23 @@ use libkernel::{
memory::address::{TUA, UA},
};
pub async fn sys_readlinkat(dirfd: Fd, path: TUA<c_char>, buf: UA, size: usize) -> Result<usize> {
pub async fn sys_readlinkat(
ctx: &ProcessCtx,
dirfd: Fd,
path: TUA<c_char>,
buf: UA,
size: usize,
) -> Result<usize> {
let mut path_buf = [0; 1024];
let task = current_task_shared();
let task = ctx.shared().clone();
let path = Path::new(
UserCStr::from_ptr(path)
.copy_from_user(&mut path_buf)
.await?,
);
let start = resolve_at_start_node(dirfd, path, AtFlags::empty()).await?;
let start = resolve_at_start_node(ctx, dirfd, path, AtFlags::empty()).await?;
let name = path.file_name().ok_or(FsError::InvalidInput)?;
let parent = if let Some(p) = path.parent() {

View File

@@ -14,7 +14,7 @@ use crate::{
},
memory::uaccess::cstr::UserCStr,
process::fd_table::Fd,
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
// from linux/fcntl.h
@@ -23,15 +23,17 @@ const AT_RENAME_EXCHANGE: u32 = 0x0002; // Atomically exchange the target and so
const AT_RENAME_WHITEOUT: u32 = 0x0004; // Create a whiteout entry for the old path.
pub async fn sys_renameat(
ctx: &ProcessCtx,
old_dirfd: Fd,
old_path: TUA<c_char>,
new_dirfd: Fd,
new_path: TUA<c_char>,
) -> Result<usize> {
sys_renameat2(old_dirfd, old_path, new_dirfd, new_path, 0).await
sys_renameat2(ctx, old_dirfd, old_path, new_dirfd, new_path, 0).await
}
pub async fn sys_renameat2(
ctx: &ProcessCtx,
old_dirfd: Fd,
old_path: TUA<c_char>,
new_dirfd: Fd,
@@ -53,7 +55,7 @@ pub async fn sys_renameat2(
let mut buf = [0; 1024];
let mut buf2 = [0; 1024];
let task = current_task_shared();
let task = ctx.shared().clone();
let old_path = Path::new(
UserCStr::from_ptr(old_path)
@@ -68,8 +70,8 @@ pub async fn sys_renameat2(
let old_name = old_path.file_name().ok_or(FsError::InvalidInput)?;
let new_name = new_path.file_name().ok_or(FsError::InvalidInput)?;
let old_start_node = resolve_at_start_node(old_dirfd, old_path, AtFlags::empty()).await?;
let new_start_node = resolve_at_start_node(new_dirfd, new_path, AtFlags::empty()).await?;
let old_start_node = resolve_at_start_node(ctx, old_dirfd, old_path, AtFlags::empty()).await?;
let new_start_node = resolve_at_start_node(ctx, new_dirfd, new_path, AtFlags::empty()).await?;
let old_parent_inode = if let Some(parent_path) = old_path.parent() {
VFS.resolve_path(parent_path, old_start_node.clone(), &task)

View File

@@ -2,7 +2,7 @@ use crate::{
fs::syscalls::at::{resolve_at_start_node, resolve_path_flags},
memory::uaccess::{UserCopyable, copy_to_user, cstr::UserCStr},
process::fd_table::Fd,
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
use core::ffi::c_char;
use libkernel::{
@@ -68,6 +68,7 @@ impl From<FileAttr> for Stat {
}
pub async fn sys_newfstatat(
ctx: &ProcessCtx,
dirfd: Fd,
path: TUA<c_char>,
statbuf: TUA<Stat>,
@@ -75,11 +76,11 @@ pub async fn sys_newfstatat(
) -> Result<usize> {
let mut buf = [0; 1024];
let task = current_task_shared();
let task = ctx.shared().clone();
let flags = AtFlags::from_bits_truncate(flags);
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let start_node = match resolve_at_start_node(dirfd, path, flags).await {
let start_node = match resolve_at_start_node(ctx, dirfd, path, flags).await {
Ok(node) => node,
Err(err) if err != KernelError::NotSupported => panic!("{err}"),
Err(err) => return Err(err),

View File

@@ -5,7 +5,7 @@ use crate::{
},
memory::uaccess::{UserCopyable, copy_to_user, cstr::UserCStr},
process::fd_table::Fd,
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
use core::{ffi::c_char, time::Duration};
use libkernel::{error::Result, fs::path::Path, memory::address::TUA};
@@ -116,6 +116,7 @@ impl From<Duration> for StatXTimestamp {
unsafe impl UserCopyable for StatX {}
pub async fn sys_statx(
ctx: &ProcessCtx,
dirfd: Fd,
path: TUA<c_char>,
flags: i32,
@@ -124,12 +125,12 @@ pub async fn sys_statx(
) -> Result<usize> {
let mut buf = [0; 1024];
let task = current_task_shared();
let task = ctx.shared().clone();
let flags = AtFlags::from_bits_truncate(flags);
let mask = StatXMask::from_bits_truncate(mask);
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let start_node = resolve_at_start_node(dirfd, path, flags).await?;
let start_node = resolve_at_start_node(ctx, dirfd, path, flags).await?;
let node = resolve_path_flags(dirfd, path, start_node, &task, flags).await?;
let attr = node.getattr().await?;

View File

@@ -9,10 +9,11 @@ use crate::{
},
memory::uaccess::cstr::UserCStr,
process::fd_table::Fd,
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
pub async fn sys_symlinkat(
ctx: &ProcessCtx,
old_name: TUA<c_char>,
new_dirfd: Fd,
new_name: TUA<c_char>,
@@ -20,7 +21,7 @@ pub async fn sys_symlinkat(
let mut buf = [0; 1024];
let mut buf2 = [0; 1024];
let task = current_task_shared();
let task = ctx.shared().clone();
let source = Path::new(
UserCStr::from_ptr(old_name)
.copy_from_user(&mut buf)
@@ -31,7 +32,7 @@ pub async fn sys_symlinkat(
.copy_from_user(&mut buf2)
.await?,
);
let start_node = resolve_at_start_node(new_dirfd, target, AtFlags::empty()).await?;
let start_node = resolve_at_start_node(ctx, new_dirfd, target, AtFlags::empty()).await?;
VFS.symlink(source, target, start_node, &task).await?;

View File

@@ -9,7 +9,7 @@ use crate::{
},
memory::uaccess::cstr::UserCStr,
process::fd_table::Fd,
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
// As defined in linux/fcntl.h ─ enables directory removal via unlinkat.
@@ -20,16 +20,21 @@ const AT_REMOVEDIR: u32 = 0x200;
/// The semantics are:
/// - If `flags & AT_REMOVEDIR` is set, behave like `rmdir`.
/// - Otherwise behave like `unlink`.
pub async fn sys_unlinkat(dirfd: Fd, path: TUA<c_char>, flags: u32) -> Result<usize> {
pub async fn sys_unlinkat(
ctx: &ProcessCtx,
dirfd: Fd,
path: TUA<c_char>,
flags: u32,
) -> Result<usize> {
// Copy the user-provided path into kernel memory.
let mut buf = [0u8; 1024];
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let task = current_task_shared();
let task = ctx.shared().clone();
// Determine the starting inode for path resolution.
let flags = AtFlags::from_bits_retain(flags as _);
let start_node = resolve_at_start_node(dirfd, path, flags).await?;
let start_node = resolve_at_start_node(ctx, dirfd, path, flags).await?;
let remove_dir = flags.bits() as u32 & AT_REMOVEDIR != 0;

View File

@@ -17,19 +17,20 @@ use crate::{
fs::syscalls::at::{AtFlags, resolve_at_start_node, resolve_path_flags},
memory::uaccess::{copy_from_user, cstr::UserCStr},
process::fd_table::Fd,
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
const UTIME_NOW: u64 = (1 << 30) - 1;
const UTIME_OMIT: u64 = (1 << 30) - 2;
pub async fn sys_utimensat(
ctx: &ProcessCtx,
dirfd: Fd,
path: TUA<c_char>,
times: TUA<[TimeSpec; 2]>,
flags: i32,
) -> Result<usize> {
let task = current_task_shared();
let task = ctx.shared().clone();
// linux specifically uses NULL path to indicate futimens, see utimensat(2)
let node = if path.is_null() {
@@ -44,7 +45,7 @@ pub async fn sys_utimensat(
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let flags = AtFlags::from_bits_retain(flags);
let start_node = resolve_at_start_node(dirfd, path, flags).await?;
let start_node = resolve_at_start_node(ctx, dirfd, path, flags).await?;
resolve_path_flags(dirfd, path, start_node, &task, flags).await?
};

View File

@@ -2,7 +2,7 @@ use crate::{
fs::VFS,
memory::uaccess::{copy_to_user_slice, cstr::UserCStr},
process::fd_table::Fd,
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
use alloc::{borrow::ToOwned, ffi::CString, string::ToString};
use core::{ffi::c_char, str::FromStr};
@@ -13,8 +13,8 @@ use libkernel::{
proc::caps::CapabilitiesFlags,
};
pub async fn sys_getcwd(buf: UA, len: usize) -> Result<usize> {
let task = current_task_shared();
pub async fn sys_getcwd(ctx: &ProcessCtx, buf: UA, len: usize) -> Result<usize> {
let task = ctx.shared().clone();
let path = task.cwd.lock_save_irq().1.as_str().to_string();
let cstr = CString::from_str(&path).map_err(|_| KernelError::InvalidValue)?;
let slice = cstr.as_bytes_with_nul();
@@ -28,11 +28,11 @@ pub async fn sys_getcwd(buf: UA, len: usize) -> Result<usize> {
Ok(buf.value())
}
pub async fn sys_chdir(path: TUA<c_char>) -> Result<usize> {
pub async fn sys_chdir(ctx: &ProcessCtx, path: TUA<c_char>) -> Result<usize> {
let mut buf = [0; 1024];
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let task = current_task_shared();
let task = ctx.shared().clone();
let current_path = task.cwd.lock_save_irq().0.clone();
let new_path = task.cwd.lock_save_irq().1.join(path);
@@ -43,8 +43,8 @@ pub async fn sys_chdir(path: TUA<c_char>) -> Result<usize> {
Ok(0)
}
pub async fn sys_chroot(path: TUA<c_char>) -> Result<usize> {
let task = current_task_shared();
pub async fn sys_chroot(ctx: &ProcessCtx, path: TUA<c_char>) -> Result<usize> {
let task = ctx.shared().clone();
task.creds
.lock_save_irq()
.caps()
@@ -63,8 +63,8 @@ pub async fn sys_chroot(path: TUA<c_char>) -> Result<usize> {
Ok(0)
}
pub async fn sys_fchdir(fd: Fd) -> Result<usize> {
let task = current_task_shared();
pub async fn sys_fchdir(ctx: &ProcessCtx, fd: Fd) -> Result<usize> {
let task = ctx.shared().clone();
let file = task
.fd_table
.lock_save_irq()

View File

@@ -4,10 +4,10 @@ use libkernel::{
fs::attr::FilePermissions,
};
use crate::{process::fd_table::Fd, sched::current::current_task_shared};
use crate::{process::fd_table::Fd, sched::syscall_ctx::ProcessCtx};
pub async fn sys_fchmod(fd: Fd, mode: u16) -> Result<usize> {
let task = current_task_shared();
pub async fn sys_fchmod(ctx: &ProcessCtx, fd: Fd, mode: u16) -> Result<usize> {
let task = ctx.shared().clone();
let file = task
.fd_table
.lock_save_irq()

View File

@@ -6,10 +6,10 @@ use libkernel::{
},
};
use crate::{process::fd_table::Fd, sched::current::current_task_shared};
use crate::{process::fd_table::Fd, sched::syscall_ctx::ProcessCtx};
pub async fn sys_fchown(fd: Fd, owner: i32, group: i32) -> Result<usize> {
let task = current_task_shared();
pub async fn sys_fchown(ctx: &ProcessCtx, fd: Fd, owner: i32, group: i32) -> Result<usize> {
let task = ctx.shared().clone();
let file = task
.fd_table
.lock_save_irq()

View File

@@ -1,10 +1,11 @@
use crate::{process::fd_table::Fd, sched::current::current_task};
use crate::{process::fd_table::Fd, sched::syscall_ctx::ProcessCtx};
use alloc::sync::Arc;
use bitflags::bitflags;
use libkernel::error::{KernelError, Result};
async fn close(fd: Fd) -> Result<()> {
let file = current_task()
async fn close(ctx: &ProcessCtx, fd: Fd) -> Result<()> {
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.remove(fd)
@@ -17,8 +18,8 @@ async fn close(fd: Fd) -> Result<()> {
Ok(())
}
pub async fn sys_close(fd: Fd) -> Result<usize> {
close(fd).await?;
pub async fn sys_close(ctx: &ProcessCtx, fd: Fd) -> Result<usize> {
close(ctx, fd).await?;
Ok(0)
}
@@ -29,7 +30,7 @@ bitflags! {
}
}
pub async fn sys_close_range(first: Fd, last: Fd, flags: i32) -> Result<usize> {
pub async fn sys_close_range(ctx: &ProcessCtx, first: Fd, last: Fd, flags: i32) -> Result<usize> {
let flags = CloseRangeFlags::from_bits_truncate(flags);
if flags.contains(CloseRangeFlags::CLOSE_RANGE_UNSHARE) {
todo!("Implement CLOSE_RANGE_UNSHARE");
@@ -39,7 +40,7 @@ pub async fn sys_close_range(first: Fd, last: Fd, flags: i32) -> Result<usize> {
}
for i in first.as_raw()..=last.as_raw() {
close(Fd(i)).await?;
close(ctx, Fd(i)).await?;
}
Ok(0)
}

View File

@@ -5,9 +5,10 @@ use libkernel::memory::address::TUA;
use crate::kernel::kpipe::KPipe;
use crate::memory::uaccess::{copy_from_user, copy_to_user};
use crate::process::fd_table::Fd;
use crate::sched::current::current_task;
use crate::sched::syscall_ctx::ProcessCtx;
pub async fn sys_copy_file_range(
ctx: &ProcessCtx,
fd_in: Fd,
off_in: TUA<i32>,
fd_out: Fd,
@@ -44,7 +45,7 @@ pub async fn sys_copy_file_range(
};
let (reader, writer) = {
let task = current_task();
let task = ctx.shared();
let fds = task.fd_table.lock_save_irq();
let reader = fds.get(fd_in).ok_or(KernelError::BadFd)?;

View File

@@ -2,7 +2,7 @@ use crate::fs::VFS;
use crate::memory::uaccess::copy_to_user_slice;
use crate::memory::uaccess::cstr::UserCStr;
use crate::process::fd_table::Fd;
use crate::sched::current::current_task_shared;
use crate::sched::syscall_ctx::ProcessCtx;
use alloc::sync::Arc;
use core::ffi::c_char;
use libkernel::error::{KernelError, Result};
@@ -21,6 +21,7 @@ async fn getxattr(node: Arc<dyn Inode>, name: &str, ua: UA, size: usize) -> Resu
}
pub async fn sys_getxattr(
ctx: &ProcessCtx,
path: TUA<c_char>,
name: TUA<c_char>,
value: UA,
@@ -29,7 +30,7 @@ pub async fn sys_getxattr(
let mut buf = [0; 1024];
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let task = current_task_shared();
let task = ctx.shared().clone();
let node = VFS.resolve_path(path, VFS.root_inode(), &task).await?;
let mut buf = [0; 1024];
@@ -43,6 +44,7 @@ pub async fn sys_getxattr(
}
pub async fn sys_lgetxattr(
ctx: &ProcessCtx,
path: TUA<c_char>,
name: TUA<c_char>,
value: UA,
@@ -51,7 +53,7 @@ pub async fn sys_lgetxattr(
let mut buf = [0; 1024];
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let task = current_task_shared();
let task = ctx.shared().clone();
let node = VFS
.resolve_path_nofollow(path, VFS.root_inode(), &task)
@@ -66,9 +68,15 @@ pub async fn sys_lgetxattr(
.await
}
pub async fn sys_fgetxattr(fd: Fd, name: TUA<c_char>, value: UA, size: usize) -> Result<usize> {
pub async fn sys_fgetxattr(
ctx: &ProcessCtx,
fd: Fd,
name: TUA<c_char>,
value: UA,
size: usize,
) -> Result<usize> {
let node = {
let task = current_task_shared();
let task = ctx.shared().clone();
let file = task
.fd_table
.lock_save_irq()

View File

@@ -1,8 +1,9 @@
use crate::{process::fd_table::Fd, sched::current::current_task};
use crate::{process::fd_table::Fd, sched::syscall_ctx::ProcessCtx};
use libkernel::error::{KernelError, Result};
pub async fn sys_ioctl(fd: Fd, request: usize, arg: usize) -> Result<usize> {
let fd = current_task()
pub async fn sys_ioctl(ctx: &ProcessCtx, fd: Fd, request: usize, arg: usize) -> Result<usize> {
let fd = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)

View File

@@ -1,7 +1,7 @@
use crate::{
memory::uaccess::{UserCopyable, copy_obj_array_from_user},
process::fd_table::Fd,
sched::current::current_task,
sched::syscall_ctx::ProcessCtx,
};
use libkernel::{
error::{KernelError, Result},
@@ -18,8 +18,14 @@ pub struct IoVec {
// SAFETY: An IoVec is safe to copy to-and-from userspace.
unsafe impl UserCopyable for IoVec {}
pub async fn sys_writev(fd: Fd, iov_ptr: TUA<IoVec>, no_iov: usize) -> Result<usize> {
let file = current_task()
pub async fn sys_writev(
ctx: &ProcessCtx,
fd: Fd,
iov_ptr: TUA<IoVec>,
no_iov: usize,
) -> Result<usize> {
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)
@@ -32,8 +38,14 @@ pub async fn sys_writev(fd: Fd, iov_ptr: TUA<IoVec>, no_iov: usize) -> Result<us
ops.writev(state, &iovs).await
}
pub async fn sys_readv(fd: Fd, iov_ptr: TUA<IoVec>, no_iov: usize) -> Result<usize> {
let file = current_task()
pub async fn sys_readv(
ctx: &ProcessCtx,
fd: Fd,
iov_ptr: TUA<IoVec>,
no_iov: usize,
) -> Result<usize> {
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)
@@ -46,22 +58,36 @@ pub async fn sys_readv(fd: Fd, iov_ptr: TUA<IoVec>, no_iov: usize) -> Result<usi
ops.readv(state, &iovs).await
}
pub async fn sys_pwritev(fd: Fd, iov_ptr: TUA<IoVec>, no_iov: usize, offset: u64) -> Result<usize> {
sys_pwritev2(fd, iov_ptr, no_iov, offset, 0).await
pub async fn sys_pwritev(
ctx: &ProcessCtx,
fd: Fd,
iov_ptr: TUA<IoVec>,
no_iov: usize,
offset: u64,
) -> Result<usize> {
sys_pwritev2(ctx, fd, iov_ptr, no_iov, offset, 0).await
}
pub async fn sys_preadv(fd: Fd, iov_ptr: TUA<IoVec>, no_iov: usize, offset: u64) -> Result<usize> {
sys_preadv2(fd, iov_ptr, no_iov, offset, 0).await
pub async fn sys_preadv(
ctx: &ProcessCtx,
fd: Fd,
iov_ptr: TUA<IoVec>,
no_iov: usize,
offset: u64,
) -> Result<usize> {
sys_preadv2(ctx, fd, iov_ptr, no_iov, offset, 0).await
}
pub async fn sys_pwritev2(
ctx: &ProcessCtx,
fd: Fd,
iov_ptr: TUA<IoVec>,
no_iov: usize,
offset: u64,
_flags: u32, // TODO: implement these flags
) -> Result<usize> {
let file = current_task()
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)
@@ -75,13 +101,15 @@ pub async fn sys_pwritev2(
}
pub async fn sys_preadv2(
ctx: &ProcessCtx,
fd: Fd,
iov_ptr: TUA<IoVec>,
no_iov: usize,
offset: u64,
_flags: u32,
) -> Result<usize> {
let file = current_task()
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)

View File

@@ -2,7 +2,7 @@ use crate::fs::VFS;
use crate::memory::uaccess::copy_to_user_slice;
use crate::memory::uaccess::cstr::UserCStr;
use crate::process::fd_table::Fd;
use crate::sched::current::current_task_shared;
use crate::sched::syscall_ctx::ProcessCtx;
use alloc::sync::Arc;
use libkernel::error::{KernelError, Result};
use libkernel::fs::Inode;
@@ -22,21 +22,31 @@ async fn listxattr(node: Arc<dyn Inode>, ua: UA, size: usize) -> Result<usize> {
}
}
pub async fn sys_listxattr(path: TUA<core::ffi::c_char>, list: UA, size: usize) -> Result<usize> {
pub async fn sys_listxattr(
ctx: &ProcessCtx,
path: TUA<core::ffi::c_char>,
list: UA,
size: usize,
) -> Result<usize> {
let mut buf = [0; 1024];
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let task = current_task_shared();
let task = ctx.shared().clone();
let node = VFS.resolve_path(path, VFS.root_inode(), &task).await?;
listxattr(node, list, size).await
}
pub async fn sys_llistxattr(path: TUA<core::ffi::c_char>, list: UA, size: usize) -> Result<usize> {
pub async fn sys_llistxattr(
ctx: &ProcessCtx,
path: TUA<core::ffi::c_char>,
list: UA,
size: usize,
) -> Result<usize> {
let mut buf = [0; 1024];
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let task = current_task_shared();
let task = ctx.shared().clone();
let node = VFS
.resolve_path_nofollow(path, VFS.root_inode(), &task)
@@ -44,9 +54,9 @@ pub async fn sys_llistxattr(path: TUA<core::ffi::c_char>, list: UA, size: usize)
listxattr(node, list, size).await
}
pub async fn sys_flistxattr(fd: Fd, list: UA, size: usize) -> Result<usize> {
pub async fn sys_flistxattr(ctx: &ProcessCtx, fd: Fd, list: UA, size: usize) -> Result<usize> {
let node = {
let task = current_task_shared();
let task = ctx.shared().clone();
let file = task
.fd_table
.lock_save_irq()

View File

@@ -1,7 +1,7 @@
use crate::fs::VFS;
use crate::memory::uaccess::cstr::UserCStr;
use crate::process::fd_table::Fd;
use crate::sched::current::current_task_shared;
use crate::sched::syscall_ctx::ProcessCtx;
use alloc::sync::Arc;
use core::ffi::c_char;
use libkernel::error::{KernelError, Result};
@@ -14,11 +14,15 @@ async fn removexattr(node: Arc<dyn Inode>, name: &str) -> Result<()> {
Ok(())
}
pub async fn sys_removexattr(path: TUA<c_char>, name: TUA<c_char>) -> Result<usize> {
pub async fn sys_removexattr(
ctx: &ProcessCtx,
path: TUA<c_char>,
name: TUA<c_char>,
) -> Result<usize> {
let mut buf = [0; 1024];
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let task = current_task_shared();
let task = ctx.shared().clone();
let node = VFS.resolve_path(path, VFS.root_inode(), &task).await?;
let mut buf = [0; 1024];
@@ -30,11 +34,15 @@ pub async fn sys_removexattr(path: TUA<c_char>, name: TUA<c_char>) -> Result<usi
Ok(0)
}
pub async fn sys_lremovexattr(path: TUA<c_char>, name: TUA<c_char>) -> Result<usize> {
pub async fn sys_lremovexattr(
ctx: &ProcessCtx,
path: TUA<c_char>,
name: TUA<c_char>,
) -> Result<usize> {
let mut buf = [0; 1024];
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let task = current_task_shared();
let task = ctx.shared().clone();
let node = VFS
.resolve_path_nofollow(path, VFS.root_inode(), &task)
@@ -48,9 +56,9 @@ pub async fn sys_lremovexattr(path: TUA<c_char>, name: TUA<c_char>) -> Result<us
Ok(0)
}
pub async fn sys_fremovexattr(fd: Fd, name: TUA<c_char>) -> Result<usize> {
pub async fn sys_fremovexattr(ctx: &ProcessCtx, fd: Fd, name: TUA<c_char>) -> Result<usize> {
let node = {
let task = current_task_shared();
let task = ctx.shared().clone();
let file = task
.fd_table
.lock_save_irq()

View File

@@ -1,11 +1,12 @@
use crate::{process::fd_table::Fd, sched::current::current_task};
use crate::{process::fd_table::Fd, sched::syscall_ctx::ProcessCtx};
use libkernel::{
error::{KernelError, Result},
memory::address::UA,
};
pub async fn sys_write(fd: Fd, user_buf: UA, count: usize) -> Result<usize> {
let file = current_task()
pub async fn sys_write(ctx: &ProcessCtx, fd: Fd, user_buf: UA, count: usize) -> Result<usize> {
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)
@@ -16,8 +17,9 @@ pub async fn sys_write(fd: Fd, user_buf: UA, count: usize) -> Result<usize> {
ops.write(ctx, user_buf, count).await
}
pub async fn sys_read(fd: Fd, user_buf: UA, count: usize) -> Result<usize> {
let file = current_task()
pub async fn sys_read(ctx: &ProcessCtx, fd: Fd, user_buf: UA, count: usize) -> Result<usize> {
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)
@@ -28,8 +30,15 @@ pub async fn sys_read(fd: Fd, user_buf: UA, count: usize) -> Result<usize> {
ops.read(ctx, user_buf, count).await
}
pub async fn sys_pwrite64(fd: Fd, user_buf: UA, count: usize, offset: u64) -> Result<usize> {
let file = current_task()
pub async fn sys_pwrite64(
ctx: &ProcessCtx,
fd: Fd,
user_buf: UA,
count: usize,
offset: u64,
) -> Result<usize> {
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)
@@ -40,8 +49,15 @@ pub async fn sys_pwrite64(fd: Fd, user_buf: UA, count: usize, offset: u64) -> Re
ops.writeat(user_buf, count, offset).await
}
pub async fn sys_pread64(fd: Fd, user_buf: UA, count: usize, offset: u64) -> Result<usize> {
let file = current_task()
pub async fn sys_pread64(
ctx: &ProcessCtx,
fd: Fd,
user_buf: UA,
count: usize,
offset: u64,
) -> Result<usize> {
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)

View File

@@ -1,4 +1,4 @@
use crate::{process::fd_table::Fd, sched::current::current_task};
use crate::{process::fd_table::Fd, sched::syscall_ctx::ProcessCtx};
use libkernel::{
error::{KernelError, Result},
fs::SeekFrom,
@@ -8,7 +8,7 @@ const SEEK_SET: i32 = 0;
const SEEK_CUR: i32 = 1;
const SEEK_END: i32 = 2;
pub async fn sys_lseek(fd: Fd, offset: isize, whence: i32) -> Result<usize> {
pub async fn sys_lseek(ctx: &ProcessCtx, fd: Fd, offset: isize, whence: i32) -> Result<usize> {
let seek_from = match whence {
SEEK_SET => SeekFrom::Start(offset as _),
SEEK_CUR => SeekFrom::Current(offset as _),
@@ -16,7 +16,8 @@ pub async fn sys_lseek(fd: Fd, offset: isize, whence: i32) -> Result<usize> {
_ => return Err(KernelError::InvalidValue),
};
let fd = current_task()
let fd = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)

View File

@@ -2,7 +2,7 @@ use crate::fs::VFS;
use crate::memory::uaccess::copy_from_user_slice;
use crate::memory::uaccess::cstr::UserCStr;
use crate::process::fd_table::Fd;
use crate::sched::current::current_task_shared;
use crate::sched::syscall_ctx::ProcessCtx;
use alloc::sync::Arc;
use alloc::vec;
use bitflags::bitflags;
@@ -51,6 +51,7 @@ async fn setxattr(
}
pub async fn sys_setxattr(
ctx: &ProcessCtx,
path: TUA<c_char>,
name: TUA<c_char>,
value: UA,
@@ -60,7 +61,7 @@ pub async fn sys_setxattr(
let mut buf = [0; 1024];
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let task = current_task_shared();
let task = ctx.shared().clone();
let node = VFS.resolve_path(path, VFS.root_inode(), &task).await?;
let mut buf = [0; 1024];
@@ -75,6 +76,7 @@ pub async fn sys_setxattr(
}
pub async fn sys_lsetxattr(
ctx: &ProcessCtx,
path: TUA<c_char>,
name: TUA<c_char>,
value: UA,
@@ -84,7 +86,7 @@ pub async fn sys_lsetxattr(
let mut buf = [0; 1024];
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let task = current_task_shared();
let task = ctx.shared().clone();
let node = VFS
.resolve_path_nofollow(path, VFS.root_inode(), &task)
@@ -101,6 +103,7 @@ pub async fn sys_lsetxattr(
}
pub async fn sys_fsetxattr(
ctx: &ProcessCtx,
fd: Fd,
name: TUA<c_char>,
value: UA,
@@ -108,7 +111,7 @@ pub async fn sys_fsetxattr(
flags: i32,
) -> Result<usize> {
let node = {
let task = current_task_shared();
let task = ctx.shared().clone();
let file = task
.fd_table
.lock_save_irq()

View File

@@ -1,4 +1,4 @@
use crate::{kernel::kpipe::KPipe, process::fd_table::Fd, sched::current::current_task};
use crate::{kernel::kpipe::KPipe, process::fd_table::Fd, sched::syscall_ctx::ProcessCtx};
use alloc::sync::Arc;
use libkernel::{
error::{KernelError, Result},
@@ -6,13 +6,14 @@ use libkernel::{
};
pub async fn sys_sendfile(
ctx: &ProcessCtx,
out_fd: Fd,
in_fd: Fd,
_offset: TUA<u64>,
mut count: usize,
) -> Result<usize> {
let (reader, writer) = {
let task = current_task();
let task = ctx.shared();
let fds = task.fd_table.lock_save_irq();
let reader = fds.get(in_fd).ok_or(KernelError::BadFd)?;

View File

@@ -1,11 +1,12 @@
use super::at::stat::Stat;
use crate::memory::uaccess::copy_to_user;
use crate::{process::fd_table::Fd, sched::current::current_task};
use crate::{process::fd_table::Fd, sched::syscall_ctx::ProcessCtx};
use libkernel::error::Result;
use libkernel::{error::KernelError, memory::address::TUA};
pub async fn sys_fstat(fd: Fd, statbuf: TUA<Stat>) -> Result<usize> {
let fd = current_task()
pub async fn sys_fstat(ctx: &ProcessCtx, fd: Fd, statbuf: TUA<Stat>) -> Result<usize> {
let fd = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)

View File

@@ -2,7 +2,7 @@ use crate::fs::VFS;
use crate::memory::uaccess::cstr::UserCStr;
use crate::memory::uaccess::{UserCopyable, copy_to_user};
use crate::process::fd_table::Fd;
use crate::sched::current::{current_task, current_task_shared};
use crate::sched::syscall_ctx::ProcessCtx;
use alloc::sync::Arc;
use core::ffi::c_char;
use libkernel::error::KernelError;
@@ -65,19 +65,28 @@ async fn statfs_impl(inode: Arc<dyn Inode>) -> libkernel::error::Result<StatFs>
})
}
pub async fn sys_statfs(path: TUA<c_char>, stat: TUA<StatFs>) -> libkernel::error::Result<usize> {
pub async fn sys_statfs(
ctx: &ProcessCtx,
path: TUA<c_char>,
stat: TUA<StatFs>,
) -> libkernel::error::Result<usize> {
let mut buf = [0; 1024];
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let inode = VFS
.resolve_path(path, VFS.root_inode(), &current_task_shared())
.resolve_path(path, VFS.root_inode(), &ctx.shared().clone())
.await?;
let statfs = statfs_impl(inode).await?;
copy_to_user(stat, statfs).await?;
Ok(0)
}
pub async fn sys_fstatfs(fd: Fd, stat: TUA<StatFs>) -> libkernel::error::Result<usize> {
let fd = current_task()
pub async fn sys_fstatfs(
ctx: &ProcessCtx,
fd: Fd,
stat: TUA<StatFs>,
) -> libkernel::error::Result<usize> {
let fd = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)

View File

@@ -1,14 +1,14 @@
use libkernel::error::{KernelError, Result};
use crate::{fs::VFS, process::fd_table::Fd, sched::current::current_task_shared};
use crate::{fs::VFS, process::fd_table::Fd, sched::syscall_ctx::ProcessCtx};
pub async fn sys_sync() -> Result<usize> {
pub async fn sys_sync(_ctx: &ProcessCtx) -> Result<usize> {
VFS.sync_all().await?;
Ok(0)
}
pub async fn sys_syncfs(fd: Fd) -> Result<usize> {
let task = current_task_shared();
pub async fn sys_syncfs(ctx: &ProcessCtx, fd: Fd) -> Result<usize> {
let task = ctx.shared().clone();
let inode = task
.fd_table
@@ -22,8 +22,8 @@ pub async fn sys_syncfs(fd: Fd) -> Result<usize> {
Ok(0)
}
pub async fn sys_fsync(fd: Fd) -> Result<usize> {
let task = current_task_shared();
pub async fn sys_fsync(ctx: &ProcessCtx, fd: Fd) -> Result<usize> {
let task = ctx.shared().clone();
let inode = task
.fd_table
@@ -37,8 +37,8 @@ pub async fn sys_fsync(fd: Fd) -> Result<usize> {
Ok(0)
}
pub async fn sys_fdatasync(fd: Fd) -> Result<usize> {
let task = current_task_shared();
pub async fn sys_fdatasync(ctx: &ProcessCtx, fd: Fd) -> Result<usize> {
let task = ctx.shared().clone();
let inode = task
.fd_table

View File

@@ -1,10 +1,7 @@
use core::ffi::c_char;
use crate::{
fs::VFS,
memory::uaccess::cstr::UserCStr,
process::fd_table::Fd,
sched::current::{current_task, current_task_shared},
fs::VFS, memory::uaccess::cstr::UserCStr, process::fd_table::Fd, sched::syscall_ctx::ProcessCtx,
};
use libkernel::{
error::{KernelError, Result},
@@ -12,10 +9,10 @@ use libkernel::{
memory::address::TUA,
};
pub async fn sys_truncate(path: TUA<c_char>, new_size: usize) -> Result<usize> {
pub async fn sys_truncate(ctx: &ProcessCtx, path: TUA<c_char>, new_size: usize) -> Result<usize> {
let mut buf = [0; 1024];
let task = current_task_shared();
let task = ctx.shared().clone();
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let root = task.root.lock_save_irq().0.clone();
@@ -34,8 +31,9 @@ pub async fn sys_truncate(path: TUA<c_char>, new_size: usize) -> Result<usize> {
ops.truncate(ctx, new_size).await.map(|_| 0)
}
pub async fn sys_ftruncate(fd: Fd, new_size: usize) -> Result<usize> {
let fd = current_task()
pub async fn sys_ftruncate(ctx: &ProcessCtx, fd: Fd, new_size: usize) -> Result<usize> {
let fd = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)

View File

@@ -1,5 +1,5 @@
use crate::memory::uaccess::cstr::UserCStr;
use crate::sched::current::current_task_shared;
use crate::sched::syscall_ctx::ProcessCtx;
use crate::sync::OnceLock;
use crate::sync::SpinLock;
use alloc::string::{String, ToString};
@@ -17,10 +17,13 @@ pub fn hostname() -> &'static SpinLock<String> {
const HOST_NAME_MAX: usize = 64;
pub async fn sys_sethostname(name_ptr: TUA<c_char>, name_len: usize) -> Result<usize> {
pub async fn sys_sethostname(
ctx: &ProcessCtx,
name_ptr: TUA<c_char>,
name_len: usize,
) -> Result<usize> {
{
let task = current_task_shared();
let creds = task.creds.lock_save_irq();
let creds = ctx.shared().creds.lock_save_irq();
creds
.caps()
.check_capable(CapabilitiesFlags::CAP_SYS_ADMIN)?;

View File

@@ -1,4 +1,4 @@
use crate::{ArchImpl, arch::Arch, sched::current::current_task_shared};
use crate::{ArchImpl, arch::Arch, sched::syscall_ctx::ProcessCtx};
use core::sync::atomic::AtomicBool;
use libkernel::{
error::{KernelError, Result},
@@ -7,8 +7,14 @@ use libkernel::{
pub static CAD_ENABLED: AtomicBool = AtomicBool::new(false);
pub async fn sys_reboot(magic: u32, magic2: u32, op: u32, _arg: usize) -> Result<usize> {
current_task_shared()
pub async fn sys_reboot(
ctx: &ProcessCtx,
magic: u32,
magic2: u32,
op: u32,
_arg: usize,
) -> Result<usize> {
ctx.shared()
.creds
.lock_save_irq()
.caps()

View File

@@ -34,7 +34,7 @@ use libkernel::{
use log::{error, warn};
use process::ctx::UserCtx;
use sched::{
current::current_task_shared, sched_init, spawn_kernel_work, uspc_ret::dispatch_userspace_task,
sched_init, spawn_kernel_work, syscall_ctx::ProcessCtx, uspc_ret::dispatch_userspace_task,
};
extern crate alloc;
@@ -76,7 +76,7 @@ fn on_panic(info: &PanicInfo) -> ! {
ArchImpl::power_off();
}
async fn launch_init(mut opts: KOptions) {
async fn launch_init(mut ctx: ProcessCtx, mut opts: KOptions) {
let init = opts
.init
.unwrap_or_else(|| panic!("No init specified in kernel command line"));
@@ -141,7 +141,7 @@ async fn launch_init(mut opts: KOptions) {
.await
.expect("Unable to find init");
let task = current_task_shared();
let task = ctx.shared().clone();
// Ensure that the exec() call applies to init.
assert!(task.process.tgid.is_init());
@@ -186,7 +186,7 @@ async fn launch_init(mut opts: KOptions) {
init_args.append(&mut opts.init_args);
process::exec::kernel_exec(init.as_path(), inode, init_args, vec![])
process::exec::kernel_exec(&mut ctx, init.as_path(), inode, init_args, vec![])
.await
.expect("Could not launch init process");
}
@@ -238,7 +238,14 @@ pub fn kmain(args: String, ctx_frame: *mut UserCtx) {
let kopts = parse_args(&args);
spawn_kernel_work(launch_init(kopts));
{
// SAFETY: kmain is called prior to init being launched. Thefore, we
// will be the only access to `ctx` at this point.
let mut ctx = unsafe { ProcessCtx::from_current() };
let ctx2 = unsafe { ctx.clone() };
spawn_kernel_work(&mut ctx, launch_init(ctx2, kopts));
}
dispatch_userspace_task(ctx_frame);
}

View File

@@ -2,7 +2,7 @@ use core::convert::Infallible;
use libkernel::memory::address::VA;
use crate::sched::current::current_task;
use crate::sched::syscall_ctx::ProcessCtx;
/// Handles the `brk` system call.
///
@@ -19,9 +19,8 @@ use crate::sched::current::current_task;
/// - If `addr` is 0, it returns the current break.
/// - On a successful resize, it returns the new break.
/// - On a failed resize, it returns the current, unchanged break.
pub async fn sys_brk(addr: VA) -> Result<usize, Infallible> {
let task = current_task();
let mut vm = task.vm.lock_save_irq();
pub async fn sys_brk(ctx: &ProcessCtx, addr: VA) -> Result<usize, Infallible> {
let mut vm = ctx.shared().vm.lock_save_irq();
// The query case `brk(0)` is special and is handled separately from modifications.
if addr.is_null() {

View File

@@ -2,7 +2,7 @@ use alloc::vec;
use alloc::vec::Vec;
use crate::memory::uaccess::copy_to_user_slice;
use crate::sched::current::current_task;
use crate::sched::syscall_ctx::ProcessCtx;
use libkernel::memory::region::VirtMemoryRegion;
use libkernel::{
UserAddressSpace,
@@ -11,7 +11,7 @@ use libkernel::{
memory::address::{UA, VA},
};
pub async fn sys_mincore(start: u64, len: usize, vec: UA) -> Result<usize> {
pub async fn sys_mincore(ctx: &ProcessCtx, start: u64, len: usize, vec: UA) -> Result<usize> {
// addr must be a multiple of the system page size
// len must be > 0
let start_va = VA::from_value(start as usize);
@@ -35,8 +35,7 @@ pub async fn sys_mincore(start: u64, len: usize, vec: UA) -> Result<usize> {
let mut buf: Vec<u8> = vec![0; pages];
{
let task = current_task();
let mut vm_guard = task.vm.lock_save_irq();
let mut vm_guard = ctx.shared().vm.lock_save_irq();
let mm = vm_guard.mm_mut();
// Validate the entire region is covered by VMAs

View File

@@ -1,6 +1,6 @@
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::{process::fd_table::Fd, sched::current::current_task};
use crate::{process::fd_table::Fd, sched::syscall_ctx::ProcessCtx};
use alloc::string::{String, ToString};
use libkernel::{
error::{KernelError, Result},
@@ -46,6 +46,7 @@ fn prot_to_perms(prot: u64) -> VMAPermissions {
/// A `Result` containing the starting address of the new mapping on success,
/// or a `KernelError` on failure.
pub async fn sys_mmap(
ctx: &ProcessCtx,
addr: u64,
len: u64,
prot: u64,
@@ -90,7 +91,8 @@ pub async fn sys_mmap(
(VMAreaKind::Anon, String::new())
} else {
// File-backed mapping: require a valid fd and use the provided offset.
let fd = current_task()
let fd = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)
@@ -125,7 +127,7 @@ pub async fn sys_mmap(
};
// Lock the task and call the core memory manager to perform the mapping.
let new_mapping_addr = current_task().vm.lock_save_irq().mm_mut().mmap(
let new_mapping_addr = ctx.shared().vm.lock_save_irq().mm_mut().mmap(
address_request,
requested_len,
permissions,
@@ -136,10 +138,10 @@ pub async fn sys_mmap(
Ok(new_mapping_addr.value())
}
pub async fn sys_munmap(addr: VA, len: usize) -> Result<usize> {
pub async fn sys_munmap(ctx: &ProcessCtx, addr: VA, len: usize) -> Result<usize> {
let region = VirtMemoryRegion::new(addr, len);
let pages = current_task().vm.lock_save_irq().mm_mut().munmap(region)?;
let pages = ctx.shared().vm.lock_save_irq().mm_mut().munmap(region)?;
// Free any physical frames that were unmapped.
if !pages.is_empty() {
@@ -159,11 +161,11 @@ pub async fn sys_munmap(addr: VA, len: usize) -> Result<usize> {
Ok(0)
}
pub fn sys_mprotect(addr: VA, len: usize, prot: u64) -> Result<usize> {
pub fn sys_mprotect(ctx: &ProcessCtx, addr: VA, len: usize, prot: u64) -> Result<usize> {
let perms = prot_to_perms(prot);
let region = VirtMemoryRegion::new(addr, len);
current_task()
ctx.shared()
.vm
.lock_save_irq()
.mm_mut()

View File

@@ -2,18 +2,20 @@ use crate::fs::open_file::OpenFile;
use crate::memory::uaccess::{copy_from_user, copy_to_user, copy_to_user_slice};
use crate::net::SocketLen;
use crate::process::fd_table::Fd;
use crate::sched::current::current_task_shared;
use crate::sched::syscall_ctx::ProcessCtx;
use libkernel::error::KernelError;
use libkernel::fs::OpenFlags;
use libkernel::memory::address::{TUA, UA};
pub async fn sys_accept4(
ctx: &ProcessCtx,
fd: Fd,
addr: UA,
addrlen: TUA<SocketLen>,
_flags: i32,
) -> libkernel::error::Result<usize> {
let file = current_task_shared()
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)
@@ -29,7 +31,8 @@ pub async fn sys_accept4(
let new_socket = new_socket.as_file();
let open_file = OpenFile::new(new_socket, OpenFlags::empty());
let new_fd = current_task_shared()
let new_fd = ctx
.shared()
.fd_table
.lock_save_irq()
.insert(alloc::sync::Arc::new(open_file))?;
@@ -47,9 +50,10 @@ pub async fn sys_accept4(
}
pub async fn sys_accept(
ctx: &ProcessCtx,
fd: Fd,
addr: UA,
addrlen: TUA<SocketLen>,
) -> libkernel::error::Result<usize> {
sys_accept4(fd, addr, addrlen, 0).await
sys_accept4(ctx, fd, addr, addrlen, 0).await
}

View File

@@ -1,10 +1,16 @@
use crate::net::{SocketLen, parse_sockaddr};
use crate::process::fd_table::Fd;
use crate::sched::current::current_task;
use crate::sched::syscall_ctx::ProcessCtx;
use libkernel::memory::address::UA;
pub async fn sys_bind(fd: Fd, addr: UA, addrlen: SocketLen) -> libkernel::error::Result<usize> {
let file = current_task()
pub async fn sys_bind(
ctx: &ProcessCtx,
fd: Fd,
addr: UA,
addrlen: SocketLen,
) -> libkernel::error::Result<usize> {
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)

View File

@@ -1,9 +1,16 @@
use crate::net::{SocketLen, parse_sockaddr};
use crate::process::fd_table::Fd;
use crate::sched::syscall_ctx::ProcessCtx;
use libkernel::memory::address::UA;
pub async fn sys_connect(fd: Fd, addr: UA, addrlen: SocketLen) -> libkernel::error::Result<usize> {
let file = crate::sched::current::current_task()
pub async fn sys_connect(
ctx: &ProcessCtx,
fd: Fd,
addr: UA,
addrlen: SocketLen,
) -> libkernel::error::Result<usize> {
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)

View File

@@ -1,8 +1,10 @@
use crate::process::fd_table::Fd;
use crate::sched::syscall_ctx::ProcessCtx;
use libkernel::error::KernelError;
pub async fn sys_listen(fd: Fd, backlog: i32) -> libkernel::error::Result<usize> {
let file = crate::sched::current::current_task()
pub async fn sys_listen(ctx: &ProcessCtx, fd: Fd, backlog: i32) -> libkernel::error::Result<usize> {
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)

View File

@@ -2,10 +2,12 @@ use crate::memory::uaccess::{copy_from_user, copy_to_user, copy_to_user_slice};
use crate::net::sops::RecvFlags;
use crate::net::{SocketLen, parse_sockaddr};
use crate::process::fd_table::Fd;
use crate::sched::syscall_ctx::ProcessCtx;
use libkernel::error::KernelError;
use libkernel::memory::address::{TUA, UA};
pub async fn sys_recvfrom(
ctx: &ProcessCtx,
fd: Fd,
buf: UA,
len: usize,
@@ -13,7 +15,8 @@ pub async fn sys_recvfrom(
addr: UA,
addrlen: TUA<SocketLen>,
) -> libkernel::error::Result<usize> {
let file = crate::sched::current::current_task()
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)

View File

@@ -1,6 +1,7 @@
use crate::net::sops::SendFlags;
use crate::net::{SocketLen, parse_sockaddr};
use crate::process::fd_table::Fd;
use crate::sched::syscall_ctx::ProcessCtx;
use libkernel::error::Result;
use libkernel::memory::address::UA;
@@ -25,6 +26,7 @@ use libkernel::memory::address::UA;
const MSG_NOSIGNAL: i32 = 0x4000;
pub async fn sys_sendto(
ctx: &ProcessCtx,
fd: Fd,
buf: UA,
len: usize,
@@ -32,7 +34,8 @@ pub async fn sys_sendto(
addr: UA,
addrlen: SocketLen,
) -> Result<usize> {
let file = crate::sched::current::current_task()
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)

View File

@@ -1,8 +1,10 @@
use crate::net::ShutdownHow;
use crate::process::fd_table::Fd;
use crate::sched::syscall_ctx::ProcessCtx;
pub async fn sys_shutdown(fd: Fd, how: i32) -> libkernel::error::Result<usize> {
let file = crate::sched::current::current_task()
pub async fn sys_shutdown(ctx: &ProcessCtx, fd: Fd, how: i32) -> libkernel::error::Result<usize> {
let file = ctx
.shared()
.fd_table
.lock_save_irq()
.get(fd)

View File

@@ -3,7 +3,7 @@ use crate::fs::open_file::OpenFile;
use crate::net::tcp::TcpSocket;
use crate::net::unix::UnixSocket;
use crate::net::{AF_INET, AF_UNIX, IPPROTO_TCP, SOCK_DGRAM, SOCK_SEQPACKET, SOCK_STREAM};
use crate::sched::current::current_task_shared;
use crate::sched::syscall_ctx::ProcessCtx;
use alloc::boxed::Box;
use alloc::sync::Arc;
use libkernel::error::KernelError;
@@ -12,7 +12,12 @@ use libkernel::fs::OpenFlags;
pub const CLOSE_ON_EXEC: i32 = 0x80000;
pub const NONBLOCK: i32 = 0x800;
pub async fn sys_socket(domain: i32, type_: i32, protocol: i32) -> libkernel::error::Result<usize> {
pub async fn sys_socket(
ctx: &ProcessCtx,
domain: i32,
type_: i32,
protocol: i32,
) -> libkernel::error::Result<usize> {
let _close_on_exec = (type_ & CLOSE_ON_EXEC) != 0;
let _nonblock = (type_ & NONBLOCK) != 0;
// Mask out flags
@@ -28,7 +33,8 @@ pub async fn sys_socket(domain: i32, type_: i32, protocol: i32) -> libkernel::er
};
// TODO: Correct flags
let open_file = OpenFile::new(new_socket, OpenFlags::empty());
let fd = current_task_shared()
let fd = ctx
.shared()
.fd_table
.lock_save_irq()
.insert(Arc::new(open_file))?;

View File

@@ -3,7 +3,7 @@ use crate::{
UserCopyable, copy_from_user, copy_obj_array_from_user, copy_objs_to_user, copy_to_user,
},
process::TASK_LIST,
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
use libkernel::{
error::{KernelError, Result},
@@ -49,11 +49,15 @@ impl CapUserData {
unsafe impl UserCopyable for CapUserHeader {}
unsafe impl UserCopyable for CapUserData {}
pub async fn sys_capget(hdrp: TUA<CapUserHeader>, datap: TUA<CapUserData>) -> Result<usize> {
pub async fn sys_capget(
ctx: &ProcessCtx,
hdrp: TUA<CapUserHeader>,
datap: TUA<CapUserData>,
) -> Result<usize> {
let mut header = copy_from_user(hdrp).await?;
let task = if header.pid == 0 {
current_task_shared()
ctx.shared().clone()
} else {
TASK_LIST
.lock_save_irq()
@@ -83,12 +87,16 @@ pub async fn sys_capget(hdrp: TUA<CapUserHeader>, datap: TUA<CapUserData>) -> Re
Ok(0)
}
pub async fn sys_capset(hdrp: TUA<CapUserHeader>, datap: TUA<CapUserData>) -> Result<usize> {
pub async fn sys_capset(
ctx: &ProcessCtx,
hdrp: TUA<CapUserHeader>,
datap: TUA<CapUserData>,
) -> Result<usize> {
let mut header = copy_from_user(hdrp).await?;
let caller_caps = current_task_shared().creds.lock_save_irq().caps();
let caller_caps = ctx.shared().creds.lock_save_irq().caps();
let task = if header.pid == 0 {
current_task_shared()
ctx.shared().clone()
} else {
caller_caps.check_capable(CapabilitiesFlags::CAP_SETPCAP)?;
TASK_LIST

View File

@@ -1,11 +1,15 @@
use super::owned::OwnedTask;
use super::ptrace::{PTrace, TracePoint, ptrace_stop};
use super::{ctx::Context, thread_group::signal::SigSet};
use super::{
ctx::Context,
thread_group::signal::{AtomicSigSet, SigSet},
};
use crate::memory::uaccess::copy_to_user;
use crate::sched::sched_task::Work;
use crate::sched::syscall_ctx::ProcessCtx;
use crate::{
process::{TASK_LIST, Task},
sched::{self, current::current_task},
sched::{self},
sync::SpinLock,
};
use alloc::boxed::Box;
@@ -50,6 +54,7 @@ bitflags! {
}
pub async fn sys_clone(
ctx: &ProcessCtx,
flags: u32,
newsp: UA,
parent_tidptr: TUA<u32>,
@@ -60,10 +65,10 @@ pub async fn sys_clone(
// TODO: differentiate between `TracePoint::Fork`, `TracePoint::Clone` and
// `TracePoint::VFork`.
let should_trace_new_tsk = ptrace_stop(TracePoint::Fork).await;
let should_trace_new_tsk = ptrace_stop(ctx, TracePoint::Fork).await;
let new_task = {
let current_task = current_task();
let current_task = ctx.task();
let mut user_ctx = *current_task.ctx.user();
@@ -141,20 +146,20 @@ pub async fn sys_clone(
let creds = current_task.creds.lock_save_irq().clone();
let new_sigmask = current_task.sig_mask;
let new_sigmask = AtomicSigSet::new(current_task.sig_mask.load());
let initial_signals = if should_trace_new_tsk {
// When we want to trace a new task through one of
// PTRACE_O_TRACE{FORK,VFORK,CLONE}, stop the child as soon as
// it is created.
AtomicSigSet::new(SigSet::SIGSTOP)
} else {
AtomicSigSet::empty()
};
OwnedTask {
ctx: Context::from_user_ctx(user_ctx),
priority: current_task.priority,
sig_mask: new_sigmask,
pending_signals: if should_trace_new_tsk {
// When we want to trace a new task through one of
// PTRACE_O_TRACE{FORK,VFORK,CLONE}, stop the child as soon as
// it is created.
SigSet::SIGSTOP
} else {
SigSet::empty()
},
robust_list: None,
child_tid_ptr: if !child_tidptr.is_null() {
Some(child_tidptr)
@@ -171,6 +176,8 @@ pub async fn sys_clone(
root,
creds: SpinLock::new(creds),
ptrace: SpinLock::new(ptrace),
sig_mask: new_sigmask,
pending_signals: initial_signals,
utime: AtomicUsize::new(0),
stime: AtomicUsize::new(0),
last_account: AtomicUsize::new(0),

View File

@@ -3,7 +3,7 @@ use core::convert::Infallible;
use crate::process::thread_group::Sid;
use crate::{
memory::uaccess::{UserCopyable, copy_to_user},
sched::current::current_task,
sched::syscall_ctx::ProcessCtx,
};
use libkernel::{
error::Result,
@@ -70,48 +70,53 @@ impl Credentials {
}
}
pub fn sys_getuid() -> core::result::Result<usize, Infallible> {
let uid: u32 = current_task().creds.lock_save_irq().uid().into();
pub fn sys_getuid(ctx: &ProcessCtx) -> core::result::Result<usize, Infallible> {
let uid: u32 = ctx.shared().creds.lock_save_irq().uid().into();
Ok(uid as _)
}
pub fn sys_geteuid() -> core::result::Result<usize, Infallible> {
let uid: u32 = current_task().creds.lock_save_irq().euid().into();
pub fn sys_geteuid(ctx: &ProcessCtx) -> core::result::Result<usize, Infallible> {
let uid: u32 = ctx.shared().creds.lock_save_irq().euid().into();
Ok(uid as _)
}
pub fn sys_getgid() -> core::result::Result<usize, Infallible> {
let gid: u32 = current_task().creds.lock_save_irq().gid().into();
pub fn sys_getgid(ctx: &ProcessCtx) -> core::result::Result<usize, Infallible> {
let gid: u32 = ctx.shared().creds.lock_save_irq().gid().into();
Ok(gid as _)
}
pub fn sys_getegid() -> core::result::Result<usize, Infallible> {
let gid: u32 = current_task().creds.lock_save_irq().egid().into();
pub fn sys_getegid(ctx: &ProcessCtx) -> core::result::Result<usize, Infallible> {
let gid: u32 = ctx.shared().creds.lock_save_irq().egid().into();
Ok(gid as _)
}
pub fn sys_setfsuid(_new_id: usize) -> core::result::Result<usize, Infallible> {
pub fn sys_setfsuid(ctx: &ProcessCtx, _new_id: usize) -> core::result::Result<usize, Infallible> {
// Return the uid. This syscall is deprecated.
sys_getuid()
sys_getuid(ctx)
}
pub fn sys_setfsgid(_new_id: usize) -> core::result::Result<usize, Infallible> {
pub fn sys_setfsgid(ctx: &ProcessCtx, _new_id: usize) -> core::result::Result<usize, Infallible> {
// Return the gid. This syscall is deprecated.
sys_getgid()
sys_getgid(ctx)
}
pub fn sys_gettid() -> core::result::Result<usize, Infallible> {
let tid: u32 = current_task().tid.0;
pub fn sys_gettid(ctx: &ProcessCtx) -> core::result::Result<usize, Infallible> {
let tid: u32 = ctx.shared().tid.0;
Ok(tid as _)
}
pub async fn sys_getresuid(ruid: TUA<Uid>, euid: TUA<Uid>, suid: TUA<Uid>) -> Result<usize> {
let creds = current_task().creds.lock_save_irq().clone();
pub async fn sys_getresuid(
ctx: &ProcessCtx,
ruid: TUA<Uid>,
euid: TUA<Uid>,
suid: TUA<Uid>,
) -> Result<usize> {
let creds = ctx.shared().creds.lock_save_irq().clone();
copy_to_user(ruid, creds.uid).await?;
copy_to_user(euid, creds.euid).await?;
@@ -120,8 +125,13 @@ pub async fn sys_getresuid(ruid: TUA<Uid>, euid: TUA<Uid>, suid: TUA<Uid>) -> Re
Ok(0)
}
pub async fn sys_getresgid(rgid: TUA<Gid>, egid: TUA<Gid>, sgid: TUA<Gid>) -> Result<usize> {
let creds = current_task().creds.lock_save_irq().clone();
pub async fn sys_getresgid(
ctx: &ProcessCtx,
rgid: TUA<Gid>,
egid: TUA<Gid>,
sgid: TUA<Gid>,
) -> Result<usize> {
let creds = ctx.shared().creds.lock_save_irq().clone();
copy_to_user(rgid, creds.gid).await?;
copy_to_user(egid, creds.egid).await?;
@@ -130,14 +140,14 @@ pub async fn sys_getresgid(rgid: TUA<Gid>, egid: TUA<Gid>, sgid: TUA<Gid>) -> Re
Ok(0)
}
pub async fn sys_getsid() -> Result<usize> {
let sid: u32 = current_task().process.sid.lock_save_irq().value();
pub async fn sys_getsid(ctx: &ProcessCtx) -> Result<usize> {
let sid: u32 = ctx.shared().process.sid.lock_save_irq().value();
Ok(sid as _)
}
pub async fn sys_setsid() -> Result<usize> {
let process = current_task().process.clone();
pub async fn sys_setsid(ctx: &ProcessCtx) -> Result<usize> {
let process = ctx.shared().process.clone();
let new_sid = process.tgid.value();
*process.sid.lock_save_irq() = Sid(new_sid);

View File

@@ -1,7 +1,7 @@
use crate::ArchImpl;
use crate::process::Comm;
use crate::process::ptrace::{TracePoint, ptrace_stop};
use crate::sched::current::current_task_shared;
use crate::sched::syscall_ctx::ProcessCtx;
use crate::{
arch::Arch,
fs::VFS,
@@ -10,7 +10,6 @@ use crate::{
uaccess::{copy_from_user, cstr::UserCStr},
},
process::{ctx::Context, thread_group::signal::SignalActionState},
sched::current::current_task,
};
use alloc::borrow::ToOwned;
use alloc::{string::String, vec};
@@ -85,6 +84,7 @@ fn process_prog_headers<E: Endian>(
}
async fn exec_elf(
ctx: &mut ProcessCtx,
inode: Arc<dyn Inode>,
path: &Path,
argv: Vec<String>,
@@ -164,7 +164,7 @@ async fn exec_elf(
auxv.push(LINKER_BIAS as _);
// Returns the entry address of the interp program.
process_interp(path, &mut vmas).await?
process_interp(ctx, path, &mut vmas).await?
} else {
// Otherwise, it's just the binary itself.
main_entry
@@ -184,7 +184,7 @@ async fn exec_elf(
let stack_ptr = setup_user_stack(&mut mem_map, &argv, &envp, auxv)?;
// We are now committed to the exec. Inform ptrace.
ptrace_stop(TracePoint::Exec).await;
ptrace_stop(ctx, TracePoint::Exec).await;
let user_ctx = ArchImpl::new_user_context(entry_addr, stack_ptr);
let mut vm = ProcessVM::from_map(mem_map);
@@ -198,7 +198,7 @@ async fn exec_elf(
let new_comm = argv.first().map(|s| Comm::new(s.as_str()));
{
let mut current_task = current_task();
let current_task = ctx.task_mut();
if let Some(new_comm) = new_comm {
*current_task.comm.lock_save_irq() = new_comm;
@@ -210,15 +210,16 @@ async fn exec_elf(
}
// Close all the CLOEXEC FDs.
let mut fd_table = current_task().fd_table.lock_save_irq().clone();
let mut fd_table = ctx.shared().fd_table.lock_save_irq().clone();
fd_table.close_cloexec_entries().await;
*current_task().fd_table.lock_save_irq() = fd_table;
*current_task().process.executable.lock_save_irq() = Some(path.to_owned());
*ctx.shared().fd_table.lock_save_irq() = fd_table;
*ctx.shared().process.executable.lock_save_irq() = Some(path.to_owned());
Ok(())
}
async fn exec_script(
ctx: &mut ProcessCtx,
path: &Path,
inode: Arc<dyn Inode>,
argv: Vec<String>,
@@ -247,16 +248,17 @@ async fn exec_script(
new_argv.extend(argv.into_iter().skip(1)); // Skip original argv[0]
// Resolve interpreter inode
let interp_path = Path::new(interp_path);
let task = current_task_shared();
let task = ctx.shared();
let interp_inode = VFS
.resolve_path(interp_path, VFS.root_inode(), &task)
.resolve_path(interp_path, VFS.root_inode(), task)
.await?;
// Execute interpreter
exec_elf(interp_inode, interp_path, new_argv, envp).await?;
exec_elf(ctx, interp_inode, interp_path, new_argv, envp).await?;
Ok(())
}
pub async fn kernel_exec(
ctx: &mut ProcessCtx,
path: &Path,
inode: Arc<dyn Inode>,
argv: Vec<String>,
@@ -265,9 +267,9 @@ pub async fn kernel_exec(
let mut buf = [0u8; 4];
inode.read_at(0, &mut buf).await?;
if buf == [0x7F, b'E', b'L', b'F'] {
exec_elf(inode, path, argv, envp).await
exec_elf(ctx, inode, path, argv, envp).await
} else if buf.starts_with(b"#!") {
exec_script(path, inode, argv, envp).await
exec_script(ctx, path, inode, argv, envp).await
} else {
Err(ExecError::InvalidElfFormat.into())
}
@@ -386,11 +388,15 @@ fn setup_user_stack(
// Dynamic linker path: map PT_INTERP interpreter and return start address of
// the interpreter program.
async fn process_interp(interp_path: String, vmas: &mut Vec<VMArea>) -> Result<VA> {
async fn process_interp(
ctx: &ProcessCtx,
interp_path: String,
vmas: &mut Vec<VMArea>,
) -> Result<VA> {
// Resolve interpreter path from root; this assumes interp_path is absolute.
let task = current_task_shared();
let task = ctx.shared();
let path = Path::new(&interp_path);
let interp_inode = VFS.resolve_path(path, VFS.root_inode(), &task).await?;
let interp_inode = VFS.resolve_path(path, VFS.root_inode(), task).await?;
// Parse interpreter ELF header
let mut hdr_buf = [0u8; core::mem::size_of::<elf::FileHeader64<LittleEndian>>()];
@@ -425,11 +431,12 @@ async fn process_interp(interp_path: String, vmas: &mut Vec<VMArea>) -> Result<V
}
pub async fn sys_execve(
ctx: &mut ProcessCtx,
path: TUA<c_char>,
mut usr_argv: TUA<TUA<c_char>>,
mut usr_env: TUA<TUA<c_char>>,
) -> Result<usize> {
let task = current_task_shared();
let task = ctx.shared().clone();
let mut buf = [0; 1024];
let mut argv = Vec::new();
let mut envp = Vec::new();
@@ -461,7 +468,7 @@ pub async fn sys_execve(
let path = Path::new(UserCStr::from_ptr(path).copy_from_user(&mut buf).await?);
let inode = VFS.resolve_path(path, VFS.root_inode(), &task).await?;
kernel_exec(path, inode, argv, envp).await?;
kernel_exec(ctx, path, inode, argv, envp).await?;
Ok(0)
}

View File

@@ -1,18 +1,18 @@
use super::{
TASK_LIST,
TASK_LIST, Task,
ptrace::{TracePoint, ptrace_stop},
thread_group::{ProcessState, Tgid, ThreadGroup, signal::SigId, wait::ChildState},
threading::futex::{self, key::FutexKey},
};
use crate::sched::{self, current::current_task};
use crate::{memory::uaccess::copy_to_user, sched::current::current_task_shared};
use crate::memory::uaccess::copy_to_user;
use crate::sched::syscall_ctx::ProcessCtx;
use crate::sched::{self};
use alloc::vec::Vec;
use libkernel::error::Result;
use log::warn;
use ringbuf::Arc;
pub fn do_exit_group(exit_code: ChildState) {
let task = current_task();
pub fn do_exit_group(task: &Arc<Task>, exit_code: ChildState) {
let process = Arc::clone(&task.process);
if process.tgid.is_init() {
@@ -34,7 +34,6 @@ pub fn do_exit_group(exit_code: ChildState) {
if *process_state != ProcessState::Running {
// We're already on our way out. Just kill this thread.
drop(process_state);
drop(task);
sched::current_work().state.finish();
return;
}
@@ -88,44 +87,46 @@ pub fn do_exit_group(exit_code: ChildState) {
.set_signal(SigId::SIGCHLD);
// 5. This thread is now finished.
drop(task);
sched::current_work().state.finish();
// NOTE: the scheduler will never execute the task again since its
// state is set to Finished.
}
pub fn kernel_exit_with_signal(signal: SigId, core: bool) {
do_exit_group(ChildState::SignalExit { signal, core });
pub fn kernel_exit_with_signal(task: Arc<Task>, signal: SigId, core: bool) {
do_exit_group(&task, ChildState::SignalExit { signal, core });
}
pub async fn sys_exit_group(exit_code: usize) -> Result<usize> {
ptrace_stop(TracePoint::Exit).await;
pub async fn sys_exit_group(ctx: &ProcessCtx, exit_code: usize) -> Result<usize> {
ptrace_stop(ctx, TracePoint::Exit).await;
do_exit_group(ChildState::NormalExit {
code: exit_code as _,
});
do_exit_group(
ctx.shared(),
ChildState::NormalExit {
code: exit_code as _,
},
);
Ok(0)
}
pub async fn sys_exit(exit_code: usize) -> Result<usize> {
pub async fn sys_exit(ctx: &mut ProcessCtx, exit_code: usize) -> Result<usize> {
// Honour CLONE_CHILD_CLEARTID: clear the user TID word and futex-wake any waiters.
let ptr = current_task().child_tid_ptr.take();
let ptr = ctx.task_mut().child_tid_ptr.take();
ptrace_stop(TracePoint::Exit).await;
ptrace_stop(ctx, TracePoint::Exit).await;
if let Some(ptr) = ptr {
copy_to_user(ptr, 0u32).await?;
if let Ok(key) = FutexKey::new_shared(ptr) {
if let Ok(key) = FutexKey::new_shared(ctx, ptr) {
futex::wake_key(1, key, u32::MAX);
} else {
warn!("Failed to get futex wake key on sys_exit");
}
}
let task = current_task_shared();
let task = ctx.shared();
let process = Arc::clone(&task.process);
let mut tasks_lock = process.tasks.lock_save_irq();
@@ -146,9 +147,12 @@ pub async fn sys_exit(exit_code: usize) -> Result<usize> {
// we've established we're the only thread and we're executing a
// sys_exit, there can absolutely be no way that a new thread can be
// spawned on this process while the thread_lock is released.
do_exit_group(ChildState::NormalExit {
code: exit_code as _,
});
do_exit_group(
task,
ChildState::NormalExit {
code: exit_code as _,
},
);
Ok(0)
} else {

View File

@@ -1,4 +1,4 @@
use crate::sched::current::current_task;
use crate::sched::syscall_ctx::ProcessCtx;
use libkernel::{
error::{KernelError, Result},
fs::OpenFlags,
@@ -6,8 +6,8 @@ use libkernel::{
use super::{Fd, FdFlags, FileDescriptorEntry};
pub fn dup_fd(fd: Fd, min_fd: Option<Fd>) -> Result<Fd> {
let task = current_task();
pub fn dup_fd(ctx: &ProcessCtx, fd: Fd, min_fd: Option<Fd>) -> Result<Fd> {
let task = ctx.shared();
let mut files = task.fd_table.lock_save_irq();
let file = files.get(fd).ok_or(KernelError::BadFd)?;
@@ -20,13 +20,13 @@ pub fn dup_fd(fd: Fd, min_fd: Option<Fd>) -> Result<Fd> {
Ok(new_fd)
}
pub fn sys_dup(fd: Fd) -> Result<usize> {
let new_fd = dup_fd(fd, None)?;
pub fn sys_dup(ctx: &ProcessCtx, fd: Fd) -> Result<usize> {
let new_fd = dup_fd(ctx, fd, None)?;
Ok(new_fd.as_raw() as _)
}
pub fn sys_dup3(oldfd: Fd, newfd: Fd, flags: u32) -> Result<usize> {
pub fn sys_dup3(ctx: &ProcessCtx, oldfd: Fd, newfd: Fd, flags: u32) -> Result<usize> {
if oldfd == newfd {
return Err(KernelError::InvalidValue);
}
@@ -38,7 +38,7 @@ pub fn sys_dup3(oldfd: Fd, newfd: Fd, flags: u32) -> Result<usize> {
return Err(KernelError::InvalidValue);
}
let task = current_task();
let task = ctx.shared();
let mut files = task.fd_table.lock_save_irq();
let old_file = files.get(oldfd).ok_or(KernelError::BadFd)?;

View File

@@ -1,6 +1,6 @@
use super::Fd;
use crate::process::fd_table::dup::dup_fd;
use crate::{process::fd_table::FdFlags, sched::current::current_task_shared};
use crate::{process::fd_table::FdFlags, sched::syscall_ctx::ProcessCtx};
use bitflags::Flags;
use libkernel::error::{KernelError, Result};
use libkernel::fs::OpenFlags;
@@ -11,11 +11,11 @@ const F_SETFD: u32 = 2; // Set file descriptor flags.
const F_GETFL: u32 = 3; // Get file status flags.
const F_SETFL: u32 = 4; // Set file status flags.
pub async fn sys_fcntl(fd: Fd, op: u32, arg: usize) -> Result<usize> {
let task = current_task_shared();
pub async fn sys_fcntl(ctx: &ProcessCtx, fd: Fd, op: u32, arg: usize) -> Result<usize> {
let task = ctx.shared();
match op {
F_DUPFD => dup_fd(fd, Some(Fd(arg as i32))).map(|new_fd| new_fd.as_raw() as _),
F_DUPFD => dup_fd(ctx, fd, Some(Fd(arg as i32))).map(|new_fd| new_fd.as_raw() as _),
F_GETFD => {
let fds = task.fd_table.lock_save_irq();
let fd = fds

View File

@@ -12,7 +12,7 @@ use crate::{
UserCopyable, copy_from_user, copy_obj_array_from_user, copy_objs_to_user, copy_to_user,
},
process::thread_group::signal::SigSet,
sched::current::current_task_shared,
sched::syscall_ctx::ProcessCtx,
};
use super::Fd;
@@ -61,6 +61,7 @@ unsafe impl UserCopyable for FdSet {}
// TODO: writefds, exceptfds, timeout.
pub async fn sys_pselect6(
ctx: &ProcessCtx,
max: i32,
readfds: TUA<FdSet>,
_writefds: TUA<FdSet>,
@@ -68,7 +69,7 @@ pub async fn sys_pselect6(
timeout: TUA<TimeSpec>,
_mask: TUA<SigSet>,
) -> Result<usize> {
let task = current_task_shared();
let task = ctx.shared();
let mut read_fd_set = copy_from_user(readfds).await?;
@@ -162,13 +163,14 @@ pub struct PollFd {
unsafe impl UserCopyable for PollFd {}
pub async fn sys_ppoll(
ctx: &ProcessCtx,
ufds: TUA<PollFd>,
nfds: u32,
timeout: TUA<TimeSpec>,
_sigmask: TUA<SigSet>,
_sigset_len: usize,
) -> Result<usize> {
let task = current_task_shared();
let task = ctx.shared();
let mut poll_fds = copy_obj_array_from_user(ufds, nfds as _).await?;

View File

@@ -29,6 +29,7 @@ use libkernel::{
},
};
use ptrace::PTrace;
use thread_group::signal::{AtomicSigSet, SigId};
use thread_group::{Tgid, ThreadGroup};
pub mod caps;
@@ -156,6 +157,8 @@ pub struct Task {
pub creds: SpinLock<Credentials>,
pub fd_table: Arc<SpinLock<FileDescriptorTable>>,
pub ptrace: SpinLock<PTrace>,
pub sig_mask: AtomicSigSet,
pub pending_signals: AtomicSigSet,
pub utime: AtomicUsize,
pub stime: AtomicUsize,
pub last_account: AtomicUsize,
@@ -174,6 +177,35 @@ impl Task {
self.tid
}
/// Raise a signal on this specific task (thread-directed).
pub fn raise_task_signal(&self, signal: SigId) {
self.pending_signals.insert(signal.into());
}
/// Check for a pending signal on this task or its process, respecting the
/// signal mask.
pub fn peek_signal(&self) -> Option<SigId> {
let mask = self.sig_mask.load();
self.pending_signals.peek_signal(mask).or_else(|| {
self.process
.pending_signals
.lock_save_irq()
.peek_signal(mask)
})
}
/// Take a pending signal from this task or its process, respecting the
/// signal mask.
pub fn take_signal(&self) -> Option<SigId> {
let mask = self.sig_mask.load();
self.pending_signals.take_signal(mask).or_else(|| {
self.process
.pending_signals
.lock_save_irq()
.take_signal(mask)
})
}
/// Return a new descriptor that uniquely represents this task in the
/// system.
pub fn descriptor(&self) -> TaskDescriptor {

View File

@@ -7,7 +7,7 @@ use super::{
thread_group::{
Tgid,
builder::ThreadGroupBuilder,
signal::{SigId, SigSet, SignalActionState},
signal::{AtomicSigSet, SignalActionState},
},
threading::RobustListHead,
};
@@ -32,8 +32,6 @@ use libkernel::{
/// between other tasks and can therefore be access lock-free.
pub struct OwnedTask {
pub ctx: Context,
pub sig_mask: SigSet,
pub pending_signals: SigSet,
pub priority: Option<i8>,
pub robust_list: Option<TUA<RobustListHead>>,
pub child_tid_ptr: Option<TUA<u32>>,
@@ -78,13 +76,13 @@ impl OwnedTask {
utime: AtomicUsize::new(0),
stime: AtomicUsize::new(0),
last_account: AtomicUsize::new(0),
pending_signals: AtomicSigSet::empty(),
sig_mask: AtomicSigSet::empty(),
};
Self {
priority: Some(i8::MIN),
ctx: Context::from_user_ctx(user_ctx),
sig_mask: SigSet::empty(),
pending_signals: SigSet::empty(),
robust_list: None,
child_tid_ptr: None,
t_shared: Arc::new(task),
@@ -108,11 +106,11 @@ impl OwnedTask {
last_account: AtomicUsize::new(0),
utime: AtomicUsize::new(0),
stime: AtomicUsize::new(0),
pending_signals: AtomicSigSet::empty(),
sig_mask: AtomicSigSet::empty(),
};
Self {
pending_signals: SigSet::empty(),
sig_mask: SigSet::empty(),
priority: None,
ctx: Context::from_user_ctx(<ArchImpl as Arch>::new_user_context(
VA::null(),
@@ -134,32 +132,6 @@ impl OwnedTask {
self.priority = Some(priority);
}
pub fn raise_task_signal(&mut self, signal: SigId) {
self.pending_signals.insert(signal.into());
}
/// Take a pending signal from this task's pending signal queue, or the
/// process's pending signal queue, while respecting the signal mask.
pub fn take_signal(&mut self) -> Option<SigId> {
self.pending_signals.take_signal(self.sig_mask).or_else(|| {
self.process
.pending_signals
.lock_save_irq()
.take_signal(self.sig_mask)
})
}
/// Check for a pending signal from this task's pending signal queue, or the
/// process's pending signal queue, while respecting the signal mask.
pub fn peek_signal(&self) -> Option<SigId> {
self.pending_signals.peek_signal(self.sig_mask).or_else(|| {
self.process
.pending_signals
.lock_save_irq()
.peek_signal(self.sig_mask)
})
}
pub fn update_accounting(&self, curr_time: Option<Instant>) {
let now = curr_time.unwrap_or_else(|| now().unwrap());
if self.in_syscall {

View File

@@ -1,7 +1,7 @@
use crate::memory::uaccess::copy_to_user_slice;
use crate::memory::uaccess::cstr::UserCStr;
use crate::process::Comm;
use crate::sched::current::current_task_shared;
use crate::sched::syscall_ctx::ProcessCtx;
use bitflags::Flags;
use core::ffi::c_char;
use libkernel::error::{KernelError, Result};
@@ -38,39 +38,39 @@ impl TryFrom<u64> for AmbientCapOp {
}
}
fn pr_read_capbset(what: usize) -> Result<usize> {
fn pr_read_capbset(ctx: &ProcessCtx, what: usize) -> Result<usize> {
let what = CapabilitiesFlags::from_bits(1u64 << what).ok_or(KernelError::InvalidValue)?;
let task = current_task_shared();
let task = ctx.shared();
let creds = task.creds.lock_save_irq();
Ok(creds.caps.bounding().contains(what) as _)
}
async fn pr_drop_capbset(what: usize) -> Result<usize> {
async fn pr_drop_capbset(ctx: &ProcessCtx, what: usize) -> Result<usize> {
let what = CapabilitiesFlags::from_bits(1u64 << what).ok_or(KernelError::InvalidValue)?;
let task = current_task_shared();
let task = ctx.shared();
let mut creds = task.creds.lock_save_irq();
creds.caps.bounding_mut().remove(what);
Ok(0)
}
async fn pr_get_name(str: TUA<c_char>) -> Result<usize> {
let task = current_task_shared();
async fn pr_get_name(ctx: &ProcessCtx, str: TUA<c_char>) -> Result<usize> {
let task = ctx.shared();
let comm = task.comm.lock_save_irq().0;
copy_to_user_slice(&comm, str.to_untyped()).await?;
Ok(0)
}
async fn pr_set_name(str: TUA<c_char>) -> Result<usize> {
let task = current_task_shared();
async fn pr_set_name(ctx: &ProcessCtx, str: TUA<c_char>) -> Result<usize> {
let task = ctx.shared();
let mut buf: [u8; 64] = [0; 64];
let name = UserCStr::from_ptr(str).copy_from_user(&mut buf).await?;
*task.comm.lock_save_irq() = Comm::new(name);
Ok(0)
}
async fn pr_cap_ambient(op: u64, arg1: u64) -> Result<usize> {
async fn pr_cap_ambient(ctx: &ProcessCtx, op: u64, arg1: u64) -> Result<usize> {
let op = AmbientCapOp::try_from(op)?;
let task = current_task_shared();
let task = ctx.shared();
match op {
AmbientCapOp::ClearAll => {
let mut creds = task.creds.lock_save_irq();
@@ -107,15 +107,15 @@ async fn pr_cap_ambient(op: u64, arg1: u64) -> Result<usize> {
}
}
pub async fn sys_prctl(op: i32, arg1: u64, arg2: u64) -> Result<usize> {
pub async fn sys_prctl(ctx: &ProcessCtx, op: i32, arg1: u64, arg2: u64) -> Result<usize> {
match op {
PR_SET_NAME => pr_set_name(TUA::from_value(arg1 as usize)).await,
PR_GET_NAME => pr_get_name(TUA::from_value(arg1 as usize)).await,
PR_CAPBSET_READ => pr_read_capbset(arg1 as usize),
PR_CAPBSET_DROP => pr_drop_capbset(arg1 as usize).await,
PR_SET_NAME => pr_set_name(ctx, TUA::from_value(arg1 as usize)).await,
PR_GET_NAME => pr_get_name(ctx, TUA::from_value(arg1 as usize)).await,
PR_CAPBSET_READ => pr_read_capbset(ctx, arg1 as usize),
PR_CAPBSET_DROP => pr_drop_capbset(ctx, arg1 as usize).await,
PR_GET_SECUREBITS => Ok(0),
PR_GET_NO_NEW_PRIVS => Ok(0),
PR_CAP_AMBIENT => pr_cap_ambient(arg1, arg2).await,
PR_CAP_AMBIENT => pr_cap_ambient(ctx, arg1, arg2).await,
_ => todo!("prctl op: {}", op),
}
}

View File

@@ -4,7 +4,7 @@ use crate::{
fs::syscalls::iov::IoVec,
memory::uaccess::{copy_from_user, copy_to_user},
process::{TASK_LIST, thread_group::signal::SigId},
sched::current::{current_task, current_task_shared},
sched::syscall_ctx::ProcessCtx,
};
use alloc::sync::Arc;
use bitflags::Flags;
@@ -255,8 +255,8 @@ impl TryFrom<i32> for PtraceOperation {
}
}
pub async fn ptrace_stop(point: TracePoint) -> bool {
let task_sh = current_task_shared();
pub async fn ptrace_stop(ctx: &ProcessCtx, point: TracePoint) -> bool {
let task_sh = ctx.shared();
let mut notified = false;
poll_fn(|cx| {
@@ -266,7 +266,7 @@ pub async fn ptrace_stop(point: TracePoint) -> bool {
// First poll: hit the trace point, set waker, then notify.
// The waker must be set *before* notification so the tracer
// can always find it when it does PTRACE_SYSCALL/CONT.
if !ptrace.hit_trace_point(point, current_task().ctx.user()) {
if !ptrace.hit_trace_point(point, ctx.task().ctx.user()) {
return Poll::Ready(false);
}
@@ -287,11 +287,11 @@ pub async fn ptrace_stop(point: TracePoint) -> bool {
.await
}
pub async fn sys_ptrace(op: i32, pid: u64, addr: UA, data: UA) -> Result<usize> {
pub async fn sys_ptrace(ctx: &ProcessCtx, op: i32, pid: u64, addr: UA, data: UA) -> Result<usize> {
let op = PtraceOperation::try_from(op)?;
if op == PtraceOperation::TraceMe {
let current_task = current_task_shared();
let current_task = ctx.shared();
let mut ptrace = current_task.ptrace.lock_save_irq();
ptrace.state = Some(PTraceState::Running);

View File

@@ -1,6 +1,6 @@
use libkernel::error::{KernelError, Result};
use crate::sched::current::current_task;
use crate::sched::syscall_ctx::ProcessCtx;
use core::convert::Infallible;
use super::{Pgid, Tgid, ThreadGroup};
@@ -8,12 +8,13 @@ use super::{Pgid, Tgid, ThreadGroup};
/// Userspace `pid_t` type.
pub type PidT = i32;
pub fn sys_getpid() -> core::result::Result<usize, Infallible> {
Ok(current_task().process.tgid.value() as _)
pub fn sys_getpid(ctx: &ProcessCtx) -> core::result::Result<usize, Infallible> {
Ok(ctx.shared().process.tgid.value() as _)
}
pub fn sys_getppid() -> core::result::Result<usize, Infallible> {
Ok(current_task()
pub fn sys_getppid(ctx: &ProcessCtx) -> core::result::Result<usize, Infallible> {
Ok(ctx
.shared()
.process
.parent
.lock_save_irq()
@@ -23,9 +24,9 @@ pub fn sys_getppid() -> core::result::Result<usize, Infallible> {
.unwrap_or(0) as _)
}
pub fn sys_getpgid(pid: PidT) -> Result<usize> {
pub fn sys_getpgid(ctx: &ProcessCtx, pid: PidT) -> Result<usize> {
let pgid = if pid == 0 {
*current_task().process.pgid.lock_save_irq()
*ctx.shared().process.pgid.lock_save_irq()
} else if let Some(tg) = ThreadGroup::get(Tgid::from_pid_t(pid)) {
*tg.pgid.lock_save_irq()
} else {
@@ -35,9 +36,9 @@ pub fn sys_getpgid(pid: PidT) -> Result<usize> {
Ok(pgid.value() as _)
}
pub fn sys_setpgid(pid: PidT, pgid: Pgid) -> Result<usize> {
pub fn sys_setpgid(ctx: &ProcessCtx, pid: PidT, pgid: Pgid) -> Result<usize> {
if pid == 0 {
*current_task().process.pgid.lock_save_irq() = pgid;
*ctx.shared().process.pgid.lock_save_irq() = pgid;
} else if let Some(tg) = ThreadGroup::get(Tgid::from_pid_t(pid)) {
*tg.pgid.lock_save_irq() = pgid;
} else {

View File

@@ -6,7 +6,7 @@ use libkernel::{
use crate::{
memory::uaccess::{UserCopyable, copy_from_user, copy_to_user},
process::thread_group::{TG_LIST, Tgid},
sched::current::current_task,
sched::syscall_ctx::ProcessCtx,
};
use super::pid::PidT;
@@ -184,6 +184,7 @@ impl ResourceLimits {
}
pub async fn sys_prlimit64(
ctx: &ProcessCtx,
pid: PidT,
resource: u32,
new_rlim: TUA<RLimit>,
@@ -192,7 +193,7 @@ pub async fn sys_prlimit64(
let resource: RlimitId = resource.try_into()?;
let task = if pid == 0 {
current_task().process.clone()
ctx.shared().process.clone()
} else {
TG_LIST
.lock_save_irq()

View File

@@ -1,10 +1,12 @@
use crate::{memory::uaccess::UserCopyable, sched::current::current_task};
use crate::{memory::uaccess::UserCopyable, process::Task, sched::current_work};
use alloc::sync::Arc;
use bitflags::bitflags;
use core::{
alloc::Layout,
fmt::Display,
mem::transmute,
ops::{Index, IndexMut},
sync::atomic::{AtomicU64, Ordering},
task::Poll,
};
use ksigaction::{KSignalAction, UserspaceSigAction};
@@ -102,6 +104,57 @@ impl SigSet {
}
}
/// A signal set whose contents can be read and updated atomically, allowing
/// lock-free access from multiple CPUs.
pub struct AtomicSigSet(AtomicU64);

impl AtomicSigSet {
    /// Build an atomic set initialised to the contents of `set`.
    pub const fn new(set: SigSet) -> Self {
        Self(AtomicU64::new(set.bits()))
    }

    /// Build an atomic set containing no signals.
    pub const fn empty() -> Self {
        Self(AtomicU64::new(0))
    }

    /// Atomically add every signal in `signal` to the set.
    pub fn insert(&self, signal: SigSet) {
        self.0.fetch_or(signal.bits(), Ordering::Relaxed);
    }

    /// Report a pending signal that is not blocked by `mask`, leaving the set
    /// unchanged.
    pub fn peek_signal(&self, mask: SigSet) -> Option<SigId> {
        self.load().peek_signal(mask)
    }

    /// Atomically remove and return one pending signal that is not blocked by
    /// `mask`, or `None` if no deliverable signal is pending.
    ///
    /// Implemented as a compare-exchange loop: on contention the snapshot is
    /// re-taken and the removal retried.
    pub fn take_signal(&self, mask: SigSet) -> Option<SigId> {
        loop {
            let observed = self.0.load(Ordering::Relaxed);
            let pending = SigSet::from_bits_retain(observed);
            // Bail out as soon as nothing deliverable remains.
            let signal = pending.peek_signal(mask)?;
            let remaining = pending.difference(signal.into()).bits();
            if self
                .0
                .compare_exchange(observed, remaining, Ordering::Relaxed, Ordering::Relaxed)
                .is_ok()
            {
                return Some(signal);
            }
            // Lost a race with a concurrent update; re-read and try again.
        }
    }

    /// Snapshot the current contents as a plain `SigSet`.
    pub fn load(&self) -> SigSet {
        SigSet::from_bits_retain(self.0.load(Ordering::Relaxed))
    }

    /// Overwrite the whole set with `set`.
    pub fn store(&self, set: SigSet) {
        self.0.store(set.bits(), Ordering::Relaxed);
    }
}
#[repr(u32)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[allow(clippy::upper_case_acronyms)]
@@ -276,6 +329,7 @@ pub trait Interruptable<T, F: Future<Output = T>> {
/// signal.
pub struct InterruptableFut<T, F: Future<Output = T>> {
sub_fut: F,
task: Arc<Task>,
}
impl<T, F: Future<Output = T>> Interruptable<T, F> for F {
@@ -283,7 +337,10 @@ impl<T, F: Future<Output = T>> Interruptable<T, F> for F {
// TODO: Set the task state to a new variant `Interruptable`. This
// allows the `deliver_signal` code to wake up a task to deliver a
// signal to where it will be actioned.
InterruptableFut { sub_fut: self }
InterruptableFut {
sub_fut: self,
task: Arc::clone(&*current_work()),
}
}
}
@@ -295,8 +352,9 @@ impl<T, F: Future<Output = T>> Future for InterruptableFut<T, F> {
cx: &mut core::task::Context<'_>,
) -> Poll<Self::Output> {
// Try the underlying future first.
let this = unsafe { self.get_unchecked_mut() };
let res = unsafe {
self.map_unchecked_mut(|f| &mut f.sub_fut)
core::pin::Pin::new_unchecked(&mut this.sub_fut)
.poll(cx)
.map(|x| InterruptResult::Uninterrupted(x))
};
@@ -306,7 +364,7 @@ impl<T, F: Future<Output = T>> Future for InterruptableFut<T, F> {
}
// See if there's a pending signal which interrupts this future.
if current_task().peek_signal().is_some() {
if this.task.peek_signal().is_some() {
Poll::Ready(InterruptResult::Interrupted)
} else {
Poll::Pending

View File

@@ -3,17 +3,17 @@ use crate::{
Tid,
thread_group::{Pgid, Tgid, ThreadGroup, pid::PidT},
},
sched::current::current_task,
sched::syscall_ctx::ProcessCtx,
};
use super::{SigId, uaccess::UserSigId};
use crate::process::thread_group::TG_LIST;
use libkernel::error::{KernelError, Result};
pub fn sys_kill(pid: PidT, signal: UserSigId) -> Result<usize> {
pub fn sys_kill(ctx: &ProcessCtx, pid: PidT, signal: UserSigId) -> Result<usize> {
let signal: SigId = signal.try_into()?;
let current_task = current_task();
let current_task = ctx.shared();
// Kill ourselves
if pid == current_task.process.tgid.value() as PidT {
current_task.process.deliver_signal(signal);
@@ -63,9 +63,9 @@ pub fn sys_kill(pid: PidT, signal: UserSigId) -> Result<usize> {
Ok(0)
}
pub fn sys_tkill(tid: PidT, signal: UserSigId) -> Result<usize> {
pub fn sys_tkill(ctx: &ProcessCtx, tid: PidT, signal: UserSigId) -> Result<usize> {
let target_tid = Tid(tid as _);
let current_task = current_task();
let current_task = ctx.shared();
let signal: SigId = signal.try_into()?;

View File

@@ -3,7 +3,7 @@ use libkernel::error::{KernelError, Result};
use libkernel::memory::address::TUA;
use crate::memory::uaccess::{UserCopyable, copy_from_user, copy_to_user};
use crate::sched::current::current_task;
use crate::sched::syscall_ctx::ProcessCtx;
use super::ksigaction::UserspaceSigAction;
use super::uaccess::UserSigId;
@@ -88,6 +88,7 @@ impl From<SigActionState> for UserSigAction {
}
pub async fn sys_rt_sigaction(
ctx: &ProcessCtx,
sig: UserSigId,
act: TUA<UserSigAction>,
oact: TUA<UserSigAction>,
@@ -110,7 +111,7 @@ pub async fn sys_rt_sigaction(
};
let old_action = {
let task = current_task();
let task = ctx.shared();
let mut sigstate = task.process.signals.lock_save_irq();
let old_action = sigstate.action[sig];

View File

@@ -1,6 +1,6 @@
use crate::{
memory::uaccess::{UserCopyable, copy_from_user, copy_to_user},
sched::current::current_task,
sched::syscall_ctx::ProcessCtx,
};
use bitflags::bitflags;
use libkernel::{
@@ -73,6 +73,7 @@ impl From<Option<AltSigStack>> for UserSigAltStack {
}
pub async fn sys_sigaltstack(
ctx: &ProcessCtx,
ss: TUA<UserSigAltStack>,
old_ss: TUA<UserSigAltStack>,
) -> Result<usize> {
@@ -83,7 +84,7 @@ pub async fn sys_sigaltstack(
};
let old_ss_value = {
let task = current_task();
let task = ctx.shared();
let mut signals = task.process.signals.lock_save_irq();
let old_ss_value = signals.alt_stack.clone();

View File

@@ -1,5 +1,5 @@
use crate::memory::uaccess::{copy_from_user, copy_to_user};
use crate::sched::current::current_task;
use crate::sched::syscall_ctx::ProcessCtx;
use libkernel::error::{KernelError, Result};
use libkernel::memory::address::TUA;
@@ -10,6 +10,7 @@ pub const SIG_UNBLOCK: u32 = 1;
pub const SIG_SETMASK: u32 = 2;
pub async fn sys_rt_sigprocmask(
ctx: &mut ProcessCtx,
how: u32,
set: TUA<SigSet>,
oldset: TUA<SigSet>,
@@ -26,8 +27,8 @@ pub async fn sys_rt_sigprocmask(
};
let old_sigmask = {
let mut task = current_task();
let old_sigmask = task.sig_mask;
let task = ctx.shared();
let old_sigmask = task.sig_mask.load();
if let Some(set) = set {
let mut new_sigmask = match how {
@@ -40,7 +41,7 @@ pub async fn sys_rt_sigprocmask(
// SIGSTOP and SIGKILL can never be masked.
new_sigmask.remove(UNMASKABLE_SIGNALS);
task.sig_mask = new_sigmask;
task.sig_mask.store(new_sigmask);
}
old_sigmask

View File

@@ -1,9 +1,9 @@
use core::convert::Infallible;
use crate::sched::current::current_task;
use crate::sched::syscall_ctx::ProcessCtx;
pub fn sys_umask(new_umask: u32) -> core::result::Result<usize, Infallible> {
let task = current_task();
pub fn sys_umask(ctx: &ProcessCtx, new_umask: u32) -> core::result::Result<usize, Infallible> {
let task = ctx.shared();
let mut umask_guard = task.process.umask.lock_save_irq();
let old_umask = *umask_guard;

View File

@@ -4,7 +4,7 @@ use super::{
};
use crate::clock::timespec::TimeSpec;
use crate::memory::uaccess::{UserCopyable, copy_to_user};
use crate::sched::current::current_task_shared;
use crate::sched::syscall_ctx::ProcessCtx;
use crate::sync::CondVar;
use alloc::collections::btree_map::BTreeMap;
use bitflags::Flags;
@@ -212,6 +212,7 @@ fn find_waitable(
}
pub async fn sys_wait4(
ctx: &ProcessCtx,
pid: PidT,
stat_addr: TUA<i32>,
flags: u32,
@@ -246,7 +247,7 @@ pub async fn sys_wait4(
return Err(KernelError::NotSupported);
}
let task = current_task_shared();
let task = ctx.shared();
let child_proc_count = task.process.children.lock_save_irq().iter().count();
@@ -321,6 +322,7 @@ pub enum IdType {
}
pub async fn sys_waitid(
ctx: &ProcessCtx,
idtype: i32,
id: PidT,
infop: TUA<SigInfo>,
@@ -365,7 +367,7 @@ pub async fn sys_waitid(
IdType::P_PGID => -id.abs(), // negative means select by PGID in helpers
};
let task = current_task_shared();
let task = ctx.shared();
let child_proc_count = task.process.children.lock_save_irq().iter().count();
// Try immediate check if no children or WNOHANG

View File

@@ -1,4 +1,4 @@
use crate::sched::current::current_task;
use crate::sched::syscall_ctx::ProcessCtx;
use libkernel::UserAddressSpace;
use libkernel::error::{KernelError, Result};
use libkernel::memory::address::{TUA, VA};
@@ -10,8 +10,8 @@ pub enum FutexKey {
}
impl FutexKey {
pub fn new_private(uaddr: TUA<u32>) -> Self {
let pid = current_task().process.tgid.value();
pub fn new_private(ctx: &ProcessCtx, uaddr: TUA<u32>) -> Self {
let pid = ctx.shared().process.tgid.value();
Self::Private {
pid,
@@ -19,8 +19,9 @@ impl FutexKey {
}
}
pub fn new_shared(uaddr: TUA<u32>) -> Result<Self> {
let pg_info = current_task()
pub fn new_shared(ctx: &ProcessCtx, uaddr: TUA<u32>) -> Result<Self> {
let pg_info = ctx
.shared()
.vm
.lock_save_irq()
.mm_mut()

View File

@@ -2,6 +2,7 @@ use crate::clock::realtime::date;
use crate::clock::timespec::TimeSpec;
use crate::drivers::timer::sleep;
use crate::process::thread_group::signal::{InterruptResult, Interruptable};
use crate::sched::syscall_ctx::ProcessCtx;
use crate::sync::{OnceLock, SpinLock};
use alloc::boxed::Box;
use alloc::{collections::btree_map::BTreeMap, sync::Arc};
@@ -91,6 +92,7 @@ async fn do_futex_wait(
}
pub async fn sys_futex(
ctx: &ProcessCtx,
uaddr: TUA<u32>,
op: i32,
val: u32,
@@ -102,9 +104,9 @@ pub async fn sys_futex(
let cmd = op & !FUTEX_PRIVATE_FLAG;
let key = if op & FUTEX_PRIVATE_FLAG != 0 {
FutexKey::new_private(uaddr)
FutexKey::new_private(ctx, uaddr)
} else {
FutexKey::new_shared(uaddr)?
FutexKey::new_shared(ctx, uaddr)?
};
match cmd {

View File

@@ -1,7 +1,7 @@
use core::ffi::c_long;
use core::mem::size_of;
use crate::sched::current::current_task;
use crate::sched::syscall_ctx::ProcessCtx;
use libkernel::{
error::{KernelError, Result},
memory::address::TUA,
@@ -9,8 +9,8 @@ use libkernel::{
pub mod futex;
pub fn sys_set_tid_address(tidptr: TUA<u32>) -> Result<usize> {
let mut task = current_task();
pub fn sys_set_tid_address(ctx: &mut ProcessCtx, tidptr: TUA<u32>) -> Result<usize> {
let task = ctx.task_mut();
task.child_tid_ptr = Some(tidptr);
@@ -31,12 +31,16 @@ pub struct RobustListHead {
list_op_pending: RobustList,
}
pub async fn sys_set_robust_list(head: TUA<RobustListHead>, len: usize) -> Result<usize> {
pub async fn sys_set_robust_list(
ctx: &mut ProcessCtx,
head: TUA<RobustListHead>,
len: usize,
) -> Result<usize> {
if core::hint::unlikely(len != size_of::<RobustListHead>()) {
return Err(KernelError::InvalidValue);
}
let mut task = current_task();
let task = ctx.task_mut();
task.robust_list.replace(head);
Ok(0)

View File

@@ -1,119 +0,0 @@
use crate::{
per_cpu_private,
process::{Task, owned::OwnedTask},
};
use alloc::sync::Arc;
use core::{
cell::Cell,
marker::PhantomData,
ops::{Deref, DerefMut},
ptr,
};
per_cpu_private! {
pub(super) static CUR_TASK_PTR: CurrentTaskPtr = CurrentTaskPtr::new;
}
/// Per-CPU pointer to the currently running task, with debug borrow tracking.
pub(super) struct CurrentTaskPtr {
    // Raw pointer to the `OwnedTask` currently executing on this CPU.
    pub(super) ptr: Cell<*mut OwnedTask>,
    // True while a `CurrentTaskGuard` is live; used to detect re-entrant borrows.
    pub(super) borrowed: Cell<bool>,
    // Call site of the outstanding borrow, reported by the double-borrow panic.
    location: Cell<Option<core::panic::Location<'static>>>,
}
unsafe impl Send for CurrentTaskPtr {}
/// RAII guard granting exclusive mutable access to the CPU-local current task.
///
/// The `PhantomData<*const ()>` marker makes the guard `!Send`/`!Sync`, so it
/// cannot migrate off the CPU whose task pointer it borrows.
pub struct CurrentTaskGuard<'a> {
    task: &'a mut OwnedTask,
    _marker: PhantomData<*const ()>,
}
// Transparent access to the borrowed task.
impl Deref for CurrentTaskGuard<'_> {
    type Target = OwnedTask;

    fn deref(&self) -> &Self::Target {
        self.task
    }
}

impl DerefMut for CurrentTaskGuard<'_> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.task
    }
}

impl<'a> Drop for CurrentTaskGuard<'a> {
    // Dropping the guard clears the borrow flag and its recorded location so
    // the next `current_task()` call on this CPU can succeed.
    fn drop(&mut self) {
        let current = CUR_TASK_PTR.borrow();
        current.borrowed.set(false);
        current.location.set(None);
    }
}
impl CurrentTaskPtr {
    /// A null task pointer with no outstanding borrow.
    pub const fn new() -> Self {
        Self {
            ptr: Cell::new(ptr::null_mut()),
            borrowed: Cell::new(false),
            location: Cell::new(None),
        }
    }

    /// Hand out a mutable guard over the current task.
    ///
    /// # Panics
    ///
    /// Panics if a guard is already outstanding on this CPU; the message names
    /// the call site of the earlier borrow.
    #[track_caller]
    pub fn current(&self) -> CurrentTaskGuard<'static> {
        if self.borrowed.get() {
            let other = self.location.take();
            panic!("Double mutable borrow of current task! Borrowed from: {other:?}");
        }

        // Record this borrow so an overlapping second call can be diagnosed.
        self.borrowed.set(true);
        self.location.set(Some(*core::panic::Location::caller()));

        // SAFETY: the scheduler points `ptr` at the boxed `OwnedTask` of the
        // running task via `set_current` before task code runs, and the
        // `borrowed` flag checked above guarantees exclusivity of the `&mut`.
        unsafe {
            let ptr = self.ptr.get();
            CurrentTaskGuard {
                task: &mut *ptr,
                _marker: PhantomData,
            }
        }
    }

    /// Record `task` as the task now running on this CPU.
    pub(super) fn set_current(&self, task: *mut OwnedTask) {
        self.ptr.set(task);
    }
}
/// Returns a mutable reference to the CPU-local private task state
/// (`OwnedTask`).
///
/// # Panics
///
/// Panics if the current task is already borrowed on this CPU (reentrancy bug).
/// This usually happens if you call `current_task()` and then call a function
/// that also calls `current_task()` without dropping the first guard.
///
/// # Critical Section
///
/// This function disables preemption. You must drop the returned guard before
/// attempting to sleep, yield, or await.
#[track_caller]
pub fn current_task() -> CurrentTaskGuard<'static> {
    // Delegate to the per-CPU pointer; see the doc comment above for the
    // panic and preemption caveats.
    CUR_TASK_PTR.borrow_mut().current()
}
/// Returns a shared reference to the Process Identity (`Task`).
///
/// Use this for accessing shared resources like:
/// - File Descriptors
/// - Virtual Memory (Page Tables)
/// - Current Working Directory
/// - Credentials / PID / Thread Group
///
/// # Execution Context
///
/// This function creates a temporary `CurrentTaskGuard` just long enough to
/// clone the `Arc`, then drops it. It is safe to await or yield after calling
/// this function, as it does not hold the CPU lock.
pub fn current_task_shared() -> Arc<Task> {
    // Clone the `Arc` under a short-lived guard, which is dropped immediately
    // after the clone completes.
    current_task().t_shared.clone()
}

View File

@@ -10,16 +10,16 @@ use core::fmt::Debug;
use core::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use core::task::Waker;
use core::time::Duration;
use current::{CUR_TASK_PTR, current_task};
use libkernel::error::Result;
use log::warn;
use runqueue::RunQueue;
use sched_task::{RunnableTask, Work};
use syscall_ctx::ProcessCtx;
use waker::create_waker;
pub mod current;
mod runqueue;
pub mod sched_task;
pub mod syscall_ctx;
pub mod uspc_ret;
pub mod waker;
@@ -130,8 +130,8 @@ fn schedule() {
drop(deferred);
}
pub fn spawn_kernel_work(fut: impl Future<Output = ()> + 'static + Send) {
current_task().ctx.put_kernel_work(Box::pin(fut));
pub fn spawn_kernel_work(ctx: &mut ProcessCtx, fut: impl Future<Output = ()> + 'static + Send) {
ctx.task_mut().ctx.put_kernel_work(Box::pin(fut));
}
#[cfg(feature = "smp")]

View File

@@ -1,5 +1,5 @@
use super::{
CUR_TASK_PTR, NUM_CONTEXT_SWITCHES,
NUM_CONTEXT_SWITCHES,
sched_task::{RunnableTask, Work, state::TaskState},
};
use crate::{
@@ -145,20 +145,12 @@ impl RunQueue {
next_task.switch_context();
next_task.work.reset_last_account(now);
CUR_TASK_PTR
.borrow_mut()
.set_current(Box::as_ptr(&next_task.work.task) as *mut _);
}
self.running_task = Some(next_task);
} else {
// No next task. Go idle.
self.idle.switch_context();
CUR_TASK_PTR
.borrow_mut()
.set_current(Box::as_ptr(&self.idle.work.task) as *mut _);
}
deferred_drops

View File

@@ -35,7 +35,7 @@ pub struct SchedulerData {
}
impl SchedulerData {
fn new(task: &Box<OwnedTask>) -> Self {
fn new(task: &OwnedTask) -> Self {
Self {
v_runtime: 0,
v_eligible: 0,
@@ -56,7 +56,6 @@ pub struct Work {
pub sched_data: SpinLock<Option<SchedulerData>>,
}
impl Deref for Work {
type Target = Arc<Task>;
@@ -111,7 +110,6 @@ impl Drop for RunnableTask {
}
}
impl Deref for RunnableTask {
type Target = SchedulerData;

74
src/sched/syscall_ctx.rs Normal file
View File

@@ -0,0 +1,74 @@
use crate::process::{Task, owned::OwnedTask};
use alloc::sync::Arc;
/// Provides access to the current task's state.
///
/// Any function that is marked with `ProcessCtx` should only be callable from a
/// context which is backed by a userspace context. As such, it should take a
/// `ProcessCtx` as an argument to enforce this requirement. A new `ProcessCtx`
/// is created by the arch layer following entry into the kernel from a process
/// context, and it is passed to the relevant functions.
pub struct ProcessCtx {
task: *mut OwnedTask,
}
// SAFETY: The kernel guarantees that an `OwnedTask` is only accessed by one
// CPU at a time, so moving a `ProcessCtx` to another CPU cannot introduce
// concurrent access to the underlying task.
unsafe impl Send for ProcessCtx {}
// SAFETY: The kernel guarantees that an `OwnedTask` is only accessed by one
// CPU at a time.
// NOTE(review): `Sync` additionally assumes shared `&ProcessCtx` references
// are never used concurrently with `task_mut` on a clone — confirm.
unsafe impl Sync for ProcessCtx {}
impl ProcessCtx {
    /// Create a `ProcessCtx` from a raw pointer to the current task.
    ///
    /// # Safety
    ///
    /// - `task` must point to a valid, heap-allocated `OwnedTask` that will
    ///   remain alive for the lifetime of this `ProcessCtx`.
    /// - The caller must ensure single-CPU access: no other mutable references
    ///   to the `OwnedTask` may exist concurrently.
    pub unsafe fn new(task: *mut OwnedTask) -> Self {
        // A null pointer would make every later dereference UB; catch it
        // early in debug builds.
        debug_assert!(!task.is_null());
        Self { task }
    }

    /// Create a `ProcessCtx` for the currently-running task on this CPU.
    ///
    /// Obtains the raw pointer from the scheduler's `current_work()`.
    ///
    /// # Safety
    ///
    /// The caller must ensure single-CPU access: no other mutable references to
    /// the `OwnedTask` may exist concurrently. Furthermore, the caller must
    /// ensure that the kernel has been entered when in a process ctx, for
    /// example when handling a synchronous exception from userspace.
    pub unsafe fn from_current() -> Self {
        let work = super::current_work();
        // SAFETY: `current_work()` yields the task running on this CPU; the
        // validity and exclusivity obligations are forwarded to our caller.
        unsafe { Self::new(alloc::boxed::Box::as_ptr(&work.task) as *mut _) }
    }

    /// Shared access to the CPU-local owned task.
    pub fn task(&self) -> &OwnedTask {
        // SAFETY: `new`'s contract guarantees `self.task` is valid for the
        // lifetime of this `ProcessCtx`.
        unsafe { &*self.task }
    }

    /// Create a new `ProcessCtx` pointing to the same task.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the cloned value and `self` aren't used
    /// concurrently.
    pub unsafe fn clone(&self) -> Self {
        // SAFETY: Same pointer, same obligations — forwarded to our caller
        // via this function's own `# Safety` contract.
        unsafe { Self::new(self.task) }
    }

    /// Exclusive access to the CPU-local owned task.
    pub fn task_mut(&mut self) -> &mut OwnedTask {
        // SAFETY: `new`'s contract guarantees validity, and `&mut self` plus
        // the no-concurrent-use rule on `clone` ensure this is the only live
        // reference to the task.
        unsafe { &mut *self.task }
    }

    /// The reference-counted `Task` shared between all holders of this task
    /// (`t_shared`).
    pub fn shared(&self) -> &Arc<Task> {
        &self.task().t_shared
    }
}

View File

@@ -1,4 +1,4 @@
use super::{current::current_task, current_work, current_work_waker, schedule};
use super::{current_work, current_work_waker, schedule};
use crate::{
arch::{Arch, ArchImpl},
process::{
@@ -9,6 +9,7 @@ use crate::{
wait::ChildState,
},
},
sched::syscall_ctx::ProcessCtx,
};
use alloc::boxed::Box;
use core::{ptr, sync::atomic::Ordering, task::Poll};
@@ -79,23 +80,26 @@ fn try_sleep_current() -> bool {
/// This function will panic if it detects an attempt to process signals or
/// kernel work for the special idle task, as this indicates a critical bug in
/// the scheduler or task management.
pub fn dispatch_userspace_task(ctx: *mut UserCtx) {
pub fn dispatch_userspace_task(frame: *mut UserCtx) {
let mut state = State::PickNewTask;
// SAFETY: Access is exclusive since we're not polling any futures.
let mut ctx = unsafe { ProcessCtx::from_current() };
'dispatch: loop {
match state {
State::PickNewTask => {
// Pick a new task, potentially context switching to a new task.
schedule();
// SAFETY: As above.
ctx = unsafe { ProcessCtx::from_current() };
state = State::ProcessKernelWork;
}
State::ProcessKernelWork => {
// First, let's handle signals. If there is any scheduled signal
// work (this has to be async to handle faults, etc).
let (signal_work, is_idle) = {
let mut task = current_task();
(task.ctx.take_signal_work(), task.is_idle_task())
};
let signal_work = ctx.task_mut().ctx.take_signal_work();
let is_idle = ctx.task().is_idle_task();
if let Some(mut signal_work) = signal_work {
if is_idle {
@@ -106,23 +110,23 @@ pub fn dispatch_userspace_task(ctx: *mut UserCtx) {
.as_mut()
.poll(&mut core::task::Context::from_waker(&current_work_waker()))
{
Poll::Ready(Ok(state)) => {
Poll::Ready(Ok(restored)) => {
// Signal actioning is complete. Return to userspace.
unsafe { ptr::copy_nonoverlapping(&state as _, ctx, 1) };
unsafe { ptr::copy_nonoverlapping(&restored as _, frame, 1) };
return;
}
Poll::Ready(Err(_)) => {
// If we errored, then we *cannot* progress the task.
// Delivery of the signal failed. Force the process to
// terminate.
kernel_exit_with_signal(SigId::SIGSEGV, true);
kernel_exit_with_signal(ctx.shared().clone(), SigId::SIGSEGV, true);
// Look for another task, this one is now dead.
state = State::PickNewTask;
continue;
}
Poll::Pending => {
current_task().ctx.put_signal_work(signal_work);
ctx.task_mut().ctx.put_signal_work(signal_work);
if try_sleep_current() {
state = State::PickNewTask;
@@ -135,7 +139,7 @@ pub fn dispatch_userspace_task(ctx: *mut UserCtx) {
}
// Now let's handle any kernel work that's been spawned for this task.
let kern_work = current_task().ctx.take_kernel_work();
let kern_work = ctx.task_mut().ctx.take_kernel_work();
if let Some(mut kern_work) = kern_work {
if is_idle {
panic!("Idle process should never have kernel work");
@@ -164,7 +168,7 @@ pub fn dispatch_userspace_task(ctx: *mut UserCtx) {
continue;
}
Poll::Pending => {
current_task().ctx.put_kernel_work(kern_work);
ctx.task_mut().ctx.put_kernel_work(kern_work);
if try_sleep_current() {
state = State::PickNewTask;
@@ -179,108 +183,111 @@ pub fn dispatch_userspace_task(ctx: *mut UserCtx) {
// No kernel work. Check for any pending signals.
// We never handle signals for the idle task.
if current_task().is_idle_task() {
if ctx.task().is_idle_task() {
state = State::ReturnToUserspace;
continue;
}
{
let mut task = current_task();
while let Some(signal) = task.take_signal() {
let mut ptrace = task.ptrace.lock_save_irq();
if ptrace.trace_signal(signal, task.ctx.user()) {
ptrace.set_waker(current_work_waker());
ptrace.notify_tracer_of_trap(&task.process);
drop(ptrace);
drop(task);
if current_work().state.try_pending_stop() {
state = State::PickNewTask;
} else {
// Woken concurrently (tracer already resumed us).
state = State::ProcessKernelWork;
}
continue 'dispatch;
}
while let Some(signal) = ctx.task().take_signal() {
let mut ptrace = ctx.task().ptrace.lock_save_irq();
if ptrace.trace_signal(signal, ctx.task().ctx.user()) {
ptrace.set_waker(current_work_waker());
ptrace.notify_tracer_of_trap(&ctx.task().process);
drop(ptrace);
let sigaction = task.process.signals.lock_save_irq().action_signal(signal);
if current_work().state.try_pending_stop() {
state = State::PickNewTask;
} else {
// Woken concurrently (tracer already resumed us).
state = State::ProcessKernelWork;
}
continue 'dispatch;
}
drop(ptrace);
match sigaction {
// Signal ignored, look for another.
None => continue,
Some(KSignalAction::Term | KSignalAction::Core) => {
// Terminate the process, and find a new task.
drop(task);
kernel_exit_with_signal(signal, false);
let sigaction = ctx
.task()
.process
.signals
.lock_save_irq()
.action_signal(signal);
state = State::PickNewTask;
continue 'dispatch;
match sigaction {
// Signal ignored, look for another.
None => continue,
Some(KSignalAction::Term | KSignalAction::Core) => {
// Terminate the process, and find a new task.
kernel_exit_with_signal(ctx.shared().clone(), signal, false);
state = State::PickNewTask;
continue 'dispatch;
}
Some(KSignalAction::Stop) => {
// Default action: stop (suspend) the entire process.
let process = &ctx.task().process;
// Notify the parent that we have stopped (SIGCHLD).
if let Some(parent) = process
.parent
.lock_save_irq()
.as_ref()
.and_then(|p| p.upgrade())
{
parent
.child_notifiers
.child_update(process.tgid, ChildState::Stop { signal });
parent.deliver_signal(SigId::SIGCHLD);
}
Some(KSignalAction::Stop) => {
// Default action: stop (suspend) the entire process.
let process = &task.process;
// Notify the parent that we have stopped (SIGCHLD).
if let Some(parent) = process
.parent
.lock_save_irq()
.as_ref()
.and_then(|p| p.upgrade())
{
parent
.child_notifiers
.child_update(process.tgid, ChildState::Stop { signal });
parent.deliver_signal(SigId::SIGCHLD);
for thr_weak in process.tasks.lock_save_irq().values() {
if let Some(thr) = thr_weak.upgrade() {
thr.state.try_pending_stop();
}
for thr_weak in process.tasks.lock_save_irq().values() {
if let Some(thr) = thr_weak.upgrade() {
thr.state.try_pending_stop();
}
}
state = State::PickNewTask;
continue 'dispatch;
}
Some(KSignalAction::Continue) => {
let process = &task.process;
// Wake up all stopped/sleeping threads in the process.
for thr_weak in process.tasks.lock_save_irq().values() {
if let Some(thr) = thr_weak.upgrade() {
crate::sched::waker::create_waker(thr).wake();
}
state = State::PickNewTask;
continue 'dispatch;
}
Some(KSignalAction::Continue) => {
let process = &ctx.task().process;
// Wake up all stopped/sleeping threads in the process.
for thr_weak in process.tasks.lock_save_irq().values() {
if let Some(thr) = thr_weak.upgrade() {
crate::sched::waker::create_waker(thr).wake();
}
// Notify the parent that we have continued (SIGCHLD).
if let Some(parent) = process
.parent
.lock_save_irq()
.as_ref()
.and_then(|p| p.upgrade())
{
parent
.child_notifiers
.child_update(process.tgid, ChildState::Continue);
parent.deliver_signal(SigId::SIGCHLD);
}
// Re-process kernel work for this task (there may be more to do).
state = State::ProcessKernelWork;
continue 'dispatch;
}
Some(KSignalAction::Userspace(id, action)) => {
let fut = ArchImpl::do_signal(id, action);
task.ctx.put_signal_work(Box::pin(fut));
// Notify the parent that we have continued (SIGCHLD).
if let Some(parent) = process
.parent
.lock_save_irq()
.as_ref()
.and_then(|p| p.upgrade())
{
parent
.child_notifiers
.child_update(process.tgid, ChildState::Continue);
state = State::ProcessKernelWork;
continue 'dispatch;
parent.deliver_signal(SigId::SIGCHLD);
}
// Re-process kernel work for this task (there may be more to do).
state = State::ProcessKernelWork;
continue 'dispatch;
}
Some(KSignalAction::Userspace(id, action)) => {
// SAFETY: Signal work will be polled independently of
// kernel work. Therefore there will be no
// concurrent accesses of the ctx.
let ctx2 = unsafe { ctx.clone() };
let fut = ArchImpl::do_signal(ctx2, id, action);
ctx.task_mut().ctx.put_signal_work(Box::pin(fut));
state = State::ProcessKernelWork;
continue 'dispatch;
}
}
}
@@ -290,7 +297,7 @@ pub fn dispatch_userspace_task(ctx: *mut UserCtx) {
State::ReturnToUserspace => {
// Real user-space return now.
current_task().ctx.restore_user_ctx(ctx);
ctx.task().ctx.restore_user_ctx(frame);
return;
}
}