From 9e80a6ae8af44293d33c3f70d1d147ee61a07ac0 Mon Sep 17 00:00:00 2001
From: Matthew Leach
Date: Mon, 29 Dec 2025 17:05:43 +0000
Subject: [PATCH] process: `TASK_LIST`: point to `Task` struct

Make the global `TASK_LIST` a collection of `Task`s rather than of
`task.state` struct members. This allows other cores to access any
shared task state easily.
---
 src/drivers/fs/proc.rs |  3 ++-
 src/process/clone.rs   |  8 ++++----
 src/process/exit.rs    |  4 +++-
 src/process/mod.rs     |  9 ++++++++-
 src/sched/mod.rs       | 12 ++----------
 src/sched/waker.rs     |  6 +++---
 6 files changed, 22 insertions(+), 20 deletions(-)

diff --git a/src/drivers/fs/proc.rs b/src/drivers/fs/proc.rs
index 9f7e026..74d6420 100644
--- a/src/drivers/fs/proc.rs
+++ b/src/drivers/fs/proc.rs
@@ -1,7 +1,8 @@
 #![allow(clippy::module_name_repetitions)]
 
+use crate::process::find_task_by_descriptor;
 use crate::process::thread_group::Tgid;
-use crate::sched::{current_task, find_task_by_descriptor};
+use crate::sched::current_task;
 use crate::sync::OnceLock;
 use crate::{
     drivers::{Driver, FilesystemDriver},
diff --git a/src/process/clone.rs b/src/process/clone.rs
index 958d36f..5b3d00b 100644
--- a/src/process/clone.rs
+++ b/src/process/clone.rs
@@ -159,14 +159,14 @@ pub async fn sys_clone(
         }
     };
 
-    TASK_LIST
-        .lock_save_irq()
-        .insert(new_task.descriptor(), Arc::downgrade(&new_task.state));
-
     let tid = new_task.tid;
 
     let task = Arc::new(new_task);
 
+    TASK_LIST
+        .lock_save_irq()
+        .insert(task.descriptor(), Arc::downgrade(&task));
+
     sched::insert_task_cross_cpu(task.clone());
 
     task.process
diff --git a/src/process/exit.rs b/src/process/exit.rs
index 46e979e..9faf4bb 100644
--- a/src/process/exit.rs
+++ b/src/process/exit.rs
@@ -1,5 +1,5 @@
 use super::{
-    TaskState,
+    TASK_LIST, TaskState,
     thread_group::{ProcessState, Tgid, ThreadGroup, signal::SigId, wait::ChildState},
     threading::futex::{self, key::FutexKey},
 };
@@ -125,6 +125,8 @@ pub async fn sys_exit(exit_code: usize) -> Result {
         .filter(|t| t.upgrade().is_some())
         .count();
 
+    TASK_LIST.lock_save_irq().remove(&task.descriptor());
+
     if live_tasks <= 1 {
         // We are the last task. This is equivalent to an exit_group. The exit
         // code for an implicit exit_group is often 0.
diff --git a/src/process/mod.rs b/src/process/mod.rs
index 5751e5f..212c7ad 100644
--- a/src/process/mod.rs
+++ b/src/process/mod.rs
@@ -306,7 +306,14 @@ impl Task {
     }
 }
 
-pub static TASK_LIST: SpinLock<BTreeMap<TaskDescriptor, Weak<SpinLock<TaskState>>>> =
+pub fn find_task_by_descriptor(descriptor: &TaskDescriptor) -> Option<Arc<Task>> {
+    TASK_LIST
+        .lock_save_irq()
+        .get(descriptor)
+        .and_then(|x| x.upgrade())
+}
+
+pub static TASK_LIST: SpinLock<BTreeMap<TaskDescriptor, Weak<Task>>> =
     SpinLock::new(BTreeMap::new());
 
 unsafe impl Send for Task {}
diff --git a/src/sched/mod.rs b/src/sched/mod.rs
index 8709fdc..c90f7de 100644
--- a/src/sched/mod.rs
+++ b/src/sched/mod.rs
@@ -45,14 +45,6 @@ pub const VT_ONE: u128 = 1u128 << VT_FIXED_SHIFT;
 /// Two virtual-time instants whose integer parts differ by no more than this constant are considered equal.
 pub const VCLOCK_EPSILON: u128 = VT_ONE;
 
-pub fn find_task_by_descriptor(descriptor: &TaskDescriptor) -> Option<Arc<Task>> {
-    if let Some(task) = SCHED_STATE.borrow().run_queue.get(descriptor) {
-        return Some(task.clone());
-    }
-    // TODO: Ping other CPUs to find the task.
-    None
-}
-
 /// Schedule a new task.
 ///
 /// This function is the core of the kernel's scheduler. It is responsible for
@@ -431,8 +423,8 @@ pub fn sched_init() {
 
     {
         let mut task_list = TASK_LIST.lock_save_irq();
-        task_list.insert(idle_task.descriptor(), Arc::downgrade(&idle_task.state));
-        task_list.insert(init_task.descriptor(), Arc::downgrade(&init_task.state));
+        task_list.insert(idle_task.descriptor(), Arc::downgrade(&idle_task));
+        task_list.insert(init_task.descriptor(), Arc::downgrade(&init_task));
     }
 
     insert_task(idle_task);
diff --git a/src/sched/waker.rs b/src/sched/waker.rs
index 20dd21a..e8d4e87 100644
--- a/src/sched/waker.rs
+++ b/src/sched/waker.rs
@@ -9,10 +9,10 @@ unsafe fn clone_waker(data: *const ()) -> RawWaker {
 unsafe fn wake_waker(data: *const ()) {
     let desc = TaskDescriptor::from_ptr(data);
 
-    if let Some(proc) = TASK_LIST.lock_save_irq().get(&desc)
-        && let Some(proc) = proc.upgrade()
+    if let Some(task) = TASK_LIST.lock_save_irq().get(&desc)
+        && let Some(task) = task.upgrade()
     {
-        let mut state = proc.lock_save_irq();
+        let mut state = task.state.lock_save_irq();
         match *state {
             // If the task has been put to sleep, then wake it up.
             TaskState::Sleeping => {
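
A quick illustration of what the new lookup buys: with `TASK_LIST` holding `Weak<Task>`, any core can resolve a descriptor to the full `Task` and reach its shared state without touching a local run queue. This is only a sketch, not code from the tree: the `TaskDescriptor` import path and the helper name `task_is_sleeping` are assumptions; `find_task_by_descriptor`, `task.state`, `lock_save_irq()`, and `TaskState::Sleeping` are the pieces taken from the patch.

```rust
use crate::process::{TaskState, find_task_by_descriptor};
use crate::sched::TaskDescriptor; // assumed path; the patch does not show where TaskDescriptor lives

/// Report whether the task behind `descriptor` is currently sleeping,
/// no matter which CPU owns its run-queue entry.
fn task_is_sleeping(descriptor: &TaskDescriptor) -> bool {
    // TASK_LIST now stores Weak<Task>, so the lookup no longer depends on the
    // local run queue; it misses only once the task has exited (sys_exit
    // removes the entry) or the last Arc<Task> has been dropped.
    let Some(task) = find_task_by_descriptor(descriptor) else {
        return false;
    };

    // Shared per-task state is still reached through `task.state`, the same
    // way the reworked waker path does it.
    let state = task.state.lock_save_irq();
    matches!(*state, TaskState::Sleeping)
}
```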
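The reordering in `sys_clone` follows from the new value type: `Arc::downgrade` needs an `Arc<Task>`, so registration can only happen once `Arc::new(new_task)` has run. A self-contained model of the resulting lifecycle, using plain `std` types and made-up names purely for illustration:

```rust
use std::collections::BTreeMap;
use std::sync::{Arc, Mutex, Weak};

struct Task {
    id: u64,
    // shared state would live here, as `Task::state` does in the kernel
}

// Stand-in for TASK_LIST: descriptor -> Weak<Task>, guarded by a plain Mutex
// instead of a SpinLock.
static TASKS: Mutex<BTreeMap<u64, Weak<Task>>> = Mutex::new(BTreeMap::new());

fn clone_task(id: u64) -> Arc<Task> {
    // A Weak<Task> can only be created from an Arc<Task>, so registration has
    // to happen after the Arc is built, matching the new ordering in sys_clone.
    let task = Arc::new(Task { id });
    TASKS.lock().unwrap().insert(id, Arc::downgrade(&task));
    task
}

fn exit_task(task: &Task) {
    // Mirrors sys_exit: drop the global entry so later lookups miss cleanly.
    TASKS.lock().unwrap().remove(&task.id);
}

fn find_task(id: u64) -> Option<Arc<Task>> {
    // Mirrors find_task_by_descriptor: a stale entry simply fails to upgrade.
    TASKS.lock().unwrap().get(&id).and_then(Weak::upgrade)
}

fn main() {
    let t = clone_task(7);
    assert!(find_task(7).is_some());
    exit_task(&t);
    drop(t);
    assert!(find_task(7).is_none());
}
```

Because the map stores `Weak` entries, `TASK_LIST` never keeps a dead task alive; the explicit removal in `sys_exit` just keeps the map from accumulating stale keys.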