Merge pull request #113 from hexagonal-sun/make-task-list-arc-task

process: `TASK_LIST`: point to `Task` struct
This commit is contained in:
Matthew Leach
2025-12-29 20:38:56 +00:00
committed by GitHub
6 changed files with 22 additions and 20 deletions

View File

@@ -1,7 +1,8 @@
#![allow(clippy::module_name_repetitions)]
use crate::process::find_task_by_descriptor;
use crate::process::thread_group::Tgid;
use crate::sched::{current_task, find_task_by_descriptor};
use crate::sched::current_task;
use crate::sync::OnceLock;
use crate::{
drivers::{Driver, FilesystemDriver},

View File

@@ -159,14 +159,14 @@ pub async fn sys_clone(
}
};
TASK_LIST
.lock_save_irq()
.insert(new_task.descriptor(), Arc::downgrade(&new_task.state));
let tid = new_task.tid;
let task = Arc::new(new_task);
TASK_LIST
.lock_save_irq()
.insert(task.descriptor(), Arc::downgrade(&task));
sched::insert_task_cross_cpu(task.clone());
task.process

View File

@@ -1,5 +1,5 @@
use super::{
TaskState,
TASK_LIST, TaskState,
thread_group::{ProcessState, Tgid, ThreadGroup, signal::SigId, wait::ChildState},
threading::futex::{self, key::FutexKey},
};
@@ -125,6 +125,8 @@ pub async fn sys_exit(exit_code: usize) -> Result<usize> {
.filter(|t| t.upgrade().is_some())
.count();
TASK_LIST.lock_save_irq().remove(&task.descriptor());
if live_tasks <= 1 {
// We are the last task. This is equivalent to an exit_group. The exit
// code for an implicit exit_group is often 0.

View File

@@ -306,7 +306,14 @@ impl Task {
}
}
pub static TASK_LIST: SpinLock<BTreeMap<TaskDescriptor, Weak<SpinLock<TaskState>>>> =
/// Look up a live task by its descriptor.
///
/// The global `TASK_LIST` maps `TaskDescriptor` -> `Weak<Task>` (see the
/// declaration below), so this returns `None` either when the descriptor
/// is unknown or when the task has already been dropped and the
/// `upgrade()` of the weak pointer fails.
///
/// NOTE(review): takes the `TASK_LIST` spin lock with IRQs saved/disabled
/// for the duration of the lookup only; the returned `Arc<Task>` outlives
/// the lock.
pub fn find_task_by_descriptor(descriptor: &TaskDescriptor) -> Option<Arc<Task>> {
    TASK_LIST
        .lock_save_irq()
        .get(descriptor)
        .and_then(|x| x.upgrade())
}
pub static TASK_LIST: SpinLock<BTreeMap<TaskDescriptor, Weak<Task>>> =
SpinLock::new(BTreeMap::new());
unsafe impl Send for Task {}

View File

@@ -45,14 +45,6 @@ pub const VT_ONE: u128 = 1u128 << VT_FIXED_SHIFT;
/// Two virtual-time instants whose integer parts differ by no more than this constant are considered equal.
pub const VCLOCK_EPSILON: u128 = VT_ONE;
/// Look up a task by its descriptor in the current CPU's run queue.
///
/// NOTE(review): this (pre-change) implementation only searches the local
/// `SCHED_STATE` run queue, so a task scheduled on another CPU is not
/// found — the TODO below acknowledges that gap, which is what the
/// `TASK_LIST`-based replacement in this commit addresses.
pub fn find_task_by_descriptor(descriptor: &TaskDescriptor) -> Option<Arc<Task>> {
    if let Some(task) = SCHED_STATE.borrow().run_queue.get(descriptor) {
        return Some(task.clone());
    }
    // TODO: Ping other CPUs to find the task.
    None
}
/// Schedule a new task.
///
/// This function is the core of the kernel's scheduler. It is responsible for
@@ -431,8 +423,8 @@ pub fn sched_init() {
{
let mut task_list = TASK_LIST.lock_save_irq();
task_list.insert(idle_task.descriptor(), Arc::downgrade(&idle_task.state));
task_list.insert(init_task.descriptor(), Arc::downgrade(&init_task.state));
task_list.insert(idle_task.descriptor(), Arc::downgrade(&idle_task));
task_list.insert(init_task.descriptor(), Arc::downgrade(&init_task));
}
insert_task(idle_task);

View File

@@ -9,10 +9,10 @@ unsafe fn clone_waker(data: *const ()) -> RawWaker {
unsafe fn wake_waker(data: *const ()) {
let desc = TaskDescriptor::from_ptr(data);
if let Some(proc) = TASK_LIST.lock_save_irq().get(&desc)
&& let Some(proc) = proc.upgrade()
if let Some(task) = TASK_LIST.lock_save_irq().get(&desc)
&& let Some(task) = task.upgrade()
{
let mut state = proc.lock_save_irq();
let mut state = task.state.lock_save_irq();
match *state {
// If the task has been put to sleep, then wake it up.
TaskState::Sleeping => {