Merge pull request #273 from hexagonal-sun/ptrace-thread

ptrace thread
This commit is contained in:
Matthew Leach
2026-03-24 06:10:57 +00:00
committed by GitHub
19 changed files with 263 additions and 310 deletions

View File

@@ -3,7 +3,8 @@ use crate::drivers::fs::proc::get_inode_id;
use crate::drivers::fs::proc::meminfo::ProcMeminfoInode;
use crate::drivers::fs::proc::stat::ProcStatInode;
use crate::drivers::fs::proc::task::ProcTaskInode;
use crate::process::{TASK_LIST, TaskDescriptor, Tid};
use crate::process::thread_group::pid::PidT;
use crate::process::{TASK_LIST, TaskDescriptor, Tid, find_task_by_tid};
use crate::sched::current_work;
use alloc::boxed::Box;
use alloc::string::ToString;
@@ -44,6 +45,7 @@ impl Inode for ProcRootInode {
// Lookup a PID directory.
let desc = if name == "self" {
// FIXME: The group leader may have exited.
TaskDescriptor::from_tgid_tid(current.pgid(), Tid::from_tgid(current.pgid()))
} else if name == "thread-self" {
current.descriptor()
@@ -60,18 +62,15 @@ impl Inode for ProcRootInode {
InodeId::from_fsid_and_inodeid(self.id.fs_id(), get_inode_id(&["cmdline"])),
)));
} else {
let pid: u32 = name.parse().map_err(|_| FsError::NotFound)?;
let pid: PidT = name.parse().map_err(|_| FsError::NotFound)?;
// Search for the task descriptor.
TASK_LIST
.lock_save_irq()
.keys()
.find(|d| d.tgid().value() == pid)
.cloned()
find_task_by_tid(Tid::from_pid_t(pid))
.ok_or(FsError::NotFound)?
.descriptor()
};
Ok(Arc::new(ProcTaskInode::new(
desc,
desc.tid(),
false,
InodeId::from_fsid_and_inodeid(self.id.fs_id(), get_inode_id(&[name])),
)))
@@ -85,14 +84,14 @@ impl Inode for ProcRootInode {
let mut entries: Vec<Dirent> = Vec::new();
// Gather task list under interrupt-safe lock.
let task_list = TASK_LIST.lock_save_irq();
for (desc, _) in task_list
for (tid, _) in task_list
.iter()
.filter(|(_, task)| task.upgrade().is_some())
{
let name = desc.tgid().value().to_string();
let name = tid.value().to_string();
let inode_id = InodeId::from_fsid_and_inodeid(
PROCFS_ID,
get_inode_id(&[&desc.tgid().value().to_string()]),
get_inode_id(&[&tid.value().to_string()]),
);
let next_offset = (entries.len() + 1) as u64;
entries.push(Dirent::new(

View File

@@ -1,6 +1,6 @@
use crate::drivers::fs::proc::{get_inode_id, procfs};
use crate::process::fd_table::Fd;
use crate::process::{TaskDescriptor, find_task_by_descriptor};
use crate::process::{Tid, find_task_by_tid};
use crate::sched::current_work;
use alloc::borrow::ToOwned;
use alloc::boxed::Box;
@@ -20,12 +20,12 @@ use libkernel::fs::{
pub struct ProcFdInode {
id: InodeId,
attr: FileAttr,
desc: TaskDescriptor,
tid: Tid,
fd_info: bool,
}
impl ProcFdInode {
pub fn new(desc: TaskDescriptor, fd_info: bool, inode_id: InodeId) -> Self {
pub fn new(tid: Tid, fd_info: bool, inode_id: InodeId) -> Self {
Self {
id: inode_id,
attr: FileAttr {
@@ -33,7 +33,7 @@ impl ProcFdInode {
// Define appropriate file attributes for fdinfo.
..FileAttr::default()
},
desc,
tid,
fd_info,
}
}
@@ -63,10 +63,10 @@ impl Inode for ProcFdInode {
let fs = procfs();
let inode_id = InodeId::from_fsid_and_inodeid(
fs.id(),
get_inode_id(&[&self.desc.tid().value().to_string(), self.dir_name(), name]),
get_inode_id(&[&self.tid.value().to_string(), self.dir_name(), name]),
);
Ok(Arc::new(ProcFdFile::new(
self.desc,
self.tid,
self.fd_info,
fd,
inode_id,
@@ -74,7 +74,7 @@ impl Inode for ProcFdInode {
}
async fn readdir(&self, start_offset: u64) -> Result<Box<dyn DirStream>> {
let task = find_task_by_descriptor(&self.desc).ok_or(FsError::NotFound)?;
let task = find_task_by_tid(self.tid).ok_or(FsError::NotFound)?;
let fd_table = task.fd_table.lock_save_irq();
let mut entries = Vec::new();
for fd in 0..fd_table.len() {
@@ -86,11 +86,7 @@ impl Inode for ProcFdInode {
entries.push(Dirent {
id: InodeId::from_fsid_and_inodeid(
self.id.fs_id(),
get_inode_id(&[
&self.desc.tid().value().to_string(),
self.dir_name(),
&fd_str,
]),
get_inode_id(&[&self.tid.value().to_string(), self.dir_name(), &fd_str]),
),
offset: next_offset,
file_type: FileType::File,
@@ -107,13 +103,13 @@ impl Inode for ProcFdInode {
pub struct ProcFdFile {
id: InodeId,
attr: FileAttr,
desc: TaskDescriptor,
tid: Tid,
fd_info: bool,
fd: i32,
}
impl ProcFdFile {
pub fn new(desc: TaskDescriptor, fd_info: bool, fd: i32, inode_id: InodeId) -> Self {
pub fn new(tid: Tid, fd_info: bool, fd: i32, inode_id: InodeId) -> Self {
Self {
id: inode_id,
attr: FileAttr {
@@ -125,7 +121,7 @@ impl ProcFdFile {
// Define appropriate file attributes for fdinfo file.
..FileAttr::default()
},
desc,
tid,
fd_info,
fd,
}
@@ -143,7 +139,7 @@ impl SimpleFile for ProcFdFile {
}
async fn read(&self) -> Result<Vec<u8>> {
let task = find_task_by_descriptor(&self.desc).ok_or(FsError::NotFound)?;
let task = find_task_by_tid(self.tid).ok_or(FsError::NotFound)?;
let fd_entry = task
.fd_table
.lock_save_irq()
@@ -160,7 +156,7 @@ impl SimpleFile for ProcFdFile {
async fn readlink(&self) -> Result<PathBuf> {
if !self.fd_info {
if let Some(task) = find_task_by_descriptor(&self.desc) {
if let Some(task) = find_task_by_tid(self.tid) {
let Some(file) = task.fd_table.lock_save_irq().get(Fd(self.fd)) else {
return Err(FsError::NotFound.into());
};

View File

@@ -6,7 +6,7 @@ mod task_file;
use crate::drivers::fs::proc::task::task_file::{ProcTaskFileInode, TaskFileType};
use crate::drivers::fs::proc::{get_inode_id, procfs};
use crate::process::TaskDescriptor;
use crate::process::Tid;
use alloc::boxed::Box;
use alloc::string::ToString;
use alloc::sync::Arc;
@@ -21,12 +21,12 @@ use libkernel::fs::{
pub struct ProcTaskInode {
id: InodeId,
attr: FileAttr,
desc: TaskDescriptor,
tid: Tid,
is_task_dir: bool,
}
impl ProcTaskInode {
pub fn new(desc: TaskDescriptor, is_task_dir: bool, inode_id: InodeId) -> Self {
pub fn new(tid: Tid, is_task_dir: bool, inode_id: InodeId) -> Self {
Self {
id: inode_id,
attr: FileAttr {
@@ -34,7 +34,7 @@ impl ProcTaskInode {
permissions: FilePermissions::from_bits_retain(0o555),
..FileAttr::default()
},
desc,
tid,
is_task_dir,
}
}
@@ -50,24 +50,18 @@ impl Inode for ProcTaskInode {
let fs = procfs();
let inode_id = InodeId::from_fsid_and_inodeid(
fs.id(),
get_inode_id(&[&self.desc.tid().value().to_string(), name]),
get_inode_id(&[&self.tid.value().to_string(), name]),
);
if name == "fdinfo" {
return Ok(Arc::new(fd::ProcFdInode::new(self.desc, true, inode_id)));
return Ok(Arc::new(fd::ProcFdInode::new(self.tid, true, inode_id)));
} else if name == "fd" {
return Ok(Arc::new(fd::ProcFdInode::new(self.desc, false, inode_id)));
} else if name == "task"
&& self.desc.tid().value() == self.desc.tgid().value()
&& !self.is_task_dir
{
return Ok(Arc::new(task::ProcTaskDirInode::new(
self.desc.tgid(),
inode_id,
)));
return Ok(Arc::new(fd::ProcFdInode::new(self.tid, false, inode_id)));
} else if name == "task" && !self.is_task_dir {
return Ok(Arc::new(task::ProcTaskDirInode::new(self.tid, inode_id)));
}
if let Ok(file_type) = TaskFileType::try_from(name) {
Ok(Arc::new(ProcTaskFileInode::new(
self.desc.tid(),
self.tid,
file_type,
self.is_task_dir,
inode_id,
@@ -83,7 +77,7 @@ impl Inode for ProcTaskInode {
async fn readdir(&self, start_offset: u64) -> libkernel::error::Result<Box<dyn DirStream>> {
let mut entries: Vec<Dirent> = Vec::new();
let initial_str = self.desc.tid().value().to_string();
let initial_str = self.tid.value().to_string();
entries.push(Dirent::new(
"status".to_string(),
InodeId::from_fsid_and_inodeid(PROCFS_ID, get_inode_id(&[&initial_str, "status"])),
@@ -138,7 +132,7 @@ impl Inode for ProcTaskInode {
FileType::File,
9,
));
if self.desc.tid().value() == self.desc.tgid().value() && !self.is_task_dir {
if !self.is_task_dir {
entries.push(Dirent::new(
"task".to_string(),
InodeId::from_fsid_and_inodeid(PROCFS_ID, get_inode_id(&[&initial_str, "task"])),

View File

@@ -1,7 +1,6 @@
use crate::drivers::fs::proc::task::ProcTaskInode;
use crate::drivers::fs::proc::{get_inode_id, procfs};
use crate::process::thread_group::Tgid;
use crate::process::{TaskDescriptor, Tid, find_process_by_tgid, find_task_by_descriptor};
use crate::process::{Tid, find_task_by_tid};
use alloc::boxed::Box;
use alloc::string::ToString;
use alloc::sync::Arc;
@@ -14,11 +13,11 @@ use libkernel::fs::{DirStream, Dirent, FileType, Filesystem, Inode, InodeId, Sim
pub struct ProcTaskDirInode {
id: InodeId,
attr: FileAttr,
tgid: Tgid,
tid: Tid,
}
impl ProcTaskDirInode {
pub fn new(tgid: Tgid, inode_id: InodeId) -> Self {
pub fn new(tid: Tid, inode_id: InodeId) -> Self {
Self {
id: inode_id,
attr: FileAttr {
@@ -26,7 +25,7 @@ impl ProcTaskDirInode {
// Define appropriate file attributes for fdinfo.
..FileAttr::default()
},
tgid,
tid,
}
}
}
@@ -49,17 +48,14 @@ impl Inode for ProcTaskDirInode {
let fs = procfs();
let inode_id = InodeId::from_fsid_and_inodeid(
fs.id(),
get_inode_id(&[&self.tgid.value().to_string(), &tid.value().to_string()]),
get_inode_id(&[&self.tid.value().to_string(), &tid.value().to_string()]),
);
let desc = TaskDescriptor::from_tgid_tid(self.tgid, tid);
find_task_by_descriptor(&desc).ok_or(FsError::NotFound)?;
Ok(Arc::new(ProcTaskInode::new(desc, true, inode_id)))
find_task_by_tid(self.tid).ok_or(FsError::NotFound)?;
Ok(Arc::new(ProcTaskInode::new(self.tid, true, inode_id)))
}
async fn readdir(&self, start_offset: u64) -> libkernel::error::Result<Box<dyn DirStream>> {
let process = &find_process_by_tgid(self.tgid)
.ok_or(FsError::NotFound)?
.process;
let process = &find_task_by_tid(self.tid).ok_or(FsError::NotFound)?.process;
let tasks = process.tasks.lock_save_irq();
let mut entries = Vec::new();
for (i, (_tid, task)) in tasks.iter().enumerate().skip(start_offset as usize) {
@@ -68,10 +64,7 @@ impl Inode for ProcTaskDirInode {
};
let id = InodeId::from_fsid_and_inodeid(
procfs().id(),
get_inode_id(&[
&self.tgid.value().to_string(),
&task.tid.value().to_string(),
]),
get_inode_id(&[&self.tid.value().to_string(), &task.tid.value().to_string()]),
);
entries.push(Dirent {
id,

View File

@@ -1,4 +1,4 @@
use crate::process::{TASK_LIST, Tid, find_task_by_descriptor};
use crate::process::{Tid, find_task_by_tid};
use alloc::boxed::Box;
use alloc::format;
use alloc::string::{String, ToString};
@@ -81,18 +81,7 @@ impl SimpleFile for ProcTaskFileInode {
}
async fn read(&self) -> libkernel::error::Result<Vec<u8>> {
let tid = self.tid;
let task_list = TASK_LIST.lock_save_irq();
let id = task_list
.iter()
.find(|(desc, _)| desc.tid() == tid)
.map(|(desc, _)| *desc);
drop(task_list);
let task_details = if let Some(desc) = id {
find_task_by_descriptor(&desc)
} else {
None
};
let task_details = find_task_by_tid(self.tid);
let status_string = if let Some(task) = task_details {
let state = task.state.load(core::sync::atomic::Ordering::Relaxed);
@@ -216,56 +205,24 @@ Threads:\t{tasks}\n",
async fn readlink(&self) -> libkernel::error::Result<PathBuf> {
if let TaskFileType::Cwd = self.file_type {
let tid = self.tid;
let task_list = TASK_LIST.lock_save_irq();
let id = task_list
.iter()
.find(|(desc, _)| desc.tid() == tid)
.map(|(desc, _)| *desc);
drop(task_list);
let task_details = if let Some(desc) = id {
find_task_by_descriptor(&desc)
} else {
None
};
return if let Some(task) = task_details {
let task = find_task_by_tid(self.tid);
return if let Some(task) = task {
let cwd = task.cwd.lock_save_irq();
Ok(cwd.1.clone())
} else {
Err(FsError::NotFound.into())
};
} else if let TaskFileType::Root = self.file_type {
let tid = self.tid;
let task_list = TASK_LIST.lock_save_irq();
let id = task_list
.iter()
.find(|(desc, _)| desc.tid() == tid)
.map(|(desc, _)| *desc);
drop(task_list);
let task_details = if let Some(desc) = id {
find_task_by_descriptor(&desc)
} else {
None
};
return if let Some(task) = task_details {
let task = find_task_by_tid(self.tid);
return if let Some(task) = task {
let root = task.root.lock_save_irq();
Ok(root.1.clone())
} else {
Err(FsError::NotFound.into())
};
} else if let TaskFileType::Exe = self.file_type {
let tid = self.tid;
let task_list = TASK_LIST.lock_save_irq();
let id = task_list
.iter()
.find(|(desc, _)| desc.tid() == tid)
.map(|(desc, _)| *desc);
drop(task_list);
let task_details = if let Some(desc) = id {
find_task_by_descriptor(&desc)
} else {
None
};
let task_details = find_task_by_tid(self.tid);
return if let Some(task) = task_details {
if let Some(exe) = task.process.executable.lock_save_irq().clone() {
Ok(exe.as_str().to_string().into())

View File

@@ -140,7 +140,7 @@ mod tests {
}
let datetime = parts.next().unwrap(); // "Tue Feb 20 12:34:56 UTC 2024"
validate_datetime(datetime)
validate_datetime(datetime);
}
// Test that the version string is of the format "#1 Moss SMP Tue Feb 20 12:34:56 UTC 2024"

View File

@@ -4,11 +4,8 @@ use super::{
PageOffsetTranslator,
uaccess::{copy_obj_array_from_user, copy_to_user_slice},
};
use crate::process::find_process_by_tgid;
use crate::{
fs::syscalls::iov::IoVec,
process::thread_group::{Tgid, pid::PidT},
};
use crate::process::{Tid, find_task_by_tid};
use crate::{fs::syscalls::iov::IoVec, process::thread_group::pid::PidT};
use libkernel::{
error::{KernelError, Result},
memory::{PAGE_SIZE, address::TUA, proc_vm::vmarea::AccessKind},
@@ -22,8 +19,8 @@ pub async fn sys_process_vm_readv(
riov_count: usize,
_flags: usize,
) -> Result<usize> {
let tgid = Tgid::from_pid_t(pid);
let remote_proc = find_process_by_tgid(tgid).ok_or(KernelError::NoProcess)?;
let tgid = Tid::from_pid_t(pid);
let remote_proc = find_task_by_tid(tgid).ok_or(KernelError::NoProcess)?;
let local_iovs = copy_obj_array_from_user(local_iov, liov_count).await?;
let remote_iovs = copy_obj_array_from_user(remote_iov, riov_count).await?;

View File

@@ -2,7 +2,6 @@ use crate::{
memory::uaccess::{
UserCopyable, copy_from_user, copy_obj_array_from_user, copy_objs_to_user, copy_to_user,
},
process::TASK_LIST,
sched::syscall_ctx::ProcessCtx,
};
use libkernel::{
@@ -11,6 +10,8 @@ use libkernel::{
proc::caps::{Capabilities, CapabilitiesFlags},
};
use super::{Tid, find_task_by_tid, thread_group::pid::PidT};
const LINUX_CAPABILITY_VERSION_1: u32 = 0x19980330;
const LINUX_CAPABILITY_VERSION_3: u32 = 0x20080522;
@@ -18,7 +19,7 @@ const LINUX_CAPABILITY_VERSION_3: u32 = 0x20080522;
#[derive(Debug, Default, Clone, Copy)]
pub struct CapUserHeader {
version: u32,
pid: i32,
pid: PidT,
}
#[repr(C)]
@@ -59,11 +60,7 @@ pub async fn sys_capget(
let task = if header.pid == 0 {
ctx.shared().clone()
} else {
TASK_LIST
.lock_save_irq()
.iter()
.find(|task| task.0.tgid.value() == header.pid as u32)
.and_then(|task| task.1.upgrade())
find_task_by_tid(Tid::from_pid_t(header.pid))
.map(|x| (*x).clone())
.ok_or(KernelError::NoProcess)?
};
@@ -99,11 +96,7 @@ pub async fn sys_capset(
ctx.shared().clone()
} else {
caller_caps.check_capable(CapabilitiesFlags::CAP_SETPCAP)?;
TASK_LIST
.lock_save_irq()
.iter()
.find(|task| task.0.tgid.value() == header.pid as u32)
.and_then(|task| task.1.upgrade())
find_task_by_tid(Tid::from_pid_t(header.pid))
.map(|x| (*x).clone())
.ok_or(KernelError::NoProcess)?
};

View File

@@ -1,3 +1,4 @@
use super::Tid;
use super::owned::OwnedTask;
use super::ptrace::{PTrace, TracePoint, ptrace_stop};
use super::{
@@ -63,11 +64,19 @@ pub async fn sys_clone(
) -> Result<usize> {
let flags = CloneFlags::from_bits_truncate(flags);
let trace_point = if flags.contains(CloneFlags::CLONE_THREAD) {
TracePoint::Clone
} else {
TracePoint::Fork
};
// TODO: differentiate between `TracePoint::Fork`, `TracePoint::Clone` and
// `TracePoint::VFork`.
let should_trace_new_tsk = ptrace_stop(ctx, TracePoint::Fork).await;
let should_trace_new_tsk = ptrace_stop(ctx, trace_point).await;
let new_task = {
let tid = Tid::next_tid();
let current_task = ctx.task();
let mut user_ctx = *current_task.ctx.user();
@@ -80,7 +89,7 @@ pub async fn sys_clone(
user_ctx.tpid_el0 = tls as _;
}
let (tg, tid) = if flags.contains(CloneFlags::CLONE_THREAD) {
let tg = if flags.contains(CloneFlags::CLONE_THREAD) {
if !flags.contains(CloneFlags::CLONE_SIGHAND & CloneFlags::CLONE_VM) {
// CLONE_THREAD requires both CLONE_SIGHAND and CLONE_VM to be
// set.
@@ -88,11 +97,8 @@ pub async fn sys_clone(
}
user_ctx.sp_el0 = newsp.value() as _;
(
// A new task within this thread group.
current_task.process.clone(),
current_task.process.next_tid(),
)
// A new task within this thread group.
current_task.process.clone()
} else {
let tgid_parent = if flags.contains(CloneFlags::CLONE_PARENT) {
// Use the parent's parent as the new parent.
@@ -109,7 +115,7 @@ pub async fn sys_clone(
current_task.process.clone()
};
tgid_parent.new_child(flags.contains(CloneFlags::CLONE_SIGHAND))
tgid_parent.new_child(flags.contains(CloneFlags::CLONE_SIGHAND), tid)
};
let vm = if flags.contains(CloneFlags::CLONE_VM) {
@@ -191,7 +197,7 @@ pub async fn sys_clone(
TASK_LIST
.lock_save_irq()
.insert(desc, Arc::downgrade(&work));
.insert(desc.tid(), Arc::downgrade(&work));
work.process
.tasks

View File

@@ -79,7 +79,9 @@ pub fn do_exit_group(task: &Arc<Task>, exit_code: ChildState) {
parent.children.lock_save_irq().remove(&process.tgid);
parent.child_notifiers.child_update(process.tgid, exit_code);
parent
.child_notifiers
.child_update(task.descriptor().tgid(), exit_code);
parent
.pending_signals
@@ -136,7 +138,7 @@ pub async fn sys_exit(ctx: &mut ProcessCtx, exit_code: usize) -> Result<usize> {
.filter(|t| t.upgrade().is_some())
.count();
TASK_LIST.lock_save_irq().remove(&task.descriptor());
TASK_LIST.lock_save_irq().remove(&task.descriptor().tid());
if live_tasks <= 1 {
// We are the last task. This is equivalent to an exit_group. The exit

View File

@@ -15,7 +15,7 @@ use alloc::{
collections::btree_map::BTreeMap,
sync::{Arc, Weak},
};
use core::sync::atomic::{AtomicUsize, Ordering};
use core::sync::atomic::{AtomicU32, AtomicUsize, Ordering};
use creds::Credentials;
use fd_table::FileDescriptorTable;
use libkernel::{
@@ -29,6 +29,7 @@ use libkernel::{
},
};
use ptrace::PTrace;
use thread_group::pid::PidT;
use thread_group::signal::{AtomicSigSet, SigId};
use thread_group::{Tgid, ThreadGroup};
@@ -46,6 +47,9 @@ pub mod sleep;
pub mod thread_group;
pub mod threading;
// the idle process (0) and the init process (1) are allocated manually.
static NEXT_TID: AtomicU32 = AtomicU32::new(2);
// Thread Id.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Tid(pub u32);
@@ -62,6 +66,14 @@ impl Tid {
fn idle_for_cpu() -> Tid {
Self(CpuId::this().value() as _)
}
pub fn next_tid() -> Self {
Self(NEXT_TID.fetch_add(1, Ordering::Relaxed))
}
pub fn from_pid_t(pid: PidT) -> Self {
Self(pid as _)
}
}
/// A unique identifier for any task in the current system.
@@ -309,20 +321,15 @@ impl Task {
}
}
pub fn find_task_by_descriptor(descriptor: &TaskDescriptor) -> Option<Arc<Work>> {
/// Finds a task by its `Tid`.
pub fn find_task_by_tid(tid: Tid) -> Option<Arc<Work>> {
TASK_LIST
.lock_save_irq()
.get(descriptor)
.get(&tid)
.and_then(|x| x.upgrade())
}
/// Finds the root task for the given thread group
pub fn find_process_by_tgid(tgid: Tgid) -> Option<Arc<Work>> {
find_task_by_descriptor(&TaskDescriptor::from_tgid_tid(tgid, Tid::from_tgid(tgid)))
}
pub static TASK_LIST: SpinLock<BTreeMap<TaskDescriptor, Weak<Work>>> =
SpinLock::new(BTreeMap::new());
pub static TASK_LIST: SpinLock<BTreeMap<Tid, Weak<Work>>> = SpinLock::new(BTreeMap::new());
unsafe impl Send for Task {}
unsafe impl Sync for Task {}

View File

@@ -1,9 +1,12 @@
use super::thread_group::{ThreadGroup, wait::ChildState};
use super::{
Task, Tid, find_task_by_tid,
thread_group::{ThreadGroup, pid::PidT, wait::TraceTrap},
};
use crate::{
arch::{Arch, ArchImpl},
fs::syscalls::iov::IoVec,
memory::uaccess::{copy_from_user, copy_to_user},
process::{TASK_LIST, thread_group::signal::SigId},
process::thread_group::signal::SigId,
sched::syscall_ctx::ProcessCtx,
};
use alloc::sync::Arc;
@@ -146,7 +149,7 @@ impl PTrace {
}
/// Notify parents of a trap event.
pub fn notify_tracer_of_trap(&self, me: &Arc<ThreadGroup>) {
pub fn notify_tracer_of_trap(&self, task: &Arc<Task>) {
let Some(trap_signal) = (match self.state {
// For non-signal trace events, we use SIGTRAP.
Some(PTraceState::TracePointHit { hit_point, .. }) => match hit_point {
@@ -160,14 +163,11 @@ impl PTrace {
return;
};
// Notify the parent that we have stopped (SIGCHLD).
// Notify the tracer that we have stopped (SIGCHLD).
if let Some(tracer) = self.tracer.as_ref() {
tracer.child_notifiers.child_update(
me.tgid,
ChildState::TraceTrap {
signal: trap_signal,
mask: self.calc_trace_point_mask(),
},
tracer.child_notifiers.ptrace_notify(
task.tid,
TraceTrap::new(trap_signal, self.calc_trace_point_mask()),
);
tracer
@@ -272,7 +272,7 @@ pub async fn ptrace_stop(ctx: &ProcessCtx, point: TracePoint) -> bool {
notified = true;
ptrace.set_waker(cx.waker().clone());
ptrace.notify_tracer_of_trap(&task_sh.process);
ptrace.notify_tracer_of_trap(task_sh);
Poll::Pending
} else if matches!(ptrace.state, Some(PTraceState::Running)) {
// Tracer resumed us.
@@ -287,7 +287,7 @@ pub async fn ptrace_stop(ctx: &ProcessCtx, point: TracePoint) -> bool {
.await
}
pub async fn sys_ptrace(ctx: &ProcessCtx, op: i32, pid: u64, addr: UA, data: UA) -> Result<usize> {
pub async fn sys_ptrace(ctx: &ProcessCtx, op: i32, pid: PidT, addr: UA, data: UA) -> Result<usize> {
let op = PtraceOperation::try_from(op)?;
if op == PtraceOperation::TraceMe {
@@ -308,14 +308,7 @@ pub async fn sys_ptrace(ctx: &ProcessCtx, op: i32, pid: u64, addr: UA, data: UA)
return Ok(0);
}
let target_task = {
TASK_LIST
.lock_save_irq()
.iter()
.find(|(desc, _)| desc.tid.value() == pid as u32)
.and_then(|(_, task)| task.upgrade())
.ok_or(KernelError::NoProcess)?
};
let target_task = { find_task_by_tid(Tid::from_pid_t(pid)).ok_or(KernelError::NoProcess)? };
// TODO: Check CAP_SYS_PTRACE & security
match op {

View File

@@ -13,15 +13,12 @@ use alloc::{
};
use builder::ThreadGroupBuilder;
use core::sync::atomic::AtomicUsize;
use core::{
fmt::Display,
sync::atomic::{AtomicU32, Ordering},
};
use core::{fmt::Display, sync::atomic::Ordering};
use libkernel::fs::pathbuf::PathBuf;
use pid::PidT;
use rsrc_lim::ResourceLimits;
use signal::{SigId, SigSet, SignalActionState};
use wait::ChildNotifiers;
use wait::Notifiers;
pub mod builder;
pub mod pid;
@@ -55,7 +52,11 @@ impl Tgid {
Self(0)
}
pub fn from_pid_t(pid: PidT) -> Tgid {
fn from_tid(tid: Tid) -> Tgid {
Self(tid.0)
}
fn from_pid_t(pid: PidT) -> Tgid {
Self(pid as _)
}
}
@@ -108,36 +109,18 @@ pub struct ThreadGroup {
pub rsrc_lim: Arc<SpinLock<ResourceLimits>>,
pub pending_signals: SpinLock<SigSet>,
pub priority: SpinLock<i8>,
pub child_notifiers: ChildNotifiers,
pub child_notifiers: Notifiers,
pub utime: AtomicUsize,
pub stime: AtomicUsize,
pub last_account: AtomicUsize,
pub executable: SpinLock<Option<PathBuf>>,
next_tid: AtomicU32,
}
unsafe impl Send for ThreadGroup {}
impl ThreadGroup {
// Return the next available thread id. Will never return a thread whose ID
// == TGID, since that is defined as the main, root thread.
pub fn next_tid(&self) -> Tid {
let mut v = self.next_tid.fetch_add(1, Ordering::Relaxed);
// Skip the TGID.
if v == self.tgid.value() {
v = self.next_tid.fetch_add(1, Ordering::Relaxed);
}
Tid(v)
}
pub fn next_tgid() -> Tgid {
Tgid(NEXT_TGID.fetch_add(1, Ordering::SeqCst))
}
pub fn new_child(self: Arc<Self>, share_state: bool) -> (Arc<ThreadGroup>, Tid) {
let mut builder = ThreadGroupBuilder::new(Self::next_tgid()).with_parent(self.clone());
pub fn new_child(self: Arc<Self>, share_state: bool, tid: Tid) -> Arc<ThreadGroup> {
let mut builder = ThreadGroupBuilder::new(Tgid::from_tid(tid)).with_parent(self.clone());
if share_state {
builder = builder
@@ -159,7 +142,7 @@ impl ThreadGroup {
.lock_save_irq()
.insert(new_tg.tgid, new_tg.clone());
(new_tg.clone(), Tid(new_tg.tgid.value()))
new_tg.clone()
}
pub fn get(id: Tgid) -> Option<Arc<Self>> {
@@ -221,7 +204,4 @@ impl Drop for ThreadGroup {
}
}
// the idle process (0) and the init process (1) are allocated manually.
static NEXT_TGID: AtomicU32 = AtomicU32::new(2);
static TG_LIST: SpinLock<BTreeMap<Tgid, Weak<ThreadGroup>>> = SpinLock::new(BTreeMap::new());

View File

@@ -1,4 +1,4 @@
use core::sync::atomic::{AtomicU32, AtomicUsize};
use core::sync::atomic::AtomicUsize;
use alloc::{collections::btree_map::BTreeMap, sync::Arc};
@@ -8,7 +8,7 @@ use super::{
Pgid, ProcessState, Sid, TG_LIST, Tgid, ThreadGroup,
rsrc_lim::ResourceLimits,
signal::{SigSet, SignalActionState},
wait::ChildNotifiers,
wait::Notifiers,
};
/// A builder for creating ThreadGroup instances.
@@ -79,7 +79,7 @@ impl ThreadGroupBuilder {
.rsrc_lim
.unwrap_or_else(|| Arc::new(SpinLock::new(ResourceLimits::default()))),
pending_signals: SpinLock::new(SigSet::empty()),
child_notifiers: ChildNotifiers::new(),
child_notifiers: Notifiers::new(),
priority: SpinLock::new(self.pri.unwrap_or(0)),
utime: AtomicUsize::new(0),
stime: AtomicUsize::new(0),
@@ -87,7 +87,6 @@ impl ThreadGroupBuilder {
// Don't start from '0'. Since clone expects the parent to return
// the tid and the child to return '0', if we started from '0' we
// couldn't then differentiate between a child and a parent.
next_tid: AtomicU32::new(1),
state: SpinLock::new(ProcessState::Running),
tasks: SpinLock::new(BTreeMap::new()),
executable: SpinLock::new(None),

View File

@@ -3,7 +3,8 @@ use libkernel::error::{KernelError, Result};
use crate::sched::syscall_ctx::ProcessCtx;
use core::convert::Infallible;
use super::{Pgid, Tgid, ThreadGroup};
use super::Pgid;
use crate::process::{Tid, find_task_by_tid};
/// Userspace `pid_t` type.
pub type PidT = i32;
@@ -27,8 +28,8 @@ pub fn sys_getppid(ctx: &ProcessCtx) -> core::result::Result<usize, Infallible>
pub fn sys_getpgid(ctx: &ProcessCtx, pid: PidT) -> Result<usize> {
let pgid = if pid == 0 {
*ctx.shared().process.pgid.lock_save_irq()
} else if let Some(tg) = ThreadGroup::get(Tgid::from_pid_t(pid)) {
*tg.pgid.lock_save_irq()
} else if let Some(task) = find_task_by_tid(Tid::from_pid_t(pid)) {
*task.process.pgid.lock_save_irq()
} else {
return Err(KernelError::NoProcess);
};
@@ -39,8 +40,8 @@ pub fn sys_getpgid(ctx: &ProcessCtx, pid: PidT) -> Result<usize> {
pub fn sys_setpgid(ctx: &ProcessCtx, pid: PidT, pgid: Pgid) -> Result<usize> {
if pid == 0 {
*ctx.shared().process.pgid.lock_save_irq() = pgid;
} else if let Some(tg) = ThreadGroup::get(Tgid::from_pid_t(pid)) {
*tg.pgid.lock_save_irq() = pgid;
} else if let Some(task) = find_task_by_tid(Tid::from_pid_t(pid)) {
*task.process.pgid.lock_save_irq() = pgid;
} else {
return Err(KernelError::NoProcess);
};

View File

@@ -5,7 +5,7 @@ use libkernel::{
use crate::{
memory::uaccess::{UserCopyable, copy_from_user, copy_to_user},
process::thread_group::{TG_LIST, Tgid},
process::{Tid, find_task_by_tid},
sched::syscall_ctx::ProcessCtx,
};
@@ -195,10 +195,8 @@ pub async fn sys_prlimit64(
let task = if pid == 0 {
ctx.shared().process.clone()
} else {
TG_LIST
.lock_save_irq()
.get(&Tgid::from_pid_t(pid))
.and_then(|x| x.upgrade())
find_task_by_tid(Tid::from_pid_t(pid))
.map(|x| x.process.clone())
.ok_or(KernelError::NoProcess)?
};

View File

@@ -1,11 +1,12 @@
use super::{
Pgid, Tgid, ThreadGroup,
pid::PidT,
signal::{InterruptResult, Interruptable, SigId},
};
use crate::clock::timespec::TimeSpec;
use crate::memory::uaccess::{UserCopyable, copy_to_user};
use crate::sched::syscall_ctx::ProcessCtx;
use crate::sync::CondVar;
use crate::{clock::timespec::TimeSpec, process::Tid};
use alloc::collections::btree_map::BTreeMap;
use bitflags::Flags;
use libkernel::sync::condvar::WakeupType;
@@ -14,8 +15,6 @@ use libkernel::{
memory::address::TUA,
};
pub type PidT = i32;
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct RUsage {
@@ -75,10 +74,27 @@ pub enum ChildState {
NormalExit { code: u32 },
SignalExit { signal: SigId, core: bool },
Stop { signal: SigId },
TraceTrap { signal: SigId, mask: i32 },
Continue,
}
#[derive(Clone, Copy, Debug)]
pub struct TraceTrap {
signal: SigId,
mask: i32,
}
impl TraceTrap {
pub fn new(signal: SigId, mask: i32) -> Self {
Self { signal, mask }
}
}
#[derive(Clone, Copy, Debug)]
enum WaitEvent {
Child(ChildState),
Ptrace(TraceTrap),
}
impl ChildState {
fn matches_wait_flags(&self, flags: WaitFlags) -> bool {
match self {
@@ -86,33 +102,55 @@ impl ChildState {
flags.contains(WaitFlags::WEXITED)
}
ChildState::Stop { .. } => flags.contains(WaitFlags::WSTOPPED),
// Always wake up on a trace trap.
ChildState::TraceTrap { .. } => true,
ChildState::Continue => flags.contains(WaitFlags::WCONTINUED),
}
}
}
pub struct ChildNotifiers {
inner: CondVar<BTreeMap<Tgid, ChildState>>,
struct NotifierState {
children: BTreeMap<Tgid, ChildState>,
ptrace: BTreeMap<Tid, TraceTrap>,
}
impl Default for ChildNotifiers {
impl NotifierState {
fn new() -> Self {
Self {
children: BTreeMap::new(),
ptrace: BTreeMap::new(),
}
}
}
pub struct Notifiers {
inner: CondVar<NotifierState>,
}
impl Default for Notifiers {
fn default() -> Self {
Self::new()
}
}
impl ChildNotifiers {
impl Notifiers {
pub fn new() -> Self {
Self {
inner: CondVar::new(BTreeMap::new()),
inner: CondVar::new(NotifierState::new()),
}
}
pub fn child_update(&self, tgid: Tgid, new_state: ChildState) {
self.inner.update(|state| {
state.insert(tgid, new_state);
state.children.insert(tgid, new_state);
// Since some wakers may be conditional upon state update changes,
// notify everyone whenever a child updates its state.
WakeupType::All
});
}
pub fn ptrace_notify(&self, tid: Tid, ptrace_trap: TraceTrap) {
self.inner.update(|state| {
state.ptrace.insert(tid, ptrace_trap);
// Since some wakers may be conditional upon state update changes,
// notify everyone whenever a child updates its state.
@@ -121,13 +159,14 @@ impl ChildNotifiers {
}
}
fn do_wait(
state: &mut BTreeMap<Tgid, ChildState>,
fn find_child_event(
children: &mut BTreeMap<Tgid, ChildState>,
pid: PidT,
flags: WaitFlags,
) -> Option<(Tgid, ChildState)> {
remove_entry: bool,
) -> Option<(PidT, WaitEvent)> {
let key = if pid == -1 {
state.iter().find_map(|(k, v)| {
children.iter().find_map(|(k, v)| {
if v.matches_wait_flags(flags) {
Some(*k)
} else {
@@ -137,7 +176,7 @@ fn do_wait(
} else if pid < -1 {
// Wait for any child whose process group ID matches abs(pid)
let target_pgid = Pgid((-pid) as u32);
state.iter().find_map(|(k, v)| {
children.iter().find_map(|(k, v)| {
if !v.matches_wait_flags(flags) {
return None;
}
@@ -152,7 +191,7 @@ fn do_wait(
}
})
} else {
state
children
.get_key_value(&Tgid::from_pid_t(pid))
.and_then(|(k, v)| {
if v.matches_wait_flags(flags) {
@@ -163,52 +202,53 @@ fn do_wait(
})
}?;
Some(state.remove_entry(&key).unwrap())
if remove_entry {
children
.remove_entry(&key)
.map(|(k, v)| (k.value() as PidT, WaitEvent::Child(v)))
} else {
children
.get(&key)
.map(|v| (key.value() as PidT, WaitEvent::Child(*v)))
}
}
// Non-consuming wait finder to support WNOWAIT
fn find_waitable(
state: &BTreeMap<Tgid, ChildState>,
fn find_ptrace_event(
ptrace: &mut BTreeMap<Tid, TraceTrap>,
pid: PidT,
flags: WaitFlags,
) -> Option<(Tgid, ChildState)> {
remove_entry: bool,
) -> Option<(PidT, WaitEvent)> {
// Ptrace events are always eligible for collection regardless of wait
// flags. The WSTOPPED/WUNTRACED filtering only governs non-traced
// group-stop events in the children map.
let key = if pid == -1 {
state.iter().find_map(|(k, v)| {
if v.matches_wait_flags(flags) {
Some(*k)
} else {
None
}
})
ptrace.keys().next().copied()
} else if pid < -1 {
let target_pgid = Pgid((-pid) as u32);
state.iter().find_map(|(k, v)| {
if !v.matches_wait_flags(flags) {
return None;
}
if let Some(tg) = ThreadGroup::get(*k) {
if *tg.pgid.lock_save_irq() == target_pgid {
Some(*k)
} else {
None
}
} else {
None
}
})
// TODO: pgid matching for ptrace events
None
} else {
state
.get_key_value(&Tgid::from_pid_t(pid))
.and_then(|(k, v)| {
if v.matches_wait_flags(flags) {
Some(*k)
} else {
None
}
})
let tid = Tid::from_pid_t(pid);
ptrace.contains_key(&tid).then_some(tid)
}?;
state.get(&key).map(|v| (key, *v))
let event = if remove_entry {
ptrace.remove(&key)?
} else {
*ptrace.get(&key)?
};
Some((key.value() as PidT, WaitEvent::Ptrace(event)))
}
/// Locate a reportable wait event for `pid`, if any.
///
/// Ptrace trap events always take precedence over ordinary child state
/// changes; child events are additionally filtered by the caller's wait
/// `flags`. When `remove_entry` is set the matched event is consumed from
/// the notifier state (cleared for WNOWAIT-style peeks).
fn find_event(
    state: &mut NotifierState,
    pid: PidT,
    flags: WaitFlags,
    remove_entry: bool,
) -> Option<(PidT, WaitEvent)> {
    // Check the ptrace queue first: trap events win over child events.
    if let Some(trap) = find_ptrace_event(&mut state.ptrace, pid, remove_entry) {
        return Some(trap);
    }
    find_child_event(&mut state.children, pid, flags, remove_entry)
}
pub async fn sys_wait4(
@@ -251,13 +291,13 @@ pub async fn sys_wait4(
let child_proc_count = task.process.children.lock_save_irq().iter().count();
let (tgid, child_state) = if child_proc_count == 0 || flags.contains(WaitFlags::WNOHANG) {
let (ret_pid, event) = if child_proc_count == 0 || flags.contains(WaitFlags::WNOHANG) {
// Special case for no children. See if there are any pending child
// notification events without sleeping. If there are no children and no
// pending events, return ECHILD.
let mut ret = None;
task.process.child_notifiers.inner.update(|s| {
ret = do_wait(s, pid, flags);
ret = find_event(s, pid, flags, true);
WakeupType::None
});
@@ -271,7 +311,7 @@ pub async fn sys_wait4(
.process
.child_notifiers
.inner
.wait_until(|state| do_wait(state, pid, flags))
.wait_until(|state| find_event(state, pid, flags, true))
.interruptable()
.await
{
@@ -281,34 +321,34 @@ pub async fn sys_wait4(
};
if !stat_addr.is_null() {
match child_state {
ChildState::NormalExit { code } => {
match event {
WaitEvent::Child(ChildState::NormalExit { code }) => {
copy_to_user(stat_addr, (code as i32 & 0xff) << 8).await?;
}
ChildState::SignalExit { signal, core } => {
WaitEvent::Child(ChildState::SignalExit { signal, core }) => {
copy_to_user(
stat_addr,
(signal.user_id() as i32) | if core { 0x80 } else { 0x0 },
)
.await?;
}
ChildState::Stop { signal } => {
WaitEvent::Child(ChildState::Stop { signal }) => {
copy_to_user(stat_addr, ((signal.user_id() as i32) << 8) | 0x7f).await?;
}
ChildState::TraceTrap { signal, mask } => {
WaitEvent::Ptrace(TraceTrap { signal, mask }) => {
copy_to_user(
stat_addr,
((signal.user_id() as i32) << 8) | 0x7f | mask << 8,
)
.await?;
}
ChildState::Continue => {
WaitEvent::Child(ChildState::Continue) => {
copy_to_user(stat_addr, 0xffff).await?;
}
}
}
Ok(tgid.value() as _)
Ok(ret_pid as _)
}
// idtype for waitid
@@ -368,18 +408,17 @@ pub async fn sys_waitid(
};
let task = ctx.shared();
let child_proc_count = task.process.children.lock_save_irq().iter().count();
// Try immediate check if no children or WNOHANG
let child_state = if child_proc_count == 0 || flags.contains(WaitFlags::WNOHANG) {
let mut ret: Option<ChildState> = None;
let event = if child_proc_count == 0 || flags.contains(WaitFlags::WNOHANG) {
let mut ret: Option<WaitEvent> = None;
task.process.child_notifiers.inner.update(|s| {
// Use non-consuming finder for WNOWAIT, else consume
ret = if flags.contains(WaitFlags::WNOWAIT) {
find_waitable(s, sel_pid, flags).map(|(_, state)| state)
} else {
do_wait(s, sel_pid, flags).map(|(_, state)| state)
};
// Don't consume on WNOWAIT.
ret = find_event(s, sel_pid, flags, !flags.contains(WaitFlags::WNOWAIT))
.map(|(_, event)| event);
WakeupType::None
});
@@ -390,19 +429,15 @@ pub async fn sys_waitid(
}
} else {
// Wait until a child matches; first find key, then remove conditionally
let (_, state) = task
.process
task.process
.child_notifiers
.inner
.wait_until(|s| {
if flags.contains(WaitFlags::WNOWAIT) {
find_waitable(s, sel_pid, flags)
} else {
do_wait(s, sel_pid, flags)
}
// Don't consume on WNOWAIT.
find_event(s, sel_pid, flags, !flags.contains(WaitFlags::WNOWAIT))
})
.await;
state
.await
.1
};
// Populate siginfo
@@ -412,24 +447,24 @@ pub async fn sys_waitid(
code: 0,
errno: 0,
};
match child_state {
ChildState::NormalExit { code } => {
match event {
WaitEvent::Child(ChildState::NormalExit { code }) => {
siginfo.code = CLD_EXITED;
siginfo.errno = code as i32;
}
ChildState::SignalExit { signal, core } => {
WaitEvent::Child(ChildState::SignalExit { signal, core }) => {
siginfo.code = if core { CLD_DUMPED } else { CLD_KILLED };
siginfo.errno = signal.user_id() as i32;
}
ChildState::Stop { signal } => {
WaitEvent::Child(ChildState::Stop { signal }) => {
siginfo.code = CLD_STOPPED;
siginfo.errno = signal.user_id() as i32;
}
ChildState::TraceTrap { signal, .. } => {
WaitEvent::Ptrace(TraceTrap { signal, .. }) => {
siginfo.code = CLD_TRAPPED;
siginfo.errno = signal.user_id() as i32;
}
ChildState::Continue => {
WaitEvent::Child(ChildState::Continue) => {
siginfo.code = CLD_CONTINUED;
}
}

View File

@@ -249,7 +249,10 @@ pub fn sched_init() {
{
let mut task_list = TASK_LIST.lock_save_irq();
task_list.insert(init_work.task.descriptor(), Arc::downgrade(&init_work));
task_list.insert(
init_work.task.descriptor().tid(),
Arc::downgrade(&init_work),
);
}
insert_work(init_work);

View File

@@ -192,7 +192,7 @@ pub fn dispatch_userspace_task(frame: *mut UserCtx) {
let mut ptrace = ctx.task().ptrace.lock_save_irq();
if ptrace.trace_signal(signal, ctx.task().ctx.user()) {
ptrace.set_waker(current_work_waker());
ptrace.notify_tracer_of_trap(&ctx.task().process);
ptrace.notify_tracer_of_trap(ctx.shared());
drop(ptrace);
if current_work().state.try_pending_stop() {