//! `os/src/task/mod.rs` — task management for the rCore tutorial kernel.

mod context;
2021-09-28 01:21:59 +08:00
mod id;
2022-01-22 12:40:54 -08:00
mod manager;
2021-09-28 01:21:59 +08:00
mod process;
2022-01-22 12:40:54 -08:00
mod processor;
mod signal;
2022-01-22 12:40:54 -08:00
mod switch;
2022-01-24 23:23:03 -08:00
#[allow(clippy::module_inception)]
2022-01-22 12:40:54 -08:00
mod task;
2022-05-12 23:14:42 -07:00
use self::id::TaskUserRes;
use crate::fs::{open_file, OpenFlags};
2023-03-29 21:16:34 +08:00
use crate::sbi::shutdown;
2022-05-12 23:14:42 -07:00
use alloc::{sync::Arc, vec::Vec};
use lazy_static::*;
2022-01-22 12:40:54 -08:00
use manager::fetch_task;
2021-09-28 01:21:59 +08:00
use process::ProcessControlBlock;
2022-01-22 12:40:54 -08:00
use switch::__switch;
2021-09-28 01:21:59 +08:00
pub use context::TaskContext;
2022-05-13 18:19:17 +08:00
pub use id::{kstack_alloc, pid_alloc, KernelStack, PidHandle, IDLE_PID};
2023-03-30 22:53:41 +08:00
pub use manager::{add_task, pid2process, remove_from_pid2process, wakeup_task};
2020-12-08 15:37:10 +08:00
pub use processor::{
2022-01-22 12:40:54 -08:00
current_kstack_top, current_process, current_task, current_trap_cx, current_trap_cx_user_va,
current_user_token, run_tasks, schedule, take_current_task,
2020-12-08 15:37:10 +08:00
};
pub use signal::SignalFlags;
pub use task::{TaskControlBlock, TaskStatus};
pub fn suspend_current_and_run_next() {
2020-12-08 15:37:10 +08:00
// There must be an application running.
2020-12-10 11:57:26 +08:00
let task = take_current_task().unwrap();
// ---- access current TCB exclusively
let mut task_inner = task.inner_exclusive_access();
let task_cx_ptr = &mut task_inner.task_cx as *mut TaskContext;
2020-12-10 11:57:26 +08:00
// Change status to Ready
2021-02-16 20:50:24 +08:00
task_inner.task_status = TaskStatus::Ready;
drop(task_inner);
2021-09-30 10:09:21 -07:00
// ---- release current TCB
2020-12-10 11:57:26 +08:00
2020-12-08 15:37:10 +08:00
// push back to ready queue.
add_task(task);
// jump to scheduling cycle
schedule(task_cx_ptr);
}
2022-03-04 09:02:32 -08:00
/// This function must be followed by a schedule
pub fn block_current_task() -> *mut TaskContext {
2021-10-07 14:33:44 -07:00
let task = take_current_task().unwrap();
let mut task_inner = task.inner_exclusive_access();
2023-01-19 13:59:06 +08:00
task_inner.task_status = TaskStatus::Blocked;
2022-03-04 09:02:32 -08:00
&mut task_inner.task_cx as *mut TaskContext
}
pub fn block_current_and_run_next() {
2022-05-14 22:53:45 +08:00
let task_cx_ptr = block_current_task();
2021-10-07 14:33:44 -07:00
schedule(task_cx_ptr);
}
2023-03-29 20:51:50 +08:00
/// Exit the current 'Running' task and run the next task in task list.
pub fn exit_current_and_run_next(exit_code: i32) {
2020-12-10 11:57:26 +08:00
let task = take_current_task().unwrap();
let mut task_inner = task.inner_exclusive_access();
2021-09-30 10:09:21 -07:00
let process = task.process.upgrade().unwrap();
let tid = task_inner.res.as_ref().unwrap().tid;
// record exit code
task_inner.exit_code = Some(exit_code);
task_inner.res = None;
// here we do not remove the thread since we are still using the kstack
// it will be deallocated when sys_waittid is called
drop(task_inner);
drop(task);
// however, if this is the main thread of current process
// the process should terminate at once
2021-09-30 10:09:21 -07:00
if tid == 0 {
2022-05-13 18:19:17 +08:00
let pid = process.getpid();
if pid == IDLE_PID {
2022-05-15 12:03:05 +08:00
println!(
"[kernel] Idle process exit with exit_code {} ...",
exit_code
);
if exit_code != 0 {
//crate::sbi::shutdown(255); //255 == -1 for err hint
2023-03-29 21:16:34 +08:00
shutdown(true);
2022-05-15 12:03:05 +08:00
} else {
//crate::sbi::shutdown(0); //0 for success hint
2023-03-29 21:16:34 +08:00
shutdown(false);
2022-05-15 12:03:05 +08:00
}
2022-05-13 18:19:17 +08:00
}
remove_from_pid2process(pid);
let mut process_inner = process.inner_exclusive_access();
2021-09-30 10:09:21 -07:00
// mark this process as a zombie process
process_inner.is_zombie = true;
// record exit code of main process
process_inner.exit_code = exit_code;
2020-12-10 11:57:26 +08:00
2021-09-30 10:09:21 -07:00
{
// move all child processes under init process
let mut initproc_inner = INITPROC.inner_exclusive_access();
for child in process_inner.children.iter() {
2022-01-22 12:40:54 -08:00
child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
2021-09-30 10:09:21 -07:00
initproc_inner.children.push(child.clone());
}
2020-12-10 11:57:26 +08:00
}
// deallocate user res (including tid/trap_cx/ustack) of all threads
// it has to be done before we dealloc the whole memory_set
// otherwise they will be deallocated twice
2022-05-12 23:14:42 -07:00
let mut recycle_res = Vec::<TaskUserRes>::new();
for task in process_inner.tasks.iter().filter(|t| t.is_some()) {
let task = task.as_ref().unwrap();
let mut task_inner = task.inner_exclusive_access();
2022-05-12 23:14:42 -07:00
if let Some(res) = task_inner.res.take() {
recycle_res.push(res);
}
}
2022-05-12 23:14:42 -07:00
// dealloc_tid and dealloc_user_res require access to PCB inner, so we
// need to collect those user res first, then release process_inner
// for now to avoid deadlock/double borrow problem.
drop(process_inner);
recycle_res.clear();
2022-05-12 23:14:42 -07:00
let mut process_inner = process.inner_exclusive_access();
2021-09-30 10:09:21 -07:00
process_inner.children.clear();
// deallocate other data in user space i.e. program code/data section
2021-09-30 10:09:21 -07:00
process_inner.memory_set.recycle_data_pages();
2022-01-22 12:40:54 -08:00
// drop file descriptors
process_inner.fd_table.clear();
2021-09-30 10:09:21 -07:00
}
drop(process);
2020-12-10 11:57:26 +08:00
// we do not have to save task context
let mut _unused = TaskContext::zero_init();
schedule(&mut _unused as *mut _);
}
lazy_static! {
    /// The init process, lazily built from the "initproc" executable on
    /// the file system the first time it is touched.
    pub static ref INITPROC: Arc<ProcessControlBlock> = {
        let elf_data = open_file("initproc", OpenFlags::RDONLY)
            .unwrap()
            .read_all();
        ProcessControlBlock::new(elf_data.as_slice())
    };
}
2020-12-10 11:57:26 +08:00
pub fn add_initproc() {
let _initproc = INITPROC.clone();
}
/// Check whether the current process has a pending fatal signal, returning
/// the matching (exit code, message) pair if so.
pub fn check_signals_of_current() -> Option<(i32, &'static str)> {
    current_process()
        .inner_exclusive_access()
        .signals
        .check_error()
}
/// Deliver `signal` to the current process by OR-ing it into the process's
/// pending signal set.
pub fn current_add_signal(signal: SignalFlags) {
    current_process().inner_exclusive_access().signals |= signal;
}