Small Fix && cargo fmt

Yifan Wu 2022-01-22 12:40:54 -08:00
parent c9583b0f53
commit ae3ba9c26f
83 changed files with 1085 additions and 1079 deletions


@ -17,26 +17,24 @@ pub const CLOCK_FREQ: usize = 403000000 / 62;
pub const CLOCK_FREQ: usize = 12500000;
#[cfg(feature = "board_qemu")]
pub const MMIO: &[(usize, usize)] = &[
(0x10001000, 0x1000),
];
pub const MMIO: &[(usize, usize)] = &[(0x10001000, 0x1000)];
#[cfg(feature = "board_k210")]
pub const MMIO: &[(usize, usize)] = &[
// we don't need clint in S priv when running
// we only need claim/complete for target0 after initializing
(0x0C00_0000, 0x3000), /* PLIC */
(0x0C20_0000, 0x1000), /* PLIC */
(0x3800_0000, 0x1000), /* UARTHS */
(0x3800_1000, 0x1000), /* GPIOHS */
(0x5020_0000, 0x1000), /* GPIO */
(0x5024_0000, 0x1000), /* SPI_SLAVE */
(0x502B_0000, 0x1000), /* FPIOA */
(0x502D_0000, 0x1000), /* TIMER0 */
(0x502E_0000, 0x1000), /* TIMER1 */
(0x502F_0000, 0x1000), /* TIMER2 */
(0x5044_0000, 0x1000), /* SYSCTL */
(0x5200_0000, 0x1000), /* SPI0 */
(0x5300_0000, 0x1000), /* SPI1 */
(0x5400_0000, 0x1000), /* SPI2 */
(0x0C00_0000, 0x3000), /* PLIC */
(0x0C20_0000, 0x1000), /* PLIC */
(0x3800_0000, 0x1000), /* UARTHS */
(0x3800_1000, 0x1000), /* GPIOHS */
(0x5020_0000, 0x1000), /* GPIO */
(0x5024_0000, 0x1000), /* SPI_SLAVE */
(0x502B_0000, 0x1000), /* FPIOA */
(0x502D_0000, 0x1000), /* TIMER0 */
(0x502E_0000, 0x1000), /* TIMER1 */
(0x502F_0000, 0x1000), /* TIMER2 */
(0x5044_0000, 0x1000), /* SYSCTL */
(0x5200_0000, 0x1000), /* SPI0 */
(0x5300_0000, 0x1000), /* SPI1 */
(0x5400_0000, 0x1000), /* SPI2 */
];


@ -1,5 +1,5 @@
use core::fmt::{self, Write};
use crate::sbi::console_putchar;
use core::fmt::{self, Write};
struct Stdout;
@ -29,5 +29,3 @@ macro_rules! println {
$crate::console::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?))
}
}


@ -1,9 +1,9 @@
mod virtio_blk;
mod sdcard;
mod virtio_blk;
use lazy_static::*;
use alloc::sync::Arc;
use easy_fs::BlockDevice;
use lazy_static::*;
#[cfg(feature = "board_qemu")]
type BlockDeviceImpl = virtio_blk::VirtIOBlock;
@ -21,10 +21,12 @@ pub fn block_device_test() {
let mut write_buffer = [0u8; 512];
let mut read_buffer = [0u8; 512];
for i in 0..512 {
for byte in write_buffer.iter_mut() { *byte = i as u8; }
for byte in write_buffer.iter_mut() {
*byte = i as u8;
}
block_device.write_block(i as usize, &write_buffer);
block_device.read_block(i as usize, &mut read_buffer);
assert_eq!(write_buffer, read_buffer);
}
println!("block device test passed!");
}
}


@ -2,21 +2,21 @@
#![allow(non_camel_case_types)]
#![allow(unused)]
use k210_pac::{Peripherals, SPI0};
use super::BlockDevice;
use crate::sync::UPSafeCell;
use core::convert::TryInto;
use k210_hal::prelude::*;
use k210_pac::{Peripherals, SPI0};
use k210_soc::{
fpioa::{self, io},
//dmac::{dma_channel, DMAC, DMACExt},
gpio,
gpiohs,
spi::{aitm, frame_format, tmod, work_mode, SPI, SPIExt, SPIImpl},
fpioa::{self, io},
sysctl,
sleep::usleep,
spi::{aitm, frame_format, tmod, work_mode, SPIExt, SPIImpl, SPI},
sysctl,
};
use crate::sync::UPSafeCell;
use lazy_static::*;
use super::BlockDevice;
use core::convert::TryInto;
pub struct SDCard<SPI> {
spi: SPI,
@ -160,7 +160,11 @@ pub struct SDCardInfo {
}
impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
pub fn new(spi: X, spi_cs: u32, cs_gpionum: u8/*, dmac: &'a DMAC, channel: dma_channel*/) -> Self {
pub fn new(
spi: X,
spi_cs: u32,
cs_gpionum: u8, /*, dmac: &'a DMAC, channel: dma_channel*/
) -> Self {
Self {
spi,
spi_cs,
@ -606,7 +610,7 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
}
let mut error = false;
//let mut dma_chunk = [0u32; SEC_LEN];
let mut tmp_chunk= [0u8; SEC_LEN];
let mut tmp_chunk = [0u8; SEC_LEN];
for chunk in data_buf.chunks_mut(SEC_LEN) {
if self.get_response() != SD_START_DATA_SINGLE_BLOCK_READ {
error = true;
@ -616,7 +620,7 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
//self.read_data_dma(&mut dma_chunk);
self.read_data(&mut tmp_chunk);
/* Place the data received as u32 units from DMA into the u8 target buffer */
for (a, b) in chunk.iter_mut().zip(/*dma_chunk*/tmp_chunk.iter()) {
for (a, b) in chunk.iter_mut().zip(/*dma_chunk*/ tmp_chunk.iter()) {
//*a = (b & 0xff) as u8;
*a = *b;
}
@ -675,7 +679,7 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
/* Send the data token to signify the start of the data */
self.write_data(&frame);
/* Write the block data to SD : write count data by block */
for (a, &b) in /*dma_chunk*/tmp_chunk.iter_mut().zip(chunk.iter()) {
for (a, &b) in /*dma_chunk*/ tmp_chunk.iter_mut().zip(chunk.iter()) {
//*a = b.into();
*a = b;
}
@ -711,9 +715,8 @@ fn io_init() {
}
lazy_static! {
static ref PERIPHERALS: UPSafeCell<Peripherals> = unsafe {
UPSafeCell::new(Peripherals::take().unwrap())
};
static ref PERIPHERALS: UPSafeCell<Peripherals> =
unsafe { UPSafeCell::new(Peripherals::take().unwrap()) };
}
fn init_sdcard() -> SDCard<SPIImpl<SPI0>> {
@ -747,9 +750,15 @@ impl SDCardWrapper {
impl BlockDevice for SDCardWrapper {
fn read_block(&self, block_id: usize, buf: &mut [u8]) {
self.0.exclusive_access().read_sector(buf,block_id as u32).unwrap();
self.0
.exclusive_access()
.read_sector(buf, block_id as u32)
.unwrap();
}
fn write_block(&self, block_id: usize, buf: &[u8]) {
self.0.exclusive_access().write_sector(buf,block_id as u32).unwrap();
self.0
.exclusive_access()
.write_sector(buf, block_id as u32)
.unwrap();
}
}
}


@ -1,20 +1,12 @@
use virtio_drivers::{VirtIOBlk, VirtIOHeader};
use crate::mm::{
PhysAddr,
VirtAddr,
frame_alloc,
frame_dealloc,
PhysPageNum,
FrameTracker,
StepByOne,
PageTable,
kernel_token,
};
use super::BlockDevice;
use crate::mm::{
frame_alloc, frame_dealloc, kernel_token, FrameTracker, PageTable, PhysAddr, PhysPageNum,
StepByOne, VirtAddr,
};
use crate::sync::UPSafeCell;
use alloc::vec::Vec;
use lazy_static::*;
use virtio_drivers::{VirtIOBlk, VirtIOHeader};
#[allow(unused)]
const VIRTIO0: usize = 0x10001000;
@ -22,21 +14,21 @@ const VIRTIO0: usize = 0x10001000;
pub struct VirtIOBlock(UPSafeCell<VirtIOBlk<'static>>);
lazy_static! {
static ref QUEUE_FRAMES: UPSafeCell<Vec<FrameTracker>> = unsafe {
UPSafeCell::new(Vec::new())
};
static ref QUEUE_FRAMES: UPSafeCell<Vec<FrameTracker>> = unsafe { UPSafeCell::new(Vec::new()) };
}
impl BlockDevice for VirtIOBlock {
fn read_block(&self, block_id: usize, buf: &mut [u8]) {
self.0.exclusive_access()
.read_block(block_id, buf)
.expect("Error when reading VirtIOBlk");
self.0
.exclusive_access()
.read_block(block_id, buf)
.expect("Error when reading VirtIOBlk");
}
fn write_block(&self, block_id: usize, buf: &[u8]) {
self.0.exclusive_access()
.write_block(block_id, buf)
.expect("Error when writing VirtIOBlk");
self.0
.exclusive_access()
.write_block(block_id, buf)
.expect("Error when writing VirtIOBlk");
}
}
@ -44,9 +36,9 @@ impl VirtIOBlock {
#[allow(unused)]
pub fn new() -> Self {
unsafe {
Self(UPSafeCell::new(VirtIOBlk::new(
&mut *(VIRTIO0 as *mut VirtIOHeader)
).unwrap()))
Self(UPSafeCell::new(
VirtIOBlk::new(&mut *(VIRTIO0 as *mut VirtIOHeader)).unwrap(),
))
}
}
}
@ -56,7 +48,9 @@ pub extern "C" fn virtio_dma_alloc(pages: usize) -> PhysAddr {
let mut ppn_base = PhysPageNum(0);
for i in 0..pages {
let frame = frame_alloc().unwrap();
if i == 0 { ppn_base = frame.ppn; }
if i == 0 {
ppn_base = frame.ppn;
}
assert_eq!(frame.ppn.0, ppn_base.0 + i);
QUEUE_FRAMES.exclusive_access().push(frame);
}
@ -80,5 +74,7 @@ pub extern "C" fn virtio_phys_to_virt(paddr: PhysAddr) -> VirtAddr {
#[no_mangle]
pub extern "C" fn virtio_virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
PageTable::from_token(kernel_token()).translate_va(vaddr).unwrap()
PageTable::from_token(kernel_token())
.translate_va(vaddr)
.unwrap()
}


@ -1,3 +1,3 @@
mod block;
pub use block::BLOCK_DEVICE;
pub use block::BLOCK_DEVICE;


@ -1,15 +1,12 @@
use easy_fs::{
EasyFileSystem,
Inode,
};
use super::File;
use crate::drivers::BLOCK_DEVICE;
use crate::mm::UserBuffer;
use crate::sync::UPSafeCell;
use alloc::sync::Arc;
use lazy_static::*;
use bitflags::*;
use alloc::vec::Vec;
use super::File;
use crate::mm::UserBuffer;
use bitflags::*;
use easy_fs::{EasyFileSystem, Inode};
use lazy_static::*;
pub struct OSInode {
readable: bool,
@ -23,18 +20,11 @@ pub struct OSInodeInner {
}
impl OSInode {
pub fn new(
readable: bool,
writable: bool,
inode: Arc<Inode>,
) -> Self {
pub fn new(readable: bool, writable: bool, inode: Arc<Inode>) -> Self {
Self {
readable,
writable,
inner: unsafe { UPSafeCell::new(OSInodeInner {
offset: 0,
inode,
})},
inner: unsafe { UPSafeCell::new(OSInodeInner { offset: 0, inode }) },
}
}
pub fn read_all(&self) -> Vec<u8> {
@ -98,40 +88,30 @@ pub fn open_file(name: &str, flags: OpenFlags) -> Option<Arc<OSInode>> {
if let Some(inode) = ROOT_INODE.find(name) {
// clear size
inode.clear();
Some(Arc::new(OSInode::new(
readable,
writable,
inode,
)))
Some(Arc::new(OSInode::new(readable, writable, inode)))
} else {
// create file
ROOT_INODE.create(name)
.map(|inode| {
Arc::new(OSInode::new(
readable,
writable,
inode,
))
})
ROOT_INODE
.create(name)
.map(|inode| Arc::new(OSInode::new(readable, writable, inode)))
}
} else {
ROOT_INODE.find(name)
.map(|inode| {
if flags.contains(OpenFlags::TRUNC) {
inode.clear();
}
Arc::new(OSInode::new(
readable,
writable,
inode
))
})
ROOT_INODE.find(name).map(|inode| {
if flags.contains(OpenFlags::TRUNC) {
inode.clear();
}
Arc::new(OSInode::new(readable, writable, inode))
})
}
}
impl File for OSInode {
fn readable(&self) -> bool { self.readable }
fn writable(&self) -> bool { self.writable }
fn readable(&self) -> bool {
self.readable
}
fn writable(&self) -> bool {
self.writable
}
fn read(&self, mut buf: UserBuffer) -> usize {
let mut inner = self.inner.exclusive_access();
let mut total_read_size = 0usize;
@ -156,4 +136,4 @@ impl File for OSInode {
}
total_write_size
}
}
}


@ -1,16 +1,16 @@
mod inode;
mod pipe;
mod stdio;
mod inode;
use crate::mm::UserBuffer;
pub trait File : Send + Sync {
pub trait File: Send + Sync {
fn readable(&self) -> bool;
fn writable(&self) -> bool;
fn read(&self, buf: UserBuffer) -> usize;
fn write(&self, buf: UserBuffer) -> usize;
}
pub use pipe::{Pipe, make_pipe};
pub use inode::{list_apps, open_file, OSInode, OpenFlags};
pub use pipe::{make_pipe, Pipe};
pub use stdio::{Stdin, Stdout};
pub use inode::{OSInode, open_file, OpenFlags, list_apps};


@ -1,7 +1,7 @@
use super::File;
use alloc::sync::{Arc, Weak};
use crate::sync::UPSafeCell;
use crate::mm::UserBuffer;
use crate::sync::UPSafeCell;
use alloc::sync::{Arc, Weak};
use crate::task::suspend_current_and_run_next;
@ -98,22 +98,20 @@ impl PipeRingBuffer {
/// Return (read_end, write_end)
pub fn make_pipe() -> (Arc<Pipe>, Arc<Pipe>) {
let buffer = Arc::new(unsafe {
UPSafeCell::new(PipeRingBuffer::new())
});
let read_end = Arc::new(
Pipe::read_end_with_buffer(buffer.clone())
);
let write_end = Arc::new(
Pipe::write_end_with_buffer(buffer.clone())
);
let buffer = Arc::new(unsafe { UPSafeCell::new(PipeRingBuffer::new()) });
let read_end = Arc::new(Pipe::read_end_with_buffer(buffer.clone()));
let write_end = Arc::new(Pipe::write_end_with_buffer(buffer.clone()));
buffer.exclusive_access().set_write_end(&write_end);
(read_end, write_end)
}
impl File for Pipe {
fn readable(&self) -> bool { self.readable }
fn writable(&self) -> bool { self.writable }
fn readable(&self) -> bool {
self.readable
}
fn writable(&self) -> bool {
self.writable
}
fn read(&self, buf: UserBuffer) -> usize {
assert_eq!(self.readable(), true);
let mut buf_iter = buf.into_iter();
@ -132,7 +130,9 @@ impl File for Pipe {
// read at most loop_read bytes
for _ in 0..loop_read {
if let Some(byte_ref) = buf_iter.next() {
unsafe { *byte_ref = ring_buffer.read_byte(); }
unsafe {
*byte_ref = ring_buffer.read_byte();
}
read_size += 1;
} else {
return read_size;


@ -1,5 +1,5 @@
use super::File;
use crate::mm::{UserBuffer};
use crate::mm::UserBuffer;
use crate::sbi::console_getchar;
use crate::task::suspend_current_and_run_next;
@ -8,8 +8,12 @@ pub struct Stdin;
pub struct Stdout;
impl File for Stdin {
fn readable(&self) -> bool { true }
fn writable(&self) -> bool { false }
fn readable(&self) -> bool {
true
}
fn writable(&self) -> bool {
false
}
fn read(&self, mut user_buf: UserBuffer) -> usize {
assert_eq!(user_buf.len(), 1);
// busy loop
@ -24,7 +28,9 @@ impl File for Stdin {
}
}
let ch = c as u8;
unsafe { user_buf.buffers[0].as_mut_ptr().write_volatile(ch); }
unsafe {
user_buf.buffers[0].as_mut_ptr().write_volatile(ch);
}
1
}
fn write(&self, _user_buf: UserBuffer) -> usize {
@ -33,9 +39,13 @@ impl File for Stdin {
}
impl File for Stdout {
fn readable(&self) -> bool { false }
fn writable(&self) -> bool { true }
fn read(&self, _user_buf: UserBuffer) -> usize{
fn readable(&self) -> bool {
false
}
fn writable(&self) -> bool {
true
}
fn read(&self, _user_buf: UserBuffer) -> usize {
panic!("Cannot read from stdout!");
}
fn write(&self, user_buf: UserBuffer) -> usize {
@ -44,4 +54,4 @@ impl File for Stdout {
}
user_buf.len()
}
}
}


@ -1,16 +1,23 @@
use core::panic::PanicInfo;
use core::arch::asm;
use crate::sbi::shutdown;
use crate::task::current_kstack_top;
use core::arch::asm;
use core::panic::PanicInfo;
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
if let Some(location) = info.location() {
println!("[kernel] Panicked at {}:{} {}", location.file(), location.line(), info.message().unwrap());
println!(
"[kernel] Panicked at {}:{} {}",
location.file(),
location.line(),
info.message().unwrap()
);
} else {
println!("[kernel] Panicked: {}", info.message().unwrap());
}
unsafe { backtrace(); }
unsafe {
backtrace();
}
shutdown()
}
@ -20,9 +27,11 @@ unsafe fn backtrace() {
asm!("mv {}, s0", out(reg) fp);
println!("---START BACKTRACE---");
for i in 0..10 {
if fp == stop { break; }
println!("#{}:ra={:#x}", i, *((fp-8) as *const usize));
fp = *((fp-16) as *const usize);
if fp == stop {
break;
}
println!("#{}:ra={:#x}", i, *((fp - 8) as *const usize));
fp = *((fp - 16) as *const usize);
}
println!("---END BACKTRACE---");
}


@ -10,17 +10,17 @@ extern crate bitflags;
#[macro_use]
mod console;
mod lang_items;
mod sbi;
mod syscall;
mod trap;
mod config;
mod drivers;
mod fs;
mod lang_items;
mod mm;
mod sbi;
mod sync;
mod syscall;
mod task;
mod timer;
mod sync;
mod mm;
mod fs;
mod drivers;
mod trap;
use core::arch::global_asm;
@ -32,10 +32,8 @@ fn clear_bss() {
fn ebss();
}
unsafe {
core::slice::from_raw_parts_mut(
sbss as usize as *mut u8,
ebss as usize - sbss as usize,
).fill(0);
core::slice::from_raw_parts_mut(sbss as usize as *mut u8, ebss as usize - sbss as usize)
.fill(0);
}
}


@ -1,5 +1,5 @@
use crate::config::{PAGE_SIZE, PAGE_SIZE_BITS};
use super::PageTableEntry;
use crate::config::{PAGE_SIZE, PAGE_SIZE_BITS};
use core::fmt::{self, Debug, Formatter};
const PA_WIDTH_SV39: usize = 56;
@ -52,35 +52,59 @@ impl Debug for PhysPageNum {
/// usize -> T: usize.into()
impl From<usize> for PhysAddr {
fn from(v: usize) -> Self { Self(v & ( (1 << PA_WIDTH_SV39) - 1 )) }
fn from(v: usize) -> Self {
Self(v & ((1 << PA_WIDTH_SV39) - 1))
}
}
impl From<usize> for PhysPageNum {
fn from(v: usize) -> Self { Self(v & ( (1 << PPN_WIDTH_SV39) - 1 )) }
fn from(v: usize) -> Self {
Self(v & ((1 << PPN_WIDTH_SV39) - 1))
}
}
impl From<usize> for VirtAddr {
fn from(v: usize) -> Self { Self(v & ( (1 << VA_WIDTH_SV39) - 1 )) }
fn from(v: usize) -> Self {
Self(v & ((1 << VA_WIDTH_SV39) - 1))
}
}
impl From<usize> for VirtPageNum {
fn from(v: usize) -> Self { Self(v & ( (1 << VPN_WIDTH_SV39) - 1 )) }
fn from(v: usize) -> Self {
Self(v & ((1 << VPN_WIDTH_SV39) - 1))
}
}
impl From<PhysAddr> for usize {
fn from(v: PhysAddr) -> Self { v.0 }
fn from(v: PhysAddr) -> Self {
v.0
}
}
impl From<PhysPageNum> for usize {
fn from(v: PhysPageNum) -> Self { v.0 }
fn from(v: PhysPageNum) -> Self {
v.0
}
}
impl From<VirtAddr> for usize {
fn from(v: VirtAddr) -> Self { v.0 }
fn from(v: VirtAddr) -> Self {
v.0
}
}
impl From<VirtPageNum> for usize {
fn from(v: VirtPageNum) -> Self { v.0 }
fn from(v: VirtPageNum) -> Self {
v.0
}
}
impl VirtAddr {
pub fn floor(&self) -> VirtPageNum { VirtPageNum(self.0 / PAGE_SIZE) }
pub fn ceil(&self) -> VirtPageNum { VirtPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE) }
pub fn page_offset(&self) -> usize { self.0 & (PAGE_SIZE - 1) }
pub fn aligned(&self) -> bool { self.page_offset() == 0 }
pub fn floor(&self) -> VirtPageNum {
VirtPageNum(self.0 / PAGE_SIZE)
}
pub fn ceil(&self) -> VirtPageNum {
VirtPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE)
}
pub fn page_offset(&self) -> usize {
self.0 & (PAGE_SIZE - 1)
}
pub fn aligned(&self) -> bool {
self.page_offset() == 0
}
}
impl From<VirtAddr> for VirtPageNum {
fn from(v: VirtAddr) -> Self {
@ -89,13 +113,23 @@ impl From<VirtAddr> for VirtPageNum {
}
}
impl From<VirtPageNum> for VirtAddr {
fn from(v: VirtPageNum) -> Self { Self(v.0 << PAGE_SIZE_BITS) }
fn from(v: VirtPageNum) -> Self {
Self(v.0 << PAGE_SIZE_BITS)
}
}
impl PhysAddr {
pub fn floor(&self) -> PhysPageNum { PhysPageNum(self.0 / PAGE_SIZE) }
pub fn ceil(&self) -> PhysPageNum { PhysPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE) }
pub fn page_offset(&self) -> usize { self.0 & (PAGE_SIZE - 1) }
pub fn aligned(&self) -> bool { self.page_offset() == 0 }
pub fn floor(&self) -> PhysPageNum {
PhysPageNum(self.0 / PAGE_SIZE)
}
pub fn ceil(&self) -> PhysPageNum {
PhysPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE)
}
pub fn page_offset(&self) -> usize {
self.0 & (PAGE_SIZE - 1)
}
pub fn aligned(&self) -> bool {
self.page_offset() == 0
}
}
impl From<PhysAddr> for PhysPageNum {
fn from(v: PhysAddr) -> Self {
@ -104,7 +138,9 @@ impl From<PhysAddr> for PhysPageNum {
}
}
impl From<PhysPageNum> for PhysAddr {
fn from(v: PhysPageNum) -> Self { Self(v.0 << PAGE_SIZE_BITS) }
fn from(v: PhysPageNum) -> Self {
Self(v.0 << PAGE_SIZE_BITS)
}
}
impl VirtPageNum {
@ -121,28 +157,20 @@ impl VirtPageNum {
impl PhysAddr {
pub fn get_ref<T>(&self) -> &'static T {
unsafe {
(self.0 as *const T).as_ref().unwrap()
}
unsafe { (self.0 as *const T).as_ref().unwrap() }
}
pub fn get_mut<T>(&self) -> &'static mut T {
unsafe {
(self.0 as *mut T).as_mut().unwrap()
}
unsafe { (self.0 as *mut T).as_mut().unwrap() }
}
}
impl PhysPageNum {
pub fn get_pte_array(&self) -> &'static mut [PageTableEntry] {
let pa: PhysAddr = self.clone().into();
unsafe {
core::slice::from_raw_parts_mut(pa.0 as *mut PageTableEntry, 512)
}
unsafe { core::slice::from_raw_parts_mut(pa.0 as *mut PageTableEntry, 512) }
}
pub fn get_bytes_array(&self) -> &'static mut [u8] {
let pa: PhysAddr = self.clone().into();
unsafe {
core::slice::from_raw_parts_mut(pa.0 as *mut u8, 4096)
}
unsafe { core::slice::from_raw_parts_mut(pa.0 as *mut u8, 4096) }
}
pub fn get_mut<T>(&self) -> &'static mut T {
let pa: PhysAddr = self.clone().into();
@ -165,41 +193,57 @@ impl StepByOne for PhysPageNum {
}
#[derive(Copy, Clone)]
pub struct SimpleRange<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
pub struct SimpleRange<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
l: T,
r: T,
}
impl<T> SimpleRange<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> SimpleRange<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
pub fn new(start: T, end: T) -> Self {
assert!(start <= end, "start {:?} > end {:?}!", start, end);
Self { l: start, r: end }
}
pub fn get_start(&self) -> T { self.l }
pub fn get_end(&self) -> T { self.r }
pub fn get_start(&self) -> T {
self.l
}
pub fn get_end(&self) -> T {
self.r
}
}
impl<T> IntoIterator for SimpleRange<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> IntoIterator for SimpleRange<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
type Item = T;
type IntoIter = SimpleRangeIterator<T>;
fn into_iter(self) -> Self::IntoIter {
SimpleRangeIterator::new(self.l, self.r)
}
}
pub struct SimpleRangeIterator<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
pub struct SimpleRangeIterator<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
current: T,
end: T,
}
impl<T> SimpleRangeIterator<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> SimpleRangeIterator<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
pub fn new(l: T, r: T) -> Self {
Self { current: l, end: r, }
Self { current: l, end: r }
}
}
impl<T> Iterator for SimpleRangeIterator<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> Iterator for SimpleRangeIterator<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
if self.current == self.end {


@ -1,9 +1,9 @@
use super::{PhysAddr, PhysPageNum};
use alloc::vec::Vec;
use crate::sync::UPSafeCell;
use crate::config::MEMORY_END;
use lazy_static::*;
use crate::sync::UPSafeCell;
use alloc::vec::Vec;
use core::fmt::{self, Debug, Formatter};
use lazy_static::*;
pub struct FrameTracker {
pub ppn: PhysPageNum,
@ -72,10 +72,7 @@ impl FrameAllocator for StackFrameAllocator {
fn dealloc(&mut self, ppn: PhysPageNum) {
let ppn = ppn.0;
// validity check
if ppn >= self.current || self.recycled
.iter()
.find(|&v| {*v == ppn})
.is_some() {
if ppn >= self.current || self.recycled.iter().find(|&v| *v == ppn).is_some() {
panic!("Frame ppn={:#x} has not been allocated!", ppn);
}
// recycle
@ -86,18 +83,18 @@ impl FrameAllocator for StackFrameAllocator {
type FrameAllocatorImpl = StackFrameAllocator;
lazy_static! {
pub static ref FRAME_ALLOCATOR: UPSafeCell<FrameAllocatorImpl> = unsafe {
UPSafeCell::new(FrameAllocatorImpl::new())
};
pub static ref FRAME_ALLOCATOR: UPSafeCell<FrameAllocatorImpl> =
unsafe { UPSafeCell::new(FrameAllocatorImpl::new()) };
}
pub fn init_frame_allocator() {
extern "C" {
fn ekernel();
}
FRAME_ALLOCATOR
.exclusive_access()
.init(PhysAddr::from(ekernel as usize).ceil(), PhysAddr::from(MEMORY_END).floor());
FRAME_ALLOCATOR.exclusive_access().init(
PhysAddr::from(ekernel as usize).ceil(),
PhysAddr::from(MEMORY_END).floor(),
);
}
pub fn frame_alloc() -> Option<FrameTracker> {
@ -108,9 +105,7 @@ pub fn frame_alloc() -> Option<FrameTracker> {
}
pub fn frame_dealloc(ppn: PhysPageNum) {
FRAME_ALLOCATOR
.exclusive_access()
.dealloc(ppn);
FRAME_ALLOCATOR.exclusive_access().dealloc(ppn);
}
#[allow(unused)]


@ -1,5 +1,5 @@
use buddy_system_allocator::LockedHeap;
use crate::config::KERNEL_HEAP_SIZE;
use buddy_system_allocator::LockedHeap;
#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();


@ -1,20 +1,15 @@
use super::{PageTable, PageTableEntry, PTEFlags};
use super::{VirtPageNum, VirtAddr, PhysPageNum, PhysAddr};
use super::{FrameTracker, frame_alloc};
use super::{VPNRange, StepByOne};
use alloc::collections::BTreeMap;
use alloc::vec::Vec;
use riscv::register::satp;
use alloc::sync::Arc;
use lazy_static::*;
use super::{frame_alloc, FrameTracker};
use super::{PTEFlags, PageTable, PageTableEntry};
use super::{PhysAddr, PhysPageNum, VirtAddr, VirtPageNum};
use super::{StepByOne, VPNRange};
use crate::config::{MEMORY_END, MMIO, PAGE_SIZE, TRAMPOLINE};
use crate::sync::UPSafeCell;
use crate::config::{
MEMORY_END,
PAGE_SIZE,
TRAMPOLINE,
MMIO,
};
use alloc::collections::BTreeMap;
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::arch::asm;
use lazy_static::*;
use riscv::register::satp;
extern "C" {
fn stext();
@ -30,9 +25,8 @@ extern "C" {
}
lazy_static! {
pub static ref KERNEL_SPACE: Arc<UPSafeCell<MemorySet>> = Arc::new(unsafe {
UPSafeCell::new(MemorySet::new_kernel())
});
pub static ref KERNEL_SPACE: Arc<UPSafeCell<MemorySet>> =
Arc::new(unsafe { UPSafeCell::new(MemorySet::new_kernel()) });
}
pub fn kernel_token() -> usize {
@ -55,17 +49,24 @@ impl MemorySet {
self.page_table.token()
}
/// Assume that no conflicts.
pub fn insert_framed_area(&mut self, start_va: VirtAddr, end_va: VirtAddr, permission: MapPermission) {
self.push(MapArea::new(
start_va,
end_va,
MapType::Framed,
permission,
), None);
pub fn insert_framed_area(
&mut self,
start_va: VirtAddr,
end_va: VirtAddr,
permission: MapPermission,
) {
self.push(
MapArea::new(start_va, end_va, MapType::Framed, permission),
None,
);
}
pub fn remove_area_with_start_vpn(&mut self, start_vpn: VirtPageNum) {
if let Some((idx, area)) = self.areas.iter_mut().enumerate()
.find(|(_, area)| area.vpn_range.get_start() == start_vpn) {
if let Some((idx, area)) = self
.areas
.iter_mut()
.enumerate()
.find(|(_, area)| area.vpn_range.get_start() == start_vpn)
{
area.unmap(&mut self.page_table);
self.areas.remove(idx);
}
@ -94,50 +95,71 @@ impl MemorySet {
println!(".text [{:#x}, {:#x})", stext as usize, etext as usize);
println!(".rodata [{:#x}, {:#x})", srodata as usize, erodata as usize);
println!(".data [{:#x}, {:#x})", sdata as usize, edata as usize);
println!(".bss [{:#x}, {:#x})", sbss_with_stack as usize, ebss as usize);
println!(
".bss [{:#x}, {:#x})",
sbss_with_stack as usize, ebss as usize
);
println!("mapping .text section");
memory_set.push(MapArea::new(
(stext as usize).into(),
(etext as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::X,
), None);
memory_set.push(
MapArea::new(
(stext as usize).into(),
(etext as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::X,
),
None,
);
println!("mapping .rodata section");
memory_set.push(MapArea::new(
(srodata as usize).into(),
(erodata as usize).into(),
MapType::Identical,
MapPermission::R,
), None);
memory_set.push(
MapArea::new(
(srodata as usize).into(),
(erodata as usize).into(),
MapType::Identical,
MapPermission::R,
),
None,
);
println!("mapping .data section");
memory_set.push(MapArea::new(
(sdata as usize).into(),
(edata as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
println!("mapping .bss section");
memory_set.push(MapArea::new(
(sbss_with_stack as usize).into(),
(ebss as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
println!("mapping physical memory");
memory_set.push(MapArea::new(
(ekernel as usize).into(),
MEMORY_END.into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
println!("mapping memory-mapped registers");
for pair in MMIO {
memory_set.push(MapArea::new(
(*pair).0.into(),
((*pair).0 + (*pair).1).into(),
memory_set.push(
MapArea::new(
(sdata as usize).into(),
(edata as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
),
None,
);
println!("mapping .bss section");
memory_set.push(
MapArea::new(
(sbss_with_stack as usize).into(),
(ebss as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
),
None,
);
println!("mapping physical memory");
memory_set.push(
MapArea::new(
(ekernel as usize).into(),
MEMORY_END.into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
),
None,
);
println!("mapping memory-mapped registers");
for pair in MMIO {
memory_set.push(
MapArea::new(
(*pair).0.into(),
((*pair).0 + (*pair).1).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
),
None,
);
}
memory_set
}
@ -161,26 +183,31 @@ impl MemorySet {
let end_va: VirtAddr = ((ph.virtual_addr() + ph.mem_size()) as usize).into();
let mut map_perm = MapPermission::U;
let ph_flags = ph.flags();
if ph_flags.is_read() { map_perm |= MapPermission::R; }
if ph_flags.is_write() { map_perm |= MapPermission::W; }
if ph_flags.is_execute() { map_perm |= MapPermission::X; }
let map_area = MapArea::new(
start_va,
end_va,
MapType::Framed,
map_perm,
);
if ph_flags.is_read() {
map_perm |= MapPermission::R;
}
if ph_flags.is_write() {
map_perm |= MapPermission::W;
}
if ph_flags.is_execute() {
map_perm |= MapPermission::X;
}
let map_area = MapArea::new(start_va, end_va, MapType::Framed, map_perm);
max_end_vpn = map_area.vpn_range.get_end();
memory_set.push(
map_area,
Some(&elf.input[ph.offset() as usize..(ph.offset() + ph.file_size()) as usize])
Some(&elf.input[ph.offset() as usize..(ph.offset() + ph.file_size()) as usize]),
);
}
}
let max_end_va: VirtAddr = max_end_vpn.into();
let mut user_stack_base: usize = max_end_va.into();
user_stack_base += PAGE_SIZE;
(memory_set, user_stack_base, elf.header.pt2.entry_point() as usize)
(
memory_set,
user_stack_base,
elf.header.pt2.entry_point() as usize,
)
}
pub fn from_existed_user(user_space: &MemorySet) -> MemorySet {
let mut memory_set = Self::new_bare();
@ -194,7 +221,9 @@ impl MemorySet {
for vpn in area.vpn_range {
let src_ppn = user_space.translate(vpn).unwrap().ppn();
let dst_ppn = memory_set.translate(vpn).unwrap().ppn();
dst_ppn.get_bytes_array().copy_from_slice(src_ppn.get_bytes_array());
dst_ppn
.get_bytes_array()
.copy_from_slice(src_ppn.get_bytes_array());
}
}
memory_set
@ -227,7 +256,7 @@ impl MapArea {
start_va: VirtAddr,
end_va: VirtAddr,
map_type: MapType,
map_perm: MapPermission
map_perm: MapPermission,
) -> Self {
let start_vpn: VirtPageNum = start_va.floor();
let end_vpn: VirtPageNum = end_va.ceil();
@ -326,15 +355,27 @@ pub fn remap_test() {
let mid_rodata: VirtAddr = ((srodata as usize + erodata as usize) / 2).into();
let mid_data: VirtAddr = ((sdata as usize + edata as usize) / 2).into();
assert_eq!(
kernel_space.page_table.translate(mid_text.floor()).unwrap().writable(),
kernel_space
.page_table
.translate(mid_text.floor())
.unwrap()
.writable(),
false
);
assert_eq!(
kernel_space.page_table.translate(mid_rodata.floor()).unwrap().writable(),
kernel_space
.page_table
.translate(mid_rodata.floor())
.unwrap()
.writable(),
false,
);
assert_eq!(
kernel_space.page_table.translate(mid_data.floor()).unwrap().executable(),
kernel_space
.page_table
.translate(mid_data.floor())
.unwrap()
.executable(),
false,
);
println!("remap_test passed!");


@ -1,28 +1,22 @@
mod heap_allocator;
mod address;
mod frame_allocator;
mod page_table;
mod heap_allocator;
mod memory_set;
mod page_table;
use page_table::PTEFlags;
use address::VPNRange;
pub use address::{PhysAddr, VirtAddr, PhysPageNum, VirtPageNum, StepByOne};
pub use frame_allocator::{FrameTracker, frame_alloc, frame_dealloc,};
pub use page_table::{
PageTable,
PageTableEntry,
translated_byte_buffer,
translated_str,
translated_ref,
translated_refmut,
UserBuffer,
UserBufferIterator,
};
pub use memory_set::{MemorySet, KERNEL_SPACE, MapPermission, kernel_token};
pub use address::{PhysAddr, PhysPageNum, StepByOne, VirtAddr, VirtPageNum};
pub use frame_allocator::{frame_alloc, frame_dealloc, FrameTracker};
pub use memory_set::remap_test;
pub use memory_set::{kernel_token, MapPermission, MemorySet, KERNEL_SPACE};
use page_table::PTEFlags;
pub use page_table::{
translated_byte_buffer, translated_ref, translated_refmut, translated_str, PageTable,
PageTableEntry, UserBuffer, UserBufferIterator,
};
pub fn init() {
heap_allocator::init_heap();
frame_allocator::init_frame_allocator();
KERNEL_SPACE.exclusive_access().activate();
}
}


@ -1,15 +1,7 @@
use super::{
frame_alloc,
PhysPageNum,
FrameTracker,
VirtPageNum,
VirtAddr,
PhysAddr,
StepByOne
};
use alloc::vec::Vec;
use alloc::vec;
use super::{frame_alloc, FrameTracker, PhysAddr, PhysPageNum, StepByOne, VirtAddr, VirtPageNum};
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use bitflags::*;
bitflags! {
@ -38,9 +30,7 @@ impl PageTableEntry {
}
}
pub fn empty() -> Self {
PageTableEntry {
bits: 0,
}
PageTableEntry { bits: 0 }
}
pub fn ppn(&self) -> PhysPageNum {
(self.bits >> 10 & ((1usize << 44) - 1)).into()
@ -132,17 +122,15 @@ impl PageTable {
*pte = PageTableEntry::empty();
}
pub fn translate(&self, vpn: VirtPageNum) -> Option<PageTableEntry> {
self.find_pte(vpn)
.map(|pte| {pte.clone()})
self.find_pte(vpn).map(|pte| pte.clone())
}
pub fn translate_va(&self, va: VirtAddr) -> Option<PhysAddr> {
self.find_pte(va.clone().floor())
.map(|pte| {
let aligned_pa: PhysAddr = pte.ppn().into();
let offset = va.page_offset();
let aligned_pa_usize: usize = aligned_pa.into();
(aligned_pa_usize + offset).into()
})
self.find_pte(va.clone().floor()).map(|pte| {
let aligned_pa: PhysAddr = pte.ppn().into();
let offset = va.page_offset();
let aligned_pa_usize: usize = aligned_pa.into();
(aligned_pa_usize + offset).into()
})
}
pub fn token(&self) -> usize {
8usize << 60 | self.root_ppn.0
@ -157,10 +145,7 @@ pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&
while start < end {
let start_va = VirtAddr::from(start);
let mut vpn = start_va.floor();
let ppn = page_table
.translate(vpn)
.unwrap()
.ppn();
let ppn = page_table.translate(vpn).unwrap().ppn();
vpn.step();
let mut end_va: VirtAddr = vpn.into();
end_va = end_va.min(VirtAddr::from(end));
@ -180,7 +165,10 @@ pub fn translated_str(token: usize, ptr: *const u8) -> String {
let mut string = String::new();
let mut va = ptr as usize;
loop {
let ch: u8 = *(page_table.translate_va(VirtAddr::from(va)).unwrap().get_mut());
let ch: u8 = *(page_table
.translate_va(VirtAddr::from(va))
.unwrap()
.get_mut());
if ch == 0 {
break;
}
@ -192,13 +180,19 @@ pub fn translated_str(token: usize, ptr: *const u8) -> String {
pub fn translated_ref<T>(token: usize, ptr: *const T) -> &'static T {
let page_table = PageTable::from_token(token);
page_table.translate_va(VirtAddr::from(ptr as usize)).unwrap().get_ref()
page_table
.translate_va(VirtAddr::from(ptr as usize))
.unwrap()
.get_ref()
}
pub fn translated_refmut<T>(token: usize, ptr: *mut T) -> &'static mut T {
let page_table = PageTable::from_token(token);
let va = ptr as usize;
page_table.translate_va(VirtAddr::from(va)).unwrap().get_mut()
page_table
.translate_va(VirtAddr::from(va))
.unwrap()
.get_mut()
}
pub struct UserBuffer {


@ -43,4 +43,3 @@ pub fn shutdown() -> ! {
sbi_call(SBI_SHUTDOWN, 0, 0, 0);
panic!("It should shutdown!");
}


@ -1,6 +1,6 @@
use alloc::{sync::Arc, collections::VecDeque};
use crate::task::{add_task, TaskControlBlock, current_task, block_current_and_run_next};
use crate::sync::{Mutex, UPSafeCell};
use crate::task::{add_task, block_current_and_run_next, current_task, TaskControlBlock};
use alloc::{collections::VecDeque, sync::Arc};
pub struct Condvar {
pub inner: UPSafeCell<CondvarInner>,
@ -13,11 +13,11 @@ pub struct CondvarInner {
impl Condvar {
pub fn new() -> Self {
Self {
inner: unsafe { UPSafeCell::new(
CondvarInner {
inner: unsafe {
UPSafeCell::new(CondvarInner {
wait_queue: VecDeque::new(),
}
)},
})
},
}
}
@ -28,7 +28,7 @@ impl Condvar {
}
}
pub fn wait(&self, mutex:Arc<dyn Mutex>) {
pub fn wait(&self, mutex: Arc<dyn Mutex>) {
mutex.unlock();
let mut inner = self.inner.exclusive_access();
inner.wait_queue.push_back(current_task().unwrap());


@ -1,9 +1,9 @@
mod up;
mod condvar;
mod mutex;
mod semaphore;
mod condvar;
mod up;
pub use up::UPSafeCell;
pub use mutex::{Mutex, MutexSpin, MutexBlocking};
pub use condvar::Condvar;
pub use mutex::{Mutex, MutexBlocking, MutexSpin};
pub use semaphore::Semaphore;
pub use condvar::Condvar;
pub use up::UPSafeCell;


@ -1,8 +1,8 @@
use super::UPSafeCell;
use crate::task::{block_current_and_run_next, suspend_current_and_run_next};
use crate::task::TaskControlBlock;
use crate::task::{add_task, current_task};
use alloc::{sync::Arc, collections::VecDeque};
use crate::task::{block_current_and_run_next, suspend_current_and_run_next};
use alloc::{collections::VecDeque, sync::Arc};
pub trait Mutex: Sync + Send {
fn lock(&self);
@ -77,7 +77,7 @@ impl Mutex for MutexBlocking {
}
fn unlock(&self) {
let mut mutex_inner = self.inner.exclusive_access();
let mut mutex_inner = self.inner.exclusive_access();
assert_eq!(mutex_inner.locked, true);
if let Some(waking_task) = mutex_inner.wait_queue.pop_front() {
add_task(waking_task);


@ -1,6 +1,6 @@
use alloc::{sync::Arc, collections::VecDeque};
use crate::task::{add_task, TaskControlBlock, current_task, block_current_and_run_next};
use crate::sync::UPSafeCell;
use crate::task::{add_task, block_current_and_run_next, current_task, TaskControlBlock};
use alloc::{collections::VecDeque, sync::Arc};
pub struct Semaphore {
pub inner: UPSafeCell<SemaphoreInner>,
@ -14,12 +14,12 @@ pub struct SemaphoreInner {
impl Semaphore {
pub fn new(res_count: usize) -> Self {
Self {
inner: unsafe { UPSafeCell::new(
SemaphoreInner {
inner: unsafe {
UPSafeCell::new(SemaphoreInner {
count: res_count as isize,
wait_queue: VecDeque::new(),
}
)},
})
},
}
}


@ -18,10 +18,12 @@ impl<T> UPSafeCell<T> {
/// User is responsible to guarantee that inner struct is only used in
/// uniprocessor.
pub unsafe fn new(value: T) -> Self {
Self { inner: RefCell::new(value) }
Self {
inner: RefCell::new(value),
}
}
/// Panic if the data has been borrowed.
pub fn exclusive_access(&self) -> RefMut<'_, T> {
self.inner.borrow_mut()
}
}
}


@ -1,11 +1,6 @@
use crate::mm::{
UserBuffer,
translated_byte_buffer,
translated_refmut,
translated_str,
};
use crate::task::{current_user_token, current_process};
use crate::fs::{make_pipe, OpenFlags, open_file};
use crate::fs::{make_pipe, open_file, OpenFlags};
use crate::mm::{translated_byte_buffer, translated_refmut, translated_str, UserBuffer};
use crate::task::{current_process, current_user_token};
use alloc::sync::Arc;
pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
@ -22,9 +17,7 @@ pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
let file = file.clone();
// release current task TCB manually to avoid multi-borrow
drop(inner);
file.write(
UserBuffer::new(translated_byte_buffer(token, buf, len))
) as isize
file.write(UserBuffer::new(translated_byte_buffer(token, buf, len))) as isize
} else {
-1
}
@ -44,9 +37,7 @@ pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
}
// release current task TCB manually to avoid multi-borrow
drop(inner);
file.read(
UserBuffer::new(translated_byte_buffer(token, buf, len))
) as isize
file.read(UserBuffer::new(translated_byte_buffer(token, buf, len))) as isize
} else {
-1
}
@ -56,10 +47,7 @@ pub fn sys_open(path: *const u8, flags: u32) -> isize {
let process = current_process();
let token = current_user_token();
let path = translated_str(token, path);
if let Some(inode) = open_file(
path.as_str(),
OpenFlags::from_bits(flags).unwrap()
) {
if let Some(inode) = open_file(path.as_str(), OpenFlags::from_bits(flags).unwrap()) {
let mut inner = process.inner_exclusive_access();
let fd = inner.alloc_fd();
inner.fd_table[fd] = Some(inode);


@ -27,17 +27,17 @@ const SYSCALL_CONDVAR_WAIT: usize = 1032;
mod fs;
mod process;
mod thread;
mod sync;
mod thread;
use fs::*;
use process::*;
use thread::*;
use sync::*;
use thread::*;
pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
match syscall_id {
SYSCALL_DUP=> sys_dup(args[0]),
SYSCALL_DUP => sys_dup(args[0]),
SYSCALL_OPEN => sys_open(args[0] as *const u8, args[1] as u32),
SYSCALL_CLOSE => sys_close(args[0]),
SYSCALL_PIPE => sys_pipe(args[0] as *mut usize),
@ -66,4 +66,3 @@ pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
_ => panic!("Unsupported syscall_id: {}", syscall_id),
}
}


@ -1,23 +1,13 @@
use crate::fs::{open_file, OpenFlags};
use crate::mm::{translated_ref, translated_refmut, translated_str};
use crate::task::{
current_process, current_task, current_user_token, exit_current_and_run_next,
suspend_current_and_run_next,
exit_current_and_run_next,
current_task,
current_process,
current_user_token,
};
use crate::timer::get_time_ms;
use crate::mm::{
translated_str,
translated_refmut,
translated_ref,
};
use crate::fs::{
open_file,
OpenFlags,
};
use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
use alloc::string::String;
pub fn sys_exit(exit_code: i32) -> ! {
exit_current_and_run_next(exit_code);
@ -61,7 +51,9 @@ pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
break;
}
args_vec.push(translated_str(token, arg_str_ptr as *const u8));
unsafe { args = args.add(1); }
unsafe {
args = args.add(1);
}
}
if let Some(app_inode) = open_file(path.as_str(), OpenFlags::RDONLY) {
let all_data = app_inode.read_all();
@ -82,21 +74,20 @@ pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
// find a child process
let mut inner = process.inner_exclusive_access();
if inner.children
if inner
.children
.iter()
.find(|p| {pid == -1 || pid as usize == p.getpid()})
.is_none() {
.find(|p| pid == -1 || pid as usize == p.getpid())
.is_none()
{
return -1;
// ---- release current PCB
}
let pair = inner.children
.iter()
.enumerate()
.find(|(_, p)| {
// ++++ temporarily access child PCB exclusively
p.inner_exclusive_access().is_zombie && (pid == -1 || pid as usize == p.getpid())
// ++++ release child PCB
});
let pair = inner.children.iter().enumerate().find(|(_, p)| {
// ++++ temporarily access child PCB exclusively
p.inner_exclusive_access().is_zombie && (pid == -1 || pid as usize == p.getpid())
// ++++ release child PCB
});
if let Some((idx, _)) = pair {
let child = inner.children.remove(idx);
// confirm that child will be deallocated after being removed from children list


@ -1,6 +1,6 @@
use crate::task::{current_task, current_process, block_current_and_run_next};
use crate::sync::{Mutex, MutexSpin, MutexBlocking, Semaphore, Condvar};
use crate::timer::{get_time_ms, add_timer};
use crate::sync::{Condvar, Mutex, MutexBlocking, MutexSpin, Semaphore};
use crate::task::{block_current_and_run_next, current_process, current_task};
use crate::timer::{add_timer, get_time_ms};
use alloc::sync::Arc;
pub fn sys_sleep(ms: usize) -> isize {
@ -24,7 +24,8 @@ pub fn sys_mutex_create(blocking: bool) -> isize {
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id) {
.map(|(id, _)| id)
{
process_inner.mutex_list[id] = mutex;
id as isize
} else {
@ -61,11 +62,14 @@ pub fn sys_semaphore_create(res_count: usize) -> isize {
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id) {
process_inner.semaphore_list[id] = Some(Arc::new(Semaphore::new(res_count)));
.map(|(id, _)| id)
{
process_inner.semaphore_list[id] = Some(Arc::new(Semaphore::new(res_count)));
id
} else {
process_inner.semaphore_list.push(Some(Arc::new(Semaphore::new(res_count))));
process_inner
.semaphore_list
.push(Some(Arc::new(Semaphore::new(res_count))));
process_inner.semaphore_list.len() - 1
};
id as isize
@ -89,7 +93,6 @@ pub fn sys_semaphore_down(sem_id: usize) -> isize {
0
}
pub fn sys_condvar_create(_arg: usize) -> isize {
let process = current_process();
let mut process_inner = process.inner_exclusive_access();
@ -98,11 +101,14 @@ pub fn sys_condvar_create(_arg: usize) -> isize {
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id) {
process_inner.condvar_list[id] = Some(Arc::new(Condvar::new()));
.map(|(id, _)| id)
{
process_inner.condvar_list[id] = Some(Arc::new(Condvar::new()));
id
} else {
process_inner.condvar_list.push(Some(Arc::new(Condvar::new())));
process_inner
.condvar_list
.push(Some(Arc::new(Condvar::new())));
process_inner.condvar_list.len() - 1
};
id as isize
@ -125,4 +131,4 @@ pub fn sys_condvar_wait(condvar_id: usize, mutex_id: usize) -> isize {
drop(process_inner);
condvar.wait(mutex);
0
}
}


@ -1,5 +1,9 @@
use crate::{
mm::kernel_token,
task::{add_task, current_task, TaskControlBlock},
trap::{trap_handler, TrapContext},
};
use alloc::sync::Arc;
use crate::{mm::kernel_token, task::{TaskControlBlock, add_task, current_task}, trap::{TrapContext, trap_handler}};
pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
let task = current_task().unwrap();
@ -7,7 +11,11 @@ pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
// create a new thread
let new_task = Arc::new(TaskControlBlock::new(
Arc::clone(&process),
task.inner_exclusive_access().res.as_ref().unwrap().ustack_base,
task.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.ustack_base,
true,
));
// add new task to scheduler
@ -35,7 +43,13 @@ pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
}
pub fn sys_gettid() -> isize {
current_task().unwrap().inner_exclusive_access().res.as_ref().unwrap().tid as isize
current_task()
.unwrap()
.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.tid as isize
}
/// thread does not exist, return -1


@ -23,4 +23,3 @@ impl TaskContext {
}
}
}


@ -1,9 +1,12 @@
use alloc::{vec::Vec, sync::{Arc, Weak}};
use lazy_static::*;
use crate::sync::UPSafeCell;
use crate::mm::{KERNEL_SPACE, MapPermission, PhysPageNum, VirtAddr};
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT_BASE, USER_STACK_SIZE};
use super::ProcessControlBlock;
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT_BASE, USER_STACK_SIZE};
use crate::mm::{MapPermission, PhysPageNum, VirtAddr, KERNEL_SPACE};
use crate::sync::UPSafeCell;
use alloc::{
sync::{Arc, Weak},
vec::Vec,
};
use lazy_static::*;
pub struct RecycleAllocator {
current: usize,
@ -29,20 +32,18 @@ impl RecycleAllocator {
assert!(id < self.current);
assert!(
self.recycled.iter().find(|i| **i == id).is_none(),
"id {} has been deallocated!", id
"id {} has been deallocated!",
id
);
self.recycled.push(id);
}
}
lazy_static! {
static ref PID_ALLOCATOR: UPSafeCell<RecycleAllocator> = unsafe {
UPSafeCell::new(RecycleAllocator::new())
};
static ref KSTACK_ALLOCATOR: UPSafeCell<RecycleAllocator> = unsafe {
UPSafeCell::new(RecycleAllocator::new())
};
static ref PID_ALLOCATOR: UPSafeCell<RecycleAllocator> =
unsafe { UPSafeCell::new(RecycleAllocator::new()) };
static ref KSTACK_ALLOCATOR: UPSafeCell<RecycleAllocator> =
unsafe { UPSafeCell::new(RecycleAllocator::new()) };
}
pub struct PidHandle(pub usize);
@ -69,13 +70,11 @@ pub struct KernelStack(pub usize);
pub fn kstack_alloc() -> KernelStack {
let kstack_id = KSTACK_ALLOCATOR.exclusive_access().alloc();
let (kstack_bottom, kstack_top) = kernel_stack_position(kstack_id);
KERNEL_SPACE
.exclusive_access()
.insert_framed_area(
kstack_bottom.into(),
kstack_top.into(),
MapPermission::R | MapPermission::W,
);
KERNEL_SPACE.exclusive_access().insert_framed_area(
kstack_bottom.into(),
kstack_top.into(),
MapPermission::R | MapPermission::W,
);
KernelStack(kstack_id)
}
@ -91,11 +90,15 @@ impl Drop for KernelStack {
impl KernelStack {
#[allow(unused)]
pub fn push_on_top<T>(&self, value: T) -> *mut T where
T: Sized, {
pub fn push_on_top<T>(&self, value: T) -> *mut T
where
T: Sized,
{
let kernel_stack_top = self.get_top();
let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
unsafe { *ptr_mut = value; }
unsafe {
*ptr_mut = value;
}
ptr_mut
}
pub fn get_top(&self) -> usize {
@ -142,23 +145,19 @@ impl TaskUserRes {
// alloc user stack
let ustack_bottom = ustack_bottom_from_tid(self.ustack_base, self.tid);
let ustack_top = ustack_bottom + USER_STACK_SIZE;
process_inner
.memory_set
.insert_framed_area(
ustack_bottom.into(),
ustack_top.into(),
MapPermission::R | MapPermission::W | MapPermission::U,
);
process_inner.memory_set.insert_framed_area(
ustack_bottom.into(),
ustack_top.into(),
MapPermission::R | MapPermission::W | MapPermission::U,
);
// alloc trap_cx
let trap_cx_bottom = trap_cx_bottom_from_tid(self.tid);
let trap_cx_top = trap_cx_bottom + PAGE_SIZE;
process_inner
.memory_set
.insert_framed_area(
trap_cx_bottom.into(),
trap_cx_top.into(),
MapPermission::R | MapPermission::W,
);
process_inner.memory_set.insert_framed_area(
trap_cx_bottom.into(),
trap_cx_top.into(),
MapPermission::R | MapPermission::W,
);
}
fn dealloc_user_res(&self) {
@ -167,10 +166,14 @@ impl TaskUserRes {
let mut process_inner = process.inner_exclusive_access();
// dealloc ustack manually
let ustack_bottom_va: VirtAddr = ustack_bottom_from_tid(self.ustack_base, self.tid).into();
process_inner.memory_set.remove_area_with_start_vpn(ustack_bottom_va.into());
process_inner
.memory_set
.remove_area_with_start_vpn(ustack_bottom_va.into());
// dealloc trap_cx manually
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
process_inner.memory_set.remove_area_with_start_vpn(trap_cx_bottom_va.into());
process_inner
.memory_set
.remove_area_with_start_vpn(trap_cx_bottom_va.into());
}
#[allow(unused)]
@ -197,12 +200,18 @@ impl TaskUserRes {
let process = self.process.upgrade().unwrap();
let process_inner = process.inner_exclusive_access();
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
process_inner.memory_set.translate(trap_cx_bottom_va.into()).unwrap().ppn()
process_inner
.memory_set
.translate(trap_cx_bottom_va.into())
.unwrap()
.ppn()
}
pub fn ustack_base(&self) -> usize { self.ustack_base }
pub fn ustack_base(&self) -> usize {
self.ustack_base
}
pub fn ustack_top(&self) -> usize {
ustack_bottom_from_tid(self.ustack_base, self.tid) + USER_STACK_SIZE
ustack_bottom_from_tid(self.ustack_base, self.tid) + USER_STACK_SIZE
}
}
@ -212,4 +221,3 @@ impl Drop for TaskUserRes {
self.dealloc_user_res();
}
}


@ -1,5 +1,5 @@
use crate::sync::UPSafeCell;
use super::TaskControlBlock;
use crate::sync::UPSafeCell;
use alloc::collections::VecDeque;
use alloc::sync::Arc;
use lazy_static::*;
@ -11,7 +11,9 @@ pub struct TaskManager {
/// A simple FIFO scheduler.
impl TaskManager {
pub fn new() -> Self {
Self { ready_queue: VecDeque::new(), }
Self {
ready_queue: VecDeque::new(),
}
}
pub fn add(&mut self, task: Arc<TaskControlBlock>) {
self.ready_queue.push_back(task);
@ -22,9 +24,8 @@ impl TaskManager {
}
lazy_static! {
pub static ref TASK_MANAGER: UPSafeCell<TaskManager> = unsafe {
UPSafeCell::new(TaskManager::new())
};
pub static ref TASK_MANAGER: UPSafeCell<TaskManager> =
unsafe { UPSafeCell::new(TaskManager::new()) };
}
pub fn add_task(task: Arc<TaskControlBlock>) {


@ -1,38 +1,26 @@
mod context;
mod id;
mod manager;
mod process;
mod processor;
mod switch;
mod task;
mod manager;
mod processor;
mod id;
mod process;
use crate::fs::{open_file, OpenFlags};
use switch::__switch;
use alloc::sync::Arc;
use manager::fetch_task;
use lazy_static::*;
use manager::fetch_task;
use process::ProcessControlBlock;
use switch::__switch;
pub use context::TaskContext;
pub use id::{kstack_alloc, pid_alloc, KernelStack, PidHandle};
pub use manager::add_task;
pub use processor::{
run_tasks,
current_task,
current_process,
current_user_token,
current_trap_cx_user_va,
current_trap_cx,
current_kstack_top,
take_current_task,
schedule,
current_kstack_top, current_process, current_task, current_trap_cx, current_trap_cx_user_va,
current_user_token, run_tasks, schedule, take_current_task,
};
pub use task::{TaskControlBlock, TaskStatus};
pub use manager::add_task;
pub use id::{
PidHandle,
pid_alloc,
KernelStack,
kstack_alloc,
};
pub fn suspend_current_and_run_next() {
// There must be an application running.
@ -86,7 +74,7 @@ pub fn exit_current_and_run_next(exit_code: i32) {
// move all child processes under init process
let mut initproc_inner = INITPROC.inner_exclusive_access();
for child in process_inner.children.iter() {
child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
initproc_inner.children.push(child.clone());
}
}
@ -103,6 +91,8 @@ pub fn exit_current_and_run_next(exit_code: i32) {
process_inner.children.clear();
// deallocate other data in user space i.e. program code/data section
process_inner.memory_set.recycle_data_pages();
// drop file descriptors
process_inner.fd_table.clear();
}
drop(process);
// we do not have to save task context


@ -1,20 +1,16 @@
use crate::mm::{
MemorySet,
KERNEL_SPACE,
translated_refmut,
};
use crate::trap::{TrapContext, trap_handler};
use crate::sync::{UPSafeCell, Mutex, Semaphore, Condvar};
use core::cell::RefMut;
use super::add_task;
use super::id::RecycleAllocator;
use super::TaskControlBlock;
use super::{PidHandle, pid_alloc};
use super::add_task;
use alloc::sync::{Weak, Arc};
use super::{pid_alloc, PidHandle};
use crate::fs::{File, Stdin, Stdout};
use crate::mm::{translated_refmut, MemorySet, KERNEL_SPACE};
use crate::sync::{Condvar, Mutex, Semaphore, UPSafeCell};
use crate::trap::{trap_handler, TrapContext};
use alloc::string::String;
use alloc::sync::{Arc, Weak};
use alloc::vec;
use alloc::vec::Vec;
use alloc::string::String;
use crate::fs::{File, Stdin, Stdout};
use core::cell::RefMut;
pub struct ProcessControlBlock {
// immutable
@ -34,7 +30,7 @@ pub struct ProcessControlBlockInner {
pub task_res_allocator: RecycleAllocator,
pub mutex_list: Vec<Option<Arc<dyn Mutex>>>,
pub semaphore_list: Vec<Option<Arc<Semaphore>>>,
pub condvar_list: Vec<Option<Arc<Condvar>>>,
pub condvar_list: Vec<Option<Arc<Condvar>>>,
}
impl ProcessControlBlockInner {
@ -44,8 +40,7 @@ impl ProcessControlBlockInner {
}
pub fn alloc_fd(&mut self) -> usize {
if let Some(fd) = (0..self.fd_table.len())
.find(|fd| self.fd_table[*fd].is_none()) {
if let Some(fd) = (0..self.fd_table.len()).find(|fd| self.fd_table[*fd].is_none()) {
fd
} else {
self.fd_table.push(None);
@ -57,7 +52,7 @@ impl ProcessControlBlockInner {
self.task_res_allocator.alloc()
}
pub fn dealloc_tid(&mut self, tid: usize){
pub fn dealloc_tid(&mut self, tid: usize) {
self.task_res_allocator.dealloc(tid)
}
@ -82,26 +77,28 @@ impl ProcessControlBlock {
let pid_handle = pid_alloc();
let process = Arc::new(Self {
pid: pid_handle,
inner: unsafe { UPSafeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: None,
children: Vec::new(),
exit_code: 0,
fd_table: vec![
// 0 -> stdin
Some(Arc::new(Stdin)),
// 1 -> stdout
Some(Arc::new(Stdout)),
// 2 -> stderr
Some(Arc::new(Stdout)),
],
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
condvar_list: Vec::new(),
})}
inner: unsafe {
UPSafeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: None,
children: Vec::new(),
exit_code: 0,
fd_table: vec![
// 0 -> stdin
Some(Arc::new(Stdin)),
// 1 -> stdout
Some(Arc::new(Stdout)),
// 2 -> stderr
Some(Arc::new(Stdout)),
],
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
condvar_list: Vec::new(),
})
},
});
// create a main thread, we should allocate ustack and trap_cx here
let task = Arc::new(TaskControlBlock::new(
@ -154,7 +151,7 @@ impl ProcessControlBlock {
.map(|arg| {
translated_refmut(
new_token,
(argv_base + arg * core::mem::size_of::<usize>()) as *mut usize
(argv_base + arg * core::mem::size_of::<usize>()) as *mut usize,
)
})
.collect();
@ -201,29 +198,37 @@ impl ProcessControlBlock {
new_fd_table.push(None);
}
}
// create child process pcb
// create child process pcb
let child = Arc::new(Self {
pid,
inner: unsafe { UPSafeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: Some(Arc::downgrade(self)),
children: Vec::new(),
exit_code: 0,
fd_table: new_fd_table,
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
condvar_list: Vec::new(),
})}
inner: unsafe {
UPSafeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: Some(Arc::downgrade(self)),
children: Vec::new(),
exit_code: 0,
fd_table: new_fd_table,
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
condvar_list: Vec::new(),
})
},
});
// add child
parent.children.push(Arc::clone(&child));
// create main thread of child process
let task = Arc::new(TaskControlBlock::new(
Arc::clone(&child),
parent.get_task(0).inner_exclusive_access().res.as_ref().unwrap().ustack_base(),
parent
.get_task(0)
.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.ustack_base(),
// here we do not allocate trap_cx or ustack again
// but mention that we allocate a new kstack here
false,
@ -246,4 +251,3 @@ impl ProcessControlBlock {
self.pid.0
}
}


@ -1,10 +1,10 @@
use super::{TaskContext, TaskControlBlock, ProcessControlBlock};
use super::__switch;
use super::{fetch_task, TaskStatus};
use super::{ProcessControlBlock, TaskContext, TaskControlBlock};
use crate::sync::UPSafeCell;
use crate::trap::TrapContext;
use alloc::sync::Arc;
use lazy_static::*;
use super::{fetch_task, TaskStatus};
use super::__switch;
use crate::trap::TrapContext;
use crate::sync::UPSafeCell;
pub struct Processor {
current: Option<Arc<TaskControlBlock>>,
@ -30,9 +30,7 @@ impl Processor {
}
lazy_static! {
pub static ref PROCESSOR: UPSafeCell<Processor> = unsafe {
UPSafeCell::new(Processor::new())
};
pub static ref PROCESSOR: UPSafeCell<Processor> = unsafe { UPSafeCell::new(Processor::new()) };
}
pub fn run_tasks() {
@ -50,13 +48,10 @@ pub fn run_tasks() {
// release processor manually
drop(processor);
unsafe {
__switch(
idle_task_cx_ptr,
next_task_cx_ptr,
);
__switch(idle_task_cx_ptr, next_task_cx_ptr);
}
} else {
println!("no tasks available in run_tasks");
println!("no tasks available in run_tasks");
}
}
}
@ -80,7 +75,10 @@ pub fn current_user_token() -> usize {
}
pub fn current_trap_cx() -> &'static mut TrapContext {
current_task().unwrap().inner_exclusive_access().get_trap_cx()
current_task()
.unwrap()
.inner_exclusive_access()
.get_trap_cx()
}
pub fn current_trap_cx_user_va() -> usize {
@ -94,10 +92,7 @@ pub fn current_trap_cx_user_va() -> usize {
}
pub fn current_kstack_top() -> usize {
current_task()
.unwrap()
.kstack
.get_top()
current_task().unwrap().kstack.get_top()
}
pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
@ -105,9 +100,6 @@ pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
drop(processor);
unsafe {
__switch(
switched_task_cx_ptr,
idle_task_cx_ptr,
);
__switch(switched_task_cx_ptr, idle_task_cx_ptr);
}
}


@ -4,8 +4,5 @@ use core::arch::global_asm;
global_asm!(include_str!("switch.S"));
extern "C" {
pub fn __switch(
current_task_cx_ptr: *mut TaskContext,
next_task_cx_ptr: *const TaskContext
);
pub fn __switch(current_task_cx_ptr: *mut TaskContext, next_task_cx_ptr: *const TaskContext);
}


@ -1,8 +1,8 @@
use alloc::sync::{Arc, Weak};
use crate::{mm::PhysPageNum, sync::UPSafeCell};
use crate::trap::TrapContext;
use super::id::TaskUserRes;
use super::{KernelStack, ProcessControlBlock, TaskContext, kstack_alloc};
use super::{kstack_alloc, KernelStack, ProcessControlBlock, TaskContext};
use crate::trap::TrapContext;
use crate::{mm::PhysPageNum, sync::UPSafeCell};
use alloc::sync::{Arc, Weak};
use core::cell::RefMut;
pub struct TaskControlBlock {
@ -37,7 +37,7 @@ impl TaskControlBlockInner {
pub fn get_trap_cx(&self) -> &'static mut TrapContext {
self.trap_cx_ppn.get_mut()
}
#[allow(unused)]
fn get_status(&self) -> TaskStatus {
self.task_status
@ -48,7 +48,7 @@ impl TaskControlBlock {
pub fn new(
process: Arc<ProcessControlBlock>,
ustack_base: usize,
alloc_user_res: bool
alloc_user_res: bool,
) -> Self {
let res = TaskUserRes::new(Arc::clone(&process), ustack_base, alloc_user_res);
let trap_cx_ppn = res.trap_cx_ppn();
@ -57,15 +57,15 @@ impl TaskControlBlock {
Self {
process: Arc::downgrade(&process),
kstack,
inner: unsafe { UPSafeCell::new(
TaskControlBlockInner {
inner: unsafe {
UPSafeCell::new(TaskControlBlockInner {
res: Some(res),
trap_cx_ppn,
task_cx: TaskContext::goto_trap_return(kstack_top),
task_status: TaskStatus::Ready,
exit_code: None,
}
)},
})
},
}
}
}


@ -1,13 +1,13 @@
use core::cmp::Ordering;
use riscv::register::time;
use crate::sbi::set_timer;
use crate::config::CLOCK_FREQ;
use crate::task::{TaskControlBlock, add_task};
use crate::sbi::set_timer;
use crate::sync::UPSafeCell;
use crate::task::{add_task, TaskControlBlock};
use alloc::collections::BinaryHeap;
use alloc::sync::Arc;
use lazy_static::*;
use riscv::register::time;
const TICKS_PER_SEC: usize = 100;
const MSEC_PER_SEC: usize = 1000;
@ -39,28 +39,24 @@ impl PartialOrd for TimerCondVar {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
let a = -(self.expire_ms as isize);
let b = -(other.expire_ms as isize);
Some(a.cmp(&b))
Some(a.cmp(&b))
}
}
impl Ord for TimerCondVar {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
self.partial_cmp(other).unwrap()
}
}
lazy_static! {
static ref TIMERS: UPSafeCell<BinaryHeap<TimerCondVar>> = unsafe { UPSafeCell::new(
BinaryHeap::<TimerCondVar>::new()
)};
static ref TIMERS: UPSafeCell<BinaryHeap<TimerCondVar>> =
unsafe { UPSafeCell::new(BinaryHeap::<TimerCondVar>::new()) };
}
pub fn add_timer(expire_ms: usize, task: Arc<TaskControlBlock>) {
let mut timers = TIMERS.exclusive_access();
timers.push(TimerCondVar {
expire_ms,
task,
});
timers.push(TimerCondVar { expire_ms, task });
}
pub fn check_timer() {
@ -68,9 +64,11 @@ pub fn check_timer() {
let mut timers = TIMERS.exclusive_access();
while let Some(timer) = timers.peek() {
if timer.expire_ms <= current_ms {
add_task(Arc::clone(&timer.task));
add_task(Arc::clone(&timer.task));
drop(timer);
timers.pop();
} else { break; }
} else {
break;
}
}
}


@ -1,4 +1,4 @@
use riscv::register::sstatus::{Sstatus, self, SPP};
use riscv::register::sstatus::{self, Sstatus, SPP};
#[repr(C)]
#[derive(Debug)]
@ -12,7 +12,9 @@ pub struct TrapContext {
}
impl TrapContext {
pub fn set_sp(&mut self, sp: usize) { self.x[2] = sp; }
pub fn set_sp(&mut self, sp: usize) {
self.x[2] = sp;
}
pub fn app_init_context(
entry: usize,
sp: usize,


@ -1,28 +1,18 @@
mod context;
use riscv::register::{
mtvec::TrapMode,
stvec,
scause::{
self,
Trap,
Exception,
Interrupt,
},
stval,
sie,
};
use crate::config::TRAMPOLINE;
use crate::syscall::syscall;
use crate::task::{
exit_current_and_run_next,
current_trap_cx, current_trap_cx_user_va, current_user_token, exit_current_and_run_next,
suspend_current_and_run_next,
current_user_token,
current_trap_cx,
current_trap_cx_user_va,
};
use crate::timer::{set_next_trigger, check_timer};
use crate::config::TRAMPOLINE;
use core::arch::{global_asm, asm};
use crate::timer::{check_timer, set_next_trigger};
use core::arch::{asm, global_asm};
use riscv::register::{
mtvec::TrapMode,
scause::{self, Exception, Interrupt, Trap},
sie, stval, stvec,
};
global_asm!(include_str!("trap.S"));
@ -43,7 +33,9 @@ fn set_user_trap_entry() {
}
pub fn enable_timer_interrupt() {
unsafe { sie::set_stimer(); }
unsafe {
sie::set_stimer();
}
}
#[no_mangle]
@ -62,12 +54,12 @@ pub fn trap_handler() -> ! {
cx = current_trap_cx();
cx.x[10] = result as usize;
}
Trap::Exception(Exception::StoreFault) |
Trap::Exception(Exception::StorePageFault) |
Trap::Exception(Exception::InstructionFault) |
Trap::Exception(Exception::InstructionPageFault) |
Trap::Exception(Exception::LoadFault) |
Trap::Exception(Exception::LoadPageFault) => {
Trap::Exception(Exception::StoreFault)
| Trap::Exception(Exception::StorePageFault)
| Trap::Exception(Exception::InstructionFault)
| Trap::Exception(Exception::InstructionPageFault)
| Trap::Exception(Exception::LoadFault)
| Trap::Exception(Exception::LoadPageFault) => {
println!(
"[kernel] {:?} in application, bad addr = {:#x}, bad instruction = {:#x}, kernel killed it.",
scause.cause(),
@ -88,7 +80,11 @@ pub fn trap_handler() -> ! {
suspend_current_and_run_next();
}
_ => {
panic!("Unsupported trap {:?}, stval = {:#x}!", scause.cause(), stval);
panic!(
"Unsupported trap {:?}, stval = {:#x}!",
scause.cause(),
stval
);
}
}
trap_return();