Small Fix && cargo fmt

Yifan Wu 2022-01-22 12:40:54 -08:00
parent c9583b0f53
commit ae3ba9c26f
83 changed files with 1085 additions and 1079 deletions


@@ -1,9 +1,5 @@
use super::{get_block_cache, BlockDevice, BLOCK_SZ};
use alloc::sync::Arc;
use super::{
BlockDevice,
BLOCK_SZ,
get_block_cache,
};
type BitmapBlock = [u64; 64];
@@ -34,14 +30,15 @@ impl Bitmap {
let pos = get_block_cache(
block_id + self.start_block_id as usize,
Arc::clone(block_device),
).lock().modify(0, |bitmap_block: &mut BitmapBlock| {
)
.lock()
.modify(0, |bitmap_block: &mut BitmapBlock| {
if let Some((bits64_pos, inner_pos)) = bitmap_block
.iter()
.enumerate()
.find(|(_, bits64)| **bits64 != u64::MAX)
.map(|(bits64_pos, bits64)| {
(bits64_pos, bits64.trailing_ones() as usize)
}) {
.map(|(bits64_pos, bits64)| (bits64_pos, bits64.trailing_ones() as usize))
{
// modify cache
bitmap_block[bits64_pos] |= 1u64 << inner_pos;
Some(block_id * BLOCK_BITS + bits64_pos * 64 + inner_pos as usize)
@@ -58,13 +55,12 @@ impl Bitmap {
pub fn dealloc(&self, block_device: &Arc<dyn BlockDevice>, bit: usize) {
let (block_pos, bits64_pos, inner_pos) = decomposition(bit);
get_block_cache(
block_pos + self.start_block_id,
Arc::clone(block_device)
).lock().modify(0, |bitmap_block: &mut BitmapBlock| {
assert!(bitmap_block[bits64_pos] & (1u64 << inner_pos) > 0);
bitmap_block[bits64_pos] -= 1u64 << inner_pos;
});
get_block_cache(block_pos + self.start_block_id, Arc::clone(block_device))
.lock()
.modify(0, |bitmap_block: &mut BitmapBlock| {
assert!(bitmap_block[bits64_pos] & (1u64 << inner_pos) > 0);
bitmap_block[bits64_pos] -= 1u64 << inner_pos;
});
}
pub fn maximum(&self) -> usize {


@@ -1,7 +1,4 @@
use super::{
BLOCK_SZ,
BlockDevice,
};
use super::{BlockDevice, BLOCK_SZ};
use alloc::collections::VecDeque;
use alloc::sync::Arc;
use lazy_static::*;
@@ -16,10 +13,7 @@ pub struct BlockCache {
impl BlockCache {
/// Load a new BlockCache from disk.
pub fn new(
block_id: usize,
block_device: Arc<dyn BlockDevice>
) -> Self {
pub fn new(block_id: usize, block_device: Arc<dyn BlockDevice>) -> Self {
let mut cache = [0u8; BLOCK_SZ];
block_device.read_block(block_id, &mut cache);
Self {
@@ -34,14 +28,20 @@ impl BlockCache {
&self.cache[offset] as *const _ as usize
}
pub fn get_ref<T>(&self, offset: usize) -> &T where T: Sized {
pub fn get_ref<T>(&self, offset: usize) -> &T
where
T: Sized,
{
let type_size = core::mem::size_of::<T>();
assert!(offset + type_size <= BLOCK_SZ);
let addr = self.addr_of_offset(offset);
unsafe { &*(addr as *const T) }
unsafe { &*(addr as *const T) }
}
pub fn get_mut<T>(&mut self, offset: usize) -> &mut T where T: Sized {
pub fn get_mut<T>(&mut self, offset: usize) -> &mut T
where
T: Sized,
{
let type_size = core::mem::size_of::<T>();
assert!(offset + type_size <= BLOCK_SZ);
self.modified = true;
@@ -53,7 +53,7 @@ impl BlockCache {
f(self.get_ref(offset))
}
pub fn modify<T, V>(&mut self, offset:usize, f: impl FnOnce(&mut T) -> V) -> V {
pub fn modify<T, V>(&mut self, offset: usize, f: impl FnOnce(&mut T) -> V) -> V {
f(self.get_mut(offset))
}
@@ -79,7 +79,9 @@ pub struct BlockCacheManager {
impl BlockCacheManager {
pub fn new() -> Self {
Self { queue: VecDeque::new() }
Self {
queue: VecDeque::new(),
}
}
pub fn get_block_cache(
@@ -87,27 +89,28 @@ impl BlockCacheManager {
block_id: usize,
block_device: Arc<dyn BlockDevice>,
) -> Arc<Mutex<BlockCache>> {
if let Some(pair) = self.queue
.iter()
.find(|pair| pair.0 == block_id) {
Arc::clone(&pair.1)
if let Some(pair) = self.queue.iter().find(|pair| pair.0 == block_id) {
Arc::clone(&pair.1)
} else {
// substitute
if self.queue.len() == BLOCK_CACHE_SIZE {
// from front to tail
if let Some((idx, _)) = self.queue
if let Some((idx, _)) = self
.queue
.iter()
.enumerate()
.find(|(_, pair)| Arc::strong_count(&pair.1) == 1) {
.find(|(_, pair)| Arc::strong_count(&pair.1) == 1)
{
self.queue.drain(idx..=idx);
} else {
panic!("Run out of BlockCache!");
}
}
// load block into mem and push back
let block_cache = Arc::new(Mutex::new(
BlockCache::new(block_id, Arc::clone(&block_device))
));
let block_cache = Arc::new(Mutex::new(BlockCache::new(
block_id,
Arc::clone(&block_device),
)));
self.queue.push_back((block_id, Arc::clone(&block_cache)));
block_cache
}
@@ -115,16 +118,17 @@ impl BlockCacheManager {
}
lazy_static! {
pub static ref BLOCK_CACHE_MANAGER: Mutex<BlockCacheManager> = Mutex::new(
BlockCacheManager::new()
);
pub static ref BLOCK_CACHE_MANAGER: Mutex<BlockCacheManager> =
Mutex::new(BlockCacheManager::new());
}
pub fn get_block_cache(
block_id: usize,
block_device: Arc<dyn BlockDevice>
block_device: Arc<dyn BlockDevice>,
) -> Arc<Mutex<BlockCache>> {
BLOCK_CACHE_MANAGER.lock().get_block_cache(block_id, block_device)
BLOCK_CACHE_MANAGER
.lock()
.get_block_cache(block_id, block_device)
}
pub fn block_cache_sync_all() {


@@ -1,6 +1,6 @@
use core::any::Any;
pub trait BlockDevice : Send + Sync + Any {
pub trait BlockDevice: Send + Sync + Any {
fn read_block(&self, block_id: usize, buf: &mut [u8]);
fn write_block(&self, block_id: usize, buf: &[u8]);
}


@@ -1,16 +1,10 @@
use alloc::sync::Arc;
use spin::Mutex;
use super::{
BlockDevice,
Bitmap,
block_cache_sync_all, get_block_cache, Bitmap, BlockDevice, DiskInode, DiskInodeType, Inode,
SuperBlock,
DiskInode,
DiskInodeType,
Inode,
get_block_cache,
block_cache_sync_all,
};
use crate::BLOCK_SZ;
use alloc::sync::Arc;
use spin::Mutex;
pub struct EasyFileSystem {
pub block_device: Arc<dyn BlockDevice>,
@@ -50,39 +44,36 @@ impl EasyFileSystem {
};
// clear all blocks
for i in 0..total_blocks {
get_block_cache(
i as usize,
Arc::clone(&block_device)
)
.lock()
.modify(0, |data_block: &mut DataBlock| {
for byte in data_block.iter_mut() { *byte = 0; }
});
get_block_cache(i as usize, Arc::clone(&block_device))
.lock()
.modify(0, |data_block: &mut DataBlock| {
for byte in data_block.iter_mut() {
*byte = 0;
}
});
}
// initialize SuperBlock
get_block_cache(0, Arc::clone(&block_device))
.lock()
.modify(0, |super_block: &mut SuperBlock| {
super_block.initialize(
total_blocks,
inode_bitmap_blocks,
inode_area_blocks,
data_bitmap_blocks,
data_area_blocks,
);
});
get_block_cache(0, Arc::clone(&block_device)).lock().modify(
0,
|super_block: &mut SuperBlock| {
super_block.initialize(
total_blocks,
inode_bitmap_blocks,
inode_area_blocks,
data_bitmap_blocks,
data_area_blocks,
);
},
);
// write back immediately
// create a inode for root node "/"
assert_eq!(efs.alloc_inode(), 0);
let (root_inode_block_id, root_inode_offset) = efs.get_disk_inode_pos(0);
get_block_cache(
root_inode_block_id as usize,
Arc::clone(&block_device)
)
.lock()
.modify(root_inode_offset, |disk_inode: &mut DiskInode| {
disk_inode.initialize(DiskInodeType::Directory);
});
get_block_cache(root_inode_block_id as usize, Arc::clone(&block_device))
.lock()
.modify(root_inode_offset, |disk_inode: &mut DiskInode| {
disk_inode.initialize(DiskInodeType::Directory);
});
block_cache_sync_all();
Arc::new(Mutex::new(efs))
}
@@ -97,10 +88,7 @@ impl EasyFileSystem {
super_block.inode_bitmap_blocks + super_block.inode_area_blocks;
let efs = Self {
block_device,
inode_bitmap: Bitmap::new(
1,
super_block.inode_bitmap_blocks as usize
),
inode_bitmap: Bitmap::new(1, super_block.inode_bitmap_blocks as usize),
data_bitmap: Bitmap::new(
(1 + inode_total_blocks) as usize,
super_block.data_bitmap_blocks as usize,
@@ -117,19 +105,17 @@
// acquire efs lock temporarily
let (block_id, block_offset) = efs.lock().get_disk_inode_pos(0);
// release efs lock
Inode::new(
block_id,
block_offset,
Arc::clone(efs),
block_device,
)
Inode::new(block_id, block_offset, Arc::clone(efs), block_device)
}
pub fn get_disk_inode_pos(&self, inode_id: u32) -> (u32, usize) {
let inode_size = core::mem::size_of::<DiskInode>();
let inodes_per_block = (BLOCK_SZ / inode_size) as u32;
let block_id = self.inode_area_start_block + inode_id / inodes_per_block;
(block_id, (inode_id % inodes_per_block) as usize * inode_size)
(
block_id,
(inode_id % inodes_per_block) as usize * inode_size,
)
}
pub fn get_data_block_id(&self, data_block_id: u32) -> u32 {
@@ -146,18 +132,16 @@
}
pub fn dealloc_data(&mut self, block_id: u32) {
get_block_cache(
block_id as usize,
Arc::clone(&self.block_device)
)
.lock()
.modify(0, |data_block: &mut DataBlock| {
data_block.iter_mut().for_each(|p| { *p = 0; })
});
get_block_cache(block_id as usize, Arc::clone(&self.block_device))
.lock()
.modify(0, |data_block: &mut DataBlock| {
data_block.iter_mut().for_each(|p| {
*p = 0;
})
});
self.data_bitmap.dealloc(
&self.block_device,
(block_id - self.data_area_start_block) as usize
(block_id - self.data_area_start_block) as usize,
)
}
}
}


@@ -1,11 +1,7 @@
use core::fmt::{Debug, Formatter, Result};
use super::{
BLOCK_SZ,
BlockDevice,
get_block_cache,
};
use super::{get_block_cache, BlockDevice, BLOCK_SZ};
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::fmt::{Debug, Formatter, Result};
const EFS_MAGIC: u32 = 0x3b800001;
const INODE_DIRECT_COUNT: usize = 28;
@@ -115,7 +111,8 @@ impl DiskInode {
if data_blocks > INDIRECT1_BOUND {
total += 1;
// sub indirect1
total += (data_blocks - INDIRECT1_BOUND + INODE_INDIRECT1_COUNT - 1) / INODE_INDIRECT1_COUNT;
total +=
(data_blocks - INDIRECT1_BOUND + INODE_INDIRECT1_COUNT - 1) / INODE_INDIRECT1_COUNT;
}
total as u32
}
@@ -135,22 +132,16 @@ impl DiskInode {
})
} else {
let last = inner_id - INDIRECT1_BOUND;
let indirect1 = get_block_cache(
self.indirect2 as usize,
Arc::clone(block_device)
)
.lock()
.read(0, |indirect2: &IndirectBlock| {
indirect2[last / INODE_INDIRECT1_COUNT]
});
get_block_cache(
indirect1 as usize,
Arc::clone(block_device)
)
.lock()
.read(0, |indirect1: &IndirectBlock| {
indirect1[last % INODE_INDIRECT1_COUNT]
})
let indirect1 = get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
.lock()
.read(0, |indirect2: &IndirectBlock| {
indirect2[last / INODE_INDIRECT1_COUNT]
});
get_block_cache(indirect1 as usize, Arc::clone(block_device))
.lock()
.read(0, |indirect1: &IndirectBlock| {
indirect1[last % INODE_INDIRECT1_COUNT]
})
}
}
pub fn increase_size(
@@ -169,7 +160,7 @@ impl DiskInode {
current_blocks += 1;
}
// alloc indirect1
if total_blocks > INODE_DIRECT_COUNT as u32{
if total_blocks > INODE_DIRECT_COUNT as u32 {
if current_blocks == INODE_DIRECT_COUNT as u32 {
self.indirect1 = new_blocks.next().unwrap();
}
@@ -179,17 +170,14 @@
return;
}
// fill indirect1
get_block_cache(
self.indirect1 as usize,
Arc::clone(block_device)
)
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
while current_blocks < total_blocks.min(INODE_INDIRECT1_COUNT as u32) {
indirect1[current_blocks as usize] = new_blocks.next().unwrap();
current_blocks += 1;
}
});
get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
while current_blocks < total_blocks.min(INODE_INDIRECT1_COUNT as u32) {
indirect1[current_blocks as usize] = new_blocks.next().unwrap();
current_blocks += 1;
}
});
// alloc indirect2
if total_blocks > INODE_INDIRECT1_COUNT as u32 {
if current_blocks == INODE_INDIRECT1_COUNT as u32 {
@@ -206,33 +194,27 @@ impl DiskInode {
let a1 = total_blocks as usize / INODE_INDIRECT1_COUNT;
let b1 = total_blocks as usize % INODE_INDIRECT1_COUNT;
// alloc low-level indirect1
get_block_cache(
self.indirect2 as usize,
Arc::clone(block_device)
)
.lock()
.modify(0, |indirect2: &mut IndirectBlock| {
while (a0 < a1) || (a0 == a1 && b0 < b1) {
if b0 == 0 {
indirect2[a0] = new_blocks.next().unwrap();
get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect2: &mut IndirectBlock| {
while (a0 < a1) || (a0 == a1 && b0 < b1) {
if b0 == 0 {
indirect2[a0] = new_blocks.next().unwrap();
}
// fill current
get_block_cache(indirect2[a0] as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
indirect1[b0] = new_blocks.next().unwrap();
});
// move to next
b0 += 1;
if b0 == INODE_INDIRECT1_COUNT {
b0 = 0;
a0 += 1;
}
}
// fill current
get_block_cache(
indirect2[a0] as usize,
Arc::clone(block_device)
)
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
indirect1[b0] = new_blocks.next().unwrap();
});
// move to next
b0 += 1;
if b0 == INODE_INDIRECT1_COUNT {
b0 = 0;
a0 += 1;
}
}
});
});
}
/// Clear size to zero and return blocks that should be deallocated.
@@ -258,18 +240,15 @@ impl DiskInode {
return v;
}
// indirect1
get_block_cache(
self.indirect1 as usize,
Arc::clone(block_device),
)
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
while current_blocks < data_blocks.min(INODE_INDIRECT1_COUNT) {
v.push(indirect1[current_blocks]);
//indirect1[current_blocks] = 0;
current_blocks += 1;
}
});
get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
while current_blocks < data_blocks.min(INODE_INDIRECT1_COUNT) {
v.push(indirect1[current_blocks]);
//indirect1[current_blocks] = 0;
current_blocks += 1;
}
});
self.indirect1 = 0;
// indirect2 block
if data_blocks > INODE_INDIRECT1_COUNT {
@@ -282,36 +261,33 @@
assert!(data_blocks <= INODE_INDIRECT2_COUNT);
let a1 = data_blocks / INODE_INDIRECT1_COUNT;
let b1 = data_blocks % INODE_INDIRECT1_COUNT;
get_block_cache(
self.indirect2 as usize,
Arc::clone(block_device),
)
.lock()
.modify(0, |indirect2: &mut IndirectBlock| {
// full indirect1 blocks
for entry in indirect2.iter_mut().take(a1) {
v.push(*entry);
get_block_cache(*entry as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
for entry in indirect1.iter() {
v.push(*entry);
}
});
}
// last indirect1 block
if b1 > 0 {
v.push(indirect2[a1]);
get_block_cache(indirect2[a1] as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
for entry in indirect1.iter().take(b1) {
v.push(*entry);
}
});
//indirect2[a1] = 0;
}
});
get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect2: &mut IndirectBlock| {
// full indirect1 blocks
for entry in indirect2.iter_mut().take(a1) {
v.push(*entry);
get_block_cache(*entry as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
for entry in indirect1.iter() {
v.push(*entry);
}
});
}
// last indirect1 block
if b1 > 0 {
v.push(indirect2[a1]);
get_block_cache(indirect2[a1] as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
for entry in indirect1.iter().take(b1) {
v.push(*entry);
}
});
//indirect2[a1] = 0;
}
});
self.indirect2 = 0;
v
}
@@ -346,7 +322,9 @@ impl DiskInode {
});
read_size += block_read_size;
// move to next block
if end_current_block == end { break; }
if end_current_block == end {
break;
}
start_block += 1;
start = end_current_block;
}
@@ -372,7 +350,7 @@
let block_write_size = end_current_block - start;
get_block_cache(
self.get_block_id(start_block as u32, block_device) as usize,
Arc::clone(block_device)
Arc::clone(block_device),
)
.lock()
.modify(0, |data_block: &mut DataBlock| {
@ -382,7 +360,9 @@ impl DiskInode {
});
write_size += block_write_size;
// move to next block
if end_current_block == end { break; }
if end_current_block == end {
break;
}
start_block += 1;
start = end_current_block;
}
@@ -414,20 +394,10 @@ impl DirEntry {
}
}
pub fn as_bytes(&self) -> &[u8] {
unsafe {
core::slice::from_raw_parts(
self as *const _ as usize as *const u8,
DIRENT_SZ,
)
}
unsafe { core::slice::from_raw_parts(self as *const _ as usize as *const u8, DIRENT_SZ) }
}
pub fn as_bytes_mut(&mut self) -> &mut [u8] {
unsafe {
core::slice::from_raw_parts_mut(
self as *mut _ as usize as *mut u8,
DIRENT_SZ,
)
}
unsafe { core::slice::from_raw_parts_mut(self as *mut _ as usize as *mut u8, DIRENT_SZ) }
}
pub fn name(&self) -> &str {
let len = (0usize..).find(|i| self.name[*i] == 0).unwrap();


@@ -2,17 +2,17 @@
extern crate alloc;
mod block_dev;
mod layout;
mod efs;
mod bitmap;
mod vfs;
mod block_cache;
mod block_dev;
mod efs;
mod layout;
mod vfs;
pub const BLOCK_SZ: usize = 512;
use bitmap::Bitmap;
use block_cache::{block_cache_sync_all, get_block_cache};
pub use block_dev::BlockDevice;
pub use efs::EasyFileSystem;
pub use vfs::Inode;
use layout::*;
use bitmap::Bitmap;
use block_cache::{get_block_cache, block_cache_sync_all};
pub use vfs::Inode;


@@ -1,15 +1,9 @@
use super::{
BlockDevice,
DiskInode,
DiskInodeType,
DirEntry,
EasyFileSystem,
DIRENT_SZ,
get_block_cache,
block_cache_sync_all,
block_cache_sync_all, get_block_cache, BlockDevice, DirEntry, DiskInode, DiskInodeType,
EasyFileSystem, DIRENT_SZ,
};
use alloc::sync::Arc;
use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
use spin::{Mutex, MutexGuard};
@@ -37,35 +31,25 @@ impl Inode {
}
fn read_disk_inode<V>(&self, f: impl FnOnce(&DiskInode) -> V) -> V {
get_block_cache(
self.block_id,
Arc::clone(&self.block_device)
).lock().read(self.block_offset, f)
get_block_cache(self.block_id, Arc::clone(&self.block_device))
.lock()
.read(self.block_offset, f)
}
fn modify_disk_inode<V>(&self, f: impl FnOnce(&mut DiskInode) -> V) -> V {
get_block_cache(
self.block_id,
Arc::clone(&self.block_device)
).lock().modify(self.block_offset, f)
get_block_cache(self.block_id, Arc::clone(&self.block_device))
.lock()
.modify(self.block_offset, f)
}
fn find_inode_id(
&self,
name: &str,
disk_inode: &DiskInode,
) -> Option<u32> {
fn find_inode_id(&self, name: &str, disk_inode: &DiskInode) -> Option<u32> {
// assert it is a directory
assert!(disk_inode.is_dir());
let file_count = (disk_inode.size as usize) / DIRENT_SZ;
let mut dirent = DirEntry::empty();
for i in 0..file_count {
assert_eq!(
disk_inode.read_at(
DIRENT_SZ * i,
dirent.as_bytes_mut(),
&self.block_device,
),
disk_inode.read_at(DIRENT_SZ * i, dirent.as_bytes_mut(), &self.block_device,),
DIRENT_SZ,
);
if dirent.name() == name {
@@ -78,8 +62,7 @@ impl Inode {
pub fn find(&self, name: &str) -> Option<Arc<Inode>> {
let fs = self.fs.lock();
self.read_disk_inode(|disk_inode| {
self.find_inode_id(name, disk_inode)
.map(|inode_id| {
self.find_inode_id(name, disk_inode).map(|inode_id| {
let (block_id, block_offset) = fs.get_disk_inode_pos(inode_id);
Arc::new(Self::new(
block_id,
@@ -123,14 +106,12 @@ impl Inode {
// alloc a inode with an indirect block
let new_inode_id = fs.alloc_inode();
// initialize inode
let (new_inode_block_id, new_inode_block_offset)
= fs.get_disk_inode_pos(new_inode_id);
get_block_cache(
new_inode_block_id as usize,
Arc::clone(&self.block_device)
).lock().modify(new_inode_block_offset, |new_inode: &mut DiskInode| {
new_inode.initialize(DiskInodeType::File);
});
let (new_inode_block_id, new_inode_block_offset) = fs.get_disk_inode_pos(new_inode_id);
get_block_cache(new_inode_block_id as usize, Arc::clone(&self.block_device))
.lock()
.modify(new_inode_block_offset, |new_inode: &mut DiskInode| {
new_inode.initialize(DiskInodeType::File);
});
self.modify_disk_inode(|root_inode| {
// append file in the dirent
let file_count = (root_inode.size as usize) / DIRENT_SZ;
@@ -166,11 +147,7 @@
for i in 0..file_count {
let mut dirent = DirEntry::empty();
assert_eq!(
disk_inode.read_at(
i * DIRENT_SZ,
dirent.as_bytes_mut(),
&self.block_device,
),
disk_inode.read_at(i * DIRENT_SZ, dirent.as_bytes_mut(), &self.block_device,),
DIRENT_SZ,
);
v.push(String::from(dirent.name()));
@@ -181,9 +158,7 @@
pub fn read_at(&self, offset: usize, buf: &mut [u8]) -> usize {
let _fs = self.fs.lock();
self.read_disk_inode(|disk_inode| {
disk_inode.read_at(offset, buf, &self.block_device)
})
self.read_disk_inode(|disk_inode| disk_inode.read_at(offset, buf, &self.block_device))
}
pub fn write_at(&self, offset: usize, buf: &[u8]) -> usize {