Introduce IdBitmap to fix the metadata syncing bug in Ext2
commit 2021160e97
parent 0677adc4e2
fs/ext2/block_group.rs:

@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: MPL-2.0
 
-use id_alloc::IdAlloc;
 use ostd::{const_assert, mm::io_util::HasVmReaderWriter};
 
 use super::{
@@ -10,6 +9,7 @@ use super::{
     prelude::*,
     super_block::SuperBlock,
 };
+use crate::fs::utils::IdBitmap;
 
 /// Blocks are clustered into block groups in order to reduce fragmentation and minimise
 /// the amount of head seeking when reading a large amount of consecutive data.
@@ -49,13 +49,13 @@ impl BlockGroup {
             GroupDescriptor::from(raw_descriptor)
         };
 
-        let get_bitmap = |bid: Ext2Bid, capacity: usize| -> Result<IdAlloc> {
+        let get_bitmap = |bid: Ext2Bid, capacity: usize| -> Result<IdBitmap> {
             if capacity > BLOCK_SIZE * 8 {
                 return_errno_with_message!(Errno::EINVAL, "bad bitmap");
             }
             let mut buf = vec![0u8; BLOCK_SIZE];
             block_device.read_bytes(bid as usize * BLOCK_SIZE, &mut buf)?;
-            Ok(IdAlloc::from_bytes_with_capacity(&buf, capacity))
+            Ok(IdBitmap::from_buf(buf.into_boxed_slice(), capacity as u16))
         };
 
         let block_bitmap = {
@@ -366,13 +366,13 @@ struct Inner {
 #[derive(Clone, Debug)]
 struct GroupMetadata {
     descriptor: GroupDescriptor,
-    block_bitmap: IdAlloc,
-    inode_bitmap: IdAlloc,
+    block_bitmap: IdBitmap,
+    inode_bitmap: IdBitmap,
 }
 
 impl GroupMetadata {
     pub fn is_inode_allocated(&self, inode_idx: u32) -> bool {
-        self.inode_bitmap.is_allocated(inode_idx as usize)
+        self.inode_bitmap.is_allocated(inode_idx as u16)
     }
 
     pub fn alloc_inode(&mut self, is_dir: bool) -> Option<u32> {
@@ -385,7 +385,7 @@ impl GroupMetadata {
     }
 
     pub fn free_inode(&mut self, inode_idx: u32, is_dir: bool) {
-        self.inode_bitmap.free(inode_idx as usize);
+        self.inode_bitmap.free(inode_idx as u16);
         self.inc_free_inodes();
         if is_dir {
             self.dec_dirs();
@@ -393,18 +393,18 @@ impl GroupMetadata {
     }
 
     pub fn is_block_allocated(&self, block_idx: Ext2Bid) -> bool {
-        self.block_bitmap.is_allocated(block_idx as usize)
+        self.block_bitmap.is_allocated(block_idx as u16)
     }
 
     pub fn alloc_blocks(&mut self, count: Ext2Bid) -> Option<Range<Ext2Bid>> {
-        let mut current_count = count.min(self.free_blocks_count() as Ext2Bid) as usize;
+        let mut current_count = count.min(self.free_blocks_count() as Ext2Bid) as u16;
         while current_count > 0 {
             let Some(range) = self.block_bitmap.alloc_consecutive(current_count) else {
                 // It is efficient to halve the value
                 current_count /= 2;
                 continue;
             };
-            self.dec_free_blocks(current_count as u16);
+            self.dec_free_blocks(current_count);
             return Some((range.start as Ext2Bid)..(range.end as Ext2Bid));
         }
         None
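For reference, the retry loop in this hunk can be read as the following standalone sketch. It is illustrative only, not code from this commit; the helper name alloc_best_effort is hypothetical, and it uses only IdBitmap::alloc_consecutive as defined in the new file later in this diff. When no run of `count` free IDs exists, the requested count is halved, so the search gives up after at most log2(count) retries instead of decrementing one by one.

// Illustrative sketch: best-effort consecutive allocation with halving retries.
fn alloc_best_effort(bitmap: &mut IdBitmap, mut count: u16) -> Option<core::ops::Range<u16>> {
    while count > 0 {
        if let Some(range) = bitmap.alloc_consecutive(count) {
            // Found (and marked) a run of `count` consecutive free IDs.
            return Some(range);
        }
        // No run of this length exists; retry with half the length.
        count /= 2;
    }
    None
}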
@@ -412,7 +412,7 @@ impl GroupMetadata {
 
     pub fn free_blocks(&mut self, range: Range<Ext2Bid>) {
         self.block_bitmap
-            .free_consecutive((range.start as usize)..(range.end as usize));
+            .free_consecutive((range.start as u16)..(range.end as u16));
         self.inc_free_blocks(range.len() as u16);
     }
 
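One plausible reading of the "metadata syncing" fix (an assumption, not stated in the diff): IdBitmap keeps the raw bitmap block in its on-disk layout and exposes it via as_bytes(), so writing a group's bitmap back to the device becomes a plain block-sized write. A minimal sketch follows, assuming a write_bytes counterpart to the read_bytes call used above; the function name sync_bitmap_block and the device parameter type are hypothetical.

// Hypothetical sketch: flush an in-memory bitmap back to its on-disk block.
fn sync_bitmap_block(
    block_device: &dyn aster_block::BlockDevice, // assumed device type
    bitmap_bid: Ext2Bid,                         // block ID of the on-disk bitmap block
    bitmap: &IdBitmap,
) -> Result<()> {
    // `as_bytes()` returns the full BLOCK_SIZE buffer in on-disk bit order,
    // so the block can be written back verbatim.
    block_device.write_bytes(bitmap_bid as usize * BLOCK_SIZE, bitmap.as_bytes())
}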
New file fs/utils/id_bitmap.rs:

@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: MPL-2.0
+
+use core::ops::Range;
+
+use aster_block::BLOCK_SIZE;
+use bitvec::{
+    order::Lsb0,
+    slice::BitSlice,
+    view::{AsBits, AsMutBits},
+};
+
+use crate::prelude::*;
+
+/// A disk I/O-friendly bitmap for ID management (e.g., block or inode IDs).
+///
+/// An ID bitmap has the same size as a block, i.e., `BLOCK_SIZE`.
+/// Each bit in the bitmap represents one ID:
+/// bit 0 at the i-th position of the bitmap means the i-th ID is free
+/// and bit 1 means the ID is in use.
+/// As such, the bitmap can contain at most `BLOCK_SIZE` * 8 bits/IDs.
+#[derive(Clone)]
+pub struct IdBitmap {
+    buf: Box<[u8]>,
+    first_available_id: u16,
+    len: u16,
+}
+
+impl IdBitmap {
+    /// Creates a new ID bitmap out of a given buffer, whose first `len` bits represent valid IDs.
+    ///
+    /// # Panics
+    ///
+    /// This method panics if `len` is greater than [`IdBitmap::capacity()`].
+    pub fn from_buf(buf: Box<[u8]>, len: u16) -> Self {
+        assert!(len <= Self::capacity());
+        let mut bitmap = Self {
+            buf,
+            first_available_id: 0,
+            len,
+        };
+
+        let bit_slice = bitmap.bit_slice();
+        bitmap.first_available_id = (0..len).find(|&i| !bit_slice[i as usize]).unwrap_or(len);
+        bitmap
+    }
+
+    /// Returns the length of the ID bitmap, i.e., the maximum number of IDs.
+    #[expect(unused)]
+    pub const fn len(&self) -> u16 {
+        self.len
+    }
+
+    /// Returns the capacity of the ID bitmap.
+    ///
+    /// The capacity is the size of the underlying buffer in bits.
+    pub const fn capacity() -> u16 {
+        BLOCK_SIZE as u16 * 8
+    }
+
+    /// Returns a reference to the underlying buffer of `BLOCK_SIZE` bytes.
+    pub fn as_bytes(&self) -> &[u8] {
+        &self.buf
+    }
+
+    fn bit_slice(&self) -> &BitSlice<u8, Lsb0> {
+        &self.buf.as_bits()[..self.len as usize]
+    }
+
+    fn bit_slice_mut(&mut self) -> &mut BitSlice<u8, Lsb0> {
+        &mut self.buf.as_mut_bits()[..self.len as usize]
+    }
+
+    /// Returns true if the `id` is allocated.
+    ///
+    /// # Panics
+    ///
+    /// If the `id` is out of bounds, this method will panic.
+    pub fn is_allocated(&self, id: u16) -> bool {
+        self.bit_slice()[id as usize]
+    }
+
+    /// Allocates and returns a new `id`.
+    ///
+    /// If allocation is not possible, it returns `None`.
+    pub fn alloc(&mut self) -> Option<u16> {
+        if self.first_available_id < self.len {
+            let id = self.first_available_id;
+            self.bit_slice_mut().set(id as usize, true);
+
+            let bit_slice = self.bit_slice();
+            self.first_available_id = (id + 1..self.len)
+                .find(|&i| !bit_slice[i as usize])
+                .unwrap_or(self.len);
+
+            Some(id)
+        } else {
+            None
+        }
+    }
+
+    /// Allocates a consecutive range of new `id`s.
+    ///
+    /// The `count` is the number of consecutive `id`s to allocate. If it is 0, this returns `None`.
+    ///
+    /// If allocation is not possible, it returns `None`.
+    pub fn alloc_consecutive(&mut self, count: u16) -> Option<Range<u16>> {
+        if count == 0 {
+            return None;
+        }
+
+        // Scan the bitmap from the position `first_available_id`
+        // for the first `count` number of consecutive 0's.
+        let allocated_range = {
+            // Invariant: all bits within `curr_range` are 0's.
+            let bit_slice = self.bit_slice();
+            let mut curr_range = self.first_available_id..self.first_available_id + 1;
+            while curr_range.len() < count as usize && curr_range.end < self.len {
+                if !bit_slice[curr_range.end as usize] {
+                    curr_range.end += 1;
+                } else {
+                    curr_range = curr_range.end + 1..curr_range.end + 1;
+                }
+            }
+
+            if curr_range.len() < count as usize {
+                return None;
+            }
+
+            curr_range
+        };
+
+        // Set every bit to 1 within the allocated range.
+        let bit_slice_mut = self.bit_slice_mut();
+        for id in allocated_range.clone() {
+            bit_slice_mut.set(id as usize, true);
+        }
+
+        // In case we need to update `first_available_id`.
+        let bit_slice = self.bit_slice();
+        if bit_slice[self.first_available_id as usize] {
+            self.first_available_id = (allocated_range.end..self.len)
+                .find(|&i| !bit_slice[i as usize])
+                .map_or(self.len, |i| i);
+        }
+
+        Some(allocated_range)
+    }
+
+    /// Releases the allocated `id`.
+    ///
+    /// # Panics
+    ///
+    /// If the `id` is out of bounds, this method will panic.
+    pub fn free(&mut self, id: u16) {
+        debug_assert!(self.bit_slice()[id as usize]);
+
+        self.bit_slice_mut().set(id as usize, false);
+        if id < self.first_available_id {
+            self.first_available_id = id;
+        }
+    }
+
+    /// Releases the consecutive range of allocated `id`s.
+    ///
+    /// # Panics
+    ///
+    /// If the `range` is out of bounds, this method will panic.
+    pub fn free_consecutive(&mut self, range: Range<u16>) {
+        if range.is_empty() {
+            return;
+        }
+
+        let range_start = range.start;
+        let bit_slice_mut = self.bit_slice_mut();
+        for id in range {
+            debug_assert!(bit_slice_mut[id as usize]);
+            bit_slice_mut.set(id as usize, false);
+        }
+
+        if range_start < self.first_available_id {
+            self.first_available_id = range_start;
+        }
+    }
+}
+
+impl Debug for IdBitmap {
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        f.debug_struct("IdBitmap")
+            .field("len", &self.len)
+            .field("first_available_id", &self.first_available_id)
+            .finish()
+    }
+}
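To make the semantics of the new type concrete, here is an illustrative usage snippet (not part of the commit) that exercises only the methods defined above, assuming IdBitmap is in scope via the re-export added in fs/utils below. The byte value 0b0000_0101 stands in for an on-disk bitmap block in which IDs 0 and 2 are already in use (bit i set, in Lsb0 order, means ID i is allocated).

use aster_block::BLOCK_SIZE;
use crate::fs::utils::IdBitmap;

fn id_bitmap_demo() {
    let mut buf = vec![0u8; BLOCK_SIZE].into_boxed_slice();
    buf[0] = 0b0000_0101; // IDs 0 and 2 in use; ID 1 free
    let mut bitmap = IdBitmap::from_buf(buf, 64);

    assert_eq!(IdBitmap::capacity() as usize, BLOCK_SIZE * 8);
    assert!(bitmap.is_allocated(0) && bitmap.is_allocated(2));

    assert_eq!(bitmap.alloc(), Some(1));                 // lowest free ID
    assert_eq!(bitmap.alloc_consecutive(4), Some(3..7)); // next run of 4 free IDs

    bitmap.free(2);
    bitmap.free_consecutive(3..7);
    assert_eq!(bitmap.alloc(), Some(2));                 // freeing moves the cursor back

    // The whole BLOCK_SIZE buffer stays in on-disk layout, ready to be written back.
    assert_eq!(bitmap.as_bytes().len(), BLOCK_SIZE);
}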
fs/utils/mod.rs:

@@ -11,6 +11,7 @@ pub use falloc_mode::FallocMode;
 pub use file_creation_mask::{AtomicFileCreationMask, FileCreationMask};
 pub use flock::{FlockItem, FlockList, FlockType};
 pub use fs::{FileSystem, FsFlags, SuperBlock};
+pub use id_bitmap::IdBitmap;
 pub use inode::{
     Extension, Inode, InodeIo, InodeType, Metadata, MknodType, Permission, SymbolicLink,
 };

@@ -36,6 +37,7 @@ mod falloc_mode;
 mod file_creation_mask;
 mod flock;
 mod fs;
+mod id_bitmap;
 mod inode;
 mod inode_mode;
 mod ioctl;