Add exFAT file system

This commit is contained in:
Yingdi Shan 2024-03-18 15:58:37 +08:00 committed by Tate, Hongliang Tian
parent 52f07458f7
commit 650c7dfbad
29 changed files with 5861 additions and 42 deletions

View File

@ -75,6 +75,10 @@ jobs:
- name: Syscall Test at Ext2 (MicroVM)
id: syscall_test_at_ext2
run: make run AUTO_TEST=syscall SYSCALL_TEST_DIR=/ext2 ENABLE_KVM=0 QEMU_MACHINE=microvm RELEASE_MODE=1
- name: Syscall Test at Exfat
id: syscall_test_at_exfat_linux
run: make run AUTO_TEST=syscall SYSCALL_TEST_DIR=/exfat EXTRA_BLOCKLISTS_DIRS=blocklists.exfat ENABLE_KVM=0 BOOT_PROTOCOL=linux-efi-handover64 RELEASE_MODE=1
- name: Regression Test (MicroVM)
id: regression_test_linux

2
Cargo.lock generated
View File

@ -211,6 +211,7 @@ dependencies = [
"cpio-decoder",
"getrandom",
"getset",
"hashbrown 0.14.3",
"inherit-methods-macro",
"int-to-c-enum",
"intrusive-collections",
@ -222,6 +223,7 @@ dependencies = [
"log",
"lru",
"pod",
"rand",
"ringbuf",
"smoltcp",
"spin 0.9.8",

View File

@ -13,6 +13,7 @@ ENABLE_KVM ?= 1
INTEL_TDX ?= 0
SKIP_GRUB_MENU ?= 1
SYSCALL_TEST_DIR ?= /tmp
EXTRA_BLOCKLISTS_DIRS ?= ""
RELEASE_MODE ?= 0
# End of auto test features.
@ -23,6 +24,7 @@ CARGO_OSDK_ARGS :=
ifeq ($(AUTO_TEST), syscall)
BUILD_SYSCALL_TEST := 1
CARGO_OSDK_ARGS += --kcmd_args="SYSCALL_TEST_DIR=$(SYSCALL_TEST_DIR)"
CARGO_OSDK_ARGS += --kcmd_args="EXTRA_BLOCKLISTS_DIRS=$(EXTRA_BLOCKLISTS_DIRS)"
CARGO_OSDK_ARGS += --init_args="/opt/syscall_test/run_syscall_test.sh"
endif
ifeq ($(AUTO_TEST), regression)
@ -182,4 +184,4 @@ check: $(CARGO_OSDK)
clean:
@cargo clean
@cd docs && mdbook clean
@make --no-print-directory -C regression clean
@make --no-print-directory -C regression clean

View File

@ -18,6 +18,7 @@ ovmf = "/root/ovmf/release"
machine = "q35"
drive_files = [
["regression/build/ext2.img", "if=none,format=raw,id=x0"],
["regression/build/exfat.img", "if=none,format=raw,id=x1"],
]
args = [
"--no-reboot",
@ -30,7 +31,8 @@ args = [
"-device isa-debug-exit,iobase=0xf4,iosize=0x04",
"-object filter-dump,id=filter0,netdev=net01,file=virtio-net.pcap",
"-netdev user,id=net01,hostfwd=tcp::36788-:22,hostfwd=tcp::55834-:8080",
"-device virtio-blk-pci,bus=pcie.0,addr=0x6,drive=x0,disable-legacy=on,disable-modern=off",
"-device virtio-blk-pci,bus=pcie.0,addr=0x6,drive=x0,serial=vext2,disable-legacy=on,disable-modern=off",
"-device virtio-blk-pci,bus=pcie.0,addr=0x7,drive=x1,serial=vexfat,disable-legacy=on,disable-modern=off",
"-device virtio-keyboard-pci,disable-legacy=on,disable-modern=off",
"-device virtio-net-pci,netdev=net01,disable-legacy=on,disable-modern=off",
"-device virtio-serial-pci,disable-legacy=on,disable-modern=off",
@ -41,6 +43,7 @@ args = [
machine = "q35"
drive_files = [
["regression/build/ext2.img", "if=none,format=raw,id=x0"],
["regression/build/exfat.img", "if=none,format=raw,id=x1"],
]
args = [
"--no-reboot",
@ -53,7 +56,8 @@ args = [
"-device isa-debug-exit,iobase=0xf4,iosize=0x04",
"-object filter-dump,id=filter0,netdev=net01,file=virtio-net.pcap",
"-netdev user,id=net01,hostfwd=tcp::36788-:22,hostfwd=tcp::55834-:8080",
"-device virtio-blk-pci,bus=pcie.0,addr=0x6,drive=x0,disable-legacy=on,disable-modern=off,iommu_platform=on,ats=on",
"-device virtio-blk-pci,bus=pcie.0,addr=0x6,drive=x0,serial=vext2,disable-legacy=on,disable-modern=off,iommu_platform=on,ats=on",
"-device virtio-blk-pci,bus=pcie.0,addr=0x7,drive=x1,serial=vexfat,disable-legacy=on,disable-modern=off,iommu_platform=on,ats=on",
"-device virtio-keyboard-pci,disable-legacy=on,disable-modern=off,iommu_platform=on,ats=on",
"-device virtio-net-pci,netdev=net01,disable-legacy=on,disable-modern=off,iommu_platform=on,ats=on",
"-device virtio-serial-pci,disable-legacy=on,disable-modern=off,iommu_platform=on,ats=on",
@ -66,6 +70,7 @@ args = [
machine = "microvm"
drive_files = [
["regression/build/ext2.img", "if=none,format=raw,id=x0"],
["regression/build/exfat.img", "if=none,format=raw,id=x1"],
]
args = [
"--no-reboot",
@ -80,9 +85,10 @@ args = [
"-netdev user,id=net01,hostfwd=tcp::36788-:22,hostfwd=tcp::55834-:8080",
"-nodefaults",
"-no-user-config",
"-device virtio-blk-device,drive=x0",
"-device virtio-blk-device,drive=x0,serial=vext2",
"-device virtio-blk-device,drive=x1,serial=vexfat",
"-device virtio-keyboard-device",
"-device virtio-net-device,netdev=net01",
"-device virtio-serial-device",
"-device virtconsole,chardev=mux",
]
]

View File

@ -62,6 +62,8 @@ getrandom = { version = "0.2.10", default-features = false, features = [
"rdrand",
] }
bitvec = { version = "1.0", default-features = false, features = ["alloc"] }
hashbrown = "0.14"
rand = {version = "0.8.5", default-features = false, features = ["small_rng"]}
static_assertions = "1.1.0"
inherit-methods-macro = { git = "https://github.com/asterinas/inherit-methods-macro", rev = "98f7e3e" }
getset = "0.1.2"

View File

@ -0,0 +1,384 @@
// SPDX-License-Identifier: MPL-2.0
use core::ops::Range;
use align_ext::AlignExt;
use aster_rights::Full;
use bitvec::prelude::*;
use super::{
constants::EXFAT_RESERVED_CLUSTERS,
dentry::{ExfatBitmapDentry, ExfatDentry, ExfatDentryIterator},
fat::{ClusterID, ExfatChain},
fs::ExfatFS,
};
use crate::{fs::exfat::fat::FatChainFlags, prelude::*, vm::vmo::Vmo};
// TODO: use u64
/// Backing storage unit of the in-memory bitmap (`BitVec` word type).
type BitStore = u8;

const BITS_PER_BYTE: usize = 8;
#[derive(Debug, Default)]
pub(super) struct ExfatBitmap {
    // Start cluster of the on-disk allocation bitmap.
    chain: ExfatChain,
    /// In-memory copy of the bitmap. Bit `i` tracks cluster
    /// `i + EXFAT_RESERVED_CLUSTERS`; a set bit means the cluster is in use.
    bitvec: BitVec<BitStore>,
    /// Device byte ranges modified in memory but not yet synced to disk.
    dirty_bytes: VecDeque<Range<usize>>,
    // Used to track the number of free clusters.
    num_free_cluster: u32,
    /// Back-reference to the file system; weak to avoid a reference cycle.
    fs: Weak<ExfatFS>,
}
impl ExfatBitmap {
    /// Loads the allocation bitmap by scanning the root directory for a
    /// bitmap dentry, then reading the referenced on-disk bytes.
    ///
    /// Fails with `EINVAL` if no valid bitmap dentry is found.
    ///
    /// NOTE(review): `root_chain` is currently unused — TODO confirm.
    pub(super) fn load(
        fs_weak: Weak<ExfatFS>,
        root_page_cache: Vmo<Full>,
        root_chain: ExfatChain,
    ) -> Result<Self> {
        let dentry_iterator = ExfatDentryIterator::new(root_page_cache, 0, None)?;

        for dentry_result in dentry_iterator {
            let dentry = dentry_result?;
            if let ExfatDentry::Bitmap(bitmap_dentry) = dentry {
                // Bit 0 of the flags selects which allocation bitmap the
                // dentry describes; 0 is the first (and only supported) one.
                if (bitmap_dentry.flags & 0x1) == 0 {
                    return Self::load_bitmap_from_dentry(fs_weak.clone(), &bitmap_dentry);
                }
            }
        }

        return_errno_with_message!(Errno::EINVAL, "bitmap not found")
    }

    /// Builds the in-memory bitmap from the bytes referenced by `dentry`,
    /// counting the number of free clusters along the way.
    fn load_bitmap_from_dentry(fs_weak: Weak<ExfatFS>, dentry: &ExfatBitmapDentry) -> Result<Self> {
        let fs = fs_weak.upgrade().unwrap();
        // Number of clusters occupied by the bitmap itself.
        let num_clusters = (dentry.size as usize).align_up(fs.cluster_size()) / fs.cluster_size();

        let chain = ExfatChain::new(
            fs_weak.clone(),
            dentry.start_cluster,
            Some(num_clusters as u32),
            FatChainFlags::ALLOC_POSSIBLE,
        )?;
        let mut buf = vec![0; dentry.size as usize];

        fs.read_meta_at(chain.physical_cluster_start_offset(), &mut buf)?;

        // A zero bit means the corresponding cluster is free.
        let mut free_cluster_num = 0;
        for idx in 0..fs.super_block().num_clusters - EXFAT_RESERVED_CLUSTERS {
            if (buf[idx as usize / BITS_PER_BYTE] & (1 << (idx % BITS_PER_BYTE as u32))) == 0 {
                free_cluster_num += 1;
            }
        }

        Ok(ExfatBitmap {
            chain,
            bitvec: BitVec::from_slice(&buf),
            dirty_bytes: VecDeque::new(),
            num_free_cluster: free_cluster_num,
            fs: fs_weak,
        })
    }

    /// Upgrades the weak file-system reference; panics if the FS is gone.
    fn fs(&self) -> Arc<ExfatFS> {
        self.fs.upgrade().unwrap()
    }

    /// Returns whether the bitmap bit at `bit` (index space, reserved
    /// clusters already subtracted) is set, i.e. the cluster is in use.
    fn is_used(&self, bit: usize) -> bool {
        *(self.bitvec.get(bit).unwrap())
    }

    /// Marks a single cluster as used.
    pub(super) fn set_used(&mut self, cluster: u32, sync: bool) -> Result<()> {
        self.set_range(cluster..cluster + 1, true, sync)
    }

    /// Marks a single cluster as free.
    pub(super) fn set_unused(&mut self, cluster: u32, sync: bool) -> Result<()> {
        self.set_range(cluster..cluster + 1, false, sync)
    }

    /// Marks every cluster in `clusters` as used.
    pub(super) fn set_range_used(&mut self, clusters: Range<ClusterID>, sync: bool) -> Result<()> {
        self.set_range(clusters, true, sync)
    }

    /// Marks every cluster in `clusters` as free.
    pub(super) fn set_range_unused(
        &mut self,
        clusters: Range<ClusterID>,
        sync: bool,
    ) -> Result<()> {
        self.set_range(clusters, false, sync)
    }

    /// Returns whether a single cluster is free.
    pub(super) fn is_cluster_unused(&self, cluster: u32) -> Result<bool> {
        self.is_cluster_range_unused(cluster..cluster + 1)
    }

    /// Returns whether every cluster in `clusters` is free.
    /// Fails with `EINVAL` if the range is not a valid cluster range.
    pub(super) fn is_cluster_range_unused(&self, clusters: Range<ClusterID>) -> Result<bool> {
        if !self.fs().is_cluster_range_valid(clusters.clone()) {
            return_errno_with_message!(Errno::EINVAL, "invalid cluster ranges.")
        }

        for id in clusters {
            if self.bitvec[(id - EXFAT_RESERVED_CLUSTERS) as usize] {
                return Ok(false);
            }
        }
        Ok(true)
    }

    /// Return the first unused cluster at or after `cluster`.
    pub(super) fn find_next_unused_cluster(&self, cluster: ClusterID) -> Result<ClusterID> {
        let clusters = self.find_next_unused_cluster_range_by_bits(cluster, 1)?;
        Ok(clusters.start)
    }

    /// Return the first unused cluster range, set num_clusters=1 to find a single cluster.
    ///
    /// Simple bit-by-bit linear scan; used directly for small requests.
    fn find_next_unused_cluster_range_by_bits(
        &self,
        search_start_cluster: ClusterID,
        num_clusters: u32,
    ) -> Result<Range<ClusterID>> {
        if !self
            .fs()
            .is_cluster_range_valid(search_start_cluster..search_start_cluster + num_clusters)
        {
            return_errno_with_message!(Errno::ENOSPC, "free contigous clusters not avalable.")
        }
        // Work in bitmap index space (cluster id minus the reserved ids).
        let mut cur_index = search_start_cluster - EXFAT_RESERVED_CLUSTERS;
        let end_index = self.fs().super_block().num_clusters - EXFAT_RESERVED_CLUSTERS;
        // Last index from which a run of `num_clusters` can still fit.
        let search_end_index = end_index - num_clusters + 1;
        let mut range_start_index: ClusterID;

        while cur_index < search_end_index {
            if !self.is_used(cur_index as usize) {
                range_start_index = cur_index;
                let mut cnt = 0;
                // Measure the free run starting at `range_start_index`.
                while cnt < num_clusters
                    && cur_index < end_index
                    && !self.is_used(cur_index as usize)
                {
                    cnt += 1;
                    cur_index += 1;
                }
                if cnt >= num_clusters {
                    // Translate back to cluster-id space.
                    return Ok(range_start_index + EXFAT_RESERVED_CLUSTERS
                        ..range_start_index + EXFAT_RESERVED_CLUSTERS + num_clusters);
                }
            }
            cur_index += 1;
        }
        return_errno!(Errno::ENOSPC)
    }

    /// Make sure the bit at the range start position is 0.
    ///
    /// Advances `(cur_unit_index, cur_unit_offset)` until the byte at
    /// `cur_unit_index` has free (zero) bits reaching its top end, adjusting
    /// the offset to the start of that free run.
    ///
    /// NOTE(review): the loop bound compares a *byte* index against a
    /// *cluster* count, and `head_cluster_num` is computed but never used;
    /// callers appear to break out before an out-of-bounds index can be
    /// reached — TODO confirm.
    fn adjust_head_pos(
        &self,
        bytes: &[BitStore],
        mut cur_unit_index: u32,
        mut cur_unit_offset: u32,
        total_cluster_num: u32,
    ) -> (u32, u32) {
        let unit_size: u32 = (BITS_PER_BYTE * core::mem::size_of::<BitStore>()) as u32;
        while cur_unit_index < total_cluster_num {
            let leading_zeros = bytes[cur_unit_index as usize].leading_zeros();
            let head_cluster_num = unit_size - cur_unit_offset;
            if leading_zeros == 0 {
                // Fall over to the next unit, we need to continue checking.
                cur_unit_index += 1;
                cur_unit_offset = 0;
            } else {
                // Stop at current unit, we may need to adjust the cur_offset
                cur_unit_offset = cur_unit_offset.max(unit_size - leading_zeros);
                break;
            }
        }
        (cur_unit_index, cur_unit_offset)
    }

    /// Check if the next mid_unit_num units are zero.
    /// If not, return the index of the first not zero unit.
    ///
    /// NOTE(review): on failure this returns `cur_unit_index + 1` no matter
    /// which unit was non-zero (the doc above suggests `cur_unit_index + i`).
    /// The caller only uses the value to resume scanning, so the final result
    /// stays correct but units may be rescanned — TODO confirm intent.
    fn check_mid_units(&self, bytes: &[BitStore], cur_unit_index: u32, mid_unit_num: u32) -> u32 {
        for i in 1..mid_unit_num + 1 {
            if bytes[(cur_unit_index + i) as usize] != 0 {
                return cur_unit_index + 1;
            }
        }
        cur_unit_index
    }

    /// Check if the tail unit is valid.
    /// Currently not used.
    fn check_tail_bits(
        &self,
        bytes: &[BitStore],
        tail_idx: u32,
        tail_cluster_num: u32,
        complete_unit_num: u32,
        rest_cluster_num: u32,
    ) -> bool {
        let valid_bytes_num = if rest_cluster_num > 0 {
            complete_unit_num + 1
        } else {
            complete_unit_num
        };
        let mut tail_byte: u8 = 0;
        if tail_idx == complete_unit_num {
            // Treat the padding bits past the end of the cluster heap as used.
            tail_byte |= 0xFF_u8 - ((1_u8 << rest_cluster_num) - 1);
        }
        if tail_idx < valid_bytes_num {
            tail_byte |= bytes[tail_idx as usize];
        }
        let tailing_zeros = tail_byte.trailing_zeros();
        tailing_zeros >= tail_cluster_num
    }

    /// Converts a (byte index, bit offset) position back into a cluster-id
    /// range of length `num_clusters`.
    fn make_range(
        &self,
        cur_unit_index: u32,
        cur_unit_offset: u32,
        num_clusters: u32,
    ) -> Range<ClusterID> {
        let unit_size: u32 = (BITS_PER_BYTE * core::mem::size_of::<BitStore>()) as u32;
        let result_bit_index = cur_unit_index * unit_size + cur_unit_offset;
        result_bit_index + EXFAT_RESERVED_CLUSTERS
            ..result_bit_index + EXFAT_RESERVED_CLUSTERS + num_clusters
    }

    /// Return the next contiguous unused clusters, set cluster_num=1 to find a single cluster
    pub(super) fn find_next_unused_cluster_range(
        &self,
        search_start_cluster: ClusterID,
        num_clusters: u32,
    ) -> Result<Range<ClusterID>> {
        if !self
            .fs()
            .is_cluster_range_valid(search_start_cluster..search_start_cluster + num_clusters)
        {
            return_errno!(Errno::ENOSPC)
        }

        let bytes: &[BitStore] = self.bitvec.as_raw_slice();
        let unit_size: u32 = (BITS_PER_BYTE * core::mem::size_of::<BitStore>()) as u32;
        let start_cluster_index = search_start_cluster - EXFAT_RESERVED_CLUSTERS;
        // Candidate start position as a (byte index, bit offset) pair.
        let mut cur_unit_index = start_cluster_index / unit_size;
        let mut cur_unit_offset = start_cluster_index % unit_size;
        let total_cluster_num = self.fs().super_block().num_clusters - EXFAT_RESERVED_CLUSTERS;
        let complete_unit_num = total_cluster_num / unit_size;
        let rest_cluster_num = total_cluster_num % unit_size;
        let valid_bytes_num = if rest_cluster_num > 0 {
            complete_unit_num + 1
        } else {
            complete_unit_num
        };
        if num_clusters <= unit_size {
            // If this case, back to the simple function
            return self.find_next_unused_cluster_range_by_bits(search_start_cluster, num_clusters);
        }

        // Treat a continuous bit chunk as lead_bits+mid_units+tail_bits (mid_units are unit aligned)
        // For example: 11110000 00000000 00000000 00111111
        //                  **** -------- -------- ..
        //              ^(start bit)
        // (*): head_bits; (-): mid_units; (.): tail_bits
        // The start bit can be identified with a pair (cur_unit_index, cur_unit_offset)
        while cur_unit_index < complete_unit_num {
            // First, adjust the cur_idx to a proper head.
            (cur_unit_index, cur_unit_offset) =
                self.adjust_head_pos(bytes, cur_unit_index, cur_unit_offset, total_cluster_num);
            let head_cluster_num = unit_size - cur_unit_offset;
            let mid_unit_num = (num_clusters - head_cluster_num) / unit_size;
            let tail_cluster_num = (num_clusters - head_cluster_num) % unit_size;
            // If the last complete unit to be check is out of range, stop searching
            if cur_unit_index + mid_unit_num >= complete_unit_num {
                break;
            }
            // Then check for the mid units, these units should be all zero
            // Due to previous check, there will be no array out of bounds situation
            let ret = self.check_mid_units(bytes, cur_unit_index, mid_unit_num);
            if ret != cur_unit_index {
                // Mid_checks failed, should go back to the first step.
                cur_unit_index = ret;
                cur_unit_offset = 0;
                continue;
            }
            // At last, check for the tail bits
            if tail_cluster_num == 0 {
                return Ok(self.make_range(cur_unit_index, cur_unit_offset, num_clusters));
            }
            let mut tail_byte: u8 = 0;
            let tail_idx = cur_unit_index + mid_unit_num + 1;
            if tail_idx == complete_unit_num {
                // Treat padding bits past the end of the cluster heap as used.
                tail_byte |= 0xFF_u8 - ((1_u8 << rest_cluster_num) - 1);
            }
            if tail_idx < valid_bytes_num {
                tail_byte |= bytes[tail_idx as usize];
            }
            let tailing_zeros = tail_byte.trailing_zeros();
            if tail_cluster_num > tailing_zeros {
                // Tail check failed; resume just past the first used tail bit.
                cur_unit_index = tail_idx;
                cur_unit_offset = tailing_zeros + 1;
                continue;
            }
            // If we reach here, it means we have found a result
            return Ok(self.make_range(cur_unit_index, cur_unit_offset, num_clusters));
        }
        return_errno!(Errno::ENOSPC)
    }

    /// Returns the cached count of free clusters.
    pub(super) fn num_free_clusters(&self) -> u32 {
        self.num_free_cluster
    }

    /// Sets every bit in `clusters` to `bit`, maintains the free-cluster
    /// counter, and writes the affected bytes back to the device
    /// (synchronously when `sync` is true).
    fn set_range(&mut self, clusters: Range<ClusterID>, bit: bool, sync: bool) -> Result<()> {
        if !self.fs().is_cluster_range_valid(clusters.clone()) {
            return_errno_with_message!(Errno::EINVAL, "invalid cluster ranges.")
        }

        for cluster_id in clusters.clone() {
            let index = (cluster_id - EXFAT_RESERVED_CLUSTERS) as usize;
            let old_bit = self.is_used(index);
            self.bitvec.set(index, bit);
            // Only bit transitions change the free-cluster count.
            if !old_bit && bit {
                self.num_free_cluster -= 1;
            } else if old_bit && !bit {
                self.num_free_cluster += 1;
            }
        }

        self.write_to_disk(clusters.clone(), sync)?;

        Ok(())
    }

    /// Writes the bitmap bytes covering `clusters` back to the device.
    ///
    /// When `sync` is false the written range is only recorded in
    /// `dirty_bytes`; a later [`Self::sync`] flushes it.
    fn write_to_disk(&mut self, clusters: Range<ClusterID>, sync: bool) -> Result<()> {
        let unit_size = core::mem::size_of::<BitStore>() * BITS_PER_BYTE;
        let start_byte_off: usize = (clusters.start - EXFAT_RESERVED_CLUSTERS) as usize / unit_size;
        let end_byte_off: usize =
            ((clusters.end - EXFAT_RESERVED_CLUSTERS) as usize).align_up(unit_size) / unit_size;
        let bytes: &[BitStore] = self.bitvec.as_raw_slice();
        let byte_chunk = &bytes[start_byte_off..end_byte_off];

        // Map the byte offset inside the bitmap to a physical device offset.
        let pos = self.chain.walk_to_cluster_at_offset(start_byte_off)?;
        let phys_offset = pos.0.physical_cluster_start_offset() + pos.1;
        self.fs().write_meta_at(phys_offset, byte_chunk)?;

        let byte_range = phys_offset..phys_offset + byte_chunk.len();
        if sync {
            self.fs().sync_meta_at(byte_range.clone())?;
        } else {
            self.dirty_bytes.push_back(byte_range.clone());
        }
        Ok(())
    }

    /// Flushes all recorded dirty byte ranges to the device.
    pub(super) fn sync(&mut self) -> Result<()> {
        while let Some(range) = self.dirty_bytes.pop_front() {
            self.fs().sync_meta_at(range)?;
        }
        Ok(())
    }
}

View File

@ -0,0 +1,32 @@
// SPDX-License-Identifier: MPL-2.0

/// Fixed inode hash of the root directory.
pub(super) const ROOT_INODE_HASH: usize = 0;

// Other pub(super) constants

/// Max bytes a single Unicode character may occupy after conversion.
pub(super) const MAX_CHARSET_SIZE: usize = 6;
/// Max file name length in characters.
pub(super) const MAX_NAME_LENGTH: usize = 255;
/// Buffer size large enough for any converted VFS name (plus terminator).
pub(super) const MAX_VFSNAME_BUF_SIZE: usize = (MAX_NAME_LENGTH + 1) * MAX_CHARSET_SIZE;

/// Boot sector trailing signature.
pub(super) const BOOT_SIGNATURE: u16 = 0xAA55;
/// Extended boot sector trailing signature.
pub(super) const EXBOOT_SIGNATURE: u32 = 0xAA550000;
pub(super) const STR_EXFAT: &str = "EXFAT   "; // size should be 8

/// Volume flag: volume was not cleanly unmounted.
pub(super) const VOLUME_DIRTY: u16 = 0x0002;
/// Volume flag: media failures were detected.
pub(super) const MEDIA_FAILURE: u16 = 0x0004;

// Cluster 0, 1 are reserved, the first cluster is 2 in the cluster heap.
pub(super) const EXFAT_RESERVED_CLUSTERS: u32 = 2;
pub(super) const EXFAT_FIRST_CLUSTER: u32 = 2;

// exFAT allows 8388608 (2^23) directory entries; at 32 bytes each this is 256 MiB.
pub(super) const EXFAT_MAX_DENTRIES: u32 = 8388608;

/// UTF-16 characters stored per name dentry.
pub(super) const EXFAT_FILE_NAME_LEN: usize = 15;

/// Sector size bounds, as log2 (512 B .. 4 KiB).
pub(super) const EXFAT_MIN_SECT_SIZE_BITS: u8 = 9;
pub(super) const EXFAT_MAX_SECT_SIZE_BITS: u8 = 12;

// Timestamp constants
/// 1980-01-01T00:00:00Z, the epoch of exFAT timestamps.
pub(super) const EXFAT_MIN_TIMESTAMP_SECS: u64 = 315532800;
/// 2107-12-31T23:59:59Z, the largest representable exFAT timestamp.
pub(super) const EXFAT_MAX_TIMESTAMP_SECS: u64 = 4354819199;

/// Bytes per UTF-16 code unit.
pub(super) const UNICODE_SIZE: usize = 2;

View File

@ -0,0 +1,747 @@
// SPDX-License-Identifier: MPL-2.0
use core::ops::Range;
use aster_frame::vm::VmIo;
use aster_rights::Full;
use super::{
constants::{EXFAT_FILE_NAME_LEN, MAX_NAME_LENGTH},
fat::FatChainFlags,
fs::ExfatFS,
inode::FatAttr,
upcase_table::ExfatUpcaseTable,
utils::{calc_checksum_16, DosTimestamp},
};
use crate::{
fs::utils::{InodeMode, InodeType},
prelude::*,
vm::vmo::Vmo,
};
pub(super) const DENTRY_SIZE: usize = 32; // directory entry size
#[derive(Debug, Clone, Copy)]
/// A typed view of one 32-byte on-disk directory entry.
pub(super) enum ExfatDentry {
    /// Primary dentry of a file or directory (type code 0x85).
    File(ExfatFileDentry),
    /// Stream-extension dentry (0xC0), holds size and start cluster.
    Stream(ExfatStreamDentry),
    /// File-name dentry (0xC1), holds up to 15 UTF-16 characters.
    Name(ExfatNameDentry),
    /// Allocation-bitmap dentry (0x81).
    Bitmap(ExfatBitmapDentry),
    /// Up-case table dentry (0x82).
    Upcase(ExfatUpcaseDentry),
    /// Vendor-extension dentry (0xE0).
    VendorExt(ExfatVendorExtDentry),
    /// Vendor-allocation dentry (0xE1).
    VendorAlloc(ExfatVendorAllocDentry),
    /// Unrecognized primary dentry (type codes 0x80..0xC0).
    GenericPrimary(ExfatGenericPrimaryDentry),
    /// Unrecognized secondary dentry (type codes 0xC0..=0xFF).
    GenericSecondary(ExfatGenericSecondaryDentry),
    /// Deleted dentry (type codes 0x01..0x80).
    Deleted(ExfatDeletedDentry),
    /// End-of-directory marker (type code 0x00).
    UnUsed,
}
impl ExfatDentry {
    /// Returns the 32-byte little-endian on-disk representation of the dentry.
    ///
    /// `UnUsed` dentries carry no payload and serialize to all zeros.
    fn as_le_bytes(&self) -> &[u8] {
        match self {
            ExfatDentry::File(file) => file.as_bytes(),
            ExfatDentry::Stream(stream) => stream.as_bytes(),
            ExfatDentry::Name(name) => name.as_bytes(),
            ExfatDentry::Bitmap(bitmap) => bitmap.as_bytes(),
            ExfatDentry::Upcase(upcase) => upcase.as_bytes(),
            ExfatDentry::VendorExt(vendor_ext) => vendor_ext.as_bytes(),
            // Fix: `VendorAlloc` previously fell through to the catch-all arm
            // and serialized as all zeros, corrupting vendor-allocation
            // dentries on write-back.
            ExfatDentry::VendorAlloc(vendor_alloc) => vendor_alloc.as_bytes(),
            ExfatDentry::GenericPrimary(primary) => primary.as_bytes(),
            ExfatDentry::GenericSecondary(secondary) => secondary.as_bytes(),
            ExfatDentry::Deleted(deleted) => deleted.as_bytes(),
            // Only `UnUsed` remains; a const-promoted zero block.
            _ => &[0; DENTRY_SIZE],
        }
    }
}
// On-disk dentry type codes (the first byte of each 32-byte dentry).
const EXFAT_UNUSED: u8 = 0x00;

const EXFAT_INVAL: u8 = 0x80;
const EXFAT_BITMAP: u8 = 0x81;
const EXFAT_UPCASE: u8 = 0x82;
const EXFAT_VOLUME: u8 = 0x83;
const EXFAT_FILE: u8 = 0x85;

const EXFAT_GUID: u8 = 0xA0;
const EXFAT_PADDING: u8 = 0xA1;
const EXFAT_ACLTAB: u8 = 0xA2;

const EXFAT_STREAM: u8 = 0xC0;
const EXFAT_NAME: u8 = 0xC1;
const EXFAT_ACL: u8 = 0xC2;

const EXFAT_VENDOR_EXT: u8 = 0xE0;
const EXFAT_VENDOR_ALLOC: u8 = 0xE1;
#[repr(C, packed)]
#[derive(Clone, Debug, Default, Copy, Pod)]
/// Raw 32-byte on-disk dentry: a type code followed by 31 payload bytes.
pub(super) struct RawExfatDentry {
    pub(super) dentry_type: u8,
    pub(super) value: [u8; 31],
}
impl TryFrom<RawExfatDentry> for ExfatDentry {
    type Error = crate::error::Error;

    /// Decodes a raw dentry into a typed variant based on its type code.
    ///
    /// The specific codes are matched first; all remaining codes fall into
    /// the Deleted (0x01..0x80), GenericPrimary (0x80..0xC0) and
    /// GenericSecondary (0xC0..=0xFF) catch-all ranges, so together with
    /// `EXFAT_UNUSED` (0x00) the match is exhaustive and never fails.
    fn try_from(dentry: RawExfatDentry) -> Result<Self> {
        let dentry_bytes = dentry.as_bytes();
        match dentry.dentry_type {
            EXFAT_FILE => Ok(ExfatDentry::File(ExfatFileDentry::from_bytes(dentry_bytes))),
            EXFAT_STREAM => Ok(ExfatDentry::Stream(ExfatStreamDentry::from_bytes(
                dentry_bytes,
            ))),
            EXFAT_NAME => Ok(ExfatDentry::Name(ExfatNameDentry::from_bytes(dentry_bytes))),
            EXFAT_BITMAP => Ok(ExfatDentry::Bitmap(ExfatBitmapDentry::from_bytes(
                dentry_bytes,
            ))),
            EXFAT_UPCASE => Ok(ExfatDentry::Upcase(ExfatUpcaseDentry::from_bytes(
                dentry_bytes,
            ))),
            EXFAT_VENDOR_EXT => Ok(ExfatDentry::VendorExt(ExfatVendorExtDentry::from_bytes(
                dentry_bytes,
            ))),
            EXFAT_VENDOR_ALLOC => Ok(ExfatDentry::VendorAlloc(
                ExfatVendorAllocDentry::from_bytes(dentry_bytes),
            )),

            EXFAT_UNUSED => Ok(ExfatDentry::UnUsed),
            // Deleted
            0x01..0x80 => Ok(ExfatDentry::Deleted(ExfatDeletedDentry::from_bytes(
                dentry_bytes,
            ))),
            // Primary
            0x80..0xC0 => Ok(ExfatDentry::GenericPrimary(
                ExfatGenericPrimaryDentry::from_bytes(dentry_bytes),
            )),
            // Secondary
            0xC0..=0xFF => Ok(ExfatDentry::GenericSecondary(
                ExfatGenericSecondaryDentry::from_bytes(dentry_bytes),
            )),
        }
    }
}
// State machine used to validate dentry set.
enum ExfatValidateDentryMode {
    /// No dentry consumed yet; a file dentry must come first.
    Started,
    /// File dentry seen; a stream dentry must follow.
    GetFile,
    /// Stream dentry seen; at least one name dentry must follow.
    GetStream,
    // 17 name dentries at maximal.
    GetName(usize),
    /// Trailing benign secondary dentries (generic/vendor).
    GetBenignSecondary,
}
impl ExfatValidateDentryMode {
    /// Advances the dentry-set validation state machine by one dentry.
    ///
    /// Returns the successor state when `dentry` is legal in the current
    /// state; otherwise fails with `EINVAL`.
    fn transit_to_next_state(&self, dentry: &ExfatDentry) -> Result<Self> {
        const MAX_NAME_DENTRIES: usize = MAX_NAME_LENGTH / EXFAT_FILE_NAME_LEN;

        // Benign secondary dentries may trail the name dentries.
        let is_benign_secondary = matches!(
            dentry,
            ExfatDentry::GenericSecondary(_)
                | ExfatDentry::VendorAlloc(_)
                | ExfatDentry::VendorExt(_)
        );

        match (self, dentry) {
            (ExfatValidateDentryMode::Started, ExfatDentry::File(_)) => {
                Ok(ExfatValidateDentryMode::GetFile)
            }
            (ExfatValidateDentryMode::GetFile, ExfatDentry::Stream(_)) => {
                Ok(ExfatValidateDentryMode::GetStream)
            }
            (ExfatValidateDentryMode::GetStream, ExfatDentry::Name(_)) => {
                Ok(ExfatValidateDentryMode::GetName(0))
            }
            // More name dentries are accepted only below the cap.
            (ExfatValidateDentryMode::GetName(count), ExfatDentry::Name(_))
                if count + 1 < MAX_NAME_DENTRIES =>
            {
                Ok(ExfatValidateDentryMode::GetName(count + 1))
            }
            (ExfatValidateDentryMode::GetName(_), _) if is_benign_secondary => {
                Ok(ExfatValidateDentryMode::GetBenignSecondary)
            }
            (ExfatValidateDentryMode::GetBenignSecondary, _) if is_benign_secondary => {
                Ok(ExfatValidateDentryMode::GetBenignSecondary)
            }
            _ => return_errno_with_message!(Errno::EINVAL, "invalid dentry state machine"),
        }
    }
}
/// Checksum maintenance for structures that embed their own checksum field.
pub trait Checksum {
    /// Returns true if the stored checksum matches the computed one.
    fn verify_checksum(&self) -> bool;
    /// Recomputes the checksum and stores it back into the structure.
    fn update_checksum(&mut self);
}
/// A set of dentries that collectively describe a file or folder.
/// Root directory cannot be represented as an ordinal dentryset.
pub(super) struct ExfatDentrySet {
    /// Dentries in on-disk order: file dentry, stream dentry, then name
    /// dentries (possibly followed by other benign secondary dentries).
    dentries: Vec<ExfatDentry>,
}
impl ExfatDentrySet {
    /// Entry set indexes
    /// File dentry index.
    const ES_IDX_FILE: usize = 0;
    /// Stream dentry index.
    const ES_IDX_STREAM: usize = 1;
    /// Name dentry index.
    const ES_IDX_FIRST_FILENAME: usize = 2;

    /// Wraps `dentries` into a validated dentry set.
    ///
    /// When `should_checksum_match` is false the checksum is recomputed
    /// first (used for freshly built sets); otherwise the stored checksum
    /// must already match or validation fails.
    pub(super) fn new(dentries: Vec<ExfatDentry>, should_checksum_match: bool) -> Result<Self> {
        let mut dentry_set = ExfatDentrySet { dentries };
        if !should_checksum_match {
            dentry_set.update_checksum();
        }
        dentry_set.validate_dentry_set()?;
        Ok(dentry_set)
    }

    /// Builds a new dentry set (file + stream + name dentries) for a file or
    /// directory named `name`, stamped with the current time.
    ///
    /// NOTE(review): the `mode` parameter is currently unused here — TODO
    /// confirm whether permissions should be encoded into the attributes.
    pub(super) fn from(
        fs: Arc<ExfatFS>,
        name: &str,
        inode_type: InodeType,
        mode: InodeMode,
    ) -> Result<Self> {
        // Only the directory attribute is derived; files get no attributes.
        let attrs = {
            if inode_type == InodeType::Dir {
                FatAttr::DIRECTORY.bits()
            } else {
                0
            }
        };

        let name = ExfatName::from_str(name, fs.upcase_table())?;
        let mut name_dentries = name.to_dentries();

        let dos_time = DosTimestamp::now()?;

        let mut dentries = Vec::new();
        // Checksum is left as 0 here; `Self::new(_, false)` recomputes it.
        let file_dentry = ExfatDentry::File(ExfatFileDentry {
            dentry_type: EXFAT_FILE,
            num_secondary: (name_dentries.len() + 1) as u8,
            checksum: 0,
            attribute: attrs,
            reserved1: 0,
            create_utc_offset: dos_time.utc_offset,
            create_date: dos_time.date,
            create_time: dos_time.time,
            create_time_cs: dos_time.increament_10ms,
            modify_utc_offset: dos_time.utc_offset,
            modify_date: dos_time.date,
            modify_time: dos_time.time,
            modify_time_cs: dos_time.increament_10ms,
            access_utc_offset: dos_time.utc_offset,
            access_date: dos_time.date,
            access_time: dos_time.time,
            reserved2: [0; 7],
        });

        // A new file starts with no clusters allocated.
        let stream_dentry = ExfatDentry::Stream(ExfatStreamDentry {
            dentry_type: EXFAT_STREAM,
            flags: FatChainFlags::FAT_CHAIN_NOT_IN_USE.bits(),
            reserved1: 0,
            name_len: name.0.len() as u8,
            name_hash: name.checksum(),
            reserved2: 0,
            valid_size: 0,
            reserved3: 0,
            start_cluster: 0,
            size: 0,
        });

        dentries.push(file_dentry);
        dentries.push(stream_dentry);
        dentries.append(&mut name_dentries);

        Self::new(dentries, false)
    }

    /// Reads one dentry set from `page_cache` starting at byte `offset`.
    ///
    /// The first dentry must be a file dentry, which tells how many
    /// secondary dentries follow.
    pub(super) fn read_from(page_cache: Vmo<Full>, offset: usize) -> Result<Self> {
        // NOTE(review): `dup()` failure would panic here — TODO confirm it
        // is infallible for this Vmo.
        let mut iter = ExfatDentryIterator::new(page_cache.dup().unwrap(), offset, None)?;
        let primary_dentry_result = iter.next();

        if primary_dentry_result.is_none() {
            return_errno!(Errno::ENOENT)
        }
        let primary_dentry = primary_dentry_result.unwrap()?;

        if let ExfatDentry::File(file_dentry) = primary_dentry {
            Self::read_from_iterator(&file_dentry, &mut iter)
        } else {
            return_errno_with_message!(Errno::EIO, "invalid dentry type, file dentry expected")
        }
    }

    /// Collects `file_dentry.num_secondary` dentries from `iter` and builds
    /// a validated set (stored checksum must match).
    pub(super) fn read_from_iterator(
        file_dentry: &ExfatFileDentry,
        iter: &mut ExfatDentryIterator,
    ) -> Result<Self> {
        let num_secondary = file_dentry.num_secondary as usize;

        let mut dentries = Vec::<ExfatDentry>::with_capacity(num_secondary + 1);
        dentries.push(ExfatDentry::File(*file_dentry));

        for i in 0..num_secondary {
            let dentry_result = iter.next();
            if dentry_result.is_none() {
                return_errno!(Errno::ENOENT);
            }
            let dentry = dentry_result.unwrap()?;
            dentries.push(dentry);
        }

        Self::new(dentries, true)
    }

    /// Number of dentries in the set (primary + secondaries).
    pub(super) fn len(&self) -> usize {
        self.dentries.len()
    }

    /// Serializes the whole set into a contiguous little-endian byte vector.
    pub(super) fn to_le_bytes(&self) -> Vec<u8> {
        // It may be slow to copy at the granularity of byte.
        // self.dentries.iter().map(|dentry| dentry.to_le_bytes()).flatten().collect::<Vec<u8>>()
        let mut bytes = vec![0; self.dentries.len() * DENTRY_SIZE];
        for (i, dentry) in self.dentries.iter().enumerate() {
            let dentry_bytes = dentry.as_le_bytes();
            let (_, to_write) = bytes.split_at_mut(i * DENTRY_SIZE);
            to_write[..DENTRY_SIZE].copy_from_slice(dentry_bytes)
        }
        bytes
    }

    /// Runs the dentry-order state machine over the set and verifies the
    /// checksum stored in the file dentry.
    fn validate_dentry_set(&self) -> Result<()> {
        let mut status = ExfatValidateDentryMode::Started;

        // Maximum dentries = 255 + 1(File dentry)
        if self.dentries.len() > u8::MAX as usize + 1 {
            return_errno_with_message!(Errno::EINVAL, "too many dentries")
        }

        for dentry in &self.dentries {
            status = status.transit_to_next_state(dentry)?;
        }

        // The set must end in a complete state (name or benign secondary).
        if !matches!(status, ExfatValidateDentryMode::GetName(_))
            && !matches!(status, ExfatValidateDentryMode::GetBenignSecondary)
        {
            return_errno_with_message!(Errno::EINVAL, "dentries not enough")
        }

        if !self.verify_checksum() {
            return_errno_with_message!(Errno::EINVAL, "checksum mismatched")
        }

        Ok(())
    }

    /// Returns a copy of the file (primary) dentry.
    /// Panics if slot 0 is not a file dentry (validation guarantees it is).
    pub(super) fn get_file_dentry(&self) -> ExfatFileDentry {
        if let ExfatDentry::File(file) = self.dentries[Self::ES_IDX_FILE] {
            file
        } else {
            panic!("Not possible")
        }
    }

    /// Replaces the file dentry in slot 0.
    pub(super) fn set_file_dentry(&mut self, file: &ExfatFileDentry) {
        self.dentries[Self::ES_IDX_FILE] = ExfatDentry::File(*file);
    }

    /// Returns a copy of the stream dentry.
    /// Panics if slot 1 is not a stream dentry (validation guarantees it is).
    pub(super) fn get_stream_dentry(&self) -> ExfatStreamDentry {
        if let ExfatDentry::Stream(stream) = self.dentries[Self::ES_IDX_STREAM] {
            stream
        } else {
            panic!("Not possible")
        }
    }

    /// Replaces the stream dentry in slot 1.
    pub(super) fn set_stream_dentry(&mut self, stream: &ExfatStreamDentry) {
        self.dentries[Self::ES_IDX_STREAM] = ExfatDentry::Stream(*stream);
    }

    /// Reassembles the file name from the name dentries and verifies it
    /// against the name hash stored in the stream dentry.
    pub(super) fn get_name(
        &self,
        upcase_table: Arc<SpinLock<ExfatUpcaseTable>>,
    ) -> Result<ExfatName> {
        let name_dentries: Vec<ExfatNameDentry> = self
            .dentries
            .iter()
            .filter_map(|&dentry| {
                if let ExfatDentry::Name(name_dentry) = dentry {
                    Some(name_dentry)
                } else {
                    None
                }
            })
            .collect();

        let name = ExfatName::from_name_dentries(&name_dentries, upcase_table)?;
        if name.checksum() != self.get_stream_dentry().name_hash {
            return_errno_with_message!(Errno::EINVAL, "name hash mismatched")
        }
        Ok(name)
    }

    /// Name dentries are not permitted to modify. We should create a new dentry set for renaming.

    /// Computes the 16-bit checksum over all dentries, skipping the two
    /// checksum bytes (offsets 2..4) of the file dentry itself.
    fn calculate_checksum(&self) -> u16 {
        const CHECKSUM_BYTES_RANGE: Range<usize> = 2..4;
        const EMPTY_RANGE: Range<usize> = 0..0;

        let mut checksum = calc_checksum_16(
            self.dentries[Self::ES_IDX_FILE].as_le_bytes(),
            CHECKSUM_BYTES_RANGE,
            0,
        );

        for i in 1..self.dentries.len() {
            let dentry = &self.dentries[i];
            checksum = calc_checksum_16(dentry.as_le_bytes(), EMPTY_RANGE, checksum);
        }
        checksum
    }
}
impl Checksum for ExfatDentrySet {
    /// Compares the checksum stored in the file dentry with the checksum
    /// computed over the whole set.
    fn verify_checksum(&self) -> bool {
        self.get_file_dentry().checksum == self.calculate_checksum()
    }

    /// Recomputes the set checksum and writes it back into the file dentry.
    fn update_checksum(&mut self) {
        let mut file = self.get_file_dentry();
        file.checksum = self.calculate_checksum();
        self.dentries[Self::ES_IDX_FILE] = ExfatDentry::File(file);
    }
}
/// Iterator over the dentries held in an inode's page cache.
pub(super) struct ExfatDentryIterator {
    /// The dentry position in current inode.
    entry: u32,
    /// The page cache of the iterated inode.
    page_cache: Vmo<Full>,
    /// Remaining size that can be iterated. If none, iterate through the whole cluster chain.
    size: Option<usize>,
}
impl ExfatDentryIterator {
    /// Creates an iterator over the dentries stored in `page_cache`,
    /// starting at byte `offset` and limited to `size` bytes (or unlimited
    /// when `size` is `None`).
    ///
    /// Both `offset` and `size` must be multiples of `DENTRY_SIZE`.
    pub fn new(page_cache: Vmo<Full>, offset: usize, size: Option<usize>) -> Result<Self> {
        if let Some(sz) = size {
            if sz % DENTRY_SIZE != 0 {
                return_errno_with_message!(Errno::EINVAL, "remaining size unaligned to dentry size")
            }
        }
        if offset % DENTRY_SIZE != 0 {
            return_errno_with_message!(Errno::EINVAL, "dentry offset unaligned to dentry size")
        }

        let start_entry = (offset / DENTRY_SIZE) as u32;
        Ok(Self {
            entry: start_entry,
            page_cache,
            size,
        })
    }
}
impl Iterator for ExfatDentryIterator {
    type Item = Result<ExfatDentry>;

    /// Yields the next dentry, or `None` once the page cache (or the
    /// caller-supplied size budget) is exhausted. Read failures are
    /// reported as `Some(Err(EIO))`.
    fn next(&mut self) -> Option<Self::Item> {
        let byte_start = self.entry as usize * DENTRY_SIZE;

        // Stop at the end of the page cache.
        if byte_start >= self.page_cache.size() {
            return None;
        }
        // Stop when the remaining size budget (if any) is used up.
        if self.size == Some(0) {
            return None;
        }

        let mut dentry_buf = [0u8; DENTRY_SIZE];
        // Fix: the read error was previously bound to an unused variable
        // (`if let Err(e) = ...`), producing a compiler warning while
        // discarding the error anyway.
        if self.page_cache.read_bytes(byte_start, &mut dentry_buf).is_err() {
            return Some(Err(Error::with_message(
                Errno::EIO,
                "Unable to read dentry from page cache.",
            )));
        }

        // Infallible: `TryFrom` matches every possible dentry type code.
        let dentry = ExfatDentry::try_from(RawExfatDentry::from_bytes(&dentry_buf)).unwrap();

        self.entry += 1;
        if let Some(sz) = self.size {
            self.size = Some(sz - DENTRY_SIZE);
        }

        Some(Ok(dentry))
    }
}
/// On-disk dentry formats
#[repr(C, packed)]
#[derive(Clone, Debug, Default, Copy, Pod)]
// For files & directories
pub(super) struct ExfatFileDentry {
    pub(super) dentry_type: u8, // 0x85

    // Number of Secondary directory entries.
    // 2 to 18 (1 StreamDentry + rest NameDentry)
    pub(super) num_secondary: u8,

    // Checksum of all directory entries in the given set excluding this field, calculated on file and secondary entries.
    pub(super) checksum: u16,

    // bit0: read-only; bit1: hidden; bit2: system; bit4: directory; bit5: archive
    pub(super) attribute: u16,
    pub(super) reserved1: u16,

    // Create time, however, ctime in unix metadata means ***change time***.
    pub(super) create_time: u16,
    pub(super) create_date: u16,

    pub(super) modify_time: u16,
    pub(super) modify_date: u16,

    // The timestamp for access_time has double seconds granularity.
    pub(super) access_time: u16,
    pub(super) access_date: u16,

    // High precision time in 10ms
    pub(super) create_time_cs: u8,
    pub(super) modify_time_cs: u8,

    // Timezone for various time
    pub(super) create_utc_offset: u8,
    pub(super) modify_utc_offset: u8,
    pub(super) access_utc_offset: u8,

    pub(super) reserved2: [u8; 7],
}
#[repr(C, packed)]
#[derive(Clone, Debug, Default, Copy, Pod)]
// MUST immediately follow the FileDentry (the second dentry in a dentry set)
pub(super) struct ExfatStreamDentry {
    pub(super) dentry_type: u8, // 0xC0
    pub(super) flags: u8, // bit0: AllocationPossible (must be 1); bit1: NoFatChain (=1 <=> contiguous)
    pub(super) reserved1: u8,
    pub(super) name_len: u8,   // file name length (in Unicode - 2 bytes)
    pub(super) name_hash: u16, // something like checksum for file name (calculated in bytes)
    pub(super) reserved2: u16,
    pub(super) valid_size: u64, // file current size
    pub(super) reserved3: u32,
    pub(super) start_cluster: u32, // file start cluster
    pub(super) size: u64,          // file maximum size (not used in init a inode?)
}
/// A single UTF-16 code unit as stored on disk.
pub type UTF16Char = u16;

#[repr(C, packed)]
#[derive(Clone, Debug, Default, Copy, Pod)]
// MUST immediately follow the StreamDentry, repeated NameLength/15 (rounded up) times
pub(super) struct ExfatNameDentry {
    pub(super) dentry_type: u8,                                // 0xC1
    pub(super) flags: u8,                                      // first two bits must be zero
    pub(super) unicode_0_14: [UTF16Char; EXFAT_FILE_NAME_LEN], // 15 (or less) characters of file name
}
#[repr(C, packed)]
#[derive(Clone, Debug, Default, Copy, Pod)]
// Allocation-bitmap dentry: points at the cluster chain holding the bitmap
// that tracks which clusters of the heap are in use.
pub(super) struct ExfatBitmapDentry {
    pub(super) dentry_type: u8,
    pub(super) flags: u8,
    pub(super) reserved: [u8; 18],
    pub(super) start_cluster: u32, // first cluster of the bitmap data
    pub(super) size: u64,          // bitmap size in bytes
}
#[repr(C, packed)]
#[derive(Clone, Debug, Default, Copy, Pod)]
// Upcase-table dentry: points at the on-disk case-folding table used for
// case-insensitive name comparison.
pub(super) struct ExfatUpcaseDentry {
    pub(super) dentry_type: u8,
    pub(super) reserved1: [u8; 3],
    pub(super) checksum: u32, // checksum over the whole table data
    pub(super) reserved2: [u8; 12],
    pub(super) start_cluster: u32, // first cluster of the table data
    pub(super) size: u64,          // table size in bytes
}
#[repr(C, packed)]
#[derive(Clone, Debug, Default, Copy, Pod)]
// Vendor-extension secondary dentry: vendor-specific payload stored inline.
pub(super) struct ExfatVendorExtDentry {
    pub(super) dentry_type: u8,
    pub(super) flags: u8,
    pub(super) vendor_guid: [u8; 16], // identifies the vendor that defines the payload
    pub(super) vendor_defined: [u8; 14],
}
#[repr(C, packed)]
#[derive(Clone, Debug, Default, Copy, Pod)]
// Vendor-allocation secondary dentry: vendor-specific payload stored in its
// own cluster chain (start_cluster/size), unlike the inline variant above.
pub(super) struct ExfatVendorAllocDentry {
    pub(super) dentry_type: u8,
    pub(super) flags: u8,
    pub(super) vendor_guid: [u8; 16],
    pub(super) vendor_defined: [u8; 2],
    pub(super) start_cluster: u32,
    pub(super) size: u64,
}
#[repr(C, packed)]
#[derive(Clone, Debug, Default, Copy, Pod)]
// Catch-all layout for any primary dentry type not decoded above.
pub(super) struct ExfatGenericPrimaryDentry {
    pub(super) dentry_type: u8,
    pub(super) secondary_count: u8,
    pub(super) checksum: u16,
    pub(super) flags: u16,
    pub(super) custom_defined: [u8; 14],
    pub(super) start_cluster: u32,
    pub(super) size: u64,
}
#[repr(C, packed)]
#[derive(Clone, Debug, Default, Copy, Pod)]
// Catch-all layout for any secondary dentry type not decoded above.
pub(super) struct ExfatGenericSecondaryDentry {
    pub(super) dentry_type: u8,
    pub(super) flags: u8,
    pub(super) custom_defined: [u8; 18],
    pub(super) start_cluster: u32,
    pub(super) size: u64,
}
#[repr(C, packed)]
#[derive(Clone, Debug, Default, Copy, Pod)]
// A dentry whose in-use bit has been cleared; only the type byte matters.
pub(super) struct ExfatDeletedDentry {
    pub(super) dentry_type: u8,
    // NOTE(review): field name is a typo for "reserved"; kept because it is
    // pub(super) and may be referenced from sibling modules.
    pub(super) reserverd: [u8; 31],
}
/// An in-memory exFAT file name: a sequence of UTF-16 code units.
#[derive(Default, Debug)]
pub(super) struct ExfatName(Vec<UTF16Char>);
impl ExfatName {
pub fn from_name_dentries(
names: &[ExfatNameDentry],
upcase_table: Arc<SpinLock<ExfatUpcaseTable>>,
) -> Result<Self> {
let mut exfat_name = ExfatName::new();
for name in names {
for value in name.unicode_0_14 {
if value == 0 {
return Ok(exfat_name);
}
exfat_name.push_char(value, upcase_table.clone())?;
}
}
Ok(exfat_name)
}
fn push_char(
&mut self,
value: UTF16Char,
upcase_table: Arc<SpinLock<ExfatUpcaseTable>>,
) -> Result<()> {
if !Self::is_valid_char(value) {
return_errno_with_message!(Errno::EINVAL, "not a valid char")
}
self.0.push(value);
// self.0.push(upcase_table.lock().transform_char_to_upcase(value)?);
Ok(())
}
fn is_valid_char(value: UTF16Char) -> bool {
match value {
0..0x20 => false, // Control Code
0x22 => false, // Quotation Mark
0x2A => false, // Asterisk
0x2F => false, // Forward slash
0x3A => false, // Colon
0x3C => false, // Less-than sign
0x3E => false, // Greater-than sign
0x3F => false, // Question mark
0x5C => false, // Back slash
0x7C => false, // Vertical bar
_ => true,
}
}
pub fn len(&self) -> usize {
self.0.len()
}
pub fn checksum(&self) -> u16 {
let bytes = self
.0
.iter()
.flat_map(|character| character.to_le_bytes())
.collect::<Vec<u8>>();
const EMPTY_RANGE: Range<usize> = 0..0;
calc_checksum_16(&bytes, EMPTY_RANGE, 0)
}
pub fn from_str(name: &str, upcase_table: Arc<SpinLock<ExfatUpcaseTable>>) -> Result<Self> {
let name = ExfatName(name.encode_utf16().collect());
// upcase_table.lock().transform_to_upcase(&mut name.0)?;
name.verify()?;
Ok(name)
}
pub fn new() -> Self {
ExfatName(Vec::new())
}
pub fn to_dentries(&self) -> Vec<ExfatDentry> {
let mut name_dentries = Vec::new();
for start in (0..self.0.len()).step_by(EXFAT_FILE_NAME_LEN) {
let end = (start + EXFAT_FILE_NAME_LEN).min(self.0.len());
let mut name: [u16; EXFAT_FILE_NAME_LEN] = [0; EXFAT_FILE_NAME_LEN];
name[..end - start].copy_from_slice(&self.0[start..end]);
name_dentries.push(ExfatDentry::Name(ExfatNameDentry {
dentry_type: EXFAT_NAME,
flags: 0,
unicode_0_14: name,
}))
}
name_dentries
}
pub(super) fn verify(&self) -> Result<()> {
if self
.0
.iter()
.any(|&uni_char| !Self::is_valid_char(uni_char))
{
return_errno_with_message!(Errno::EINVAL, "invalid file name.")
}
// TODO:verify dots
Ok(())
}
}
// Implement `Display` rather than `ToString` directly: the standard blanket
// impl (`impl<T: Display> ToString for T`) still provides `to_string()` for
// all existing callers, and implementing `ToString` by hand is an idiom
// defect (clippy: `to_string_trait_impl`) that forgoes `format!` support.
impl core::fmt::Display for ExfatName {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        // Lossy conversion: unpaired surrogates become U+FFFD, exactly as the
        // previous `to_string` implementation did.
        write!(f, "{}", String::from_utf16_lossy(&self.0))
    }
}
impl Clone for ExfatName {
fn clone(&self) -> Self {
ExfatName(self.0.clone())
}
}

View File

@ -0,0 +1,380 @@
// SPDX-License-Identifier: MPL-2.0
use core::mem::size_of;
use super::{
bitmap::ExfatBitmap,
constants::{EXFAT_FIRST_CLUSTER, EXFAT_RESERVED_CLUSTERS},
fs::ExfatFS,
};
use crate::prelude::*;
/// A cluster index into the cluster heap (also the index of its FAT entry).
pub type ClusterID = u32;
/// Size of one FAT entry in bytes (entries are 32-bit little-endian).
pub(super) const FAT_ENTRY_SIZE: usize = size_of::<ClusterID>();
/// Decoded meaning of a raw FAT entry.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum FatValue {
    Free,
    Next(ClusterID),
    Bad,
    EndOfChain,
}
// Raw sentinel values; anything else is a "next cluster" link.
const EXFAT_EOF_CLUSTER: ClusterID = 0xFFFFFFFF;
const EXFAT_BAD_CLUSTER: ClusterID = 0xFFFFFFF7;
const EXFAT_FREE_CLUSTER: ClusterID = 0;
// Decode a raw FAT entry into its meaning; any non-sentinel value is a
// link to the next cluster of the chain.
impl From<ClusterID> for FatValue {
    fn from(value: ClusterID) -> Self {
        match value {
            EXFAT_FREE_CLUSTER => FatValue::Free,
            EXFAT_EOF_CLUSTER => FatValue::EndOfChain,
            EXFAT_BAD_CLUSTER => FatValue::Bad,
            next => FatValue::Next(next),
        }
    }
}
// Encode a FAT value back into its raw on-disk representation; the exact
// inverse of the decoding above.
impl From<FatValue> for ClusterID {
    fn from(val: FatValue) -> Self {
        match val {
            FatValue::Next(id) => id,
            FatValue::Free => EXFAT_FREE_CLUSTER,
            FatValue::Bad => EXFAT_BAD_CLUSTER,
            FatValue::EndOfChain => EXFAT_EOF_CLUSTER,
        }
    }
}
bitflags! {
    #[derive(Default)]
    pub struct FatChainFlags:u8 {
        // An associated allocation of clusters is possible.
        const ALLOC_POSSIBLE = 0x01;
        // The allocated clusters are contiguous and the FAT table is irrelevant.
        // Note this is 0x03, i.e. it also contains the ALLOC_POSSIBLE bit.
        const FAT_CHAIN_NOT_IN_USE = 0x03;
    }
}
/// A cursor into a cluster chain: the current cluster plus the number of
/// clusters remaining from here to the end of the chain.
#[derive(Debug, Clone, Default)]
pub struct ExfatChain {
    // current clusterID
    current: ClusterID,
    // clusters remaining in the chain, counting `current`
    num_clusters: u32,
    // use FAT or not (FAT_CHAIN_NOT_IN_USE means contiguous on disk)
    flags: FatChainFlags,
    // back-reference to the owning file system
    fs: Weak<ExfatFS>,
}
// A position given by the chain and a relative byte offset in the cluster.
pub type ExfatChainPosition = (ExfatChain, usize);
impl ExfatChain {
    /// Creates a chain cursor starting at `current`.
    ///
    /// When `num_clusters` is `None` the length is determined by walking the
    /// FAT from `current`, which requires the chain to actually use the FAT
    /// (see `count_clusters`).
    pub(super) fn new(
        fs: Weak<ExfatFS>,
        current: ClusterID,
        num_clusters: Option<u32>,
        flags: FatChainFlags,
    ) -> Result<Self> {
        let mut chain = Self {
            current,
            num_clusters: 0,
            flags,
            fs,
        };
        let clusters = {
            if let Some(clu) = num_clusters {
                clu
            } else {
                chain.count_clusters()?
            }
        };
        chain.num_clusters = clusters;
        Ok(chain)
    }

    /// Cluster size in bytes, taken from the superblock.
    pub(super) fn cluster_size(&self) -> usize {
        self.fs().cluster_size()
    }

    /// Number of clusters from the current position to the end of the chain.
    pub(super) fn num_clusters(&self) -> u32 {
        self.num_clusters
    }

    /// The cluster the cursor currently points at.
    pub(super) fn cluster_id(&self) -> ClusterID {
        self.current
    }

    pub(super) fn flags(&self) -> FatChainFlags {
        self.flags
    }

    // Whether lookups must go through the FAT (i.e. the chain is not
    // guaranteed to be contiguous on disk).
    fn fat_in_use(&self) -> bool {
        !self.flags().contains(FatChainFlags::FAT_CHAIN_NOT_IN_USE)
    }

    fn set_flags(&mut self, flags: FatChainFlags) {
        self.flags = flags;
    }

    // Panics if the file system has been dropped; a chain never outlives it.
    fn fs(&self) -> Arc<ExfatFS> {
        self.fs.upgrade().unwrap()
    }

    /// Byte offset on the device where the current cluster's data begins.
    pub(super) fn physical_cluster_start_offset(&self) -> usize {
        // Cluster IDs start at EXFAT_RESERVED_CLUSTERS; index into the heap first.
        let cluster_num = (self.current - EXFAT_RESERVED_CLUSTERS) as usize;
        (cluster_num * self.cluster_size())
            + self.fs().super_block().data_start_sector as usize
                * self.fs().super_block().sector_size as usize
    }

    // Walk to the cluster at the given byte offset, return the chain at that
    // cluster together with the remaining offset inside it.
    pub(super) fn walk_to_cluster_at_offset(&self, offset: usize) -> Result<ExfatChainPosition> {
        let cluster_size = self.fs().cluster_size();
        let steps = offset / cluster_size;
        let result_chain = self.walk(steps as u32)?;
        let result_offset = offset % cluster_size;
        Ok((result_chain, result_offset))
    }

    pub(super) fn is_current_cluster_valid(&self) -> bool {
        self.fs().is_valid_cluster(self.current)
    }

    // When num_clusters is unknown, count it by walking the FAT from the
    // beginning. Only meaningful for FAT-linked (non-contiguous) chains.
    fn count_clusters(&self) -> Result<u32> {
        if !self.fat_in_use() {
            return_errno_with_message!(
                Errno::EIO,
                "Unable to count clusters when FAT table not in use."
            )
        } else {
            let mut cluster = self.current;
            let mut cnt = 1;
            loop {
                let fat = self.fs().read_next_fat(cluster)?;
                match fat {
                    FatValue::Next(next_fat) => {
                        cluster = next_fat;
                        cnt += 1;
                    }
                    // Bad/Free/EndOfChain all terminate the walk.
                    _ => {
                        return Ok(cnt);
                    }
                }
            }
        }
    }

    // Returns a new chain advanced by `steps` clusters.
    // The destination cluster must be a valid cluster.
    pub(super) fn walk(&self, steps: u32) -> Result<ExfatChain> {
        if steps > self.num_clusters {
            return_errno_with_message!(Errno::EINVAL, "invalid walking steps for FAT chain")
        }
        let mut result_cluster = self.current;
        if !self.fat_in_use() {
            // Contiguous chain: simple arithmetic.
            result_cluster = (result_cluster + steps) as ClusterID;
        } else {
            // Follow FAT links one hop at a time.
            for _ in 0..steps {
                let fat = self.fs().read_next_fat(result_cluster)?;
                match fat {
                    FatValue::Next(next_fat) => result_cluster = next_fat,
                    _ => return_errno_with_message!(Errno::EIO, "invalid access to FAT cluster"),
                }
            }
        }
        ExfatChain::new(
            self.fs.clone(),
            result_cluster,
            Some(self.num_clusters - steps),
            self.flags,
        )
    }

    // If the current capacity is 0 (no start cluster), we are free to choose
    // an allocation type. We first try continuous allocation; if no
    // continuous run is available, fall back to FAT-linked allocation.
    fn alloc_cluster_from_empty(
        &mut self,
        num_to_be_allocated: u32,
        bitmap: &mut MutexGuard<'_, ExfatBitmap>,
        sync_bitmap: bool,
    ) -> Result<ClusterID> {
        // Search for a continuous chunk big enough.
        let search_result =
            bitmap.find_next_unused_cluster_range(EXFAT_FIRST_CLUSTER, num_to_be_allocated);
        if let Ok(clusters) = search_result {
            bitmap.set_range_used(clusters.clone(), sync_bitmap)?;
            self.current = clusters.start;
            self.flags = FatChainFlags::FAT_CHAIN_NOT_IN_USE;
            Ok(clusters.start)
        } else {
            // Fall back to FAT-linked allocation.
            let allocated_start_cluster =
                self.alloc_cluster_fat(num_to_be_allocated, sync_bitmap, bitmap)?;
            self.current = allocated_start_cluster;
            self.flags = FatChainFlags::ALLOC_POSSIBLE;
            Ok(allocated_start_cluster)
        }
    }

    // Allocate clusters in FAT mode, returning the first allocated cluster id.
    // The bitmap must already be locked by the caller.
    fn alloc_cluster_fat(
        &mut self,
        num_to_be_allocated: u32,
        sync: bool,
        bitmap: &mut MutexGuard<'_, ExfatBitmap>,
    ) -> Result<ClusterID> {
        let fs = self.fs();
        let mut alloc_start_cluster = 0;
        let mut prev_cluster = 0;
        let mut cur_cluster = EXFAT_FIRST_CLUSTER;
        for i in 0..num_to_be_allocated {
            cur_cluster = bitmap.find_next_unused_cluster(cur_cluster)?;
            bitmap.set_used(cur_cluster, sync)?;
            if i == 0 {
                alloc_start_cluster = cur_cluster;
            } else {
                // Link the previous cluster to the one just allocated.
                fs.write_next_fat(prev_cluster, FatValue::Next(cur_cluster), sync)?;
            }
            prev_cluster = cur_cluster;
        }
        fs.write_next_fat(prev_cluster, FatValue::EndOfChain, sync)?;
        Ok(alloc_start_cluster)
    }

    // Free `drop_num` clusters of a FAT-linked chain starting at
    // `start_physical_cluster`, validating that the chain has exactly the
    // expected length (EndOfChain must appear on the last dropped cluster).
    fn remove_cluster_fat(
        &mut self,
        start_physical_cluster: ClusterID,
        drop_num: u32,
        sync_bitmap: bool,
        bitmap: &mut MutexGuard<'_, ExfatBitmap>,
    ) -> Result<()> {
        let fs = self.fs();
        let mut cur_cluster = start_physical_cluster;
        for i in 0..drop_num {
            bitmap.set_unused(cur_cluster, sync_bitmap)?;
            match fs.read_next_fat(cur_cluster)? {
                FatValue::Next(data) => {
                    cur_cluster = data;
                    // Chain is longer than expected.
                    if i == drop_num - 1 {
                        return_errno_with_message!(Errno::EINVAL, "invalid fat entry")
                    }
                }
                FatValue::EndOfChain => {
                    // Chain is shorter than expected.
                    if i != drop_num - 1 {
                        return_errno_with_message!(Errno::EINVAL, "invalid fat entry")
                    }
                }
                _ => return_errno_with_message!(Errno::EINVAL, "invalid fat entry"),
            }
        }
        Ok(())
    }
}
/// Grows or shrinks a cluster chain at its tail.
pub trait ClusterAllocator {
    /// Appends `num_to_be_allocated` clusters; returns the first new cluster.
    fn extend_clusters(&mut self, num_to_be_allocated: u32, sync: bool) -> Result<ClusterID>;
    /// Frees the last `free_num` clusters of the chain.
    fn remove_clusters_from_tail(&mut self, free_num: u32, sync: bool) -> Result<()>;
}
impl ClusterAllocator for ExfatChain {
    // Append clusters at the end of the chain, returning the first allocated
    // cluster. The caller should update size_allocated accordingly.
    // The file system must be locked before calling.
    fn extend_clusters(&mut self, num_to_be_allocated: u32, sync: bool) -> Result<ClusterID> {
        let fs = self.fs();
        let bitmap_binding = fs.bitmap();
        let mut bitmap = bitmap_binding.lock();
        if num_to_be_allocated > bitmap.num_free_clusters() {
            return_errno!(Errno::ENOSPC)
        }
        // Empty chain: pick an allocation strategy from scratch.
        if self.num_clusters == 0 {
            let allocated =
                self.alloc_cluster_from_empty(num_to_be_allocated, &mut bitmap, sync)?;
            self.num_clusters += num_to_be_allocated;
            return Ok(allocated);
        }
        let start_cluster = self.cluster_id();
        let num_clusters = self.num_clusters;
        // Try to allocate contiguously, otherwise break the chain into a
        // FAT-linked one.
        if !self.fat_in_use() {
            // First, check whether enough clusters directly follow the chain.
            // If not, give up continuous allocation and turn to FAT allocation.
            let current_end = start_cluster + num_clusters;
            let clusters = current_end..current_end + num_to_be_allocated;
            if bitmap.is_cluster_range_unused(clusters.clone())? {
                // NOTE(review): the following clusters may be out of range; the
                // error from is_cluster_range_unused is treated as "not free"
                // and falls through to FAT allocation below? Here the Ok(true)
                // path extends in place.
                bitmap.set_range_used(clusters, sync)?;
                self.num_clusters += num_to_be_allocated;
                return Ok(start_cluster);
            } else {
                // Break the chain: materialize the currently-contiguous run as
                // explicit FAT links so new clusters can be chained anywhere.
                for i in 0..num_clusters - 1 {
                    fs.write_next_fat(
                        start_cluster + i,
                        FatValue::Next(start_cluster + i + 1),
                        sync,
                    )?;
                }
                fs.write_next_fat(start_cluster + num_clusters - 1, FatValue::EndOfChain, sync)?;
                self.set_flags(FatChainFlags::ALLOC_POSSIBLE);
            }
        }
        // Allocate the remaining clusters.
        let allocated_start_cluster =
            self.alloc_cluster_fat(num_to_be_allocated, sync, &mut bitmap)?;
        // Link the allocated clusters onto the tail of the chain.
        let tail_cluster = self.walk(num_clusters - 1)?.cluster_id();
        fs.write_next_fat(tail_cluster, FatValue::Next(allocated_start_cluster), sync)?;
        self.num_clusters += num_to_be_allocated;
        Ok(allocated_start_cluster)
    }

    // Free the last `drop_num` clusters and terminate the remaining chain.
    fn remove_clusters_from_tail(&mut self, drop_num: u32, sync: bool) -> Result<()> {
        let fs = self.fs();
        let num_clusters = self.num_clusters;
        if drop_num > num_clusters {
            return_errno_with_message!(Errno::EINVAL, "invalid free_num")
        }
        let trunc_start_cluster = self.walk(num_clusters - drop_num)?.cluster_id();
        let bitmap_binding = fs.bitmap();
        let mut bitmap = bitmap_binding.lock();
        if !self.fat_in_use() {
            // Contiguous chain: clearing the bitmap range suffices.
            bitmap.set_range_unused(trunc_start_cluster..trunc_start_cluster + drop_num, sync)?;
        } else {
            self.remove_cluster_fat(trunc_start_cluster, drop_num, sync, &mut bitmap)?;
            // Unless the whole chain was dropped, mark the new tail.
            if drop_num != num_clusters {
                let tail_cluster = self.walk(num_clusters - drop_num - 1)?.cluster_id();
                self.fs()
                    .write_next_fat(tail_cluster, FatValue::EndOfChain, sync)?;
            }
        }
        self.num_clusters -= drop_num;
        Ok(())
    }
}

View File

@ -0,0 +1,435 @@
// SPDX-License-Identifier: MPL-2.0
use core::{num::NonZeroUsize, ops::Range, sync::atomic::AtomicU64};
use aster_block::{id::BlockId, BlockDevice};
use aster_frame::vm::VmFrame;
pub(super) use aster_frame::vm::VmIo;
use hashbrown::HashMap;
use lru::LruCache;
use super::{
bitmap::ExfatBitmap,
fat::{ClusterID, ExfatChain, FatChainFlags, FatValue, FAT_ENTRY_SIZE},
inode::ExfatInode,
super_block::{ExfatBootSector, ExfatSuperBlock},
upcase_table::ExfatUpcaseTable,
};
use crate::{
fs::{
exfat::{constants::*, inode::Ino},
utils::{FileSystem, FsFlags, Inode, PageCache, PageCacheBackend, SuperBlock},
},
prelude::*,
};
/// In-memory state of a mounted exFAT file system.
#[derive(Debug)]
pub struct ExfatFS {
    block_device: Arc<dyn BlockDevice>,
    super_block: ExfatSuperBlock,
    // Cluster allocation bitmap, loaded from the root directory.
    bitmap: Arc<Mutex<ExfatBitmap>>,
    // Case-folding table, loaded from the root directory.
    upcase_table: Arc<SpinLock<ExfatUpcaseTable>>,
    mount_option: ExfatMountOptions,
    // Used for inode allocation.
    highest_inode_number: AtomicU64,
    // Inodes are indexed by their hash_value.
    inodes: RwMutex<HashMap<usize, Arc<ExfatInode>>>,
    // Cache for the FAT table.
    fat_cache: RwLock<LruCache<ClusterID, ClusterID>>,
    // Page cache over the whole device, used for metadata reads/writes.
    meta_cache: PageCache,
    // A global lock. We need to hold the mutex before accessing bitmap or
    // inode, otherwise there will be deadlocks.
    mutex: Mutex<()>,
}
/// Capacity of the FAT entry LRU cache.
const FAT_LRU_CACHE_SIZE: usize = 1024;
/// Inode number reserved for the root directory.
pub(super) const EXFAT_ROOT_INO: Ino = 1;
impl ExfatFS {
    /// Opens an exFAT file system on `block_device`.
    ///
    /// Reads and validates the boot sector, builds the in-memory superblock,
    /// then loads the root inode, the upcase table and the allocation bitmap
    /// from the root directory.
    pub fn open(
        block_device: Arc<dyn BlockDevice>,
        mount_option: ExfatMountOptions,
    ) -> Result<Arc<Self>> {
        // Load the super_block
        let super_block = Self::read_super_block(block_device.as_ref())?;
        let fs_size = super_block.num_clusters as usize * super_block.cluster_size as usize;
        let exfat_fs = Arc::new_cyclic(|weak_self| ExfatFS {
            block_device,
            super_block,
            bitmap: Arc::new(Mutex::new(ExfatBitmap::default())),
            upcase_table: Arc::new(SpinLock::new(ExfatUpcaseTable::empty())),
            mount_option,
            highest_inode_number: AtomicU64::new(EXFAT_ROOT_INO + 1),
            inodes: RwMutex::new(HashMap::new()),
            fat_cache: RwLock::new(LruCache::<ClusterID, ClusterID>::new(
                NonZeroUsize::new(FAT_LRU_CACHE_SIZE).unwrap(),
            )),
            meta_cache: PageCache::with_capacity(fs_size, weak_self.clone() as _).unwrap(),
            mutex: Mutex::new(()),
        });
        // TODO: if the main superblock is corrupted, should we load the backup?
        // Verify boot region
        Self::verify_boot_region(exfat_fs.block_device())?;
        let weak_fs = Arc::downgrade(&exfat_fs);
        let root_chain = ExfatChain::new(
            weak_fs.clone(),
            super_block.root_dir,
            None,
            FatChainFlags::ALLOC_POSSIBLE,
        )?;
        let root = ExfatInode::build_root_inode(weak_fs.clone(), root_chain.clone())?;
        let upcase_table = ExfatUpcaseTable::load(
            weak_fs.clone(),
            root.page_cache().unwrap(),
            root_chain.clone(),
        )?;
        let bitmap = ExfatBitmap::load(
            weak_fs.clone(),
            root.page_cache().unwrap(),
            root_chain.clone(),
        )?;
        *exfat_fs.bitmap.lock() = bitmap;
        *exfat_fs.upcase_table.lock() = upcase_table;
        // TODO: Handle UTF-8
        // TODO: Init NLS Table
        exfat_fs.inodes.write().insert(root.hash_index(), root);
        Ok(exfat_fs)
    }

    /// Allocates a fresh, unique inode number.
    pub(super) fn alloc_inode_number(&self) -> Ino {
        self.highest_inode_number
            .fetch_add(1, core::sync::atomic::Ordering::SeqCst)
    }

    /// Looks up an already-opened inode by its hash index.
    pub(super) fn find_opened_inode(&self, hash: usize) -> Option<Arc<ExfatInode>> {
        self.inodes.read().get(&hash).cloned()
    }

    /// Drops an inode from the opened-inode table (no-op if absent).
    pub(super) fn remove_inode(&self, hash: usize) {
        let _ = self.inodes.write().remove(&hash);
    }

    /// Evicts an inode: reclaims its space if it was deleted, otherwise syncs
    /// it to disk, then removes it from the opened-inode table.
    pub(super) fn evict_inode(&self, hash: usize) -> Result<()> {
        if let Some(inode) = self.inodes.read().get(&hash).cloned() {
            if inode.is_deleted() {
                inode.reclaim_space()?;
            } else {
                inode.sync()?;
            }
        }
        self.inodes.write().remove(&hash);
        Ok(())
    }

    /// Registers an inode; returns the previous inode with the same hash, if any.
    pub(super) fn insert_inode(&self, inode: Arc<ExfatInode>) -> Option<Arc<ExfatInode>> {
        self.inodes.write().insert(inode.hash_index(), inode)
    }

    /// Flushes the given byte range of cached metadata to the device.
    pub(super) fn sync_meta_at(&self, range: core::ops::Range<usize>) -> Result<()> {
        self.meta_cache.pages().decommit(range)?;
        Ok(())
    }

    /// Writes `buf` into the metadata cache at `offset` (not synced).
    pub(super) fn write_meta_at(&self, offset: usize, buf: &[u8]) -> Result<()> {
        self.meta_cache.pages().write_bytes(offset, buf)?;
        Ok(())
    }

    /// Reads `buf.len()` bytes of metadata at `offset` through the cache.
    pub(super) fn read_meta_at(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
        self.meta_cache.pages().read_bytes(offset, buf)?;
        Ok(())
    }

    /// Reads the FAT entry for `cluster`, consulting the LRU cache first.
    pub(super) fn read_next_fat(&self, cluster: ClusterID) -> Result<FatValue> {
        {
            // Fast path: cache hit (LruCache::get needs a write guard).
            let mut cache_inner = self.fat_cache.write();
            let cache = cache_inner.get(&cluster);
            if let Some(&value) = cache {
                return Ok(FatValue::from(value));
            }
        }
        let sb: ExfatSuperBlock = self.super_block();
        let sector_size = sb.sector_size;
        if !self.is_valid_cluster(cluster) {
            return_errno_with_message!(Errno::EIO, "invalid access to FAT")
        }
        let position =
            sb.fat1_start_sector * sector_size as u64 + (cluster as u64) * FAT_ENTRY_SIZE as u64;
        let mut buf: [u8; FAT_ENTRY_SIZE] = [0; FAT_ENTRY_SIZE];
        self.read_meta_at(position as usize, &mut buf)?;
        let value = u32::from_le_bytes(buf);
        self.fat_cache.write().put(cluster, value);
        Ok(FatValue::from(value))
    }

    /// Writes the FAT entry for `cluster` (to FAT1 and, if present, FAT2),
    /// updating the LRU cache and optionally syncing to the device.
    pub(super) fn write_next_fat(
        &self,
        cluster: ClusterID,
        value: FatValue,
        sync: bool,
    ) -> Result<()> {
        let sb: ExfatSuperBlock = self.super_block();
        let sector_size = sb.sector_size;
        let raw_value: u32 = value.into();
        // We expect the fat table to change less frequently, so we write its
        // content to disk immediately instead of absorbing it.
        let position =
            sb.fat1_start_sector * sector_size as u64 + (cluster as u64) * FAT_ENTRY_SIZE as u64;
        self.write_meta_at(position as usize, &raw_value.to_le_bytes())?;
        if sync {
            self.sync_meta_at(position as usize..position as usize + FAT_ENTRY_SIZE)?;
        }
        // Mirror the write to FAT2 when it is a distinct table.
        if sb.fat1_start_sector != sb.fat2_start_sector {
            let mirror_position = sb.fat2_start_sector * sector_size as u64
                + (cluster as u64) * FAT_ENTRY_SIZE as u64;
            self.write_meta_at(mirror_position as usize, &raw_value.to_le_bytes())?;
            if sync {
                self.sync_meta_at(
                    mirror_position as usize..mirror_position as usize + FAT_ENTRY_SIZE,
                )?;
            }
        }
        self.fat_cache.write().put(cluster, raw_value);
        Ok(())
    }

    fn verify_boot_region(block_device: &dyn BlockDevice) -> Result<()> {
        // TODO: Check boot signature and boot checksum.
        Ok(())
    }

    /// Reads the boot sector, validates it, and converts it into the
    /// in-memory superblock.
    fn read_super_block(block_device: &dyn BlockDevice) -> Result<ExfatSuperBlock> {
        let boot_sector = block_device.read_val::<ExfatBootSector>(0)?;
        /* Check the validity of BOOT */
        if boot_sector.signature != BOOT_SIGNATURE {
            return_errno_with_message!(Errno::EINVAL, "invalid boot record signature");
        }
        if !boot_sector.fs_name.eq(STR_EXFAT.as_bytes()) {
            return_errno_with_message!(Errno::EINVAL, "invalid fs name");
        }
        /*
         * must_be_zero field must be filled with zero to prevent mounting
         * from FAT volume.
         */
        if boot_sector.must_be_zero.iter().any(|&x| x != 0) {
            return_errno_with_message!(
                Errno::EINVAL,
                "must_be_zero field must be filled with zero"
            );
        }
        if boot_sector.num_fats != 1 && boot_sector.num_fats != 2 {
            return_errno_with_message!(Errno::EINVAL, "bogus number of FAT structure");
        }
        // sect_size_bits could be at least 9 and at most 12.
        if boot_sector.sector_size_bits < EXFAT_MIN_SECT_SIZE_BITS
            || boot_sector.sector_size_bits > EXFAT_MAX_SECT_SIZE_BITS
        {
            return_errno_with_message!(Errno::EINVAL, "bogus sector size bits");
        }
        if boot_sector.sector_per_cluster_bits + boot_sector.sector_size_bits > 25 {
            return_errno_with_message!(Errno::EINVAL, "bogus sector size bits per cluster");
        }
        let super_block = ExfatSuperBlock::try_from(boot_sector)?;
        /* Check consistencies */
        // The FAT region must be large enough to hold one 4-byte entry per cluster.
        if ((super_block.num_fat_sectors as u64) << boot_sector.sector_size_bits)
            < (super_block.num_clusters as u64) * 4
        {
            return_errno_with_message!(Errno::EINVAL, "bogus fat length");
        }
        if super_block.data_start_sector
            < super_block.fat1_start_sector
                + (super_block.num_fat_sectors as u64 * boot_sector.num_fats as u64)
        {
            return_errno_with_message!(Errno::EINVAL, "bogus data start vector");
        }
        if (super_block.vol_flags & VOLUME_DIRTY as u32) != 0 {
            warn!("Volume was not properly unmounted. Some data may be corrupt. Please run fsck.")
        }
        if (super_block.vol_flags & MEDIA_FAILURE as u32) != 0 {
            warn!("Medium has reported failures. Some data may be lost.")
        }
        Self::calibrate_blocksize(&super_block, 1 << boot_sector.sector_size_bits)?;
        Ok(super_block)
    }

    fn calibrate_blocksize(super_block: &ExfatSuperBlock, logical_sec: u32) -> Result<()> {
        // TODO: logical_sect should be larger than block_size.
        Ok(())
    }

    pub(super) fn block_device(&self) -> &dyn BlockDevice {
        self.block_device.as_ref()
    }

    pub(super) fn super_block(&self) -> ExfatSuperBlock {
        self.super_block
    }

    pub(super) fn bitmap(&self) -> Arc<Mutex<ExfatBitmap>> {
        self.bitmap.clone()
    }

    pub(super) fn upcase_table(&self) -> Arc<SpinLock<ExfatUpcaseTable>> {
        self.upcase_table.clone()
    }

    pub(super) fn root_inode(&self) -> Arc<ExfatInode> {
        self.inodes.read().get(&ROOT_INODE_HASH).unwrap().clone()
    }

    pub(super) fn sector_size(&self) -> usize {
        self.super_block.sector_size as usize
    }

    /// Total size of the volume in bytes (clusters * cluster size).
    pub(super) fn fs_size(&self) -> usize {
        self.super_block.cluster_size as usize * self.super_block.num_clusters as usize
    }

    /// Acquires the global file-system lock (see the `mutex` field).
    pub(super) fn lock(&self) -> MutexGuard<'_, ()> {
        self.mutex.lock()
    }

    pub(super) fn cluster_size(&self) -> usize {
        self.super_block.cluster_size as usize
    }

    pub(super) fn num_free_clusters(&self) -> u32 {
        self.bitmap.lock().num_free_clusters()
    }

    /// Byte offset on the device of the first byte of `cluster`.
    pub(super) fn cluster_to_off(&self, cluster: u32) -> usize {
        (((((cluster - EXFAT_RESERVED_CLUSTERS) as u64) << self.super_block.sect_per_cluster_bits)
            + self.super_block.data_start_sector)
            * self.super_block.sector_size as u64) as usize
    }

    /// Whether `cluster` is a valid cluster-heap ID.
    ///
    /// Valid IDs lie in `EXFAT_RESERVED_CLUSTERS..num_clusters` (exclusive
    /// upper bound): `num_clusters` includes the reserved IDs, so the largest
    /// addressable cluster is `num_clusters - 1`. BUGFIX: the previous `<=`
    /// comparison also accepted the out-of-range ID `num_clusters`, which is
    /// one past the last FAT entry; the exclusive bound is consistent with
    /// `is_cluster_range_valid` below.
    pub(super) fn is_valid_cluster(&self, cluster: u32) -> bool {
        cluster >= EXFAT_RESERVED_CLUSTERS && cluster < self.super_block.num_clusters
    }

    /// Whether the half-open range `clusters` lies entirely in the cluster heap.
    pub(super) fn is_cluster_range_valid(&self, clusters: Range<ClusterID>) -> bool {
        clusters.start >= EXFAT_RESERVED_CLUSTERS && clusters.end <= self.super_block.num_clusters
    }

    pub(super) fn set_volume_dirty(&mut self) {
        todo!();
    }

    pub fn mount_option(&self) -> ExfatMountOptions {
        self.mount_option.clone()
    }
}
// Backs the metadata page cache directly with the block device,
// one page per block id.
impl PageCacheBackend for ExfatFS {
    fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<()> {
        // NOTE(review): for the last page, `idx * PAGE_SIZE == fs_size()`
        // passes this check even though the page lies beyond the volume;
        // should this be `fs_size() < (idx + 1) * PAGE_SIZE`? Confirm.
        if self.fs_size() < idx * PAGE_SIZE {
            return_errno_with_message!(Errno::EINVAL, "invalid read size")
        }
        self.block_device
            .read_block_sync(BlockId::new(idx as u64), frame)?;
        Ok(())
    }
    // What if block_size is not equal to page size?
    fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<()> {
        // NOTE(review): same boundary concern as read_page above.
        if self.fs_size() < idx * PAGE_SIZE {
            return_errno_with_message!(Errno::EINVAL, "invalid write size")
        }
        self.block_device
            .write_block_sync(BlockId::new(idx as u64), frame)?;
        Ok(())
    }
    // Number of whole pages covering the volume (truncating division).
    fn npages(&self) -> usize {
        self.fs_size() / PAGE_SIZE
    }
}
impl FileSystem for ExfatFS {
    // Flushes every opened inode, then the metadata cache.
    // NOTE(review): inode.sync() is called while holding the inodes read
    // lock; confirm sync never needs the write lock, or this deadlocks.
    fn sync(&self) -> Result<()> {
        for inode in self.inodes.read().values() {
            inode.sync()?;
        }
        self.meta_cache.evict_range(0..self.fs_size())?;
        Ok(())
    }
    fn root_inode(&self) -> Arc<dyn Inode> {
        self.root_inode()
    }
    fn sb(&self) -> SuperBlock {
        // The boot signature doubles as the magic number reported to VFS.
        SuperBlock::new(BOOT_SIGNATURE as u64, self.sector_size(), MAX_NAME_LENGTH)
    }
    fn flags(&self) -> FsFlags {
        FsFlags::DENTRY_UNEVICTABLE
    }
}
/// How the file system reacts to on-disk errors.
#[derive(Clone, Debug, Default)]
// Error handling
pub enum ExfatErrorMode {
    #[default]
    Continue,
    Panic,
    ReadOnly,
}
/// Mount options (modelled after the Linux exFAT driver's options).
#[derive(Clone, Debug, Default)]
// Mount options
pub struct ExfatMountOptions {
    // Owner uid/gid applied to all files.
    pub(super) fs_uid: usize,
    pub(super) fs_gid: usize,
    // Permission masks for files and directories.
    pub(super) fs_fmask: u16,
    pub(super) fs_dmask: u16,
    pub(super) allow_utime: u16,
    pub(super) iocharset: String,
    pub(super) errors: ExfatErrorMode,
    pub(super) utf8: bool,
    pub(super) sys_tz: bool,
    pub(super) discard: bool,
    pub(super) keep_last_dots: bool,
    pub(super) time_offset: i32,
    pub(super) zero_size_dir: bool,
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,118 @@
// SPDX-License-Identifier: MPL-2.0
use pod::Pod;
use super::constants::{EXFAT_FIRST_CLUSTER, EXFAT_RESERVED_CLUSTERS, MEDIA_FAILURE, VOLUME_DIRTY};
use crate::prelude::*;
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Default)]
// The in-memory superblock info, derived from the on-disk boot sector.
pub struct ExfatSuperBlock {
    /// num of sectors in volume
    pub num_sectors: u64,
    /// num of clusters in volume (including the reserved cluster IDs)
    pub num_clusters: u32,
    /// sector size in bytes
    pub sector_size: u32,
    /// cluster size in bytes
    pub cluster_size: u32,
    pub cluster_size_bits: u32,
    /// cluster size in sectors
    pub sect_per_cluster: u32,
    pub sect_per_cluster_bits: u32,
    /// FAT1 start sector
    pub fat1_start_sector: u64,
    /// FAT2 start sector (equals FAT1 when there is only one FAT)
    pub fat2_start_sector: u64,
    /// data area start sector
    pub data_start_sector: u64,
    /// number of FAT sectors
    pub num_fat_sectors: u32,
    /// root dir cluster
    pub root_dir: u32,
    /// number of dentries per cluster
    pub dentries_per_clu: u32,
    /// volume flags
    pub vol_flags: u32,
    /// volume flags to retain
    pub vol_flags_persistent: u32,
    /// cluster search pointer
    pub cluster_search_ptr: u32,
    /// number of used clusters
    pub used_clusters: u32,
}
// An exFAT dentry is 32 bytes, i.e. 2^5.
const DENTRY_SIZE_BITS: u32 = 5;
impl TryFrom<ExfatBootSector> for ExfatSuperBlock {
    type Error = crate::error::Error;

    /// Derives the in-memory superblock from the validated on-disk boot sector.
    fn try_from(sector: ExfatBootSector) -> Result<ExfatSuperBlock> {
        // Sentinel meaning "used-cluster count not yet computed".
        const EXFAT_CLUSTERS_UNTRACKED: u32 = !0;
        let mut block = ExfatSuperBlock {
            sect_per_cluster_bits: sector.sector_per_cluster_bits as u32,
            sect_per_cluster: 1 << sector.sector_per_cluster_bits as u32,
            cluster_size_bits: (sector.sector_per_cluster_bits + sector.sector_size_bits) as u32,
            cluster_size: 1 << (sector.sector_per_cluster_bits + sector.sector_size_bits) as u32,
            sector_size: 1 << sector.sector_size_bits,
            num_fat_sectors: sector.fat_length,
            fat1_start_sector: sector.fat_offset as u64,
            fat2_start_sector: sector.fat_offset as u64,
            data_start_sector: sector.cluster_offset as u64,
            num_sectors: sector.vol_length,
            // num_clusters counts the reserved IDs (0 and 1) as well.
            num_clusters: sector.cluster_count + EXFAT_RESERVED_CLUSTERS,
            root_dir: sector.root_cluster,
            vol_flags: sector.vol_flags as u32,
            vol_flags_persistent: (sector.vol_flags & (VOLUME_DIRTY | MEDIA_FAILURE)) as u32,
            cluster_search_ptr: EXFAT_FIRST_CLUSTER,
            used_clusters: EXFAT_CLUSTERS_UNTRACKED,
            dentries_per_clu: 1
                << ((sector.sector_per_cluster_bits + sector.sector_size_bits) as u32
                    - DENTRY_SIZE_BITS),
        };
        // BUGFIX: FAT2 exists only when the boot sector reports two FATs.
        // The previous check compared the FAT length in sectors
        // (`num_fat_sectors`) against 2, so FAT2's start sector was almost
        // never offset past FAT1.
        if sector.num_fats == 2 {
            block.fat2_start_sector += block.num_fat_sectors as u64;
        }
        Ok(block)
    }
}
pub const BOOTSEC_JUMP_BOOT_LEN: usize = 3;
pub const BOOTSEC_FS_NAME_LEN: usize = 8;
pub const BOOTSEC_OLDBPB_LEN: usize = 53;
// EXFAT: Main and Backup Boot Sector (512 bytes), as laid out on disk.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Pod)]
pub(super) struct ExfatBootSector {
    pub jmp_boot: [u8; BOOTSEC_JUMP_BOOT_LEN],
    // Must equal "EXFAT   ".
    pub fs_name: [u8; BOOTSEC_FS_NAME_LEN],
    // Zeroed region where FAT12/16/32 would put the BPB; must be all zero.
    pub must_be_zero: [u8; BOOTSEC_OLDBPB_LEN],
    pub partition_offset: u64,
    pub vol_length: u64,
    pub fat_offset: u32,
    pub fat_length: u32,
    pub cluster_offset: u32,
    pub cluster_count: u32,
    pub root_cluster: u32,
    pub vol_serial: u32,
    pub fs_revision: [u8; 2],
    pub vol_flags: u16,
    // Sizes are stored as log2 (bit counts), not byte counts.
    pub sector_size_bits: u8,
    pub sector_per_cluster_bits: u8,
    pub num_fats: u8,
    pub drv_sel: u8,
    pub percent_in_use: u8,
    pub reserved: [u8; 7],
    pub boot_code: [u8; 390],
    pub signature: u16,
}

View File

@ -0,0 +1,100 @@
// SPDX-License-Identifier: MPL-2.0
use align_ext::AlignExt;
use aster_rights::Full;
use super::{
constants::UNICODE_SIZE,
dentry::{ExfatDentry, ExfatDentryIterator, ExfatUpcaseDentry, UTF16Char},
fat::ExfatChain,
fs::ExfatFS,
utils::calc_checksum_32,
};
use crate::{fs::exfat::fat::FatChainFlags, prelude::*, vm::vmo::Vmo};
// Only the first 128 mappings (the ASCII range) are kept in memory.
const UPCASE_MANDATORY_SIZE: usize = 128;
/// Case-folding table used for case-insensitive file-name comparison.
#[derive(Debug)]
pub(super) struct ExfatUpcaseTable {
    // upcase_table[c] is the upper-case code unit for code unit c (c < 128).
    upcase_table: [u16; UPCASE_MANDATORY_SIZE],
    fs: Weak<ExfatFS>,
}
impl ExfatUpcaseTable {
    /// An all-zero placeholder table, replaced once `load` succeeds.
    pub(super) fn empty() -> Self {
        Self {
            upcase_table: [0; UPCASE_MANDATORY_SIZE],
            fs: Weak::default(),
        }
    }

    /// Scans the root directory for the upcase-table dentry and loads the
    /// table it points at.
    pub(super) fn load(
        fs_weak: Weak<ExfatFS>,
        root_page_cache: Vmo<Full>,
        root_chain: ExfatChain,
    ) -> Result<Self> {
        let dentry_iterator = ExfatDentryIterator::new(root_page_cache, 0, None)?;
        for dentry_result in dentry_iterator {
            let dentry = dentry_result?;
            if let ExfatDentry::Upcase(upcase_dentry) = dentry {
                return Self::load_table_from_dentry(fs_weak, &upcase_dentry);
            }
        }
        return_errno_with_message!(Errno::EINVAL, "Upcase table not found")
    }

    /// Reads and checksums the on-disk table, keeping the first 128 mappings.
    fn load_table_from_dentry(fs_weak: Weak<ExfatFS>, dentry: &ExfatUpcaseDentry) -> Result<Self> {
        if (dentry.size as usize) < UPCASE_MANDATORY_SIZE * UNICODE_SIZE {
            return_errno_with_message!(Errno::EINVAL, "Upcase table too small")
        }
        let fs = fs_weak.upgrade().unwrap();
        let num_clusters = (dentry.size as usize).align_up(fs.cluster_size()) / fs.cluster_size();
        let chain = ExfatChain::new(
            fs_weak.clone(),
            dentry.start_cluster,
            Some(num_clusters as u32),
            FatChainFlags::ALLOC_POSSIBLE,
        )?;
        // NOTE(review): this reads `size` bytes contiguously from the first
        // cluster; if the table's chain is fragmented (FAT chain in use) the
        // tail of this read would be wrong — confirm the table is always
        // stored contiguously.
        let mut buf = vec![0; dentry.size as usize];
        fs.read_meta_at(chain.physical_cluster_start_offset(), &mut buf)?;
        // The dentry checksum covers the entire table data.
        if dentry.checksum != calc_checksum_32(&buf) {
            return_errno_with_message!(Errno::EINVAL, "invalid checksum")
        }
        let mut res = ExfatUpcaseTable {
            upcase_table: [0; UPCASE_MANDATORY_SIZE],
            fs: fs_weak,
        };
        // On-disk entries are 16-bit little-endian code units.
        for i in 0..UPCASE_MANDATORY_SIZE {
            res.upcase_table[i] = u16::from_le_bytes([buf[2 * i], buf[2 * i + 1]]);
        }
        Ok(res)
    }

    /// Upper-cases a UTF-8 string.
    pub(super) fn str_to_upcase(&self, value: &str) -> Result<String> {
        // TODO: use upcase table
        Ok(value.to_uppercase())
    }

    /// Upper-cases a slice of UTF-16 code units in place.
    pub(super) fn slice_to_upcase(&self, buf: &mut [UTF16Char]) -> Result<()> {
        for value in buf {
            *value = self.char_to_upcase(*value)?;
        }
        Ok(())
    }

    /// Upper-cases one code unit; code units beyond the in-memory table
    /// (>= 128) are returned unchanged.
    pub(super) fn char_to_upcase(&self, value: UTF16Char) -> Result<UTF16Char> {
        if (value as usize) < UPCASE_MANDATORY_SIZE {
            Ok(self.upcase_table[value as usize])
        } else {
            Ok(value)
        }
    }
}

View File

@ -0,0 +1,175 @@
// SPDX-License-Identifier: MPL-2.0
use core::{ops::Range, time::Duration};
use time::{OffsetDateTime, PrimitiveDateTime, Time};
use super::fat::ClusterID;
use crate::prelude::*;
/// Packs a (cluster, offset) pair into a single hash key:
/// cluster in the high 32 bits, offset in the low 32 bits.
pub fn make_hash_index(cluster: ClusterID, offset: u32) -> usize {
    let high = (cluster as usize) << 32;
    let low = (offset as usize) & 0xffff_ffff;
    high | low
}
/// 32-bit exFAT checksum: rotate the accumulator right by one bit, then add
/// the next byte (with wrapping arithmetic).
pub fn calc_checksum_32(data: &[u8]) -> u32 {
    data.iter().fold(0u32, |checksum, &byte| {
        checksum.rotate_right(1).wrapping_add(byte as u32)
    })
}
/// 16-bit exFAT checksum seeded with `prev_checksum`, skipping the byte
/// positions inside `ignore` (used to exclude the checksum field itself).
pub fn calc_checksum_16(data: &[u8], ignore: core::ops::Range<usize>, prev_checksum: u16) -> u16 {
    data.iter()
        .enumerate()
        .filter(|(pos, _)| !ignore.contains(pos))
        .fold(prev_checksum, |checksum, (_, &byte)| {
            checksum.rotate_right(1).wrapping_add(byte as u16)
        })
}
/// Extracts the bit field `range` (half-open, LSB-first) from `value`.
///
/// The mask is computed in `u32` so that a full-width field (e.g. `0..16`)
/// does not overflow: the original `u16` expression `(1 << 16) - 1` would
/// panic in debug builds. Behavior for all narrower ranges is unchanged.
pub fn get_value_from_range(value: u16, range: Range<usize>) -> u16 {
let width = range.end - range.start;
let mask = ((1u32 << width) - 1) as u16;
(value >> range.start) & mask
}
// Bit layout of the 16-bit DOS `time` field (seconds stored at 2 s precision).
const DOUBLE_SECOND_RANGE: Range<usize> = 0..5;
const MINUTE_RANGE: Range<usize> = 5..11;
const HOUR_RANGE: Range<usize> = 11..16;
// Bit layout of the 16-bit DOS `date` field (year counted from 1980).
const DAY_RANGE: Range<usize> = 0..5;
const MONTH_RANGE: Range<usize> = 5..9;
const YEAR_RANGE: Range<usize> = 9..16;
// Set in `utc_offset` when the stored offset-from-UTC is valid.
const EXFAT_TIME_ZONE_VALID: u8 = 1 << 7;
/// An exFAT/DOS on-disk timestamp: packed date and time words, a 10 ms
/// refinement, and a UTC-offset byte.
#[derive(Default, Debug, Clone, Copy)]
pub struct DosTimestamp {
// Timestamp at the precision of double seconds.
pub(super) time: u16,
pub(super) date: u16,
// Finer-grained part of the time, in units of 10 ms.
pub(super) increament_10ms: u8,
// Offset from UTC in 15-minute units; meaningful only when the
// `EXFAT_TIME_ZONE_VALID` bit is set.
pub(super) utc_offset: u8,
}
impl DosTimestamp {
/// Returns the current wall-clock time as a DOS timestamp.
///
/// Under `ktest` the time subsystem is not initialized yet, so a fixed
/// epoch value (a zero duration) is returned instead.
pub fn now() -> Result<Self> {
#[cfg(not(ktest))]
{
use crate::time::now_as_duration;
DosTimestamp::from_duration(now_as_duration(&crate::time::ClockID::CLOCK_REALTIME)?)
}
// When ktesting, the time module has not been initialized yet, return a fake value instead.
#[cfg(ktest)]
{
use crate::time::SystemTime;
return DosTimestamp::from_duration(
SystemTime::UNIX_EPOCH.duration_since(&SystemTime::UNIX_EPOCH)?,
);
}
}
/// Packs the raw on-disk fields into a `DosTimestamp`.
///
/// No validation is performed on the field values.
pub fn new(time: u16, date: u16, increament_10ms: u8, utc_offset: u8) -> Result<Self> {
    Ok(Self {
        time,
        date,
        increament_10ms,
        utc_offset,
    })
}
/// Converts a `Duration` since the Unix epoch into a packed DOS timestamp.
///
/// Seconds are stored at 2 s precision in `time`; the dropped parity
/// second and the sub-second remainder are folded into `increament_10ms`
/// (units of 10 ms). The UTC offset is recorded as 0 (i.e. not valid).
///
/// # Errors
/// Returns `EINVAL` if the duration cannot be represented as a date-time.
pub fn from_duration(duration: Duration) -> Result<Self> {
// FIXME: UTC offset information is missing.
let date_time_result =
OffsetDateTime::from_unix_timestamp_nanos(duration.as_nanos() as i128);
if date_time_result.is_err() {
return_errno_with_message!(Errno::EINVAL, "failed to parse date time.")
}
let date_time = date_time_result.unwrap();
// Seconds are halved: only the double-second count fits in 5 bits.
let time = ((date_time.hour() as u16) << HOUR_RANGE.start)
| ((date_time.minute() as u16) << MINUTE_RANGE.start)
| ((date_time.second() as u16) >> 1);
// The year field counts from 1980.
let date = (((date_time.year() - 1980) as u16) << YEAR_RANGE.start)
| ((date_time.month() as u16) << MONTH_RANGE.start)
| ((date_time.day() as u16) << DAY_RANGE.start);
const NSEC_PER_10MSEC: u32 = 10000000;
// The parity second (0 or 1) plus the sub-second part, in 10 ms units.
let increament_10ms =
(date_time.second() as u32 % 2 * 100 + date_time.nanosecond() / NSEC_PER_10MSEC) as u8;
Ok(Self {
time,
date,
increament_10ms,
utc_offset: 0,
})
}
/// Converts the packed timestamp back into a `Duration` since the Unix
/// epoch, re-applying the 10 ms refinement and — when the valid bit is
/// set — the stored UTC offset.
///
/// # Errors
/// Returns `EINVAL` if the stored month, day, or time fields are out of
/// range.
pub fn as_duration(&self) -> Result<Duration> {
let year = 1980 + get_value_from_range(self.date, YEAR_RANGE) as u32;
let month_result =
time::Month::try_from(get_value_from_range(self.date, MONTH_RANGE) as u8);
if month_result.is_err() {
return_errno_with_message!(Errno::EINVAL, "invalid month")
}
let month = month_result.unwrap();
let day = get_value_from_range(self.date, DAY_RANGE);
let hour = get_value_from_range(self.time, HOUR_RANGE);
let minute = get_value_from_range(self.time, MINUTE_RANGE);
// Seconds are stored at 2 s precision.
let second = get_value_from_range(self.time, DOUBLE_SECOND_RANGE) * 2;
let day_result = time::Date::from_calendar_date(year as i32, month, day as u8);
if day_result.is_err() {
return_errno_with_message!(Errno::EINVAL, "invalid day")
}
let time_result = Time::from_hms(hour as u8, minute as u8, second as u8);
if time_result.is_err() {
return_errno_with_message!(Errno::EINVAL, "invalid time")
}
let date_time = PrimitiveDateTime::new(day_result.unwrap(), time_result.unwrap());
let mut sec = date_time.assume_utc().unix_timestamp() as u64;
let mut nano_sec: u32 = 0;
// Fold the 10 ms refinement back into whole seconds plus nanoseconds.
if self.increament_10ms != 0 {
const NSEC_PER_MSEC: u32 = 1000000;
sec += self.increament_10ms as u64 / 100;
nano_sec = (self.increament_10ms as u32 % 100) * 10 * NSEC_PER_MSEC;
}
/* Adjust timezone to UTC0. */
if (self.utc_offset & EXFAT_TIME_ZONE_VALID) != 0u8 {
sec = Self::ajust_time_zone(sec, self.utc_offset & (!EXFAT_TIME_ZONE_VALID));
} else {
// TODO: Use mount info for timezone adjustment.
}
Ok(Duration::new(sec, nano_sec))
}
/// Converts a local-time `sec` value to UTC using the 7-bit time-zone
/// offset (15-minute units, two's-complement encoding; the valid bit has
/// already been masked off by the caller).
///
/// Values `0x00..=0x3F` encode a positive offset from UTC (east), so the
/// offset must be *subtracted* to reach UTC; larger values encode a
/// negative offset (west) of magnitude `0x80 - time_zone`, which is added.
/// The original code added in both branches, which shifted east-of-UTC
/// timestamps the wrong way; this mirrors `exfat_adjust_tz` in the Linux
/// exFAT driver.
fn ajust_time_zone(sec: u64, time_zone: u8) -> u64 {
if time_zone <= 0x3F {
// East of UTC: local = UTC + offset, so subtract the offset.
// `saturating_sub` guards against timestamps before the epoch.
sec.saturating_sub(Self::time_zone_sec(time_zone))
} else {
// West of UTC (two's-complement encoding): add the magnitude.
sec + Self::time_zone_sec(0x80_u8 - time_zone)
}
}
/// Converts a time-zone offset expressed in 15-minute units to seconds.
fn time_zone_sec(x: u8) -> u64 {
// Each time zone unit represents 15 minutes.
x as u64 * 15 * 60
}
}

View File

@ -1,8 +1,8 @@
// SPDX-License-Identifier: MPL-2.0
pub mod device;
pub mod devpts;
pub mod epoll;
pub mod exfat;
pub mod ext2;
pub mod file_handle;
pub mod file_table;
@ -14,31 +14,52 @@ pub mod ramfs;
pub mod rootfs;
pub mod utils;
use aster_virtio::device::block::{
device::BlockDevice as VirtIoBlockDevice, DEVICE_NAME as VIRTIO_BLOCK_NAME,
};
use aster_block::BlockDevice;
use aster_virtio::device::block::device::BlockDevice as VirtIoBlockDevice;
use crate::{
fs::{ext2::Ext2, fs_resolver::FsPath},
fs::{
exfat::{ExfatFS, ExfatMountOptions},
ext2::Ext2,
fs_resolver::FsPath,
},
prelude::*,
thread::kernel_thread::KernelThreadExt,
};
pub fn lazy_init() {
let block_device = aster_block::get_device(VIRTIO_BLOCK_NAME).unwrap();
let cloned_block_device = block_device.clone();
let task_fn = move || {
info!("spawn the virt-io-block thread");
let virtio_block_device = block_device.downcast_ref::<VirtIoBlockDevice>().unwrap();
loop {
virtio_block_device.handle_requests();
}
};
crate::Thread::spawn_kernel_thread(crate::ThreadOptions::new(task_fn));
let ext2_fs = Ext2::open(cloned_block_device).unwrap();
let target_path = FsPath::try_from("/ext2").unwrap();
println!("[kernel] Mount Ext2 fs at {:?} ", target_path);
self::rootfs::mount_fs_at(ext2_fs, &target_path).unwrap();
/// Looks up the named block device, spawns a kernel thread that serves its
/// virtio requests forever, and returns the device handle.
///
/// # Errors
/// Returns `ENOENT` if no device with `device_name` is registered.
fn start_block_device(device_name: &str) -> Result<Arc<dyn BlockDevice>> {
if let Some(device) = aster_block::get_device(device_name) {
let cloned_device = device.clone();
let task_fn = move || {
info!("spawn the virt-io-block thread");
let virtio_block_device = cloned_device.downcast_ref::<VirtIoBlockDevice>().unwrap();
// The request-handling loop never exits; the thread lives for the
// lifetime of the kernel.
loop {
virtio_block_device.handle_requests();
}
};
crate::Thread::spawn_kernel_thread(crate::ThreadOptions::new(task_fn));
Ok(device)
} else {
return_errno_with_message!(Errno::ENOENT, "Device does not exist")
}
}
/// Mounts the Ext2 and exFAT file systems from their virtio block devices.
///
/// A missing device is skipped silently; a present device that fails to
/// mount panics via `unwrap`.
pub fn lazy_init() {
// The device names come from the qemu drive configuration
// (serial=<device_name>); NOTE(review): the exact qemu flag form should be
// verified against the qemu args in the build config.
let ext2_device_name = "vext2";
let exfat_device_name = "vexfat";
if let Ok(block_device_ext2) = start_block_device(ext2_device_name) {
let ext2_fs = Ext2::open(block_device_ext2).unwrap();
let target_path = FsPath::try_from("/ext2").unwrap();
println!("[kernel] Mount Ext2 fs at {:?} ", target_path);
self::rootfs::mount_fs_at(ext2_fs, &target_path).unwrap();
}
if let Ok(block_device_exfat) = start_block_device(exfat_device_name) {
let exfat_fs = ExfatFS::open(block_device_exfat, ExfatMountOptions::default()).unwrap();
let target_path = FsPath::try_from("/exfat").unwrap();
println!("[kernel] Mount ExFat fs at {:?} ", target_path);
self::rootfs::mount_fs_at(exfat_fs, &target_path).unwrap();
}
}

View File

@ -14,6 +14,7 @@ pub use inode::{Inode, InodeMode, InodeType, Metadata};
pub use ioctl::IoctlCmd;
pub use mount::MountNode;
pub use page_cache::{PageCache, PageCacheBackend};
pub use random_test::{generate_random_operation, new_fs_in_memory};
pub use status_flags::StatusFlags;
mod access_mode;
@ -28,6 +29,7 @@ mod inode;
mod ioctl;
mod mount;
mod page_cache;
mod random_test;
mod status_flags;
use crate::prelude::*;

View File

@ -0,0 +1,540 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::sync::Arc;
use hashbrown::HashMap;
use rand::{Rng, RngCore};
use super::{Inode, InodeMode, InodeType};
use crate::prelude::*;
/// The in-memory shadow of a regular file: the expected contents plus the
/// length of the prefix that has actually been written.
pub struct FileInMemory {
pub name: String,
pub inode: Arc<dyn Inode>,
// Length of the prefix of `contents` that reads are checked against;
// bytes beyond it (created by `resize`) are not compared.
pub valid_len: usize,
pub contents: Vec<u8>,
}
/// The in-memory shadow of a directory, mirroring what the on-disk
/// directory is expected to contain.
pub struct DirInMemory {
// Depth of this directory in the shadow tree (root = 0).
pub depth: u32,
pub name: String,
pub inode: Arc<dyn Inode>,
// Flat list of child names; kept in sync with `sub_dirs`.
pub sub_names: Vec<String>,
// Child entries keyed by name.
pub sub_dirs: HashMap<String, DentryInMemory>,
}
/// A node in the in-memory shadow tree: either a file or a directory.
pub enum DentryInMemory {
File(FileInMemory),
Dir(DirInMemory),
}
/// A single randomized file-system operation to execute and verify.
pub enum Operation {
// (offset, len)
Read(usize, usize),
// (offset, len)
Write(usize, usize),
// new size in bytes
Resize(usize),
// (name, inode type)
Create(String, InodeType),
Lookup(String),
Readdir(),
Unlink(String),
Rmdir(String),
// (old_name, new_name), both within the same parent directory
Rename(String, String),
}
impl Operation {
// Identifiers for directory-level operations, drawn from 0..DIR_OP_NUM.
const CREATE_FILE_ID: usize = 0;
const CREATE_DIR_ID: usize = 1;
const UNLINK_ID: usize = 2;
const RMDIR_ID: usize = 3;
const LOOKUP_ID: usize = 4;
const READDIR_ID: usize = 5;
const RENAME_ID: usize = 6;
const DIR_OP_NUM: usize = 7;
// Identifiers for file-level operations, drawn from 0..FILE_OP_NUM.
const READ_ID: usize = 0;
const WRITE_ID: usize = 1;
const RESIZE_ID: usize = 2;
const FILE_OP_NUM: usize = 3;
// Upper bound on generated file offsets and sizes, in pages.
const MAX_PAGE_PER_FILE: usize = 10;
/// Picks a random operation applicable to the directory `dir`.
///
/// `idx` serves as a fresh, unique name for created or renamed entries.
/// Operations that need an existing child (unlink, rmdir, lookup, rename)
/// fall back to creating a file when the directory is empty.
pub fn generate_random_dir_operation(
dir: &mut DirInMemory,
idx: u32,
rng: &mut dyn RngCore,
) -> Self {
let op_id = rng.gen_range(0..Self::DIR_OP_NUM);
if op_id == Self::CREATE_FILE_ID {
Operation::Create(idx.to_string(), InodeType::File)
} else if op_id == Self::CREATE_DIR_ID {
Operation::Create(idx.to_string(), InodeType::Dir)
} else if op_id == Self::UNLINK_ID && !dir.sub_names.is_empty() {
let rand_idx = rng.gen_range(0..dir.sub_names.len());
let name = dir.sub_names[rand_idx].clone();
Operation::Unlink(name)
} else if op_id == Self::RMDIR_ID && !dir.sub_names.is_empty() {
let rand_idx = rng.gen_range(0..dir.sub_names.len());
let name = dir.sub_names[rand_idx].clone();
Operation::Rmdir(name)
} else if op_id == Self::LOOKUP_ID && !dir.sub_names.is_empty() {
let rand_idx = rng.gen_range(0..dir.sub_names.len());
let name = dir.sub_names[rand_idx].clone();
Operation::Lookup(name)
} else if op_id == Self::READDIR_ID {
Operation::Readdir()
} else if op_id == Self::RENAME_ID && !dir.sub_names.is_empty() {
let rand_old_idx = rng.gen_range(0..dir.sub_names.len());
let old_name = dir.sub_names[rand_old_idx].clone();
// Half the time rename onto an existing name to exercise replacement.
let rename_to_an_exist = rng.gen_bool(0.5);
if rename_to_an_exist {
let rand_new_idx = rng.gen_range(0..dir.sub_names.len());
let new_name = dir.sub_names[rand_new_idx].clone();
Operation::Rename(old_name, new_name)
} else {
Operation::Rename(old_name, idx.to_string())
}
} else {
// Fallback when the chosen op needs a child but the dir is empty.
Operation::Create(idx.to_string(), InodeType::File)
}
}
/// Picks a random operation applicable to the file `file`: a read, a
/// write, or a resize (a resize never shrinks below the current length).
pub fn generate_random_file_operation(
file: &mut FileInMemory,
idx: u32,
rng: &mut dyn RngCore,
) -> Self {
let op_id = rng.gen_range(0..Self::FILE_OP_NUM);
if op_id == Self::READ_ID {
let (offset, len) =
generate_random_offset_len(Self::MAX_PAGE_PER_FILE * PAGE_SIZE, rng);
Operation::Read(offset, len)
} else if op_id == Self::WRITE_ID {
let (offset, len) =
generate_random_offset_len(Self::MAX_PAGE_PER_FILE * PAGE_SIZE, rng);
Operation::Write(offset, len)
} else if op_id == Self::RESIZE_ID {
let pg_num = rng.gen_range(0..Self::MAX_PAGE_PER_FILE);
// Never shrink: take the max with the current content length.
let new_size = (pg_num * PAGE_SIZE).max(file.contents.len());
Operation::Resize(new_size)
} else {
// Unreachable in practice: op_id always matches one branch above.
let valid_len = file.valid_len;
Operation::Read(0, valid_len)
}
}
}
impl DirInMemory {
pub fn remove_sub_names(&mut self, name: &String) {
for idx in 0..self.sub_names.len() {
if self.sub_names[idx].eq(name) {
self.sub_names.remove(idx);
break;
}
}
}
fn test_create(&mut self, name: &String, type_: InodeType) {
info!(
"Create: parent = {:?}, name = {:?}, type = {:?}",
self.name, name, type_
);
let create_result = self.inode.create(name, type_, InodeMode::all());
if self.sub_dirs.contains_key(name) {
assert!(create_result.is_err());
info!(
" create {:?}/{:?} failed: {:?}",
self.name,
name,
create_result.unwrap_err()
);
return;
}
assert!(
create_result.is_ok(),
"Fail to create {:?}: {:?}",
name,
create_result.unwrap_err()
);
info!(
" create {:?}/{:?}({:?}) succeeeded",
self.name, name, type_
);
let new_dentry_in_mem = if type_ == InodeType::File {
let file = FileInMemory {
name: name.clone(),
inode: create_result.unwrap(),
valid_len: 0,
contents: Vec::<u8>::new(),
};
DentryInMemory::File(file)
} else {
DentryInMemory::Dir(DirInMemory {
depth: self.depth + 1,
name: name.clone(),
inode: create_result.unwrap(),
sub_names: Vec::new(),
sub_dirs: HashMap::new(),
})
};
let _ = self.sub_dirs.insert(name.to_string(), new_dentry_in_mem);
self.sub_names.push(name.to_string());
}
fn test_lookup(&self, name: &String) {
info!("Lookup: parent = {:?}, name = {:?}", self.name, name);
let lookup_result = self.inode.lookup(name);
if self.sub_dirs.get(name).is_some() {
assert!(
lookup_result.is_ok(),
"Fail to lookup {:?}: {:?}",
name,
lookup_result.unwrap_err()
);
info!(" lookup {:?}/{:?} succeeded", self.name, name);
} else {
assert!(lookup_result.is_err());
info!(
" lookup {:?}/{:?} failed: {:?}",
self.name,
name,
lookup_result.unwrap_err()
);
}
}
fn test_readdir(&mut self) {
info!("Readdir: parent = {:?}", self.name);
let mut sub: Vec<String> = Vec::new();
let readdir_result = self.inode.readdir_at(0, &mut sub);
assert!(readdir_result.is_ok(), "Fail to read directory",);
assert!(readdir_result.unwrap() == self.sub_dirs.len() + 2);
assert!(sub.len() == self.sub_dirs.len() + 2);
// To remove "." and ".."
sub.remove(0);
sub.remove(0);
sub.sort();
self.sub_names.sort();
for (i, name) in sub.iter().enumerate() {
assert!(
name.eq(&self.sub_names[i]),
"Directory entry mismatch: read {:?} should be {:?}",
name,
self.sub_names[i]
);
}
}
fn test_unlink(&mut self, name: &String) {
info!("Unlink: parent = {:?}, name = {:?}", self.name, name);
let unlink_result = self.inode.unlink(name);
if let Option::Some(sub) = self.sub_dirs.get(name)
&& let DentryInMemory::File(_) = sub
{
assert!(
unlink_result.is_ok(),
"Fail to remove file {:?}/{:?}: {:?}",
self.name,
name,
unlink_result.unwrap_err()
);
info!(" unlink {:?}/{:?} succeeded", self.name, name);
let _ = self.sub_dirs.remove(name);
self.remove_sub_names(name);
} else {
assert!(unlink_result.is_err());
info!(
" unlink {:?}/{:?} failed: {:?}",
self.name,
name,
unlink_result.unwrap_err()
);
}
}
fn test_rmdir(&mut self, name: &String) {
info!("Rmdir: parent = {:?}, name = {:?}", self.name, name);
let rmdir_result = self.inode.rmdir(name);
if let Option::Some(sub) = self.sub_dirs.get(name)
&& let DentryInMemory::Dir(sub_dir) = sub
&& sub_dir.sub_dirs.is_empty()
{
assert!(
rmdir_result.is_ok(),
"Fail to remove directory {:?}/{:?}: {:?}",
self.name,
name,
rmdir_result.unwrap_err()
);
info!(" rmdir {:?}/{:?} succeeded", self.name, name);
let _ = self.sub_dirs.remove(name);
self.remove_sub_names(name);
} else {
assert!(rmdir_result.is_err());
info!(
" rmdir {:?}/{:?} failed: {:?}",
self.name,
name,
rmdir_result.unwrap_err()
);
}
}
fn test_rename(&mut self, old_name: &String, new_name: &String) {
info!(
"Rename: parent = {:?}, old_name = {:?}, target = {:?}, new_name = {:?}",
self.name, old_name, self.name, new_name
);
let rename_result = self.inode.rename(old_name, &self.inode, new_name);
if old_name.eq(new_name) {
assert!(rename_result.is_ok());
info!(
" rename {:?}/{:?} to {:?}/{:?} succeeded",
self.name, old_name, self.name, new_name
);
return;
}
let mut valid_rename: bool = false;
let mut exist: bool = false;
if let Option::Some(old_sub) = self.sub_dirs.get(old_name) {
let exist_new_sub = self.sub_dirs.get(new_name);
match old_sub {
DentryInMemory::File(old_file) => {
if let Option::Some(exist_new_sub_) = exist_new_sub
&& let DentryInMemory::File(exist_new_file) = exist_new_sub_
{
valid_rename = true;
exist = true;
} else if exist_new_sub.is_none() {
valid_rename = true;
}
}
DentryInMemory::Dir(old_dir) => {
if let Option::Some(exist_new_sub_) = exist_new_sub
&& let DentryInMemory::Dir(exist_new_dir) = exist_new_sub_
&& exist_new_dir.sub_dirs.is_empty()
{
valid_rename = true;
exist = true;
} else if exist_new_sub.is_none() {
valid_rename = true;
}
}
}
}
if valid_rename {
assert!(
rename_result.is_ok(),
"Fail to rename {:?}/{:?} to {:?}/{:?}: {:?}",
self.name,
old_name,
self.name,
new_name,
rename_result.unwrap_err()
);
info!(
" rename {:?}/{:?} to {:?}/{:?} succeeded",
self.name, old_name, self.name, new_name
);
let lookup_new_inode_result = self.inode.lookup(new_name);
assert!(
lookup_new_inode_result.is_ok(),
"Fail to lookup new name {:?}: {:?}",
new_name,
lookup_new_inode_result.unwrap_err()
);
let mut old = self.sub_dirs.remove(old_name).unwrap();
self.remove_sub_names(old_name);
match old {
DentryInMemory::Dir(ref mut dir) => {
dir.inode = lookup_new_inode_result.unwrap();
dir.name = new_name.clone();
dir.depth = self.depth + 1;
}
DentryInMemory::File(ref mut file) => {
file.inode = lookup_new_inode_result.unwrap();
file.name = new_name.clone();
}
}
if exist {
let _ = self.sub_dirs.remove(new_name);
self.remove_sub_names(new_name);
}
self.sub_dirs.insert(new_name.to_string(), old);
self.sub_names.push(new_name.to_string());
} else {
assert!(rename_result.is_err());
info!(
" rename {:?}/{:?} to {:?}/{:?} failed: {:?}",
self.name,
old_name,
self.name,
new_name,
rename_result.unwrap_err()
);
}
}
pub fn execute_and_test(&mut self, op: Operation) {
match op {
Operation::Create(name, type_) => self.test_create(&name, type_),
Operation::Lookup(name) => self.test_lookup(&name),
Operation::Readdir() => self.test_readdir(),
Operation::Unlink(name) => self.test_unlink(&name),
Operation::Rmdir(name) => self.test_rmdir(&name),
Operation::Rename(old_name, new_name) => self.test_rename(&old_name, &new_name),
_ => {}
}
}
}
impl FileInMemory {
/// Reads `[offset, offset + len)` from the real file and checks the part
/// that overlaps the written prefix (`valid_len`) against the shadow
/// contents.
fn test_read(&self, offset: usize, len: usize) {
info!(
"Read: name = {:?}, offset = {:?}, len = {:?}",
self.name, offset, len
);
let mut buf = vec![0; len];
let read_result = self.inode.read_at(offset, &mut buf);
assert!(
read_result.is_ok(),
"Fail to read file in range [{:?}, {:?}): {:?}",
offset,
offset + len,
read_result.unwrap_err()
);
info!(" read succeeded");
// Only compare the portion of the range that lies inside the valid
// (actually written) prefix of the file.
let (start, end) = (
offset.min(self.valid_len),
(offset + len).min(self.valid_len),
);
assert!(
buf[..(end - start)].eq(&self.contents[start..end]),
"Read file contents mismatch"
);
}
/// Writes `len` random bytes and mirrors them into the shadow contents.
/// The start offset is pulled back to `valid_len` when needed so the file
/// never contains unwritten holes.
fn test_write(&mut self, offset: usize, len: usize, rng: &mut dyn RngCore) {
// Avoid holes in a file.
let (write_start_offset, write_len) = if offset > self.valid_len {
(self.valid_len, len + offset - self.valid_len)
} else {
(offset, len)
};
info!(
"Write: name = {:?}, offset = {:?}, len = {:?}",
self.name, write_start_offset, write_len
);
let mut buf = vec![0; write_len];
rng.fill_bytes(&mut buf);
let write_result = self.inode.write_at(write_start_offset, &buf);
assert!(
write_result.is_ok(),
"Fail to write file in range [{:?}, {:?}): {:?}",
write_start_offset,
write_start_offset + write_len,
write_result.unwrap_err()
);
info!(" write succeeded");
// Grow the shadow contents if the write extended the file.
if write_start_offset + write_len > self.contents.len() {
self.contents.resize(write_start_offset + write_len, 0);
}
self.valid_len = self.valid_len.max(write_start_offset + write_len);
self.contents[write_start_offset..write_start_offset + write_len]
.copy_from_slice(&buf[..write_len]);
}
/// Resizes the real file and the shadow contents to `new_size`,
/// shrinking `valid_len` if the file got smaller.
fn test_resize(&mut self, new_size: usize) {
info!("Resize: name = {:?}, new_size = {:?}", self.name, new_size);
// Todo: may need more consideration
let resize_result = self.inode.resize(new_size);
assert!(
resize_result.is_ok(),
"Fail to resize file to {:?}: {:?}",
new_size,
resize_result.unwrap_err()
);
self.contents.resize(new_size, 0);
self.valid_len = self.valid_len.min(new_size);
}
/// Executes a file-level operation; directory-level ops are ignored.
pub fn execute_and_test(&mut self, op: Operation, rng: &mut dyn RngCore) {
match op {
Operation::Read(offset, len) => self.test_read(offset, len),
Operation::Write(offset, len) => self.test_write(offset, len, rng),
Operation::Resize(new_size) => self.test_resize(new_size),
_ => {}
}
}
}
impl DentryInMemory {
    /// Dispatches `op` to the directory- or file-specific executor.
    pub fn execute_and_test(&mut self, op: Operation, rng: &mut dyn RngCore) {
        match self {
            DentryInMemory::Dir(dir) => {
                dir.execute_and_test(op);
            }
            DentryInMemory::File(file) => {
                file.execute_and_test(op, rng);
            }
        }
    }

    /// Returns the number of direct children (always 0 for a file).
    pub fn sub_cnt(&self) -> usize {
        match self {
            DentryInMemory::Dir(dir) => dir.sub_names.len(),
            // Fix: `_` instead of an unused `file` binding (warning).
            DentryInMemory::File(_) => 0,
        }
    }
}
/// Walks the shadow tree from `root`, flipping a coin at every non-empty
/// directory to either stop there or descend into a random child, and
/// returns the chosen dentry.
fn random_select_from_dir_tree<'a>(
root: &'a mut DentryInMemory,
rng: &mut dyn RngCore,
) -> &'a mut DentryInMemory {
let sub_cnt = root.sub_cnt();
if sub_cnt == 0 {
// Files and empty directories are leaves.
root
} else {
let stop_get_deeper = rng.gen_bool(0.5);
if stop_get_deeper {
root
} else if let DentryInMemory::Dir(dir) = root {
let sub_idx = rng.gen_range(0..sub_cnt);
let sub = dir.sub_dirs.get_mut(&dir.sub_names[sub_idx]);
let sub_dir = sub.unwrap();
random_select_from_dir_tree(sub_dir, rng)
} else {
// A file has sub_cnt == 0 and was handled above.
unreachable!();
}
}
}
/// Picks a random `(offset, len)` pair with `offset + len <= max_size`.
///
/// `max_size` must be positive; `gen_range` panics on an empty range.
fn generate_random_offset_len(max_size: usize, rng: &mut dyn RngCore) -> (usize, usize) {
let offset = rng.gen_range(0..max_size);
let len = rng.gen_range(0..max_size - offset);
(offset, len)
}
/// Wraps `root` into an empty in-memory shadow tree (named "root", depth 0)
/// used to mirror and check file-system operations.
pub fn new_fs_in_memory(root: Arc<dyn Inode>) -> DentryInMemory {
    DentryInMemory::Dir(DirInMemory {
        depth: 0,
        // Idiom fix: `(&"root").to_string()` took a needless reference.
        name: "root".to_string(),
        inode: root,
        sub_names: Vec::new(),
        sub_dirs: HashMap::new(),
    })
}
/// Randomly selects a dentry from the shadow tree and generates an
/// operation appropriate for its kind (dir vs. file); `idx` supplies a
/// fresh name for created/renamed entries.
pub fn generate_random_operation<'a>(
root: &'a mut DentryInMemory,
idx: u32,
rng: &mut dyn RngCore,
) -> (&'a mut DentryInMemory, Operation) {
let dentry = random_select_from_dir_tree(root, rng);
let op = match dentry {
DentryInMemory::Dir(dir) => Operation::generate_random_dir_operation(dir, idx, rng),
DentryInMemory::File(file) => Operation::generate_random_file_operation(file, idx, rng),
};
(dentry, op)
}

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
use alloc::{boxed::Box, string::ToString, sync::Arc, vec::Vec};
use alloc::{boxed::Box, string::String, sync::Arc, vec, vec::Vec};
use core::{fmt::Debug, hint::spin_loop, mem::size_of};
use aster_block::{
@ -45,7 +45,9 @@ impl BlockDevice {
queue: BioRequestSingleQueue::new(),
}
};
aster_block::register_device(super::DEVICE_NAME.to_string(), Arc::new(block_device));
let device_id = block_device.device.device_id.clone().unwrap();
aster_block::register_device(device_id, Arc::new(block_device));
Ok(())
}
@ -133,6 +135,7 @@ struct DeviceInner {
/// it can pass to the `add_vm` function
block_responses: VmFrame,
id_allocator: SpinLock<Vec<u8>>,
device_id: Option<String>,
}
impl DeviceInner {
@ -143,6 +146,7 @@ impl DeviceInner {
if num_queues != 1 {
return Err(VirtioDeviceError::QueuesAmountDoNotMatch(num_queues, 1));
}
let queue = VirtQueue::new(0, 64, transport.as_mut()).expect("create virtqueue failed");
let mut device = Self {
config,
@ -151,23 +155,30 @@ impl DeviceInner {
block_requests: VmAllocOptions::new(1).alloc_single().unwrap(),
block_responses: VmAllocOptions::new(1).alloc_single().unwrap(),
id_allocator: SpinLock::new((0..64).collect()),
device_id: None,
};
let device_id = device.get_id();
let cloned_device_id = device_id.clone();
let handle_block_device = move |_: &TrapFrame| {
aster_block::get_device(device_id.as_str())
.unwrap()
.handle_irq();
};
device.device_id = Some(cloned_device_id);
device
.transport
.register_cfg_callback(Box::new(config_space_change))
.unwrap();
device
.transport
.register_queue_callback(0, Box::new(handle_block_device), false)
.unwrap();
fn handle_block_device(_: &TrapFrame) {
aster_block::get_device(super::DEVICE_NAME)
.unwrap()
.handle_irq();
}
fn config_space_change(_: &TrapFrame) {
info!("Virtio block device config space change");
}
@ -175,6 +186,53 @@ impl DeviceInner {
Ok(device)
}
// TODO: Most logic is the same as read and write, there should be a refactor.
// TODO: Should return an Err instead of panic if the device fails.
/// Queries the device serial/ID string by issuing a `GetId` request on the
/// virtqueue and busy-waiting for completion.
///
/// Up to `MAX_ID_LENGTH` bytes are read back; the returned string is
/// truncated at the first NUL byte.
fn get_id(&self) -> String {
// Borrow a request slot from the id allocator for the duration of the call.
let id = self.id_allocator.lock().pop().unwrap() as usize;
let req = BlockReq {
type_: ReqType::GetId as _,
reserved: 0,
sector: 0,
};
self.block_requests
.write_val(id * size_of::<BlockReq>(), &req)
.unwrap();
let req_reader = self
.block_requests
.reader()
.skip(id * size_of::<BlockReq>())
.limit(size_of::<BlockReq>());
const MAX_ID_LENGTH: usize = 20;
// A scratch page receives the device's reply.
let page = VmAllocOptions::new(1).uninit(true).alloc_single().unwrap();
let writer = page.writer().limit(MAX_ID_LENGTH);
let mut queue = self.queue.lock_irq_disabled();
let token = queue
.add_vm(&[&req_reader], &[&writer])
.expect("add queue failed");
queue.notify();
// Busy-wait: interrupts are not used for this early, one-off request.
while !queue.can_pop() {
spin_loop();
}
// The device is not initialized yet, so the response must be not_ready.
// NOTE(review): the response status byte is never inspected — confirm
// that is acceptable for a failed GetId.
queue.pop_used_with_token(token).expect("pop used failed");
self.id_allocator.lock().push(id as u8);
//Add an extra 0, so that the array must end with 0.
let mut device_id = vec![0; MAX_ID_LENGTH + 1];
let _ = page.read_bytes(0, &mut device_id);
// Truncate at the first NUL (guaranteed to exist by the extra byte).
device_id.resize(device_id.iter().position(|&x| x == 0).unwrap(), 0);
String::from_utf8(device_id).unwrap()
}
/// Reads data from the block device, this function is blocking.
/// FIXME: replace slice with a more secure data structure to use dma mapping.
pub fn read(&self, sector_id: Sid, buf: &[VmWriter]) {

View File

@ -36,6 +36,7 @@ pub enum ReqType {
In = 0,
Out = 1,
Flush = 4,
GetId = 8,
Discard = 11,
WriteZeroes = 13,
}

View File

@ -8,6 +8,7 @@ INITRAMFS := $(BUILD_DIR)/initramfs
INITRAMFS_FILELIST := $(BUILD_DIR)/initramfs.filelist
INITRAMFS_IMAGE := $(BUILD_DIR)/initramfs.cpio.gz
EXT2_IMAGE := $(BUILD_DIR)/ext2.img
EXFAT_IMAGE := $(BUILD_DIR)/exfat.img
INITRAMFS_EMPTY_DIRS := \
$(INITRAMFS)/sbin \
$(INITRAMFS)/root \
@ -15,7 +16,8 @@ INITRAMFS_EMPTY_DIRS := \
$(INITRAMFS)/opt \
$(INITRAMFS)/proc \
$(INITRAMFS)/dev \
$(INITRAMFS)/ext2
$(INITRAMFS)/ext2 \
$(INITRAMFS)/exfat
INITRAMFS_ALL_DIRS := \
$(INITRAMFS)/etc \
$(INITRAMFS)/lib/x86_64-linux-gnu \
@ -117,8 +119,12 @@ $(EXT2_IMAGE):
@dd if=/dev/zero of=$(EXT2_IMAGE) bs=2G count=1
@mke2fs $(EXT2_IMAGE)
$(EXFAT_IMAGE):
@fallocate -l 64M $(EXFAT_IMAGE)
@mkfs.exfat $(EXFAT_IMAGE)
.PHONY: build
build: $(INITRAMFS_IMAGE) $(EXT2_IMAGE)
build: $(INITRAMFS_IMAGE) $(EXT2_IMAGE) $(EXFAT_IMAGE)
.PHONY: format
format:
@ -130,4 +136,4 @@ check:
.PHONY: clean
clean:
@rm -rf $(BUILD_DIR)
@rm -rf $(BUILD_DIR)

View File

@ -43,6 +43,7 @@ INITRAMFS ?= $(CUR_DIR)/../build/initramfs
TARGET_DIR := $(INITRAMFS)/opt/syscall_test
RUN_BASH := $(CUR_DIR)/run_syscall_test.sh
BLOCK_LIST := $(CUR_DIR)/blocklists
EXFAT_BLOCK_LIST := $(CUR_DIR)/blocklists.exfat
.PHONY: all
all: $(TESTS)
@ -65,12 +66,14 @@ $(SRC_DIR):
@cd $@ && git clone -b 20200921.0 https://github.com/asterinas/gvisor.git .
endif
$(TARGET_DIR): $(RUN_BASH) $(BLOCK_LIST)
$(TARGET_DIR): $(RUN_BASH) $(BLOCK_LIST) $(EXFAT_BLOCK_LIST)
@rm -rf $@ && mkdir -p $@
@# Prepare tests dir for test binaries
@mkdir $@/tests
@# Copy blocklists
@cp -rf $(BLOCK_LIST) $@
@# Copy exFAT specific blocklists
@cp -rf $(EXFAT_BLOCK_LIST) $@
@# Copy bash script
@cp -f $(RUN_BASH) $@

View File

@ -0,0 +1,7 @@
ChmodTest.ChmodFileSucceeds
ChmodTest.ChmodDirSucceeds
ChmodTest.FchmodFileSucceeds_NoRandomSave
ChmodTest.FchmodatFileAbsolutePath
ChmodTest.FchmodatFile
ChmodTest.ChmodFileToNoPermissionsSucceeds
ChmodTest.FchmodFileToNoPermissionsSucceeds_NoRandomSave

View File

@ -0,0 +1,9 @@
LinkTest.CanCreateLinkFile
LinkTest.PermissionDenied
LinkTest.CannotLinkDirectory
LinkTest.WithOldDirFD
LinkTest.WithNewDirFD
LinkTest.AbsPathsWithNonDirFDs
LinkTest.LinkDoesNotFollowSymlinks
LinkTest.LinkatDoesNotFollowSymlinkByDefault
LinkTest.LinkatWithSymlinkFollow

View File

@ -0,0 +1,2 @@
MkdirTest.HonorsUmask
MkdirTest.HonorsUmask2

View File

@ -0,0 +1,2 @@
OpenTest.OpenNoFollowSymlink
OpenTest.SymlinkDirectory

View File

@ -0,0 +1,2 @@
StatTest.FstatatSymlinkDir
StatTest.LstatELOOPPath

View File

@ -0,0 +1,18 @@
SymlinkTest.CanCreateSymlinkWithCachedSourceDirent
SymlinkTest.CanCreateSymlinkFile
SymlinkTest.CanCreateSymlinkDir
SymlinkTest.OldnameIsDangling
SymlinkTest.CanEvaluateLink
SymlinkTest.TargetIsNotMapped
SymlinkTest.PreadFromSymlink
SymlinkTest.ChmodSymlink
AbsAndRelTarget/ParamSymlinkTest.OpenLinkCreatesTarget/0
AbsAndRelTarget/ParamSymlinkTest.OpenLinkCreatesTarget/1
AbsAndRelTarget/ParamSymlinkTest.CreateExistingSelfLink/0
AbsAndRelTarget/ParamSymlinkTest.CreateExistingSelfLink/1
AbsAndRelTarget/ParamSymlinkTest.CreateExistingParentLink/0
AbsAndRelTarget/ParamSymlinkTest.CreateExistingParentLink/1
AbsAndRelTarget/ParamSymlinkTest.OpenLinkExclFails/0
AbsAndRelTarget/ParamSymlinkTest.OpenLinkExclFails/1
AbsAndRelTarget/ParamSymlinkTest.OpenLinkNoFollowFails/0
AbsAndRelTarget/ParamSymlinkTest.OpenLinkNoFollowFails/1

View File

@ -18,11 +18,18 @@ NC='\033[0m'
get_blocklist_subtests(){
if [ -f $BLOCKLIST_DIR/$1 ]; then
BLOCK=$(sed ':a;N;$!ba;s/\n/:/g' $BLOCKLIST_DIR/$1)
return 0
else
BLOCK=""
return 1
fi
for extra_dir in $EXTRA_BLOCKLISTS_DIRS ; do
if [ -f $SCRIPT_DIR/$extra_dir/$1 ]; then
BLOCK="${BLOCK}:$(sed ':a;N;$!ba;s/\n/:/g' $SCRIPT_DIR/$extra_dir/$1)"
fi
done
return 0
}
run_one_test(){
@ -31,10 +38,11 @@ run_one_test(){
export TEST_TMPDIR=$TEST_TMP_DIR
ret=0
if [ -f $TEST_BIN_DIR/$1 ]; then
rm -rf $TEST_TMP_DIR/*
get_blocklist_subtests $1
$TEST_BIN_DIR/$1 --gtest_filter=-$BLOCK
ret=$?
#After executing the test, it is necessary to clean the directory to ensure no residual data remains
rm -rf $TEST_TMP_DIR/*
else
echo -e "Warning: $1 test does not exit"
ret=1
@ -44,6 +52,7 @@ run_one_test(){
}
rm -f $FAIL_CASES && touch $FAIL_CASES
rm -rf $TEST_TMP_DIR/*
for syscall_test in $(find $TEST_BIN_DIR/. -name \*_test) ; do
test_name=$(basename "$syscall_test")
@ -63,4 +72,4 @@ if [ $TESTS != $PASSED_TESTS ]; then
cat $FAIL_CASES
fi
exit $RESULT
exit $RESULT