Fix all spelling mistakes in history by typos tool

This commit is contained in:
Cautreoxit 2024-08-26 15:31:58 +08:00 committed by Tate, Hongliang Tian
parent b29d3b5409
commit 86f09eef75
120 changed files with 255 additions and 213 deletions

28
.typos.toml Normal file
View File

@ -0,0 +1,28 @@
# This file is to help typos avoid false-positives.
# Words listed below are marked as valid, not mistakes.
[default.extend-words]
rela = "rela"
ANDD = "ANDD"
ethe = "ethe"
mke = "mke"
WHT = "WHT"
ist = "ist"
TME = "TME"
BA = "BA"
ND = "ND"
Fo = "Fo"
# Files with the svg suffix are excluded from checking.
[type.svg]
extend-glob = ["*.svg"]
check-file = false
# Files listed below are excluded from checking.
[files]
extend-exclude = [
"test/syscall_test/blocklists/pty_test",
"test/build/initramfs/opt/syscall_test/blocklists/pty_test",
"test/syscall_test/blocklists/sync_test",
"test/build/initramfs/opt/syscall_test/blocklists/sync_test",
]

16
.vscode/settings.json vendored
View File

@ -15,5 +15,19 @@
"-Zbuild-std=core,alloc,compiler_builtins",
"-Zbuild-std-features=compiler-builtins-mem"
],
"rust-analyzer.showUnlinkedFileNotification": false
"rust-analyzer.showUnlinkedFileNotification": false,
"search.exclude": {
"**/*.code-search": false,
"**/bower_components": false,
"**/node_modules": false
},
"search.useIgnoreFiles": false,
"files.exclude": {
"**/.DS_Store": false,
"**/.git": false,
"**/.hg": false,
"**/.svn": false,
"**/CVS": false,
"**/Thumbs.db": false
}
}

View File

@ -91,7 +91,7 @@ After starting a debug server with OSDK from the shell with `make gdb_server`,
a temporary `launch.json` is generated under `.vscode`.
Your previous launch configs will be restored after the server is down.
Press `F5`(Run and Debug) to start a debug session via VS Code.
Click `Continue`(or, press `F5`) at the fisrt break to resume the paused server instance,
Click `Continue`(or, press `F5`) at the first break to resume the paused server instance,
then it will continue until reaching your first breakpoint.
Note that if debugging with KVM enabled, you must use hardware assisted breakpoints. See "hbreak" in

View File

@ -73,7 +73,7 @@ to introduce minimal overheads.
Ideally, these APIs should be realized
as [zero-cost abstractions](https://monomorph.is/posts/zero-cost-abstractions/).
Fortunatelly, our efforts
Fortunately, our efforts
to design and implement an OS framework meeting these standards
have borne fruit in the form of the [Asterinas OSTD](../ostd/).
Using this framework as a foundation,

View File

@ -55,7 +55,7 @@ and can be installed by
cargo install cargo-osdk
```
### Upgrate
### Upgrade
If `cargo-osdk` is already installed,
the tool can be upgraded by
```bash

View File

@ -188,7 +188,7 @@ impl InputDevice {
fn handle_irq(&self) {
let callbacks = self.callbacks.read_irq_disabled();
// Returns ture if there may be more events to handle
// Returns true if there may be more events to handle
let handle_event = |event: &EventBuf| -> bool {
event.sync().unwrap();
let event: VirtioInputEvent = event.read().unwrap();

View File

@ -61,7 +61,7 @@ impl SocketDevice {
<< 32;
let mut recv_queue = VirtQueue::new(QUEUE_RECV, QUEUE_SIZE, transport.as_mut())
.expect("createing recv queue fails");
.expect("creating recv queue fails");
let send_queue = VirtQueue::new(QUEUE_SEND, QUEUE_SIZE, transport.as_mut())
.expect("creating send queue fails");
let event_queue = VirtQueue::new(QUEUE_EVENT, QUEUE_SIZE, transport.as_mut())

View File

@ -103,7 +103,7 @@ impl fmt::Display for SocketError {
Self::UnexpectedDataInPacket => write!(f, "No data is expected in the packet"),
Self::InsufficientBufferSpaceInPeer => write!(f, "Peer has insufficient buffer space, try again later"),
Self::RecycledWrongBuffer => write!(f, "Recycled a wrong buffer"),
Self::QueueError(_) => write!(f,"Error encounted out of vsock itself!"),
Self::QueueError(_) => write!(f,"Error encountered out of vsock itself!"),
}
}
}

View File

@ -106,7 +106,7 @@ fn negotiate_features(transport: &mut Box<dyn VirtioTransport>) {
}
bitflags! {
/// all device features, bits 0~23 and 50~63 are sepecified by device.
/// all device features, bits 0~23 and 50~63 are specified by device.
/// if using this struct to translate u64, use from_bits_truncate function instead of from_bits
///
struct Feature: u64 {

View File

@ -73,7 +73,7 @@ impl VirtQueue {
let (descriptor_ptr, avail_ring_ptr, used_ring_ptr) = if transport.is_legacy_version() {
// FIXME: How about pci legacy?
// Currently, we use one Frame to place the descriptors and avaliable rings, one Frame to place used rings
// Currently, we use one Frame to place the descriptors and available rings, one Frame to place used rings
// because the virtio-mmio legacy required the address to be contiguous. The max queue size is 128.
if size > 128 {
return Err(QueueError::InvalidArgs);

View File

@ -62,7 +62,7 @@ impl VirtioPciCapabilityData {
3 => VirtioPciCpabilityType::IsrCfg,
4 => VirtioPciCpabilityType::DeviceCfg,
5 => VirtioPciCpabilityType::PciCfg,
_ => panic!("Unsupport virtio capability type:{:?}", cfg_type),
_ => panic!("Unsupported virtio capability type:{:?}", cfg_type),
};
let bar = vendor_cap.read8(4).unwrap();
let capability_length = vendor_cap.read8(2).unwrap();

View File

@ -10,7 +10,7 @@
//! which means the `SomeRightSet` should **include** the `AnotherRightSet`. In this case, `AnotherRightSet` should be a **generic parameter**.
//! i.e., `AnotherRightSet` should occur in the generic param list of the function.
//!
//! If there are multiple constraits, they can be seperated with `|`, which means all constraits should be satisfied.
//! If there are multiple constraints, they can be separated with `|`, which means all constraints should be satisfied.
//!
//! The require macro can also be used multiple times, in which case each occurrence should be satisfied.
//!

View File

@ -135,7 +135,7 @@ pub use typeflags_util::SetContain;
/// ```
///
/// But this coding pattern is too tedius for such a common task.
/// To make the life of users easier, we provide a convinient macro named
/// To make the life of users easier, we provide a convenient macro named
/// `field_ptr`, which can be used to obtain the safe pointer of a field from
/// that of its containing struct.
///

View File

@ -9,7 +9,7 @@ cargo install --path .
This will install two binaries `cargo-component` and `component-driver` at `$HOME/.cargo/bin`(by default, it depends on the cargo config).
## Usage
Use `cargo component` or `cargo component check` or `cargo component audit`. The three commands are the same now. For Asterinas, we shoud use another alias command `cargo component-check`, which was defined in `src/.cargo/config.toml`.
Use `cargo component` or `cargo component check` or `cargo component audit`. The three commands are the same now. For Asterinas, we should use another alias command `cargo component-check`, which was defined in `src/.cargo/config.toml`.
### Two notes:
- The directory **where you run the command** should contain a `Components.toml` config file, which defines all components and the whitelist.

View File

@ -166,7 +166,7 @@ fn check_inline_asm_operand(
/// check whether visiting the operand in local crate is valid.
/// if the operand is invalid, add the def_path to def_paths.
/// The operand is invalid only when follwing four points are all satisfied.
/// The operand is invalid only when following four points are all satisfied.
/// 1. The operand represents a static variable or a func(the first argument can not be self or its variants).
/// 2. The operand is not defined in local crate.
/// 3. The operand is marked with #[component_access_control::controlled]

View File

@ -180,7 +180,7 @@ fn read_component_file(workspace_root: &str) -> Vec<String> {
.collect();
}
}
panic!("Componets.toml file not valid")
panic!("Components.toml file not valid")
}
/// calculate the priority of one node

View File

@ -11,7 +11,7 @@ Registering a crate as component by marking a function in the lib.rs with `#[ini
### Component initialization
Component system need to be initialized by calling `componet::init_all` function and it needs information about all components. Usually it is used with the `component::parse_metadata` macro.
Component system need to be initialized by calling `component::init_all` function and it needs information about all components. Usually it is used with the `component::parse_metadata` macro.
## Example

View File

@ -128,7 +128,7 @@ fn parse_input(components: Vec<ComponentInfo>) -> BTreeMap<String, ComponentInfo
out
}
/// Match the ComponetInfo with ComponentRegistry. The key is the relative path of one component
/// Match the ComponentInfo with ComponentRegistry. The key is the relative path of one component
fn match_and_call(
mut components: BTreeMap<String, ComponentInfo>,
) -> Result<(), ComponentSystemInitError> {
@ -161,7 +161,7 @@ fn match_and_call(
infos.push(info);
}
debug!("Remain componets:{components:?}");
debug!("Remain components:{components:?}");
if !components.is_empty() {
info!("Exists components that are not initialized");
@ -174,11 +174,11 @@ fn match_and_call(
for i in infos {
info!("Component initializing:{:?}", i);
if let Err(res) = i.function.unwrap().call(()) {
error!("Component initalize error:{:?}", res);
error!("Component initialize error:{:?}", res);
} else {
info!("Component initalize complete");
info!("Component initialize complete");
}
}
info!("All components initalization completed");
info!("All components initialization completed");
Ok(())
}

View File

@ -31,5 +31,5 @@ fn main() {
## Introduction
This crate provides a derive procedural macro named `TryFromInt`. This macro will automatically implement [TryFrom](https://doc.rust-lang.org/core/convert/trait.TryFrom.html) trait for enums that meet the following requirements:
1. The enum must have a primitive repr, i.e., the enum should have attribute like #[repr(u8)], #[repr(u32)], etc. The type parameter of TryFrom will be the repr, e.g., in the `QuickStart` example, the macro will implment `TryFrom<u8>` for `Color`.
1. The enum must have a primitive repr, i.e., the enum should have attribute like #[repr(u8)], #[repr(u32)], etc. The type parameter of TryFrom will be the repr, e.g., in the `QuickStart` example, the macro will implement `TryFrom<u8>` for `Color`.
2. The enum must consist solely of unit variants, which is called [units only enum](https://doc.rust-lang.org/reference/items/enumerations.html#unit-only-enum). Each field should have an **explicit discriminant**.

View File

@ -69,8 +69,8 @@ fn fn_body_tokens(value_name: &str, data_enum: &DataEnum, ident: Ident) -> Token
.discriminant
.as_ref()
.expect("Each field must be assigned a discriminant value explicitly");
let vairant_ident = &variant.ident;
let statement = quote!(#value => ::core::result::Result::Ok(#ident::#vairant_ident),);
let variant_ident = &variant.ident;
let statement = quote!(#value => ::core::result::Result::Ok(#ident::#variant_ident),);
match_bodys.append_all(statement);
}
match_bodys.append_all(

View File

@ -10,7 +10,7 @@ use crate::type_flag::TypeFlagDef;
const EMPTY_SET_NAME: &str = "::typeflags_util::Nil";
const SET_NAME: &str = "::typeflags_util::Cons";
/// A flagSet represent the combination of differnt flag item.
/// A flagSet represent the combination of different flag item.
/// e.g. [Read, Write], [Read], [] are all flag sets.
/// The order of flagItem does not matter. So flag sets with the same sets of items should be viewed as the same set.
#[derive(Debug)]

View File

@ -25,8 +25,8 @@ pub fn expand_type_flag(type_flags_def: &TypeFlagDef) -> TokenStream {
all_tokens.append_all(impl_main_trait_tokens);
});
let impl_set_entend_tokens = impl_set_extend(type_flags_def, &flag_sets);
all_tokens.append_all(impl_set_entend_tokens);
let impl_set_extend_tokens = impl_set_extend(type_flags_def, &flag_sets);
all_tokens.append_all(impl_set_extend_tokens);
let export_declarive_macro_tokens = export_declarive_macro(type_flags_def, &flag_sets);
all_tokens.append_all(export_declarive_macro_tokens);

View File

@ -326,7 +326,7 @@ impl LineDiscipline {
}
} else {
// raw mode
// FIXME: avoid addtional bound check
// FIXME: avoid additional bound check
*dst_i = next_char;
read_len += 1;
}

View File

@ -73,7 +73,7 @@ impl EpollFile {
let mask = ep_event.events;
let entry = EpollEntry::new(fd, weak_file, ep_event, ep_flags, self.weak_self.clone());
// Add the new entry to the interest list and start monitering its events
// Add the new entry to the interest list and start monitoring its events
let mut interest = self.interest.lock();
if interest.contains_key(&fd) {
return_errno_with_message!(Errno::EEXIST, "the fd has been added");

View File

@ -144,7 +144,7 @@ impl ExfatBitmap {
.fs()
.is_cluster_range_valid(search_start_cluster..search_start_cluster + num_clusters)
{
return_errno_with_message!(Errno::ENOSPC, "free contigous clusters not avalable.")
return_errno_with_message!(Errno::ENOSPC, "free contiguous clusters not available.")
}
let mut cur_index = search_start_cluster - EXFAT_RESERVED_CLUSTERS;

View File

@ -246,11 +246,11 @@ impl ExfatDentrySet {
create_utc_offset: dos_time.utc_offset,
create_date: dos_time.date,
create_time: dos_time.time,
create_time_cs: dos_time.increament_10ms,
create_time_cs: dos_time.increment_10ms,
modify_utc_offset: dos_time.utc_offset,
modify_date: dos_time.date,
modify_time: dos_time.time,
modify_time_cs: dos_time.increament_10ms,
modify_time_cs: dos_time.increment_10ms,
access_utc_offset: dos_time.utc_offset,
access_date: dos_time.date,
access_time: dos_time.time,
@ -403,7 +403,7 @@ impl ExfatDentrySet {
}
Ok(name)
}
/// Name dentries are not permited to modify. We should create a new dentry set for renaming.
/// Name dentries are not permitted to modify. We should create a new dentry set for renaming.
fn calculate_checksum(&self) -> u16 {
const CHECKSUM_BYTES_RANGE: Range<usize> = 2..4;
@ -505,7 +505,7 @@ impl Iterator for ExfatDentryIterator {
#[repr(C, packed)]
#[derive(Clone, Debug, Default, Copy, Pod)]
// For files & directorys
// For files & directories
pub(super) struct ExfatFileDentry {
pub(super) dentry_type: u8, // 0x85
// Number of Secondary directory entries.
@ -635,7 +635,7 @@ pub(super) struct ExfatGenericSecondaryDentry {
#[derive(Clone, Debug, Default, Copy, Pod)]
pub(super) struct ExfatDeletedDentry {
pub(super) dentry_type: u8,
pub(super) reserverd: [u8; 31],
pub(super) reserved: [u8; 31],
}
#[derive(Default, Debug)]

View File

@ -52,7 +52,7 @@ bitflags! {
const SYSTEM = 0x0004;
/// This inode represents a volume. This attribute is not supported in our implementation.
const VOLUME = 0x0008;
/// This inode reprents a directory.
/// This inode represents a directory.
const DIRECTORY = 0x0010;
/// This file has been touched since the last DOS backup was performed on it. This attribute is not supported in our implementation.
const ARCHIVE = 0x0020;
@ -187,7 +187,7 @@ impl ExfatInodeInner {
self.fs().find_opened_inode(self.parent_hash)
}
/// Get physical sector id from logical sector id fot this Inode.
/// Get physical sector id from logical sector id for this Inode.
fn get_sector_id(&self, sector_id: usize) -> Result<usize> {
let chain_offset = self
.start_chain
@ -315,12 +315,12 @@ impl ExfatInodeInner {
file_dentry.create_utc_offset = self.ctime.utc_offset;
file_dentry.create_date = self.ctime.date;
file_dentry.create_time = self.ctime.time;
file_dentry.create_time_cs = self.ctime.increament_10ms;
file_dentry.create_time_cs = self.ctime.increment_10ms;
file_dentry.modify_utc_offset = self.mtime.utc_offset;
file_dentry.modify_date = self.mtime.date;
file_dentry.modify_time = self.mtime.time;
file_dentry.modify_time_cs = self.mtime.increament_10ms;
file_dentry.modify_time_cs = self.mtime.increment_10ms;
file_dentry.access_utc_offset = self.atime.utc_offset;
file_dentry.access_date = self.atime.date;
@ -692,11 +692,11 @@ impl ExfatInode {
parent_hash: usize,
fs_guard: &MutexGuard<()>,
) -> Result<Arc<ExfatInode>> {
const EXFAT_MIMIMUM_DENTRY: usize = 3;
const EXFAT_MINIMUM_DENTRY: usize = 3;
let ino = fs.alloc_inode_number();
if dentry_set.len() < EXFAT_MIMIMUM_DENTRY {
if dentry_set.len() < EXFAT_MINIMUM_DENTRY {
return_errno_with_message!(Errno::EINVAL, "invalid dentry length")
}
@ -1313,7 +1313,7 @@ impl Inode for ExfatInode {
new_size.max(file_size)
};
// Locks released here, so that file write can be parallized.
// Locks released here, so that file write can be parallelized.
let inner = self.inner.upread();
inner.page_cache.pages().write(offset, reader)?;

View File

@ -263,7 +263,7 @@ mod test {
info!("Successfully creating and reading {} files", file_id + 1);
}
//Test skiped readdir.
//Test skipped readdir.
let mut sub_inodes: Vec<String> = Vec::new();
let _ = root.readdir_at(file_names.len() / 3 + 2, &mut sub_inodes);
@ -956,7 +956,7 @@ mod test {
let resize_too_large = f.resize(initial_free_clusters as usize * cluster_size + 1);
assert!(
resize_too_large.is_err() && fs.num_free_clusters() == initial_free_clusters,
"Fail to deal with a memeory overflow allocation"
"Fail to deal with a memory overflow allocation"
);
// Try to allocate a file of exactly the same size as the remaining spaces. This will succeed.

View File

@ -47,11 +47,11 @@ const EXFAT_TIME_ZONE_VALID: u8 = 1 << 7;
#[derive(Default, Debug, Clone, Copy)]
pub struct DosTimestamp {
// Timestamp at the precesion of double seconds.
// Timestamp at the precision of double seconds.
pub(super) time: u16,
pub(super) date: u16,
// Precise time in 10ms.
pub(super) increament_10ms: u8,
pub(super) increment_10ms: u8,
pub(super) utc_offset: u8,
}
@ -73,11 +73,11 @@ impl DosTimestamp {
}
}
pub fn new(time: u16, date: u16, increament_10ms: u8, utc_offset: u8) -> Result<Self> {
pub fn new(time: u16, date: u16, increment_10ms: u8, utc_offset: u8) -> Result<Self> {
let time = Self {
time,
date,
increament_10ms,
increment_10ms,
utc_offset,
};
Ok(time)
@ -102,13 +102,13 @@ impl DosTimestamp {
| ((date_time.day() as u16) << DAY_RANGE.start);
const NSEC_PER_10MSEC: u32 = 10000000;
let increament_10ms =
let increment_10ms =
(date_time.second() as u32 % 2 * 100 + date_time.nanosecond() / NSEC_PER_10MSEC) as u8;
Ok(Self {
time,
date,
increament_10ms,
increment_10ms,
utc_offset: 0,
})
}
@ -144,15 +144,15 @@ impl DosTimestamp {
let mut sec = date_time.assume_utc().unix_timestamp() as u64;
let mut nano_sec: u32 = 0;
if self.increament_10ms != 0 {
if self.increment_10ms != 0 {
const NSEC_PER_MSEC: u32 = 1000000;
sec += self.increament_10ms as u64 / 100;
nano_sec = (self.increament_10ms as u32 % 100) * 10 * NSEC_PER_MSEC;
sec += self.increment_10ms as u64 / 100;
nano_sec = (self.increment_10ms as u32 % 100) * 10 * NSEC_PER_MSEC;
}
/* Adjust timezone to UTC0. */
if (self.utc_offset & EXFAT_TIME_ZONE_VALID) != 0u8 {
sec = Self::ajust_time_zone(sec, self.utc_offset & (!EXFAT_TIME_ZONE_VALID));
sec = Self::adjust_time_zone(sec, self.utc_offset & (!EXFAT_TIME_ZONE_VALID));
} else {
// TODO: Use mount info for timezone adjustment.
}
@ -160,7 +160,7 @@ impl DosTimestamp {
Ok(Duration::new(sec, nano_sec))
}
fn ajust_time_zone(sec: u64, time_zone: u8) -> u64 {
fn adjust_time_zone(sec: u64, time_zone: u8) -> u64 {
if time_zone <= 0x3F {
sec + Self::time_zone_sec(time_zone)
} else {

View File

@ -1485,7 +1485,7 @@ impl InodeImpl_ {
/// Shrinks inode size.
///
/// After the reduction, the size will be shrinked to `new_size`,
/// After the reduction, the size will be shrunk to `new_size`,
/// which may result in an decreased block count.
fn shrink(&mut self, new_size: usize) {
let new_blocks = self.desc.size_to_blocks(new_size);

View File

@ -133,7 +133,7 @@ impl TryFrom<RawSuperBlock> for SuperBlock {
check_interval: Duration::from_secs(sb.check_interval as _),
creator_os: {
let os_id = OsId::try_from(sb.creator_os)
.map_err(|_| Error::with_message(Errno::EINVAL, "invalid creater os"))?;
.map_err(|_| Error::with_message(Errno::EINVAL, "invalid creator os"))?;
if os_id != OsId::Linux {
return_errno_with_message!(Errno::EINVAL, "not supported os id");
}
@ -309,7 +309,7 @@ impl SuperBlock {
Bid::new(super_block_bid as u64)
}
/// Returns the starting block id of the block group descripter table
/// Returns the starting block id of the block group descriptor table
/// inside the block group pointed by `block_group_idx`.
///
/// # Panics
@ -465,7 +465,7 @@ pub(super) struct RawSuperBlock {
pub prealloc_dir_blocks: u8,
padding1: u16,
///
/// This fileds are for journaling support in Ext3.
/// This fields are for journaling support in Ext3.
///
/// Uuid of journal superblock.
pub journal_uuid: [u8; 16],

View File

@ -2,7 +2,7 @@
#![allow(unused_variables)]
//! Opend File Handle
//! Opened File Handle
use crate::{
events::{IoEvents, Observer},

View File

@ -2,7 +2,7 @@
#![allow(unused_variables)]
//! Opend Inode-backed File Handle
//! Opened Inode-backed File Handle
mod dyn_cap;
mod static_cap;

View File

@ -315,7 +315,7 @@ impl<T: Copy, R: TRights> Fifo<T, R> {
impl<T, R: TRights> Fifo<T, R> {
/// Pushes an item into the endpoint.
/// If the `push` method failes, this method will return
/// If the `push` method fails, this method will return
/// `Err` containing the item that hasn't been pushed
#[require(R > Write)]
pub fn push(&self, item: T) -> core::result::Result<(), T> {

View File

@ -72,7 +72,7 @@ impl PageCache {
pub fn resize(&self, new_size: usize) -> Result<()> {
// If the new size is smaller and not page-aligned,
// first zero the gap between the new size and the
// next page boundry (or the old size), if such a gap exists.
// next page boundary (or the old size), if such a gap exists.
let old_size = self.pages.size();
if old_size > new_size && new_size % PAGE_SIZE != 0 {
let gap_size = old_size.min(new_size.align_up(PAGE_SIZE)) - new_size;

View File

@ -153,7 +153,7 @@ impl DirInMemory {
create_result.unwrap_err()
);
info!(
" create {:?}/{:?}({:?}) succeeeded",
" create {:?}/{:?}({:?}) succeeded",
self.name, name, type_
);

View File

@ -117,7 +117,7 @@ impl RangeLockItem {
.range
.set_start(new_start)
.expect("invalid new start");
if let FileRangeChange::Shrinked = change {
if let FileRangeChange::Shrunk = change {
self.wake_all();
}
}
@ -126,7 +126,7 @@ impl RangeLockItem {
/// If the range shrinks, it will wake all waiting processes
pub fn set_end(&mut self, new_end: usize) {
let change = self.range().set_end(new_end).expect("invalid new end");
if let FileRangeChange::Shrinked = change {
if let FileRangeChange::Shrunk = change {
self.wake_all();
}
}

View File

@ -50,7 +50,7 @@ impl FileRange {
let old_start = self.start;
self.start = new_start;
let change = match new_start {
new_start if new_start > old_start => FileRangeChange::Shrinked,
new_start if new_start > old_start => FileRangeChange::Shrunk,
new_start if new_start < old_start => FileRangeChange::Expanded,
_ => FileRangeChange::Same,
};
@ -64,7 +64,7 @@ impl FileRange {
let old_end = self.end;
self.end = new_end;
let change = match new_end {
new_end if new_end < old_end => FileRangeChange::Shrinked,
new_end if new_end < old_end => FileRangeChange::Shrunk,
new_end if new_end > old_end => FileRangeChange::Expanded,
_ => FileRangeChange::Same,
};
@ -110,7 +110,7 @@ impl FileRange {
pub enum FileRangeChange {
Same,
Expanded,
Shrinked,
Shrunk,
}
/// The position of a range (say A) relative another overlapping range (say B).

View File

@ -31,7 +31,7 @@ pub const SEMMNS: usize = SEMMNI * SEMMSL;
pub const SEMOPM: usize = 500;
/// Maximum semaphore value.
pub const SEMVMX: i32 = 32767;
/// Maximum value that can be recored for semaphore adjustment (SEM_UNDO).
/// Maximum value that can be recorded for semaphore adjustment (SEM_UNDO).
pub const SEMAEM: i32 = SEMVMX;
#[derive(Debug)]

View File

@ -89,7 +89,7 @@ impl AnyBoundSocket {
/// Set the observer whose `on_events` will be called when certain iface events happen. After
/// setting, the new observer will fire once immediately to avoid missing any events.
///
/// If there is an existing observer, due to race conditions, this function does not guarentee
/// If there is an existing observer, due to race conditions, this function does not guarantee
/// that the old observer will never be called after the setting. Users should be aware of this
/// and proactively handle the race conditions if necessary.
pub fn set_observer(&self, handler: Weak<dyn Observer<()>>) {

View File

@ -41,7 +41,7 @@ pub trait Iface: internal::IfaceInternal + Send + Sync {
fn poll(&self);
/// Bind a socket to the iface. So the packet for this socket will be dealt with by the interface.
/// If port is None, the iface will pick up an empheral port for the socket.
/// If port is None, the iface will pick up an ephemeral port for the socket.
/// FIXME: The reason for binding socket and interface together is because there are limitations inside smoltcp.
/// See discussion at <https://github.com/smoltcp-rs/smoltcp/issues/779>.
fn bind_socket(

View File

@ -71,7 +71,7 @@ impl BoundDatagram {
return_errno_with_message!(Errno::EAGAIN, "the send buffer is full")
}
Some(Err(SendError::Unaddressable)) => {
return_errno_with_message!(Errno::EINVAL, "the destionation address is invalid")
return_errno_with_message!(Errno::EINVAL, "the destination address is invalid")
}
None => return_errno_with_message!(Errno::EMSGSIZE, "the message is too large"),
}

View File

@ -101,7 +101,7 @@ impl DatagramSocket {
}
}
fn try_bind_empheral(&self, remote_endpoint: &IpEndpoint) -> Result<()> {
fn try_bind_ephemeral(&self, remote_endpoint: &IpEndpoint) -> Result<()> {
// Fast path
if let Inner::Bound(_) = self.inner.read().as_ref() {
return Ok(());
@ -269,7 +269,7 @@ impl Socket for DatagramSocket {
fn connect(&self, socket_addr: SocketAddr) -> Result<()> {
let endpoint = socket_addr.try_into()?;
self.try_bind_empheral(&endpoint)?;
self.try_bind_ephemeral(&endpoint)?;
let mut inner = self.inner.write();
let Inner::Bound(bound_datagram) = inner.as_mut() else {
@ -311,7 +311,7 @@ impl Socket for DatagramSocket {
let remote_endpoint = match addr {
Some(remote_addr) => {
let endpoint = remote_addr.try_into()?;
self.try_bind_empheral(&endpoint)?;
self.try_bind_ephemeral(&endpoint)?;
endpoint
}
None => self.remote_endpoint().ok_or_else(|| {

View File

@ -481,7 +481,7 @@ impl Socket for StreamSocket {
let state = self.state.read();
match state.as_ref() {
State::Connected(connected_stream) => connected_stream.shutdown(cmd),
// TDOD: shutdown listening stream
// TODO: shutdown listening stream
_ => return_errno_with_message!(Errno::EINVAL, "cannot shutdown"),
}
}

View File

@ -20,8 +20,8 @@ impl Connected {
addr: Option<UnixSocketAddrBound>,
peer_addr: Option<UnixSocketAddrBound>,
) -> (Connected, Connected) {
let (writer_this, reader_peer) = Channel::with_capacity(DAFAULT_BUF_SIZE).split();
let (writer_peer, reader_this) = Channel::with_capacity(DAFAULT_BUF_SIZE).split();
let (writer_this, reader_peer) = Channel::with_capacity(DEFAULT_BUF_SIZE).split();
let (writer_peer, reader_this) = Channel::with_capacity(DEFAULT_BUF_SIZE).split();
let this = Connected {
addr: addr.clone(),
@ -122,4 +122,4 @@ impl Connected {
}
}
const DAFAULT_BUF_SIZE: usize = 65536;
const DEFAULT_BUF_SIZE: usize = 65536;

View File

@ -4,7 +4,7 @@ use crate::prelude::*;
bitflags! {
/// Flags used for send/recv.
/// The definiton is from https://elixir.bootlin.com/linux/v6.0.9/source/include/linux/socket.h
/// The definition is from https://elixir.bootlin.com/linux/v6.0.9/source/include/linux/socket.h
#[repr(C)]
#[derive(Pod)]
pub struct SendRecvFlags: i32 {

View File

@ -225,7 +225,7 @@ impl VsockSpace {
let Some(listen) = listen_sockets.get(&event.destination.into()) else {
return_errno_with_message!(
Errno::EINVAL,
"connecion request can only be handled by listening socket"
"connection request can only be handled by listening socket"
);
};
let peer = event.source;

View File

@ -56,8 +56,8 @@ impl Listen {
}
pub fn update_io_events(&self) {
let incomming_connection = self.incoming_connection.disable_irq().lock();
if !incomming_connection.is_empty() {
let incoming_connection = self.incoming_connection.disable_irq().lock();
if !incoming_connection.is_empty() {
self.pollee.add_events(IoEvents::IN);
} else {
self.pollee.del_events(IoEvents::IN);

View File

@ -124,7 +124,7 @@ impl CloneFlags {
/// Clone a child thread or child process.
///
/// FIXME: currently, the child process or thread will be scheduled to run at once,
/// but this may not be the expected bahavior.
/// but this may not be the expected behavior.
pub fn clone_child(
ctx: &Context,
parent_context: &UserContext,
@ -411,7 +411,7 @@ fn clone_sighand(
parent_sig_dispositions: &Arc<Mutex<SigDispositions>>,
clone_flags: CloneFlags,
) -> Arc<Mutex<SigDispositions>> {
// similer to CLONE_FILES
// similar to CLONE_FILES
if clone_flags.contains(CloneFlags::CLONE_SIGHAND) {
parent_sig_dispositions.clone()
} else {

View File

@ -49,7 +49,7 @@ impl<R: TRights> Credentials<R> {
/// Gets real user id.
///
/// This method requies the `Read` right.
/// This method requires the `Read` right.
#[require(R > Read)]
pub fn ruid(&self) -> Uid {
self.0.ruid()
@ -57,7 +57,7 @@ impl<R: TRights> Credentials<R> {
/// Gets effective user id.
///
/// This method requies the `Read` right.
/// This method requires the `Read` right.
#[require(R > Read)]
pub fn euid(&self) -> Uid {
self.0.euid()
@ -65,7 +65,7 @@ impl<R: TRights> Credentials<R> {
/// Gets saved-set user id.
///
/// This method requies the `Read` right.
/// This method requires the `Read` right.
#[require(R > Read)]
pub fn suid(&self) -> Uid {
self.0.suid()
@ -73,7 +73,7 @@ impl<R: TRights> Credentials<R> {
/// Gets file system user id.
///
/// This method requies the `Read` right.
/// This method requires the `Read` right.
#[require(R > Read)]
pub fn fsuid(&self) -> Uid {
self.0.fsuid()
@ -143,7 +143,7 @@ impl<R: TRights> Credentials<R> {
/// Gets real group id.
///
/// This method requies the `Read` right.
/// This method requires the `Read` right.
#[require(R > Read)]
pub fn rgid(&self) -> Gid {
self.0.rgid()
@ -151,7 +151,7 @@ impl<R: TRights> Credentials<R> {
/// Gets effective group id.
///
/// This method requies the `Read` right.
/// This method requires the `Read` right.
#[require(R > Read)]
pub fn egid(&self) -> Gid {
self.0.egid()
@ -159,7 +159,7 @@ impl<R: TRights> Credentials<R> {
/// Gets saved-set group id.
///
/// This method requies the `Read` right.
/// This method requires the `Read` right.
#[require(R > Read)]
pub fn sgid(&self) -> Gid {
self.0.sgid()
@ -167,7 +167,7 @@ impl<R: TRights> Credentials<R> {
/// Gets file system group id.
///
/// This method requies the `Read` right.
/// This method requires the `Read` right.
#[require(R > Read)]
pub fn fsgid(&self) -> Gid {
self.0.fsgid()
@ -237,7 +237,7 @@ impl<R: TRights> Credentials<R> {
/// Acquires the read lock of supplementary group ids.
///
/// This method requies the `Read` right.
/// This method requires the `Read` right.
#[require(R > Read)]
pub fn groups(&self) -> RwLockReadGuard<BTreeSet<Gid>> {
self.0.groups()
@ -255,7 +255,7 @@ impl<R: TRights> Credentials<R> {
/// Gets the capabilities that child process can inherit.
///
/// This method requies the `Read` right.
/// This method requires the `Read` right.
#[require(R > Read)]
pub fn inheritable_capset(&self) -> CapSet {
self.0.inheritable_capset()
@ -263,7 +263,7 @@ impl<R: TRights> Credentials<R> {
/// Gets the capabilities that are permitted.
///
/// This method requies the `Read` right.
/// This method requires the `Read` right.
#[require(R > Read)]
pub fn permitted_capset(&self) -> CapSet {
self.0.permitted_capset()
@ -271,7 +271,7 @@ impl<R: TRights> Credentials<R> {
/// Gets the capabilities that actually use.
///
/// This method requies the `Read` right.
/// This method requires the `Read` right.
#[require(R > Read)]
pub fn effective_capset(&self) -> CapSet {
self.0.effective_capset()

View File

@ -33,7 +33,7 @@ pub struct RobustListHead {
impl RobustListHead {
/// Return an iterator for all futexes in the robust list.
///
/// The futex refered to by `list_op_pending`, if any, will be returned as
/// The futex referred to by `list_op_pending`, if any, will be returned as
/// the last item.
pub fn futexes(&self) -> FutexIter<'_> {
FutexIter::new(self)

View File

@ -7,7 +7,7 @@ use crate::{
process::{process_table, Pgid, ProcessGroup},
};
/// A termial is used to interact with system. A terminal can support the shell
/// A terminal is used to interact with system. A terminal can support the shell
/// job control.
///
/// We currently support two kinds of terminal, the tty and pty.

View File

@ -217,7 +217,7 @@ struct InitStackWriter {
impl InitStackWriter {
fn write(mut self) -> Result<()> {
// FIXME: Some OSes may put the first page of excutable file here
// FIXME: Some OSes may put the first page of executable file here
// for interpreting elf headers.
let argc = self.argv.len() as u64;
@ -268,7 +268,7 @@ impl InitStackWriter {
}
/// Libc ABI requires 16-byte alignment of the stack entrypoint.
/// Current postion of the stack is 8-byte aligned already, insert 8 byte
/// Current position of the stack is 8-byte aligned already, insert 8 byte
/// to meet the requirement if necessary.
fn adjust_stack_alignment(&self, envp_pointers: &[u64], argv_pointers: &[u64]) -> Result<()> {
// Ensure 8-byte alignment
@ -285,7 +285,7 @@ impl InitStackWriter {
}
fn write_aux_vec(&self) -> Result<()> {
// Write NULL auxilary
// Write NULL auxiliary
self.write_u64(0)?;
self.write_u64(AuxKey::AT_NULL as u64)?;
// Write Auxiliary vectors

View File

@ -16,7 +16,7 @@ use crate::{
prelude::*,
};
/// Load an executable to root vmar, including loading programe image, preparing heap and stack,
/// Load an executable to root vmar, including loading programme image, preparing heap and stack,
/// initializing argv, envp and aux tables.
/// About recursion_limit: recursion limit is used to limit the recursion depth of shebang executables.
/// If the interpreter(the program behind #!) of shebang executable is also a shebang,

View File

@ -26,7 +26,7 @@ pub fn parse_shebang_line(file_header_buffer: &[u8]) -> Result<Option<Vec<CStrin
if shebang_argv.len() != 1 {
return_errno_with_message!(
Errno::EINVAL,
"One and only one intpreter program should be specified"
"One and only one interpreter program should be specified"
);
}
Ok(Some(shebang_argv))

View File

@ -239,7 +239,7 @@ impl Observer<IoEvents> for EventCounter {
/// according to the events.
///
/// This trait is added instead of creating a new method in [`Pollee`] because sometimes we do not
/// have access to the internal [`Pollee`], but there is a method that provides the same sematics
/// have access to the internal [`Pollee`], but there is a method that provides the same semantics
/// as [`Pollee::poll`] and we need to perform event-based operations using that method.
pub trait Pollable {
/// Returns the interesting events if there are any, or waits for them to happen if there are

View File

@ -26,7 +26,7 @@ impl TryFrom<u8> for SigNum {
}
impl SigNum {
/// Caller must ensure the sig_num is valid. otherweise, use try_from will check sig_num and does not panic.
/// Caller must ensure the sig_num is valid. Otherwise, use try_from, which will check sig_num and does not panic.
pub const fn from_u8(sig_num: u8) -> Self {
if sig_num > MAX_RT_SIG_NUM || sig_num < MIN_STD_SIG_NUM {
panic!("invalid signal number")

View File

@ -83,7 +83,7 @@ impl SigStack {
self.handler_counter -= 1
}
/// Determins whether the stack is executed on by any signal handler
/// Determines whether the stack is executed on by any signal handler
pub fn is_active(&self) -> bool {
// FIXME: can DISABLE stack be used?
self.handler_counter != 0 && !self.flags.contains(SigStackFlags::SS_AUTODISARM)

View File

@ -41,7 +41,7 @@ impl FaultSignal {
let addr = Some(trap_info.page_fault_addr as u64);
(SIGSEGV, code, addr)
}
_ => panic!("Exception cannnot be a signal"),
_ => panic!("Exception cannot be a signal"),
};
FaultSignal { num, code, addr }
}

View File

@ -206,7 +206,7 @@ impl Condvar {
}
/// Wait for the condition to become true,
/// and until the condition is explicitly woken up or interupted.
/// and until the condition is explicitly woken up or interrupted.
///
/// This function blocks until either the condition becomes false
/// or the condition variable is explicitly notified.

View File

@ -76,7 +76,7 @@ pub enum MadviseBehavior {
MADV_HUGEPAGE = 14, /* Worth backing with hugepages */
MADV_NOHUGEPAGE = 15, /* Not worth backing with hugepages */
MADV_DONTDUMP = 16, /* Explicity exclude from the core dump,
MADV_DONTDUMP = 16, /* Explicitly exclude from the core dump,
overrides the coredump filter bits */
MADV_DODUMP = 17, /* Clear the MADV_DONTDUMP flag */

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
//! Read the Cpu ctx content then dispatch syscall to corrsponding handler
//! Read the Cpu ctx content then dispatch syscall to corresponding handler
//! Each sub module contains functions that handle real syscall logic.
pub use clock_gettime::ClockId;
use ostd::cpu::UserContext;
@ -141,7 +141,7 @@ mod waitid;
mod write;
/// This macro is used to define syscall handler.
/// The first param is ths number of parameters,
/// The first param is the number of parameters,
/// The second param is the function name of syscall handler,
/// The third is optional, means the args(if parameter number > 0),
/// The third is optional, means if cpu ctx is required.

View File

@ -21,7 +21,7 @@ pub fn sys_read(
// According to <https://man7.org/linux/man-pages/man2/read.2.html>, if
// the user specified an empty buffer, we should detect errors by checking
// the file discriptor. If no errors detected, return 0 successfully.
// the file descriptor. If no errors detected, return 0 successfully.
let read_len = if buf_len != 0 {
let mut writer = ctx
.process

View File

@ -12,7 +12,7 @@ use crate::{
};
pub fn sys_ftruncate(fd: FileDesc, len: isize, ctx: &Context) -> Result<SyscallReturn> {
debug!("fd = {}, lentgh = {}", fd, len);
debug!("fd = {}, length = {}", fd, len);
check_length(len, ctx)?;

View File

@ -21,7 +21,7 @@ pub fn sys_write(
// According to <https://man7.org/linux/man-pages/man2/write.2.html>, if
// the user specified an empty buffer, we should detect errors by checking
// the file discriptor. If no errors detected, return 0 successfully.
// the file descriptor. If no errors detected, return 0 successfully.
let write_len = if user_buf_len != 0 {
let mut reader = ctx
.process

View File

@ -97,7 +97,7 @@ fn log_trap_info(exception: &CpuException, trap_info: &CpuExceptionInfo) {
DEVICE_NOT_AVAILABLE => log_trap_common!(DEVICE_NOT_AVAILABLE, trap_info),
DOUBLE_FAULT => log_trap_common!(DOUBLE_FAULT, trap_info),
COPROCESSOR_SEGMENT_OVERRUN => log_trap_common!(COPROCESSOR_SEGMENT_OVERRUN, trap_info),
INVAILD_TSS => log_trap_common!(INVAILD_TSS, trap_info),
INVALID_TSS => log_trap_common!(INVALID_TSS, trap_info),
SEGMENT_NOT_PRESENT => log_trap_common!(SEGMENT_NOT_PRESENT, trap_info),
STACK_SEGMENT_FAULT => log_trap_common!(STACK_SEGMENT_FAULT, trap_info),
GENERAL_PROTECTION_FAULT => log_trap_common!(GENERAL_PROTECTION_FAULT, trap_info),

View File

@ -54,7 +54,7 @@ impl SystemTime {
self.0.checked_add(duration).map(SystemTime)
}
/// Substract a duration from self. If the result does not exceed inner bounds return Some(t), else return None.
/// Subtract a duration from self. If the result does not exceed inner bounds return Some(t), else return None.
pub fn checked_sub(&self, duration: Duration) -> Option<Self> {
let duration = convert_to_time_duration(duration);
self.0.checked_sub(duration).map(SystemTime)

View File

@ -334,7 +334,7 @@ impl VmMapping {
///
/// Generally, this function is only used in `protect()` method.
/// This method modifies the parent `Vmar` in the end if subdividing is required.
/// It removes current mapping and add splitted mapping to the Vmar.
/// It removes the current mapping and adds the split mapping to the Vmar.
fn protect_with_subdivision(
&self,
intersect_range: &Range<usize>,
@ -402,7 +402,7 @@ impl VmMapping {
return Ok(());
}
if trim_range.start <= map_to_addr && trim_range.end >= map_to_addr + map_size {
// Fast path: the whole mapping was trimed.
// Fast path: the whole mapping was trimmed.
self.unmap(trim_range, true)?;
mappings_to_remove.push_back(map_to_addr);
return Ok(());

View File

@ -73,7 +73,7 @@ pub use pager::Pager;
///
pub struct Vmo<R = Rights>(pub(super) Arc<Vmo_>, R);
/// Functions exist both for static capbility and dynamic capibility
/// Functions exist both for static capability and dynamic capability
pub trait VmoRightsOp {
/// Returns the access rights.
fn rights(&self) -> Rights;
@ -94,7 +94,7 @@ pub trait VmoRightsOp {
}
// We implement this trait for VMO, so we can use functions on type like Vmo<R> without trait bounds.
// FIXME: This requires the imcomplete feature specialization, which should be fixed further.
// FIXME: This requires the incomplete feature specialization, which should be fixed further.
impl<R> VmoRightsOp for Vmo<R> {
default fn rights(&self) -> Rights {
unimplemented!()

View File

@ -13,7 +13,7 @@ OSDK (short for Operating System Development Kit) is designed to simplify the de
#### Requirements
Currenly, `cargo-osdk` only supports x86_64 ubuntu system.
Currently, `cargo-osdk` only supports x86_64 ubuntu system.
To run a kernel with QEMU, `cargo-osdk` requires the following tools to be installed:
- Rust >= 1.75.0

View File

@ -80,9 +80,9 @@ pub fn new_base_crate(
// here when OSTD is ready
include_linker_script!(["x86_64.ld"]);
// Overrite the main.rs file
// Overwrite the main.rs file
let main_rs = include_str!("main.rs.template");
// Replace all occurence of `#TARGET_NAME#` with the `dep_crate_name`
// Replace all occurrence of `#TARGET_NAME#` with the `dep_crate_name`
let main_rs = main_rs.replace("#TARGET_NAME#", &dep_crate_name.replace('-', "_"));
fs::write("src/main.rs", main_rs).unwrap();
@ -104,10 +104,10 @@ fn add_manifest_dependency(
crate_path: impl AsRef<Path>,
link_unit_test_runner: bool,
) {
let mainfest_path = "Cargo.toml";
let manifest_path = "Cargo.toml";
let mut manifest: toml::Table = {
let content = fs::read_to_string(mainfest_path).unwrap();
let content = fs::read_to_string(manifest_path).unwrap();
toml::from_str(&content).unwrap()
};
@ -151,7 +151,7 @@ fn add_manifest_dependency(
}
let content = toml::to_string(&manifest).unwrap();
fs::write(mainfest_path, content).unwrap();
fs::write(manifest_path, content).unwrap();
}
fn copy_profile_configurations(workspace_root: impl AsRef<Path>) {

View File

@ -1,4 +1,4 @@
# This template file is used by the runner script to generate the acutal grub.cfg
# This template file is used by the runner script to generate the actual grub.cfg
# AUTOMATICALLY GENERATED FILE, DO NOT EDIT IF YOU KNOW WHAT YOU ARE DOING

View File

@ -113,7 +113,7 @@ fn generate_grub_cfg(
// Delete the first two lines that notes the file a template file.
let grub_cfg = grub_cfg.lines().skip(2).collect::<Vec<&str>>().join("\n");
// Set the timout style and timeout.
// Set the timeout style and timeout.
let grub_cfg = grub_cfg
.replace(
"#GRUB_TIMEOUT_STYLE#",

View File

@ -31,10 +31,10 @@ fn aster_rust_toolchain() -> String {
}
fn add_manifest_dependencies(cargo_metadata: &serde_json::Value, crate_name: &str) {
let mainfest_path = get_manifest_path(cargo_metadata, crate_name);
let manifest_path = get_manifest_path(cargo_metadata, crate_name);
let mut manifest: toml::Table = {
let content = fs::read_to_string(mainfest_path).unwrap();
let content = fs::read_to_string(manifest_path).unwrap();
toml::from_str(&content).unwrap()
};
@ -44,7 +44,7 @@ fn add_manifest_dependencies(cargo_metadata: &serde_json::Value, crate_name: &st
dependencies.as_table_mut().unwrap().extend(ostd_dep);
let content = toml::to_string(&manifest).unwrap();
fs::write(mainfest_path, content).unwrap();
fs::write(manifest_path, content).unwrap();
}
// Add `target/osdk/base` to `exclude` array of the workspace manifest

View File

@ -56,8 +56,8 @@ fn apply_args_before_finalize(action_scheme: &mut ActionScheme, args: &CommonArg
if let Some(ref mut boot) = action_scheme.boot {
apply_kv_array(&mut boot.kcmd_args, &args.kcmd_args, "=", &[]);
for init_arg in &args.init_args {
for seperated_arg in init_arg.split(' ') {
boot.init_args.push(seperated_arg.to_string());
for separated_arg in init_arg.split(' ') {
boot.init_args.push(separated_arg.to_string());
}
}
if let Some(initramfs) = &args.initramfs {

View File

@ -26,7 +26,7 @@ pub enum BootMethod {
/// Boot the kernel by making a Qcow2 image with Grub as the bootloader.
GrubQcow2,
/// Use the [QEMU direct boot](https://qemu-project.gitlab.io/qemu/system/linuxboot.html)
/// to boot the kernel with QEMU's built-in Seabios and Coreboot utilites.
/// to boot the kernel with QEMU's built-in Seabios and Coreboot utilities.
#[default]
QemuDirect,
}

View File

@ -18,7 +18,7 @@ pub use qemu::*;
pub struct Scheme {
// The user is not allowed to set this field. However,
// the manifest loader sets this and all actions such
// as runnning, testing, and building will use this field.
// as running, testing, and building will use this field.
pub work_dir: Option<PathBuf>,
#[serde(default)]
pub supported_archs: Vec<Arch>,

View File

@ -45,11 +45,11 @@ pub fn split_to_kv_array(args: &str) -> Vec<String> {
pub fn apply_kv_array(
array: &mut Vec<String>,
args: &Vec<String>,
seperator: &str,
separator: &str,
multi_value_keys: &[&str],
) {
let multi_value_keys = {
let mut inferred_keys = infer_multi_value_keys(array, seperator);
let mut inferred_keys = infer_multi_value_keys(array, separator);
for key in multi_value_keys {
inferred_keys.insert(key.to_string());
}
@ -63,8 +63,8 @@ pub fn apply_kv_array(
let mut multi_value_key_strings: IndexMap<String, Vec<String>> = IndexMap::new();
for item in array.drain(..) {
// Each key-value string has two patterns:
// 1. Seperated by separator: key value / key=value
if let Some(key) = get_key(&item, seperator) {
// 1. Separated by separator: key value / key=value
if let Some(key) = get_key(&item, separator) {
if multi_value_keys.contains(&key) {
if let Some(v) = multi_value_key_strings.get_mut(&key) {
v.push(item);
@ -83,7 +83,7 @@ pub fn apply_kv_array(
}
for arg in args {
if let Some(key) = get_key(arg, seperator) {
if let Some(key) = get_key(arg, separator) {
if multi_value_keys.contains(&key) {
if let Some(v) = multi_value_key_strings.get_mut(&key) {
v.push(arg.to_owned());
@ -108,27 +108,27 @@ pub fn apply_kv_array(
}
}
fn infer_multi_value_keys(array: &Vec<String>, seperator: &str) -> IndexSet<String> {
fn infer_multi_value_keys(array: &Vec<String>, separator: &str) -> IndexSet<String> {
let mut multi_val_keys = IndexSet::new();
let mut occured_keys = IndexSet::new();
let mut occurred_keys = IndexSet::new();
for item in array {
let Some(key) = get_key(item, seperator) else {
let Some(key) = get_key(item, separator) else {
continue;
};
if occured_keys.contains(&key) {
if occurred_keys.contains(&key) {
multi_val_keys.insert(key);
} else {
occured_keys.insert(key);
occurred_keys.insert(key);
}
}
multi_val_keys
}
pub fn get_key(item: &str, seperator: &str) -> Option<String> {
let split = item.split(seperator).collect::<Vec<_>>();
pub fn get_key(item: &str, separator: &str) -> Option<String> {
let split = item.split(separator).collect::<Vec<_>>();
let len = split.len();
if len > 2 || len == 0 {
error_msg!("`{}` is an invalid argument.", item);

View File

@ -133,7 +133,7 @@ impl Default for KtestTree {
}
}
/// The `KtestTreeIter` will iterate over all crates. Yeilding `KtestCrate`s.
/// The `KtestTreeIter` will iterate over all crates. Yielding `KtestCrate`s.
pub struct KtestTreeIter<'a> {
crate_iter: btree_map::Iter<'a, String, KtestCrate>,
}
@ -156,7 +156,7 @@ impl<'a> Iterator for KtestTreeIter<'a> {
type CrateChildrenIter<'a> = btree_map::Iter<'a, String, KtestModule>;
/// The `KtestCrateIter` will iterate over all modules in a crate. Yeilding `KtestModule`s.
/// The `KtestCrateIter` will iterate over all modules in a crate. Yielding `KtestModule`s.
/// The iterator will return modules in the depth-first-search order of the module tree.
pub struct KtestCrateIter<'a> {
path: Vec<(&'a KtestModule, CrateChildrenIter<'a>)>,
@ -192,7 +192,7 @@ impl<'a> Iterator for KtestCrateIter<'a> {
}
}
/// The `KtestModuleIter` will iterate over all tests in a crate. Yeilding `KtestItem`s.
/// The `KtestModuleIter` will iterate over all tests in a crate. Yielding `KtestItem`s.
pub struct KtestModuleIter<'a> {
test_iter: core::slice::Iter<'a, KtestItem>,
}

View File

@ -38,7 +38,7 @@ fn create_user_space(program: &[u8]) -> UserSpace {
let nframes = program.len().align_up(PAGE_SIZE) / PAGE_SIZE;
let user_pages = {
let vm_frames = FrameAllocOptions::new(nframes).alloc().unwrap();
// Phyiscal memory pages can be only accessed
// Physical memory pages can be only accessed
// via the Frame abstraction.
vm_frames.write_bytes(0, program).unwrap();
vm_frames

View File

@ -35,7 +35,7 @@ bitflags::bitflags! {
const EXECUTABLE_IMAGE = 1 << 1;
const LINE_NUMS_STRIPPED = 1 << 2;
const LOCAL_SYMS_STRIPPED = 1 << 3;
const AGGRESIVE_WS_TRIM = 1 << 4;
const AGGRESSIVE_WS_TRIM = 1 << 4;
const LARGE_ADDRESS_AWARE = 1 << 5;
const SIXTEEN_BIT_MACHINE = 1 << 6;
const BYTES_REVERSED_LO = 1 << 7;

View File

@ -9,7 +9,7 @@
//! immediately after the initialization of `ostd`. Thus you can use any
//! feature provided by the frame including the heap allocator, etc.
//!
//! By all means, ostd-test is an individule crate that only requires:
//! By all means, ostd-test is an individual crate that only requires:
//! - a custom linker script section `.ktest_array`,
//! - and an alloc implementation.
//!

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MPL-2.0 */
// The boot routine excecuted by the application processor.
// The boot routine executed by the application processor.
.extern boot_gdtr
.extern boot_page_table_start

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MPL-2.0 */
// The boot routine excecuted by the bootstrap processor.
// The boot routine executed by the bootstrap processor.
// The boot header, initial boot setup code, temporary GDT and page tables are
// in the boot section. The boot section is mapped writable since kernel may

View File

@ -13,7 +13,7 @@
//!
//! Asterinas differentiates the boot protocol by the entry point
//! chosen by the boot loader. In each entry point function,
//! the universal callback registeration method from
//! the universal callback registration method from
//! `crate::boot` will be called. Thus the initialization of
//! boot information is transparent for the upper level kernel.
//!

View File

@ -345,24 +345,24 @@ struct MemoryEntry {
impl MemoryEntry {
fn size(&self) -> u32 {
// SAFETY: the entry can only be contructed from a valid address.
// SAFETY: the entry can only be constructed from a valid address.
unsafe { (self.ptr as *const u32).read_unaligned() }
}
fn base_addr(&self) -> u64 {
// SAFETY: the entry can only be contructed from a valid address.
// SAFETY: the entry can only be constructed from a valid address.
unsafe { ((self.ptr + 4) as *const u64).read_unaligned() }
}
fn length(&self) -> u64 {
// SAFETY: the entry can only be contructed from a valid address.
// SAFETY: the entry can only be constructed from a valid address.
unsafe { ((self.ptr + 12) as *const u64).read_unaligned() }
}
fn memory_type(&self) -> MemoryRegionType {
// The multiboot (v1) manual doesn't specify the length of the type field.
// Experimental result shows that "u8" works. So be it.
// SAFETY: the entry can only be contructed from a valid address.
// SAFETY: the entry can only be constructed from a valid address.
let typ_val = unsafe { ((self.ptr + 20) as *const u8).read_unaligned() };
// The meaning of the values is, however, documented clearly by the manual.
match typ_val {

View File

@ -142,11 +142,11 @@ fn send_startup_to_all_aps() {
let icr = Icr::new(
ApicId::from(0),
DestinationShorthand::AllExcludingSelf,
TriggerMode::Egde,
TriggerMode::Edge,
Level::Assert,
DeliveryStatus::Idle,
DestinationMode::Physical,
DeliveryMode::StrartUp,
DeliveryMode::StartUp,
(AP_BOOT_START_PA / PAGE_SIZE) as u8,
);
// SAFETY: we are sending startup IPI to all APs.

View File

@ -238,7 +238,7 @@ define_cpu_exception!(
[DEVICE_NOT_AVAILABLE = 7, Fault],
[DOUBLE_FAULT = 8, Abort],
[COPROCESSOR_SEGMENT_OVERRUN = 9, Fault],
[INVAILD_TSS = 10, Fault],
[INVALID_TSS = 10, Fault],
[SEGMENT_NOT_PRESENT = 11, Fault],
[STACK_SEGMENT_FAULT = 12, Fault],
[GENERAL_PROTECTION_FAULT = 13, Fault],

View File

@ -67,7 +67,7 @@ impl SerialPort {
// set interrupt watermark at 14 bytes
self.fifo_ctrl.write(0xC7);
// Mark data terminal ready, signal request to send
// and enable auxilliary output #2 (used as interrupt line for CPU)
// and enable auxiliary output #2 (used as interrupt line for CPU)
self.modem_ctrl.write(0x0B);
// Enable interrupts
self.int_en.write(0x01);

View File

@ -117,7 +117,7 @@ impl RootTable {
)
.unwrap();
if bus_entry.is_present() {
warn!("IOMMU: Overwritting the existing device page table");
warn!("IOMMU: Overwriting the existing device page table");
}
let address = unsafe { page_table.root_paddr() };
context_table.page_tables.insert(address, page_table);

View File

@ -156,7 +156,7 @@ impl Debug for FaultRecording {
.field("Request type", &self.request_type())
.field("Address type", &self.address_type())
.field("Source identifier", &self.source_identifier())
.field("Fault Reson", &self.fault_reason())
.field("Fault Reason", &self.fault_reason())
.field("Fault info", &self.fault_info())
.field("Raw", &self.0)
.finish()

View File

@ -24,7 +24,7 @@ use crate::{
/// RSDP information, key is the signature, value is the virtual address of the signature
pub static ACPI_TABLES: Once<SpinLock<AcpiTables<AcpiMemoryHandler>>> = Once::new();
/// Sdt header wrapper, user can use this structure to easily derive Debug, get table information without creating a new struture.
/// Sdt header wrapper, user can use this structure to easily derive Debug, get table information without creating a new structure.
///
/// For example, in DMAR (DMA Remapping) structure,
/// we can use the following code to get some information of DMAR, including address, length:

View File

@ -22,7 +22,7 @@ static APIC_TYPE: Once<ApicType> = Once::new();
///
/// You should provide a closure operating on the given mutable borrow of the
/// local APIC instance. During the execution of the closure, the interrupts
/// are guarenteed to be disabled.
/// are guaranteed to be disabled.
///
/// Example:
/// ```rust
@ -38,7 +38,7 @@ pub fn borrow<R>(f: impl FnOnce(&mut (dyn Apic + 'static)) -> R) -> R {
let irq_guard = crate::trap::disable_local();
let apic_guard = APIC_INSTANCE.get_with(&irq_guard);
// If it is not initialzed, lazily initialize it.
// If it is not initialized, lazily initialize it.
if !apic_guard.is_completed() {
apic_guard.call_once(|| match APIC_TYPE.get().unwrap() {
ApicType::XApic => {
@ -115,7 +115,7 @@ enum ApicType {
/// The inter-processor interrupt control register.
///
/// ICR is a 64-bit local APIC register that allows software running on the
/// porcessor to specify and send IPIs to other porcessors in the system.
/// processor to specify and send IPIs to other processors in the system.
/// To send an IPI, software must set up the ICR to indicate the type of IPI
/// message to be sent and the destination processor or processors. (All fields
/// of the ICR are read-write by software with the exception of the delivery
@ -248,7 +248,7 @@ pub enum DestinationShorthand {
#[repr(u64)]
pub enum TriggerMode {
Egde = 0,
Edge = 0,
Level = 1,
}
@ -297,7 +297,7 @@ pub enum DeliveryMode {
/// perform an initialization.
Init = 0b101,
/// Start-up Interrupt
StrartUp = 0b110,
StartUp = 0b110,
}
#[derive(Debug)]

View File

@ -110,7 +110,7 @@ bitflags! {
const CAPABILITIES_LIST = 1 << 4;
/// Sets to 1 if the device is capable of running at 66 MHz.
const MHZ66_CAPABLE = 1 << 5;
/// Sets to 1 if the device can accpet fast back-to-back transactions
/// Sets to 1 if the device can accept fast back-to-back transactions
/// that are not from the same agent.
const FAST_BACK_TO_BACK_CAPABLE = 1 << 7;
/// This bit is only set when the following conditions are met:
@ -136,7 +136,7 @@ bitflags! {
/// Sets to 1 by a master device when its transaction is terminated with
/// Target-Abort
const RECEIVED_TARGET_ABORT = 1 << 12;
/// Sets to 1 by a master device when its transcation (except for Special
/// Sets to 1 by a master device when its transaction (except for Special
/// Cycle transactions) is terminated with Master-Abort.
const RECEIVED_MASTER_ABORT = 1 << 13;
/// Sets to 1 when the device asserts SERR#

View File

@ -59,7 +59,7 @@ pub struct PciDeviceLocation {
pub bus: u8,
/// Device number with max 31
pub device: u8,
/// Deivce number with max 7
/// Device number with max 7
pub function: u8,
}

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
//! The implementaion of CPU-local variables that have inner mutability.
//! The implementation of CPU-local variables that have inner mutability.
use core::cell::UnsafeCell;
@ -35,7 +35,7 @@ use crate::arch;
///
/// let _irq_guard = ostd::trap::disable_local_irq();
/// println!("1st FOO VAL: {:?}", FOO.load());
/// // No suprises here, the two accesses must result in the same value.
/// // No surprises here, the two accesses must result in the same value.
/// println!("2nd FOO VAL: {:?}", FOO.load());
/// }
/// ```

View File

@ -58,7 +58,7 @@ macro_rules! cpu_local {
/// CPU-local objects.
///
/// CPU-local objects are instanciated once per CPU core. They can be shared to
/// CPU-local objects are instantiated once per CPU core. They can be shared to
/// other cores. In the context of a preemptible kernel task, when holding the
/// reference to the inner object, the object is always the one in the original
/// core (when the reference is created), no matter which core the code is
@ -169,7 +169,7 @@ impl<T: 'static + Sync> CpuLocal<T> {
}
// SAFETY: At any given time, only one task can access the inner value `T` of a
// CPU-local variable if `T` is not `Sync`. We guarentee it by disabling the
// CPU-local variable if `T` is not `Sync`. We guarantee it by disabling the
// reference to the inner value, or turning off preemptions when creating
// the reference.
unsafe impl<T: 'static> Sync for CpuLocal<T> {}

View File

@ -50,7 +50,7 @@ pub use ostd_macros::main;
pub use ostd_pod::Pod;
pub use self::{error::Error, prelude::Result};
// [`CpuLocalCell`] is easy to be mis-used, so we don't expose it to the users.
// [`CpuLocalCell`] is easy to be misused, so we don't expose it to the users.
pub(crate) use crate::cpu::local::cpu_local_cell;
/// Initializes OSTD.

View File

@ -266,7 +266,7 @@ mod test {
}
#[ktest]
fn reader_and_wirter() {
fn reader_and_writer() {
let vm_segment = FrameAllocOptions::new(2)
.is_contiguous(true)
.alloc_contiguous()

View File

@ -358,7 +358,7 @@ mod test {
}
#[ktest]
fn reader_and_wirter() {
fn reader_and_writer() {
let vm_segment = FrameAllocOptions::new(2)
.is_contiguous(true)
.alloc_contiguous()

Some files were not shown because too many files have changed in this diff Show More