Rename ostd::trap to ostd::irq

Tate, Hongliang Tian · 2025-09-14 01:18:28 +08:00 · committed by Tate, Hongliang Tian
parent 2e46edb68d
commit c485d512f6
57 changed files with 162 additions and 166 deletions
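
The rename is mechanical: items that previously lived under `ostd::trap::irq` (plus the handler re-exports under `ostd::trap`) are now exported directly from `ostd::irq`. Below is a minimal before/after sketch of the caller-side change, mirroring the hunks that follow; it is illustrative and not part of the diff.

```rust
// Old paths (removed lines below):
//     use ostd::trap::irq::IrqLine;
//     let guard = ostd::trap::irq::disable_local();
//
// New paths (added lines below):
use ostd::irq::{self, IrqLine};

fn with_local_irqs_disabled(_line: &IrqLine) {
    // Local IRQs stay disabled until the guard is dropped; nested calls are
    // allowed, and IRQs are only re-enabled when the outermost guard drops.
    let _guard = irq::disable_local();
    // ... critical section ...
}
```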

View File

@ -10,7 +10,7 @@ use ostd::{
kernel::{MappedIrqLine, IRQ_CHIP},
trap::TrapFrame,
},
trap::irq::IrqLine,
irq::IrqLine,
};
use spin::Once;

View File

@ -13,10 +13,7 @@ use component::{init_component, ComponentInitError};
use lock::is_softirq_enabled;
use ostd::{
cpu_local_cell,
trap::{
irq::{disable_local, DisabledLocalIrqGuard},
register_bottom_half_handler,
},
irq::{disable_local, register_bottom_half_handler, DisabledLocalIrqGuard},
};
use spin::Once;

View File

@ -2,12 +2,12 @@
use ostd::{
cpu_local_cell,
irq::{disable_local, in_interrupt_context},
sync::{GuardTransfer, SpinGuardian},
task::{
atomic_mode::{AsAtomicModeGuard, InAtomicMode},
disable_preempt, DisabledPreemptGuard,
},
trap::{in_interrupt_context, irq::disable_local},
};
use crate::process_all_pending;

View File

@ -8,7 +8,7 @@ use core::{
};
use intrusive_collections::{intrusive_adapter, LinkedList, LinkedListAtomicLink};
use ostd::{cpu::local::StaticCpuLocal, cpu_local, sync::SpinLock, trap};
use ostd::{cpu::local::StaticCpuLocal, cpu_local, irq, sync::SpinLock};
use super::{
softirq_id::{TASKLESS_SOFTIRQ_ID, TASKLESS_URGENT_SOFTIRQ_ID},
@ -131,7 +131,7 @@ fn do_schedule(
{
return;
}
let irq_guard = trap::irq::disable_local();
let irq_guard = irq::disable_local();
taskless_list
.get_with(&irq_guard)
.borrow_mut()
@ -158,7 +158,7 @@ fn taskless_softirq_handler(
softirq_id: u8,
) {
let mut processing_list = {
let irq_guard = trap::irq::disable_local();
let irq_guard = irq::disable_local();
let guard = taskless_list.get_with(&irq_guard);
let mut list_mut = guard.borrow_mut();
LinkedList::take(list_mut.deref_mut())
@ -170,7 +170,7 @@ fn taskless_softirq_handler(
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
let irq_guard = trap::irq::disable_local();
let irq_guard = irq::disable_local();
taskless_list
.get_with(&irq_guard)
.borrow_mut()

View File

@ -12,13 +12,13 @@ use log::info;
#[cfg(target_arch = "x86_64")]
use ostd::arch::kernel::MappedIrqLine;
#[cfg(target_arch = "riscv64")] // TODO: Add `MappedIrqLine` support for RISC-V.
use ostd::trap::irq::IrqLine as MappedIrqLine;
use ostd::irq::IrqLine as MappedIrqLine;
#[cfg(target_arch = "loongarch64")] // TODO: Add `MappedIrqLine` support for Loongarch.
use ostd::trap::irq::IrqLine as MappedIrqLine;
use ostd::irq::IrqLine as MappedIrqLine;
use ostd::{
io::IoMem,
irq::IrqLine,
mm::{HasPaddr, VmIoOnce},
trap::irq::IrqLine,
Error, Result,
};

View File

@ -28,7 +28,7 @@ pub(super) fn init() {
fn x86_probe() {
use common_device::{mmio_check_magic, mmio_read_device_id, MmioCommonDevice};
use log::debug;
use ostd::{arch::kernel::IRQ_CHIP, io::IoMem, trap::irq::IrqLine};
use ostd::{arch::kernel::IRQ_CHIP, io::IoMem, irq::IrqLine};
// TODO: The correct method for detecting VirtIO-MMIO devices on x86_64 systems is to parse the
// kernel command line if ACPI tables are absent [1], or the ACPI SSDT if ACPI tables are

View File

@ -9,9 +9,9 @@ use log::warn;
use ostd::{
bus::pci::cfg_space::Bar,
io::IoMem,
irq::IrqCallbackFunction,
mm::{DmaCoherent, HasDaddr, PAGE_SIZE},
sync::RwLock,
trap::irq::IrqCallbackFunction,
};
use super::{

View File

@ -8,8 +8,8 @@ use aster_util::safe_ptr::SafePtr;
use ostd::{
arch::trap::TrapFrame,
io::IoMem,
irq::{IrqCallbackFunction, IrqLine},
sync::RwLock,
trap::irq::{IrqCallbackFunction, IrqLine},
};
/// Multiplexing Irqs. The two interrupt types (configuration space change and queue interrupt)

View File

@ -8,8 +8,8 @@ use ostd::{
arch::device::io_port::{PortRead, PortWrite},
bus::pci::cfg_space::Bar,
io::IoMem,
irq::IrqCallbackFunction,
mm::{DmaCoherent, PodOnce},
trap::irq::IrqCallbackFunction,
Pod,
};

View File

@ -14,8 +14,8 @@ use ostd::{
BusProbeError,
},
io::IoMem,
irq::IrqCallbackFunction,
mm::{DmaCoherent, HasDaddr},
trap::irq::IrqCallbackFunction,
};
use super::{common_cfg::VirtioPciCommonCfg, msix::VirtioMsixManager};

View File

@ -11,8 +11,8 @@ use ostd::{
BusProbeError,
},
io::IoMem,
irq::IrqCallbackFunction,
mm::{DmaCoherent, HasDaddr, PAGE_SIZE},
trap::irq::IrqCallbackFunction,
};
use crate::{

View File

@ -2,7 +2,7 @@
use alloc::vec::Vec;
use ostd::{bus::pci::capability::msix::CapabilityMsixData, trap::irq::IrqLine};
use ostd::{bus::pci::capability::msix::CapabilityMsixData, irq::IrqLine};
pub struct VirtioMsixManager {
config_msix_vector: u16,

View File

@ -10,6 +10,7 @@ use core::{fmt, sync::atomic::Ordering};
use ostd::{
arch::read_tsc as sched_clock,
cpu::{all_cpus, CpuId, PinCurrentCpu},
irq::disable_local,
sync::SpinLock,
task::{
scheduler::{
@ -18,7 +19,6 @@ use ostd::{
},
AtomicCpuId, Task,
},
trap::irq::disable_local,
};
use super::{

View File

@ -6,8 +6,8 @@ use core::{alloc::Layout, cell::RefCell};
use ostd::{
cpu_local,
irq::DisabledLocalIrqGuard,
mm::{Paddr, PAGE_SIZE},
trap::irq::DisabledLocalIrqGuard,
};
cpu_local! {

View File

@ -32,8 +32,8 @@ use core::alloc::Layout;
use ostd::{
cpu::PinCurrentCpu,
irq,
mm::{frame::GlobalFrameAllocator, Paddr},
trap,
};
mod cache;
@ -64,7 +64,7 @@ pub struct FrameAllocator;
impl GlobalFrameAllocator for FrameAllocator {
fn alloc(&self, layout: Layout) -> Option<Paddr> {
let guard = trap::irq::disable_local();
let guard = irq::disable_local();
let res = cache::alloc(&guard, layout);
if res.is_some() {
TOTAL_FREE_SIZE.sub(guard.current_cpu(), layout.size());
@ -73,13 +73,13 @@ impl GlobalFrameAllocator for FrameAllocator {
}
fn dealloc(&self, addr: Paddr, size: usize) {
let guard = trap::irq::disable_local();
let guard = irq::disable_local();
TOTAL_FREE_SIZE.add(guard.current_cpu(), size);
cache::dealloc(&guard, addr, size);
}
fn add_free_memory(&self, addr: Paddr, size: usize) {
let guard = trap::irq::disable_local();
let guard = irq::disable_local();
TOTAL_FREE_SIZE.add(guard.current_cpu(), size);
pools::add_free_memory(&guard, addr, size);
}

View File

@ -11,9 +11,9 @@ use core::{
use ostd::{
cpu_local,
irq::DisabledLocalIrqGuard,
mm::Paddr,
sync::{LocalIrqDisabled, SpinLock, SpinLockGuard},
trap::irq::DisabledLocalIrqGuard,
};
use crate::chunk::{greater_order_of, lesser_order_of, size_of_order, split_to_chunks, BuddyOrder};

View File

@ -89,7 +89,7 @@ impl FastSmpCounter {
#[cfg(ktest)]
mod test {
use ostd::{cpu::PinCurrentCpu, prelude::*, trap};
use ostd::{cpu::PinCurrentCpu, irq, prelude::*};
#[ktest]
fn test_per_cpu_counter() {
@ -98,7 +98,7 @@ mod test {
pub static FREE_SIZE_COUNTER: usize;
}
let guard = trap::irq::disable_local();
let guard = irq::disable_local();
let cur_cpu = guard.current_cpu();
FREE_SIZE_COUNTER.add(cur_cpu, 10);
assert_eq!(FREE_SIZE_COUNTER.get(), 10);

View File

@ -8,13 +8,12 @@ use core::{
};
use ostd::{
cpu_local,
cpu_local, irq,
mm::{
heap::{GlobalHeapAllocator, HeapSlot, SlabSlotList, SlotInfo},
PAGE_SIZE,
},
sync::{LocalIrqDisabled, SpinLock},
trap,
};
use crate::slab_cache::SlabCache;
@ -296,7 +295,7 @@ impl GlobalHeapAllocator for HeapAllocator {
return HeapSlot::alloc_large(layout.size().div_ceil(PAGE_SIZE) * PAGE_SIZE);
};
let irq_guard = trap::irq::disable_local();
let irq_guard = irq::disable_local();
let this_cache = LOCAL_POOL.get_with(&irq_guard);
let mut local_cache = this_cache.borrow_mut();
@ -309,7 +308,7 @@ impl GlobalHeapAllocator for HeapAllocator {
return Ok(());
};
let irq_guard = trap::irq::disable_local();
let irq_guard = irq::disable_local();
let this_cache = LOCAL_POOL.get_with(&irq_guard);
let mut local_cache = this_cache.borrow_mut();

View File

@ -54,7 +54,7 @@ fn main() {
#[ostd::ktest::panic_handler]
fn panic_handler(info: &core::panic::PanicInfo) -> ! {
let _irq_guard = ostd::trap::irq::disable_local();
let _irq_guard = ostd::irq::disable_local();
use alloc::{boxed::Box, string::ToString};

View File

@ -11,7 +11,7 @@ use crate::{
mm::tlb_flush_addr,
trap::{RawUserContext, TrapFrame},
},
trap::call_irq_callback_functions,
irq::call_irq_callback_functions,
user::{ReturnReason, UserContextApi, UserContextApiInternal},
};

View File

@ -13,8 +13,8 @@ pub use trap::TrapFrame;
use crate::{
arch::{cpu::context::CpuExceptionInfo, mm::tlb_flush_addr},
cpu_local_cell,
irq::call_irq_callback_functions,
mm::MAX_USERSPACE_VADDR,
trap::call_irq_callback_functions,
};
cpu_local_cell! {

View File

@ -11,7 +11,7 @@ use crate::{
trap::{RawUserContext, TrapFrame},
TIMER_IRQ_NUM,
},
trap::call_irq_callback_functions,
irq::call_irq_callback_functions,
user::{ReturnReason, UserContextApi, UserContextApiInternal},
};

View File

@ -12,8 +12,8 @@ use spin::Once;
use crate::{
arch::{self, boot::DEVICE_TREE, cpu::extension::IsaExtensions, trap::TrapFrame},
cpu::{CpuId, PinCurrentCpu},
irq::{self, IrqLine},
timer::INTERRUPT_CALLBACKS,
trap::{self, irq::IrqLine},
};
/// The timer frequency (Hz). Here we choose 1000Hz since 1000Hz is easier for
@ -80,7 +80,7 @@ pub(super) unsafe fn init() {
}
fn timer_callback(_: &TrapFrame) {
let irq_guard = trap::irq::disable_local();
let irq_guard = irq::disable_local();
if irq_guard.current_cpu() == CpuId::bsp() {
crate::timer::jiffies::ELAPSED.fetch_add(1, Ordering::Relaxed);
}

View File

@ -13,7 +13,7 @@ pub(super) use trap::RawUserContext;
pub use trap::TrapFrame;
use super::{cpu::context::CpuExceptionInfo, timer::TIMER_IRQ_NUM};
use crate::{cpu_local_cell, trap::call_irq_callback_functions};
use crate::{cpu_local_cell, irq::call_irq_callback_functions};
cpu_local_cell! {
static IS_KERNEL_INTERRUPTED: bool = false;

View File

@ -19,9 +19,9 @@ use x86_64::registers::{
use crate::{
arch::trap::{RawUserContext, TrapFrame},
irq::call_irq_callback_functions,
mm::Vaddr,
task::scheduler,
trap::call_irq_callback_functions,
user::{ReturnReason, UserContextApi, UserContextApiInternal},
};
@ -70,8 +70,8 @@ pub struct GeneralRegs {
/// Architectural CPU exceptions (x86-64 vectors 0-31).
///
/// For the authoritative specification of each vector, see the
/// Intel® 64 and IA-32 Architectures Software Developers Manual,
/// Volume 3 “System Programming Guide”, Chapter 6 “Interrupt and Exception
/// Handling”, in particular Section 6.15 “Exception and Interrupt
/// Reference”.
@ -112,14 +112,14 @@ pub enum CpuException {
SegmentNotPresent(SelectorErrorCode),
/// 12 #SS Stack-segment fault.
StackSegmentFault(SelectorErrorCode),
/// 13 #GP General protection fault
GeneralProtectionFault(Option<SelectorErrorCode>),
/// 14 #PF Page fault.
PageFault(RawPageFaultInfo),
// 15: Reserved
/// 16 #MF x87 floating-point exception.
X87FloatingPointException,
/// 17 #AC Alignment check.
AlignmentCheck,
/// 18 #MC Machine check.
MachineCheck,

View File

@ -11,8 +11,8 @@ use volatile::{access::ReadWrite, VolatileRef};
use super::registers::Capability;
use crate::{
arch::trap::TrapFrame,
irq::IrqLine,
sync::{LocalIrqDisabled, SpinLock},
trap::irq::IrqLine,
};
#[derive(Debug)]

View File

@ -76,7 +76,7 @@ impl super::Apic for X2Apic {
}
unsafe fn send_ipi(&self, icr: super::Icr) {
let _guard = crate::trap::irq::disable_local();
let _guard = crate::irq::disable_local();
// SAFETY: These `rdmsr` and `wrmsr` instructions write the interrupt command to APIC and
// wait for results. The caller guarantees it's safe to execute this interrupt command.
unsafe {

View File

@ -80,7 +80,7 @@ impl super::Apic for XApic {
}
unsafe fn send_ipi(&self, icr: super::Icr) {
let _guard = crate::trap::irq::disable_local();
let _guard = crate::irq::disable_local();
self.write(xapic::XAPIC_ESR, 0);
// The upper 32 bits of ICR must be written into XAPIC_ICR1 first,
// because writing into XAPIC_ICR0 will trigger the action of

View File

@ -11,7 +11,7 @@ use volatile::{
};
use crate::{
arch::if_tdx_enabled, io::IoMemAllocatorBuilder, mm::paddr_to_vaddr, trap::irq::IrqLine, Error,
arch::if_tdx_enabled, io::IoMemAllocatorBuilder, irq::IrqLine, mm::paddr_to_vaddr, Error,
Result,
};

View File

@ -11,7 +11,7 @@ use log::info;
use spin::Once;
use super::acpi::get_acpi_tables;
use crate::{io::IoMemAllocatorBuilder, sync::SpinLock, trap::irq::IrqLine, Error, Result};
use crate::{io::IoMemAllocatorBuilder, irq::IrqLine, sync::SpinLock, Error, Result};
mod ioapic;
mod pic;

View File

@ -12,7 +12,7 @@ use crate::{
},
trap::TrapFrame,
},
trap::irq::IrqLine,
irq::IrqLine,
};
/// The frequency in Hz of the Time Stamp Counter (TSC).

View File

@ -12,8 +12,8 @@ use crate::{
trap::TrapFrame,
tsc_freq,
},
irq::IrqLine,
task::disable_preempt,
trap::irq::IrqLine,
};
/// Initializes APIC with TSC-deadline mode or periodic mode.

View File

@ -12,8 +12,8 @@ use volatile::{
use crate::{
arch::kernel::{acpi::get_acpi_tables, MappedIrqLine, IRQ_CHIP},
irq::IrqLine,
mm::paddr_to_vaddr,
trap::irq::IrqLine,
};
static HPET_INSTANCE: Once<Hpet> = Once::new();

View File

@ -14,8 +14,8 @@ use super::trap::TrapFrame;
use crate::{
arch::kernel,
cpu::{CpuId, PinCurrentCpu},
irq::{self, IrqLine},
timer::INTERRUPT_CALLBACKS,
trap::{self, irq::IrqLine},
};
/// The timer frequency (Hz).
@ -62,7 +62,7 @@ pub(super) fn init_ap() {
}
fn timer_callback(_: &TrapFrame) {
let irq_guard = trap::irq::disable_local();
let irq_guard = irq::disable_local();
if irq_guard.current_cpu() == CpuId::bsp() {
crate::timer::jiffies::ELAPSED.fetch_add(1, Ordering::SeqCst);
}

View File

@ -16,7 +16,7 @@ use crate::{
timer::TIMER_FREQ,
},
io::{sensitive_io_port, IoPort},
trap::irq::IrqLine,
irq::IrqLine,
};
/// PIT Operating Mode.

View File

@ -33,13 +33,13 @@ use crate::{
irq::{disable_local, enable_local},
},
cpu_local_cell,
irq::call_irq_callback_functions,
mm::{
kspace::{KERNEL_PAGE_TABLE, LINEAR_MAPPING_BASE_VADDR, LINEAR_MAPPING_VADDR_RANGE},
page_prop::{CachePolicy, PageProperty},
PageFlags, PrivilegedPageFlags as PrivFlags, MAX_USERSPACE_VADDR, PAGE_SIZE,
},
task::disable_preempt,
trap::call_irq_callback_functions,
};
cfg_if! {

View File

@ -14,8 +14,8 @@ use crate::{
common_device::PciCommonDevice,
device_info::PciDeviceLocation,
},
irq::IrqLine,
mm::VmIoOnce,
trap::irq::IrqLine,
};
/// MSI-X capability. It will set the BAR space it uses to be hidden.

View File

@ -33,7 +33,7 @@ use crate::arch;
/// // You can avoid this by disabling interrupts (and preemption, if needed).
/// println!("BAR VAL: {:?}", BAR.load());
///
/// let _irq_guard = ostd::trap::irq::disable_local_irq();
/// let _irq_guard = ostd::irq::disable_local_irq();
/// println!("1st FOO VAL: {:?}", FOO.load());
/// // No surprises here, the two accesses must result in the same value.
/// println!("2nd FOO VAL: {:?}", FOO.load());

View File

@ -9,8 +9,8 @@ use bitvec::prelude::{bitvec, BitVec};
use super::{AnyStorage, CpuLocal};
use crate::{
cpu::{all_cpus, num_cpus, CpuId, PinCurrentCpu},
irq::DisabledLocalIrqGuard,
mm::{paddr_to_vaddr, FrameAllocOptions, HasPaddr, Segment, Vaddr, PAGE_SIZE},
trap::irq::DisabledLocalIrqGuard,
Result,
};

View File

@ -55,8 +55,8 @@ use static_cpu_local::StaticStorage;
use super::CpuId;
use crate::{
irq::DisabledLocalIrqGuard,
mm::{frame::allocator, paddr_to_vaddr, Paddr, PAGE_SIZE},
trap::irq::DisabledLocalIrqGuard,
};
/// Dynamically-allocated CPU-local objects.
@ -324,7 +324,7 @@ mod test {
crate::cpu_local! {
static FOO: RefCell<usize> = RefCell::new(1);
}
let irq_guard = crate::trap::irq::disable_local();
let irq_guard = crate::irq::disable_local();
let foo_guard = FOO.get_with(&irq_guard);
assert_eq!(*foo_guard.borrow(), 1);
*foo_guard.borrow_mut() = 2;
@ -337,7 +337,7 @@ mod test {
crate::cpu_local_cell! {
static BAR: usize = 3;
}
let _guard = crate::trap::irq::disable_local();
let _guard = crate::irq::disable_local();
assert_eq!(BAR.load(), 3);
BAR.store(4);
assert_eq!(BAR.load(), 4);

View File

@ -41,7 +41,7 @@ pub trait SingleInstructionAddAssign<Rhs = Self> {
impl<T: num_traits::WrappingAdd + Copy> SingleInstructionAddAssign<T> for T {
default unsafe fn add_assign(offset: *mut Self, rhs: T) {
let _guard = crate::trap::irq::disable_local();
let _guard = crate::irq::disable_local();
let base = crate::arch::cpu::local::get_base() as usize;
let addr = (base + offset as usize) as *mut Self;
// SAFETY:
@ -67,7 +67,7 @@ pub trait SingleInstructionSubAssign<Rhs = Self> {
impl<T: num_traits::WrappingSub + Copy> SingleInstructionSubAssign<T> for T {
default unsafe fn sub_assign(offset: *mut Self, rhs: T) {
let _guard = crate::trap::irq::disable_local();
let _guard = crate::irq::disable_local();
let base = crate::arch::cpu::local::get_base() as usize;
let addr = (base + offset as usize) as *mut Self;
// SAFETY: Same as `add_assign`.
@ -87,7 +87,7 @@ pub trait SingleInstructionBitOrAssign<Rhs = Self> {
impl<T: core::ops::BitOr<Output = T> + Copy> SingleInstructionBitOrAssign<T> for T {
default unsafe fn bitor_assign(offset: *mut Self, rhs: T) {
let _guard = crate::trap::irq::disable_local();
let _guard = crate::irq::disable_local();
let base = crate::arch::cpu::local::get_base() as usize;
let addr = (base + offset as usize) as *mut Self;
// SAFETY: Same as `add_assign`.
@ -107,7 +107,7 @@ pub trait SingleInstructionBitAndAssign<Rhs = Self> {
impl<T: core::ops::BitAnd<Output = T> + Copy> SingleInstructionBitAndAssign<T> for T {
default unsafe fn bitand_assign(offset: *mut Self, rhs: T) {
let _guard = crate::trap::irq::disable_local();
let _guard = crate::irq::disable_local();
let base = crate::arch::cpu::local::get_base() as usize;
let addr = (base + offset as usize) as *mut Self;
// SAFETY: Same as `add_assign`.
@ -127,7 +127,7 @@ pub trait SingleInstructionBitXorAssign<Rhs = Self> {
impl<T: core::ops::BitXor<Output = T> + Copy> SingleInstructionBitXorAssign<T> for T {
default unsafe fn bitxor_assign(offset: *mut Self, rhs: T) {
let _guard = crate::trap::irq::disable_local();
let _guard = crate::irq::disable_local();
let base = crate::arch::cpu::local::get_base() as usize;
let addr = (base + offset as usize) as *mut Self;
// SAFETY: Same as `add_assign`.
@ -147,7 +147,7 @@ pub trait SingleInstructionLoad {
impl<T: Copy> SingleInstructionLoad for T {
default unsafe fn load(offset: *const Self) -> Self {
let _guard = crate::trap::irq::disable_local();
let _guard = crate::irq::disable_local();
let base = crate::arch::cpu::local::get_base() as usize;
let ptr = (base + offset as usize) as *const Self;
// SAFETY: Same as `add_assign`.
@ -167,7 +167,7 @@ pub trait SingleInstructionStore {
impl<T: Copy> SingleInstructionStore for T {
default unsafe fn store(offset: *mut Self, val: Self) {
let _guard = crate::trap::irq::disable_local();
let _guard = crate::irq::disable_local();
let base = crate::arch::cpu::local::get_base() as usize;
let ptr = (base + offset as usize) as *mut Self;
// SAFETY: Same as `add_assign`.

View File

@ -10,7 +10,7 @@
use core::marker::PhantomData;
use super::{AnyStorage, CpuLocal, __cpu_local_end, __cpu_local_start};
use crate::{arch, cpu::CpuId, trap::irq::DisabledLocalIrqGuard};
use crate::{arch, cpu::CpuId, irq::DisabledLocalIrqGuard};
/// Defines a statically-allocated CPU-local variable.
///
@ -38,7 +38,7 @@ use crate::{arch, cpu::CpuId, trap::irq::DisabledLocalIrqGuard};
/// let val_of_foo = ref_of_foo.load(Ordering::Relaxed);
/// println!("FOO VAL: {}", val_of_foo);
///
/// let irq_guard = trap::irq::disable_local();
/// let irq_guard = irq::disable_local();
/// let bar_guard = BAR.get_with(&irq_guard);
/// let val_of_bar = bar_guard.get();
/// println!("BAR VAL: {}", val_of_bar);

View File

@ -136,7 +136,7 @@ unsafe fn set_this_cpu_id(id: u32) {
/// The implementor must ensure that the current task is pinned to the current
/// CPU while any one of the instances of the implemented structure exists.
///
/// [`DisabledLocalIrqGuard`]: crate::trap::irq::DisabledLocalIrqGuard
/// [`DisabledLocalIrqGuard`]: crate::irq::DisabledLocalIrqGuard
/// [`DisabledPreemptGuard`]: crate::task::DisabledPreemptGuard
pub unsafe trait PinCurrentCpu {
/// Returns the ID of the current CPU.

View File

@ -2,7 +2,7 @@
use spin::Once;
use super::irq::{disable_local, process_top_half, DisabledLocalIrqGuard};
use super::{disable_local, line::process_top_half, DisabledLocalIrqGuard};
use crate::{arch::trap::TrapFrame, cpu_local_cell, task::disable_preempt};
static BOTTOM_HALF_HANDLER: Once<fn(DisabledLocalIrqGuard) -> DisabledLocalIrqGuard> = Once::new();

View File

@ -9,12 +9,11 @@ use spin::Once;
use crate::{
arch::{
irq::{self, IrqRemapping, IRQ_NUM_MAX, IRQ_NUM_MIN},
irq::{IrqRemapping, IRQ_NUM_MAX, IRQ_NUM_MIN},
trap::TrapFrame,
},
prelude::*,
sync::{GuardTransfer, RwLock, SpinLock, WriteIrqDisabled},
task::atomic_mode::InAtomicMode,
sync::{RwLock, SpinLock, WriteIrqDisabled},
Error,
};
@ -193,73 +192,6 @@ pub(super) fn process_top_half(trap_frame: &TrapFrame, irq_num: usize) {
}
}
// ####### IRQ Guards #######
/// Disables all IRQs on the current CPU (i.e., locally).
///
/// This function returns a guard object, which will automatically enable local IRQs again when
/// it is dropped. This function works correctly even when it is called in a _nested_ way.
/// The local IRQs shall only be re-enabled when the most outer guard is dropped.
///
/// This function can play nicely with [`SpinLock`] as the type uses this function internally.
/// One can invoke this function even after acquiring a spin lock. And the reversed order is also ok.
///
/// [`SpinLock`]: crate::sync::SpinLock
///
/// # Example
///
/// ```rust
/// use ostd::trap;
///
/// {
/// let _ = trap::irq::disable_local();
/// todo!("do something when irqs are disabled");
/// }
/// ```
pub fn disable_local() -> DisabledLocalIrqGuard {
DisabledLocalIrqGuard::new()
}
/// A guard for disabled local IRQs.
#[clippy::has_significant_drop]
#[must_use]
#[derive(Debug)]
pub struct DisabledLocalIrqGuard {
was_enabled: bool,
}
impl !Send for DisabledLocalIrqGuard {}
// SAFETY: The guard disables local IRQs, which meets the first
// sufficient condition for atomic mode.
unsafe impl InAtomicMode for DisabledLocalIrqGuard {}
impl DisabledLocalIrqGuard {
fn new() -> Self {
let was_enabled = irq::is_local_enabled();
if was_enabled {
irq::disable_local();
}
Self { was_enabled }
}
}
impl GuardTransfer for DisabledLocalIrqGuard {
fn transfer_to(&mut self) -> Self {
let was_enabled = self.was_enabled;
self.was_enabled = false;
Self { was_enabled }
}
}
impl Drop for DisabledLocalIrqGuard {
fn drop(&mut self) {
if self.was_enabled {
irq::enable_local();
}
}
}
#[cfg(ktest)]
mod test {
use super::*;

ostd/src/irq/mod.rs (new file, 77 lines added)
View File

@ -0,0 +1,77 @@
// SPDX-License-Identifier: MPL-2.0
//! Interrupt ReQuest (IRQ) handling.
mod handler;
mod line;
pub(crate) use handler::call_irq_callback_functions;
pub use handler::{in_interrupt_context, register_bottom_half_handler};
pub use line::{IrqCallbackFunction, IrqLine};
use crate::{arch::irq as arch_irq, sync::GuardTransfer, task::atomic_mode::InAtomicMode};
/// Disables all IRQs on the current CPU (i.e., locally).
///
/// This function returns a guard object, which will automatically enable local IRQs again when
/// it is dropped. This function works correctly even when it is called in a _nested_ way.
/// The local IRQs shall only be re-enabled when the most outer guard is dropped.
///
/// This function can play nicely with [`SpinLock`] as the type uses this function internally.
/// One can invoke this function even after acquiring a spin lock. And the reversed order is also ok.
///
/// [`SpinLock`]: crate::sync::SpinLock
///
/// # Example
///
/// ```rust
/// use ostd::irq;
///
/// {
/// let _ = irq::disable_local();
/// todo!("do something when irqs are disabled");
/// }
/// ```
pub fn disable_local() -> DisabledLocalIrqGuard {
DisabledLocalIrqGuard::new()
}
/// A guard for disabled local IRQs.
#[clippy::has_significant_drop]
#[must_use]
#[derive(Debug)]
pub struct DisabledLocalIrqGuard {
was_enabled: bool,
}
impl !Send for DisabledLocalIrqGuard {}
// SAFETY: The guard disables local IRQs, which meets the first
// sufficient condition for atomic mode.
unsafe impl InAtomicMode for DisabledLocalIrqGuard {}
impl DisabledLocalIrqGuard {
fn new() -> Self {
let was_enabled = arch_irq::is_local_enabled();
if was_enabled {
arch_irq::disable_local();
}
Self { was_enabled }
}
}
impl GuardTransfer for DisabledLocalIrqGuard {
fn transfer_to(&mut self) -> Self {
let was_enabled = self.was_enabled;
self.was_enabled = false;
Self { was_enabled }
}
}
impl Drop for DisabledLocalIrqGuard {
fn drop(&mut self) {
if self.was_enabled {
arch_irq::enable_local();
}
}
}
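
Beyond toggling interrupts, the `DisabledLocalIrqGuard` returned by `disable_local()` is what the call sites above pass to CPU-local storage (`get_with(&irq_guard)`) and use to pin the current CPU (`irq_guard.current_cpu()`). A short usage sketch under the new paths; the `COUNTER` variable is illustrative and not part of the commit:

```rust
use core::cell::RefCell;

use ostd::{cpu::PinCurrentCpu, cpu_local, irq};

cpu_local! {
    // Hypothetical per-CPU slot, for illustration only.
    static COUNTER: RefCell<usize> = RefCell::new(0);
}

fn bump_local_counter() {
    // Disabling local IRQs pins the task to the current CPU, so the guard
    // doubles as a token for CPU-local access and for querying the CPU ID.
    let irq_guard = irq::disable_local();
    let _cpu_id = irq_guard.current_cpu();
    let counter = COUNTER.get_with(&irq_guard);
    *counter.borrow_mut() += 1;
}
```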

View File

@ -42,6 +42,7 @@ pub mod console;
pub mod cpu;
mod error;
pub mod io;
pub mod irq;
pub mod logger;
pub mod mm;
pub mod panic;
@ -50,7 +51,6 @@ pub mod smp;
pub mod sync;
pub mod task;
pub mod timer;
pub mod trap;
pub mod user;
pub mod util;

View File

@ -82,7 +82,7 @@ impl<'a, G: PinCurrentCpu> TlbFlusher<'a, G> {
/// function. But it may not be synchronous. Upon the return of this
/// function, the TLB entries may not be coherent.
pub fn dispatch_tlb_flush(&mut self) {
let irq_guard = crate::trap::irq::disable_local();
let irq_guard = crate::irq::disable_local();
if self.ops_stack.is_empty() {
return;

View File

@ -24,7 +24,7 @@ use gimli::Register;
#[linkage = "weak"]
#[no_mangle]
pub fn __ostd_panic_handler(info: &core::panic::PanicInfo) -> ! {
let _irq_guard = crate::trap::irq::disable_local();
let _irq_guard = crate::irq::disable_local();
crate::cpu_local_cell! {
static IN_PANIC: bool = false;

View File

@ -16,8 +16,8 @@ use crate::{
},
cpu::{set::CpuSet, PinCurrentCpu},
cpu_local,
irq::{self, IrqLine},
sync::SpinLock,
trap::{self, irq::IrqLine},
};
/// Executes a function on other processors.
@ -35,7 +35,7 @@ use crate::{
/// The function `f` will be executed asynchronously on the target processors.
/// However if called on the current processor, it will be synchronous.
pub fn inter_processor_call(targets: &CpuSet, f: fn()) {
let irq_guard = trap::irq::disable_local();
let irq_guard = irq::disable_local();
let this_cpu_id = irq_guard.current_cpu();
let ipi_data = IPI_GLOBAL_DATA.get().unwrap();

View File

@ -1,8 +1,8 @@
// SPDX-License-Identifier: MPL-2.0
use crate::{
irq::{disable_local, DisabledLocalIrqGuard},
task::{atomic_mode::AsAtomicModeGuard, disable_preempt, DisabledPreemptGuard},
trap::irq::{disable_local, DisabledLocalIrqGuard},
};
/// A guardian that denotes the guard behavior for holding a spin-based lock.

View File

@ -9,13 +9,13 @@ use crate::{
PinCurrentCpu,
},
impl_frame_meta_for,
irq::DisabledLocalIrqGuard,
mm::{
kspace::kvirt_area::KVirtArea,
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
FrameAllocOptions, PAGE_SIZE,
},
prelude::*,
trap::irq::DisabledLocalIrqGuard,
};
/// The kernel stack size of a task, specified in pages.

View File

@ -27,7 +27,7 @@ pub use self::{
preempt::{disable_preempt, halt_cpu, DisabledPreemptGuard},
scheduler::info::{AtomicCpuId, TaskScheduleInfo},
};
use crate::{arch::task::TaskContext, prelude::*, trap::in_interrupt_context};
use crate::{arch::task::TaskContext, irq::in_interrupt_context, prelude::*};
static PRE_SCHEDULE_HANDLER: Once<fn()> = Once::new();

View File

@ -23,7 +23,7 @@ pub use self::guard::{disable_preempt, DisabledPreemptGuard};
pub fn halt_cpu() {
crate::task::atomic_mode::might_sleep();
let irq_guard = crate::trap::irq::disable_local();
let irq_guard = crate::irq::disable_local();
if cpu_local::need_preempt() {
drop(irq_guard);

View File

@ -7,7 +7,7 @@ use super::{Task, POST_SCHEDULE_HANDLER, PRE_SCHEDULE_HANDLER};
use crate::{
arch::task::{context_switch, first_context_switch},
cpu_local_cell,
trap::irq::DisabledLocalIrqGuard,
irq::DisabledLocalIrqGuard,
};
cpu_local_cell! {
@ -45,7 +45,7 @@ pub(super) fn switch_to_task(next_task: Arc<Task>) {
crate::sync::finish_grace_period();
}
let irq_guard = crate::trap::irq::disable_local();
let irq_guard = crate::irq::disable_local();
before_switching_to(&next_task, &irq_guard);

View File

@ -9,7 +9,7 @@ use core::cell::RefCell;
pub use jiffies::Jiffies;
use crate::{cpu_local, trap};
use crate::{cpu_local, irq};
type InterruptCallback = Box<dyn Fn() + Sync + Send>;
@ -22,7 +22,7 @@ pub fn register_callback<F>(func: F)
where
F: Fn() + Sync + Send + 'static,
{
let irq_guard = trap::irq::disable_local();
let irq_guard = irq::disable_local();
INTERRUPT_CALLBACKS
.get_with(&irq_guard)
.borrow_mut()

View File

@ -1,9 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! Handles trap across kernel and user space.
mod handler;
pub mod irq;
pub(crate) use handler::call_irq_callback_functions;
pub use handler::{in_interrupt_context, register_bottom_half_handler};
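
The final hunk above deletes the old `ostd::trap` entry point; its re-exports (`call_irq_callback_functions`, `in_interrupt_context`, `register_bottom_half_handler`) now come from `ostd::irq`, as the new `ostd/src/irq/mod.rs` shows. A hedged sketch of registering a bottom-half handler under the new paths, assuming `register_bottom_half_handler` takes a plain `fn` of the type stored in the `BOTTOM_HALF_HANDLER` slot shown in the handler.rs hunk; the handler body is a placeholder:

```rust
use ostd::irq::{register_bottom_half_handler, DisabledLocalIrqGuard};

// Illustrative bottom-half handler; real implementations drain pending
// per-CPU work while local IRQs remain disabled, then hand the guard back.
fn process_pending(guard: DisabledLocalIrqGuard) -> DisabledLocalIrqGuard {
    // ... drain softirq-style work here ...
    guard
}

fn init_bottom_half() {
    // Handler type inferred from `BOTTOM_HALF_HANDLER:
    // Once<fn(DisabledLocalIrqGuard) -> DisabledLocalIrqGuard>` above.
    register_bottom_half_handler(process_pending);
}
```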