Implement `IdSet::iter_in`

This patch enables more expressive ways to slice and iterate over
the `Id`s in an `IdSet` with `IdSet::iter_in`, which takes an arbitrary
`IdSetSlicer`.

`IdSet::iter_in` efficiently slices away the inner parts that fall outside
the requested range and then, within the remaining parts, skips inactive
bits by using `BitSlice::iter_ones` from the `bitvec` crate.
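
For reference, here is a hedged, standalone sketch of how
`BitSlice::iter_ones` supports this (plain `bitvec` usage, not code from
this patch; the part value and slice bounds are illustrative):

    use bitvec::{order::Lsb0, view::BitView};

    // One 64-bit part with bits 1, 2, and 5 active.
    let parts: [u64; 1] = [0b10_0110];
    // Slice to the bit range [1..5), then visit only the active bits.
    let ones: Vec<usize> = parts.view_bits::<Lsb0>()[1..5]
        .iter_ones()
        .map(|offset| 1 + offset) // `offset` is relative to the slice start
        .collect();
    assert_eq!(ones, vec![1, 2]);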

It also provides several implementations of `IdSetSlicer` so that OSTD
consumers can express `Id` ranges ergonomically.
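
As an illustration, a minimal sketch of the accepted range forms, using a
hypothetical `MyId` type that implements `Id` (in the spirit of the
`MockId` helper in the tests below):

    use core::ops::Bound;

    let mut set: IdSet<MyId> = IdSet::new_empty();
    set.add(MyId::new(2)); // `MyId::new` is assumed for this sketch
    set.add(MyId::new(5));

    let _ = set.iter_in(..);                          // yields 2, 5
    let _ = set.iter_in(MyId::new(2)..MyId::new(5));  // yields 2
    let _ = set.iter_in(..=MyId::new(5));             // yields 2, 5
    let _ = set.iter_in((Bound::Excluded(MyId::new(2)), Bound::Unbounded)); // yields 5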

In the Asterinas kernel, `CpuSet::iter_in` enables a cleaner way to
define an iterator that cycles over the CPUs.
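
A hedged sketch of that pattern (assuming `cpu_set: CpuSet` holds the CPUs
whose IDs are `cpu0`, `cpu2`, and `cpu5`; the actual helper is
`ClassScheduler::cycle_after` in the scheduler diff below):

    use core::ops::Bound;

    // Start *after* `cpu2`, wrap around, and yield `cpu2` itself last.
    let cycled: Vec<CpuId> = cpu_set
        .iter_in((Bound::Excluded(cpu2), Bound::Unbounded))
        .chain(cpu_set.iter_in(..=cpu2))
        .collect();
    assert_eq!(cycled, vec![cpu5, cpu0, cpu2]);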
Arthur Paulino 2025-10-22 15:57:03 -03:00 committed by Tate, Hongliang Tian
parent 01d9b61337
commit 6a67807fd0
22 changed files with 364 additions and 62 deletions

View File

@@ -173,7 +173,7 @@ impl CpuInformation {
/// Constructs the information for the current CPU.
pub fn new(guard: &DisabledPreemptGuard) -> Self {
Self {
processor: guard.current_cpu().as_usize() as u32,
processor: guard.current_cpu().into(),
}
}
}

View File

@@ -169,7 +169,7 @@ impl CpuInformation {
/// Constructs the information for the current CPU.
pub fn new(guard: &DisabledPreemptGuard) -> Self {
Self {
processor: guard.current_cpu().as_usize() as u32,
processor: guard.current_cpu().into(),
}
}
}

View File

@@ -233,7 +233,7 @@ impl CpuInformation {
/// Constructs the information for the current CPU.
pub fn new(guard: &DisabledPreemptGuard) -> Self {
let mut result = Self {
processor: guard.current_cpu().as_usize() as u32,
processor: guard.current_cpu().into(),
vendor_id: "unknown".to_owned(),
cpu_family: 0,
model: 0,

View File

@@ -10,6 +10,7 @@ use core::fmt::Write;
use aster_softirq::{
iter_irq_counts_across_all_cpus, iter_softirq_counts_across_all_cpus, softirq_id::*,
};
use ostd::util::id_set::Id;
use crate::{
fs::{

View File

@@ -30,6 +30,7 @@ use ostd::{
arch::qemu::{exit_qemu, QemuExitCode},
boot::boot_info,
cpu::CpuId,
util::id_set::Id,
};
use process::{spawn_init_process, Process};
use sched::SchedPolicy;

View File

@@ -5,11 +5,11 @@
#![warn(unused)]
use alloc::{boxed::Box, sync::Arc};
use core::{fmt, sync::atomic::Ordering};
use core::{fmt, ops::Bound, sync::atomic::Ordering};
use ostd::{
arch::read_tsc as sched_clock,
cpu::{all_cpus, CpuId, PinCurrentCpu},
cpu::{all_cpus, CpuId, CpuSet, PinCurrentCpu},
irq::disable_local,
sync::{LocalIrqDisabled, SpinLock},
task::{
@@ -19,6 +19,7 @@ use ostd::{
},
AtomicCpuId, Task,
},
util::id_set::Id,
};
use super::{
@@ -286,28 +287,14 @@ impl ClassScheduler {
return last_cpu;
}
debug_assert!(flags == EnqueueFlags::Spawn);
let guard = disable_local();
let affinity = thread.atomic_cpu_affinity().load(Ordering::Relaxed);
let mut selected = guard.current_cpu();
let mut minimum_load = u32::MAX;
let last_chosen = match self.last_chosen_cpu.get() {
Some(cpu) => cpu.as_usize() as isize,
None => -1,
};
// Simulate a round-robin selection starting from the last chosen CPU.
//
// It still checks every CPU to find the one with the minimum load, but
// avoids keeping selecting the same CPU when there are multiple equally
// idle CPUs.
let affinity_iter = affinity
.iter()
.filter(|&cpu| cpu.as_usize() as isize > last_chosen)
.chain(
affinity
.iter()
.filter(|&cpu| cpu.as_usize() as isize <= last_chosen),
);
for candidate in affinity_iter {
// Set `selected` to `candidate` if the candidate's load is smaller.
let test_candidate = |candidate: CpuId| {
let PerCpuLoadStats { queue_len, .. } =
self.rqs[candidate.as_usize()].lock().load_stats();
let load = queue_len;
@@ -315,10 +302,36 @@ impl ClassScheduler {
minimum_load = load;
selected = candidate;
}
};
let affinity = thread.atomic_cpu_affinity().load(Ordering::Relaxed);
match self.last_chosen_cpu.get() {
Some(cpu) => {
// Perform a round-robin selection starting after the last chosen CPU.
//
// It still checks every CPU in the affinity set to find the one with the
// minimum load, but avoids selecting the same CPU again in case of a tie.
Self::cycle_after(cpu, &affinity).for_each(test_candidate)
}
None => affinity.iter().for_each(test_candidate),
}
self.last_chosen_cpu.set_anyway(selected);
selected
}
/// Returns a cycling iterator over the CPUs in the [`CpuSet`], starting *after*
/// the given [`CpuId`].
///
/// The iteration order is ascending up to the wrapping point, after which it
/// continues from the first CPU in the set in ascending order again.
///
/// If the given [`CpuId`] is in the set, it will be the last element yielded.
fn cycle_after(cpu: CpuId, cpu_set: &CpuSet) -> impl Iterator<Item = CpuId> + '_ {
cpu_set
.iter_in((Bound::Excluded(cpu), Bound::Unbounded))
.chain(cpu_set.iter_in(..=cpu))
}
}
impl PerCpuClassRqSet {

View File

@@ -9,7 +9,7 @@ pub fn sys_getcpu(cpu: Vaddr, node: Vaddr, _tcache: Vaddr, ctx: &Context) -> Res
// The third argument `tcache` is unused since Linux 2.6.24, so we ignore it.
// The system call itself is inherently racy, so using `current_racy` here should be fine.
let current_cpu = CpuId::current_racy().as_usize() as u32;
let current_cpu: u32 = CpuId::current_racy().into();
// TODO: Support NUMA.
let current_node = 0u32;

View File

@@ -2,7 +2,10 @@
use core::{cmp, sync::atomic::Ordering};
use ostd::cpu::{num_cpus, CpuId, CpuSet};
use ostd::{
cpu::{num_cpus, CpuId, CpuSet},
util::id_set::Id,
};
use super::SyscallReturn;
use crate::{prelude::*, process::posix_thread::thread_table, thread::Tid};

View File

@@ -22,7 +22,7 @@ use core::{
sync::atomic::{AtomicBool, AtomicUsize, Ordering},
};
use ostd::{cpu::PinCurrentCpu, panic, task::disable_preempt};
use ostd::{cpu::PinCurrentCpu, panic, task::disable_preempt, util::id_set::Id};
use super::Thread;

View File

@@ -161,7 +161,7 @@ impl UserContextApiInternal for UserContext {
}
Trap::Interrupt(Interrupt::SupervisorExternal) => {
// No races because we are in IRQs.
let current_cpu = CpuId::current_racy().as_usize() as u32;
let current_cpu = CpuId::current_racy().into();
while let Some(hw_irq_line) =
IRQ_CHIP.get().unwrap().claim_interrupt(current_cpu)
{

View File

@@ -80,11 +80,7 @@ impl IrqChip {
plic.set_priority(interrupt_source_in_fdt.interrupt, 1);
// FIXME: Here we only enable external interrupt on the BSP. We should
// enable it on APs as well when SMP is supported.
plic.set_interrupt_enabled(
CpuId::bsp().as_usize() as u32,
interrupt_source_in_fdt.interrupt,
true,
);
plic.set_interrupt_enabled(CpuId::bsp().into(), interrupt_source_in_fdt.interrupt, true);
Ok(MappedIrqLine {
irq_line,
@@ -137,7 +133,7 @@ impl IrqChip {
// FIXME: Here we only disable external interrupt on the BSP. We should
// disable it on APs as well when SMP is supported.
plic.set_interrupt_enabled(CpuId::bsp().as_usize() as u32, *interrupt, false);
plic.set_interrupt_enabled(CpuId::bsp().into(), *interrupt, false);
plic.set_priority(*interrupt, 0);
plic.unmap_interrupt_source(*interrupt);
}

View File

@@ -51,7 +51,7 @@ impl HwIrqLine {
InterruptSource::External(interrupt_source_on_chip) => {
IRQ_CHIP.get().unwrap().complete_interrupt(
// No races because we are in IRQs.
CpuId::current_racy().as_usize() as u32,
CpuId::current_racy().into(),
*interrupt_source_on_chip,
);
}

View File

@@ -45,7 +45,7 @@ extern "C" fn trap_handler(f: &mut TrapFrame) {
}
Interrupt::SupervisorExternal => {
// No races because we are in IRQs.
let current_cpu = CpuId::current_racy().as_usize() as u32;
let current_cpu = CpuId::current_racy().into();
while let Some(hw_irq_line) = IRQ_CHIP.get().unwrap().claim_interrupt(current_cpu) {
call_irq_callback_functions(f, &hw_irq_line, PrivilegeLevel::Kernel);
}

View File

@@ -14,6 +14,7 @@ use crate::{
},
sync::SpinLock,
task::Task,
util::id_set::Id,
};
static AP_BOOT_INFO: Once<ApBootInfo> = Once::new();

View File

@@ -36,11 +36,6 @@ impl CpuId {
// SAFETY: There is at least one CPU.
Self(bsp_raw_cpu_id)
}
/// Converts the CPU ID to an `usize`.
pub const fn as_usize(self) -> usize {
self.0 as usize
}
}
impl From<CpuId> for u32 {

View File

@@ -11,6 +11,7 @@ use crate::{
cpu::{all_cpus, num_cpus, CpuId, PinCurrentCpu},
irq::DisabledLocalIrqGuard,
mm::{paddr_to_vaddr, FrameAllocOptions, HasPaddr, Segment, Vaddr, PAGE_SIZE},
util::id_set::Id,
Result,
};

View File

@@ -57,6 +57,7 @@ use super::CpuId;
use crate::{
irq::DisabledLocalIrqGuard,
mm::{frame::allocator, paddr_to_vaddr, Paddr, PAGE_SIZE},
util::id_set::Id,
};
/// Dynamically-allocated CPU-local objects.

View File

@@ -10,7 +10,7 @@
use core::marker::PhantomData;
use super::{AnyStorage, CpuLocal, __cpu_local_end, __cpu_local_start};
use crate::{arch, cpu::CpuId, irq::DisabledLocalIrqGuard};
use crate::{arch, cpu::CpuId, irq::DisabledLocalIrqGuard, util::id_set::Id};
/// Defines a statically-allocated CPU-local variable.
///

View File

@@ -18,6 +18,7 @@ use crate::{
cpu_local,
irq::{self, IrqLine},
sync::SpinLock,
util::id_set::Id,
};
/// Executes a function on other processors.

View File

@@ -9,6 +9,7 @@ use crate::{
cpu::{num_cpus, CpuId, PinCurrentCpu},
sync::SpinLock,
task::{disable_preempt, Task},
util::id_set::Id,
};
pub fn init() {

View File

@@ -40,7 +40,7 @@ impl AtomicCpuId {
self.0
.compare_exchange(
Self::NONE,
cpu_id.as_usize() as u32,
cpu_id.into(),
Ordering::Relaxed,
Ordering::Relaxed,
)
@@ -50,7 +50,7 @@
/// Sets the inner value of an `AtomicCpuId` anyway.
pub fn set_anyway(&self, cpu_id: CpuId) {
self.0.store(cpu_id.as_usize() as u32, Ordering::Relaxed);
self.0.store(cpu_id.into(), Ordering::Relaxed);
}
/// Sets the inner value of an `AtomicCpuId` to `AtomicCpuId::NONE`, i.e. makes

View File

@@ -23,9 +23,11 @@ use core::{
fmt::Debug,
marker::PhantomData,
mem::size_of,
ops::{Bound, Range, RangeFrom, RangeFull, RangeTo, RangeToInclusive},
sync::atomic::{AtomicU64, Ordering},
};
use bitvec::{order::Lsb0, view::BitView};
use smallvec::SmallVec;
use crate::const_assert;
@@ -73,6 +75,11 @@ pub unsafe trait Id: Copy + Clone + Debug + Eq + Into<u32> + PartialEq {
/// The number of unique IDs representable by this type.
fn cardinality() -> u32;
/// Returns a [`usize`] from the [`Id`]'s corresponding [`u32`].
fn as_usize(self) -> usize {
Into::<u32>::into(self) as usize
}
}
/// A set of IDs.
@@ -241,30 +248,87 @@ impl<I: Id> IdSet<I> {
self.bits.fill(0);
}
/// Iterates over the IDs in the set.
/// Iterates over all IDs in the set.
///
/// The order of the iteration is guaranteed to be in ascending order.
/// The iteration is guaranteed to be in ascending order.
#[inline]
pub fn iter(&self) -> impl Iterator<Item = I> + '_ {
let max_raw_id = I::cardinality() as usize;
self.bits
.iter()
.enumerate()
.flat_map(move |(part_idx, &part)| {
(0..BITS_PER_PART).filter_map(move |bit_idx| {
if (part & (1 << bit_idx)) != 0 {
let raw_id = part_idx * BITS_PER_PART + bit_idx;
debug_assert!(raw_id < max_raw_id);
// SAFETY: All bit 1s in the bitmap must be a valid ID.
let id = unsafe { I::new_unchecked(raw_id as u32) };
Some(id)
} else {
None
}
})
self.iter_in(..)
}
/// Iterates over the IDs in the set within the specified range.
///
/// The iteration is guaranteed to be in ascending order.
/// Only IDs that are both in the set and within the specified range will be returned.
pub fn iter_in<S: IdSetSlicer<I>>(&self, slicer: S) -> impl Iterator<Item = I> + '_ {
let (start, end) = slicer.to_range_bounds();
self.bits.view_bits::<Lsb0>()[start..end]
.iter_ones()
.map(move |offset| {
// SAFETY: `offset` is relative to the slice `[start..end]`,
// therefore `start + offset` is the absolute index of the bit.
// Since `offset` only iterates over relative positions of bit 1s, the
// resulting absolute index must refer to an active bit in `self.bits`.
unsafe { I::new_unchecked((start + offset) as u32) }
})
}
}
/// A trait that unifies all types that slice a portion of [`IdSet`].
pub trait IdSetSlicer<I: Id> {
/// Converts the index type to inclusive start and exclusive end bounds.
///
/// Returns `(start, end)` where:
/// - `start`: inclusive lower bound
/// - `end`: exclusive upper bound
fn to_range_bounds(self) -> (usize, usize);
}
// In the following implementations of `IdSetSlicer`, the `Id` values are upcast
// from `u32` to `usize`. So adding one is guaranteed to *not* overflow.
impl<I: Id> IdSetSlicer<I> for RangeTo<I> {
fn to_range_bounds(self) -> (usize, usize) {
(0, self.end.as_usize())
}
}
impl<I: Id> IdSetSlicer<I> for RangeFrom<I> {
fn to_range_bounds(self) -> (usize, usize) {
(self.start.as_usize(), I::cardinality() as usize)
}
}
impl<I: Id> IdSetSlicer<I> for Range<I> {
fn to_range_bounds(self) -> (usize, usize) {
(self.start.as_usize(), self.end.as_usize())
}
}
impl<I: Id> IdSetSlicer<I> for RangeFull {
fn to_range_bounds(self) -> (usize, usize) {
(0, I::cardinality() as usize)
}
}
impl<I: Id> IdSetSlicer<I> for RangeToInclusive<I> {
fn to_range_bounds(self) -> (usize, usize) {
(0, self.end.as_usize() + 1)
}
}
impl<I: Id> IdSetSlicer<I> for (Bound<I>, Bound<I>) {
fn to_range_bounds(self) -> (usize, usize) {
let (start_bound, end_bound) = self;
let start = match start_bound {
Bound::Included(id) => id.as_usize(),
Bound::Excluded(id) => id.as_usize() + 1,
Bound::Unbounded => 0,
};
let end = match end_bound {
Bound::Included(id) => id.as_usize() + 1,
Bound::Excluded(id) => id.as_usize(),
Bound::Unbounded => I::cardinality() as usize,
};
(start, end)
}
}
impl<I: Id> From<I> for IdSet<I> {
fn from(id: I) -> Self {
let mut set = Self::new_empty();
@@ -648,4 +712,228 @@ mod id_set_tests {
assert!(set.is_empty());
assert_eq!(set.count(), 0);
}
#[ktest]
fn iter_in_range() {
type TestId = MockId<7>;
let mut set: IdSet<TestId> = IdSet::new_empty();
set.add(TestId::new(0));
set.add(TestId::new(1));
set.add(TestId::new(2));
set.add(TestId::new(5));
set.add(TestId::new(6));
let collected_ids: Vec<TestId> = set.iter_in(TestId::new(1)..TestId::new(5)).collect();
assert_eq!(collected_ids, vec![TestId::new(1), TestId::new(2)],);
}
#[ktest]
fn iter_in_range_to() {
type TestId = MockId<7>;
let mut set: IdSet<TestId> = IdSet::new_empty();
set.add(TestId::new(0));
set.add(TestId::new(1));
set.add(TestId::new(2));
set.add(TestId::new(5));
set.add(TestId::new(6));
let collected_ids: Vec<TestId> = set.iter_in(..TestId::new(5)).collect();
assert_eq!(
collected_ids,
vec![TestId::new(0), TestId::new(1), TestId::new(2)],
);
}
#[ktest]
fn iter_in_range_to_inclusive() {
type TestId = MockId<7>;
let mut set: IdSet<TestId> = IdSet::new_empty();
set.add(TestId::new(0));
set.add(TestId::new(1));
set.add(TestId::new(2));
set.add(TestId::new(5));
set.add(TestId::new(6));
let collected_ids: Vec<TestId> = set.iter_in(..=TestId::new(5)).collect();
assert_eq!(
collected_ids,
vec![
TestId::new(0),
TestId::new(1),
TestId::new(2),
TestId::new(5)
],
);
}
#[ktest]
fn iter_in_range_from() {
type TestId = MockId<7>;
let mut set: IdSet<TestId> = IdSet::new_empty();
set.add(TestId::new(0));
set.add(TestId::new(1));
set.add(TestId::new(2));
set.add(TestId::new(5));
set.add(TestId::new(6));
let collected_ids: Vec<TestId> = set.iter_in(TestId::new(2)..).collect();
assert_eq!(
collected_ids,
vec![TestId::new(2), TestId::new(5), TestId::new(6)],
);
}
#[ktest]
fn iter_in_range_full() {
type TestId = MockId<7>;
let mut set: IdSet<TestId> = IdSet::new_empty();
set.add(TestId::new(0));
set.add(TestId::new(1));
set.add(TestId::new(2));
set.add(TestId::new(5));
set.add(TestId::new(6));
let collected_ids: Vec<TestId> = set.iter_in(..).collect();
assert_eq!(
collected_ids,
vec![
TestId::new(0),
TestId::new(1),
TestId::new(2),
TestId::new(5),
TestId::new(6)
],
);
}
#[ktest]
fn iter_in_bound_tuple_inclusive_exclusive() {
type TestId = MockId<7>;
let mut set: IdSet<TestId> = IdSet::new_empty();
set.add(TestId::new(0));
set.add(TestId::new(1));
set.add(TestId::new(2));
set.add(TestId::new(5));
set.add(TestId::new(6));
let collected_ids: Vec<TestId> = set
.iter_in((
Bound::Included(TestId::new(1)),
Bound::Excluded(TestId::new(5)),
))
.collect();
assert_eq!(collected_ids, vec![TestId::new(1), TestId::new(2)],);
}
#[ktest]
fn iter_in_bound_tuple_exclusive_inclusive() {
type TestId = MockId<7>;
let mut set: IdSet<TestId> = IdSet::new_empty();
set.add(TestId::new(0));
set.add(TestId::new(1));
set.add(TestId::new(2));
set.add(TestId::new(5));
set.add(TestId::new(6));
let collected_ids: Vec<TestId> = set
.iter_in((
Bound::Excluded(TestId::new(1)),
Bound::Included(TestId::new(5)),
))
.collect();
assert_eq!(collected_ids, vec![TestId::new(2), TestId::new(5)],);
}
#[ktest]
fn iter_in_unbounded_bounds() {
type TestId = MockId<7>;
let mut set: IdSet<TestId> = IdSet::new_empty();
set.add(TestId::new(0));
set.add(TestId::new(1));
set.add(TestId::new(2));
set.add(TestId::new(5));
set.add(TestId::new(6));
let collected_ids: Vec<TestId> = set
.iter_in((Bound::Unbounded::<TestId>, Bound::Unbounded::<TestId>))
.collect();
assert_eq!(
collected_ids,
vec![
TestId::new(0),
TestId::new(1),
TestId::new(2),
TestId::new(5),
TestId::new(6)
],
);
}
#[ktest]
fn iter_in_half_unbounded() {
type TestId = MockId<7>;
let mut set: IdSet<TestId> = IdSet::new_empty();
set.add(TestId::new(0));
set.add(TestId::new(1));
set.add(TestId::new(2));
set.add(TestId::new(5));
set.add(TestId::new(6));
let collected_ids: Vec<TestId> = set
.iter_in((Bound::Included(TestId::new(2)), Bound::Unbounded::<TestId>))
.collect();
assert_eq!(
collected_ids,
vec![TestId::new(2), TestId::new(5), TestId::new(6)],
);
let collected_ids: Vec<TestId> = set
.iter_in((Bound::Unbounded::<TestId>, Bound::Included(TestId::new(2))))
.collect();
assert_eq!(
collected_ids,
vec![TestId::new(0), TestId::new(1), TestId::new(2)],
);
}
#[ktest]
fn iter_in_range_starts_after_last() {
type TestId = MockId<7>;
let mut set: IdSet<TestId> = IdSet::new_empty();
set.add(TestId::new(0));
set.add(TestId::new(1));
set.add(TestId::new(2));
let collected_ids: Vec<TestId> = set.iter_in(TestId::new(3)..).collect();
assert_eq!(collected_ids, vec![],);
}
#[ktest]
fn iter_in_range_ends_after_last() {
type TestId = MockId<7>;
let mut set: IdSet<TestId> = IdSet::new_empty();
set.add(TestId::new(0));
set.add(TestId::new(1));
set.add(TestId::new(2));
let collected_ids: Vec<TestId> = set.iter_in(..TestId::new(3)).collect();
assert_eq!(
collected_ids,
vec![TestId::new(0), TestId::new(1), TestId::new(2)],
);
}
#[ktest]
fn iter_in_range_next_part() {
type TestId = MockId<{ InnerPart::BITS }>;
let last_id = TestId::new(InnerPart::BITS - 1);
let mut set: IdSet<TestId> = IdSet::new_empty();
set.add(last_id);
let collected_ids: Vec<TestId> = set
.iter_in((Bound::Excluded(last_id), Bound::Included(last_id)))
.collect();
assert_eq!(collected_ids, vec![],);
}
}