diff --git a/kernel/src/arch/loongarch/cpu.rs b/kernel/src/arch/loongarch/cpu.rs
index a6c5a6121..4c2c5f2a3 100644
--- a/kernel/src/arch/loongarch/cpu.rs
+++ b/kernel/src/arch/loongarch/cpu.rs
@@ -173,7 +173,7 @@ impl CpuInformation {
     /// Constructs the information for the current CPU.
     pub fn new(guard: &DisabledPreemptGuard) -> Self {
         Self {
-            processor: guard.current_cpu().as_usize() as u32,
+            processor: guard.current_cpu().into(),
         }
     }
 }
diff --git a/kernel/src/arch/riscv/cpu.rs b/kernel/src/arch/riscv/cpu.rs
index 7b5cab29d..9ccdbc5be 100644
--- a/kernel/src/arch/riscv/cpu.rs
+++ b/kernel/src/arch/riscv/cpu.rs
@@ -169,7 +169,7 @@ impl CpuInformation {
     /// Constructs the information for the current CPU.
     pub fn new(guard: &DisabledPreemptGuard) -> Self {
         Self {
-            processor: guard.current_cpu().as_usize() as u32,
+            processor: guard.current_cpu().into(),
         }
     }
 }
diff --git a/kernel/src/arch/x86/cpu.rs b/kernel/src/arch/x86/cpu.rs
index d28620730..a270d3383 100644
--- a/kernel/src/arch/x86/cpu.rs
+++ b/kernel/src/arch/x86/cpu.rs
@@ -233,7 +233,7 @@ impl CpuInformation {
     /// Constructs the information for the current CPU.
     pub fn new(guard: &DisabledPreemptGuard) -> Self {
         let mut result = Self {
-            processor: guard.current_cpu().as_usize() as u32,
+            processor: guard.current_cpu().into(),
             vendor_id: "unknown".to_owned(),
             cpu_family: 0,
             model: 0,
diff --git a/kernel/src/fs/procfs/stat.rs b/kernel/src/fs/procfs/stat.rs
index 541e12643..0f96034b3 100644
--- a/kernel/src/fs/procfs/stat.rs
+++ b/kernel/src/fs/procfs/stat.rs
@@ -10,6 +10,7 @@ use core::fmt::Write;
 use aster_softirq::{
     iter_irq_counts_across_all_cpus, iter_softirq_counts_across_all_cpus, softirq_id::*,
 };
+use ostd::util::id_set::Id;
 
 use crate::{
     fs::{
diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs
index b85d93021..dde2d9211 100644
--- a/kernel/src/lib.rs
+++ b/kernel/src/lib.rs
@@ -30,6 +30,7 @@ use ostd::{
     arch::qemu::{exit_qemu, QemuExitCode},
     boot::boot_info,
     cpu::CpuId,
+    util::id_set::Id,
 };
 use process::{spawn_init_process, Process};
 use sched::SchedPolicy;
diff --git a/kernel/src/sched/sched_class/mod.rs b/kernel/src/sched/sched_class/mod.rs
index 670932ff8..19ca274f7 100644
--- a/kernel/src/sched/sched_class/mod.rs
+++ b/kernel/src/sched/sched_class/mod.rs
@@ -5,11 +5,11 @@
 #![warn(unused)]
 
 use alloc::{boxed::Box, sync::Arc};
-use core::{fmt, sync::atomic::Ordering};
+use core::{fmt, ops::Bound, sync::atomic::Ordering};
 
 use ostd::{
     arch::read_tsc as sched_clock,
-    cpu::{all_cpus, CpuId, PinCurrentCpu},
+    cpu::{all_cpus, CpuId, CpuSet, PinCurrentCpu},
     irq::disable_local,
     sync::{LocalIrqDisabled, SpinLock},
     task::{
@@ -19,6 +19,7 @@ use ostd::{
         },
         AtomicCpuId, Task,
     },
+    util::id_set::Id,
 };
 
 use super::{
@@ -286,28 +287,14 @@ impl ClassScheduler {
             return last_cpu;
         }
         debug_assert!(flags == EnqueueFlags::Spawn);
+
         let guard = disable_local();
-        let affinity = thread.atomic_cpu_affinity().load(Ordering::Relaxed);
+
         let mut selected = guard.current_cpu();
         let mut minimum_load = u32::MAX;
-        let last_chosen = match self.last_chosen_cpu.get() {
-            Some(cpu) => cpu.as_usize() as isize,
-            None => -1,
-        };
-        // Simulate a round-robin selection starting from the last chosen CPU.
-        //
-        // It still checks every CPU to find the one with the minimum load, but
-        // avoids keeping selecting the same CPU when there are multiple equally
-        // idle CPUs.
-        let affinity_iter = affinity
-            .iter()
-            .filter(|&cpu| cpu.as_usize() as isize > last_chosen)
-            .chain(
-                affinity
-                    .iter()
-                    .filter(|&cpu| cpu.as_usize() as isize <= last_chosen),
-            );
-        for candidate in affinity_iter {
+
+        // Sets `selected` to `candidate` if the candidate's load is smaller.
+        let test_candidate = |candidate: CpuId| {
             let PerCpuLoadStats { queue_len, .. } =
                 self.rqs[candidate.as_usize()].lock().load_stats();
             let load = queue_len;
@@ -315,10 +302,36 @@ impl ClassScheduler {
                 minimum_load = load;
                 selected = candidate;
             }
+        };
+
+        let affinity = thread.atomic_cpu_affinity().load(Ordering::Relaxed);
+        match self.last_chosen_cpu.get() {
+            Some(cpu) => {
+                // Perform a round-robin selection starting after the last chosen CPU.
+                //
+                // It still checks every CPU in the affinity set to find the one with the
+                // minimum load, but avoids selecting the same CPU again in case of a tie.
+                Self::cycle_after(cpu, &affinity).for_each(test_candidate)
+            }
+            None => affinity.iter().for_each(test_candidate),
         }
+
         self.last_chosen_cpu.set_anyway(selected);
         selected
     }
+
+    /// Returns a cycling iterator over the CPUs in the [`CpuSet`], starting *after*
+    /// the given [`CpuId`].
+    ///
+    /// The iteration order is ascending up to the wrapping point, after which it
+    /// continues from the first CPU in the set in ascending order again.
+    ///
+    /// If the given [`CpuId`] is in the set, it will be the last element yielded.
+    fn cycle_after(cpu: CpuId, cpu_set: &CpuSet) -> impl Iterator<Item = CpuId> + '_ {
+        cpu_set
+            .iter_in((Bound::Excluded(cpu), Bound::Unbounded))
+            .chain(cpu_set.iter_in(..=cpu))
+    }
 }
 
 impl PerCpuClassRqSet {
diff --git a/kernel/src/syscall/getcpu.rs b/kernel/src/syscall/getcpu.rs
index fd499d1b2..8e653c128 100644
--- a/kernel/src/syscall/getcpu.rs
+++ b/kernel/src/syscall/getcpu.rs
@@ -9,7 +9,7 @@ pub fn sys_getcpu(cpu: Vaddr, node: Vaddr, _tcache: Vaddr, ctx: &Context) -> Res
     // The third argument `tcache` is unused since Linux 2.6.24, so we ignore it.
 
     // The system call itself is inherently racy, so using `current_racy` here should be fine.
-    let current_cpu = CpuId::current_racy().as_usize() as u32;
+    let current_cpu: u32 = CpuId::current_racy().into();
 
     // TODO: Support NUMA.
     let current_node = 0u32;
diff --git a/kernel/src/syscall/sched_affinity.rs b/kernel/src/syscall/sched_affinity.rs
index ee35bc646..270886c8e 100644
--- a/kernel/src/syscall/sched_affinity.rs
+++ b/kernel/src/syscall/sched_affinity.rs
@@ -2,7 +2,10 @@
 
 use core::{cmp, sync::atomic::Ordering};
 
-use ostd::cpu::{num_cpus, CpuId, CpuSet};
+use ostd::{
+    cpu::{num_cpus, CpuId, CpuSet},
+    util::id_set::Id,
+};
 
 use super::SyscallReturn;
 use crate::{prelude::*, process::posix_thread::thread_table, thread::Tid};
diff --git a/kernel/src/thread/oops.rs b/kernel/src/thread/oops.rs
index f43adebfc..956566d3f 100644
--- a/kernel/src/thread/oops.rs
+++ b/kernel/src/thread/oops.rs
@@ -22,7 +22,7 @@ use core::{
     sync::atomic::{AtomicBool, AtomicUsize, Ordering},
 };
 
-use ostd::{cpu::PinCurrentCpu, panic, task::disable_preempt};
+use ostd::{cpu::PinCurrentCpu, panic, task::disable_preempt, util::id_set::Id};
 
 use super::Thread;
 
diff --git a/ostd/src/arch/riscv/cpu/context.rs b/ostd/src/arch/riscv/cpu/context.rs
index bd752d3d0..9443ea8d8 100644
--- a/ostd/src/arch/riscv/cpu/context.rs
+++ b/ostd/src/arch/riscv/cpu/context.rs
@@ -161,7 +161,7 @@ impl UserContextApiInternal for UserContext {
             }
             Trap::Interrupt(Interrupt::SupervisorExternal) => {
                 // No races because we are in IRQs.
-                let current_cpu = CpuId::current_racy().as_usize() as u32;
+                let current_cpu = CpuId::current_racy().into();
                 while let Some(hw_irq_line) =
                     IRQ_CHIP.get().unwrap().claim_interrupt(current_cpu)
                 {
diff --git a/ostd/src/arch/riscv/irq/chip/mod.rs b/ostd/src/arch/riscv/irq/chip/mod.rs
index 8c22fa78e..471338c64 100644
--- a/ostd/src/arch/riscv/irq/chip/mod.rs
+++ b/ostd/src/arch/riscv/irq/chip/mod.rs
@@ -80,11 +80,7 @@ impl IrqChip {
         plic.set_priority(interrupt_source_in_fdt.interrupt, 1);
         // FIXME: Here we only enable external insterrupt on the BSP. We should
         // enable it on APs as well when SMP is supported.
-        plic.set_interrupt_enabled(
-            CpuId::bsp().as_usize() as u32,
-            interrupt_source_in_fdt.interrupt,
-            true,
-        );
+        plic.set_interrupt_enabled(CpuId::bsp().into(), interrupt_source_in_fdt.interrupt, true);
 
         Ok(MappedIrqLine {
             irq_line,
@@ -137,7 +133,7 @@ impl IrqChip {
 
         // FIXME: Here we only disable external insterrupt on the BSP. We should
         // disable it on APs as well when SMP is supported.
-        plic.set_interrupt_enabled(CpuId::bsp().as_usize() as u32, *interrupt, false);
+        plic.set_interrupt_enabled(CpuId::bsp().into(), *interrupt, false);
         plic.set_priority(*interrupt, 0);
         plic.unmap_interrupt_source(*interrupt);
     }
diff --git a/ostd/src/arch/riscv/irq/mod.rs b/ostd/src/arch/riscv/irq/mod.rs
index b8390e0d0..f27d803ea 100644
--- a/ostd/src/arch/riscv/irq/mod.rs
+++ b/ostd/src/arch/riscv/irq/mod.rs
@@ -51,7 +51,7 @@ impl HwIrqLine {
             InterruptSource::External(interrupt_source_on_chip) => {
                 IRQ_CHIP.get().unwrap().complete_interrupt(
                     // No races because we are in IRQs.
-                    CpuId::current_racy().as_usize() as u32,
+                    CpuId::current_racy().into(),
                     *interrupt_source_on_chip,
                 );
             }
diff --git a/ostd/src/arch/riscv/trap/mod.rs b/ostd/src/arch/riscv/trap/mod.rs
index cbe59f1b7..1940ad593 100644
--- a/ostd/src/arch/riscv/trap/mod.rs
+++ b/ostd/src/arch/riscv/trap/mod.rs
@@ -45,7 +45,7 @@ extern "C" fn trap_handler(f: &mut TrapFrame) {
             }
             Interrupt::SupervisorExternal => {
                 // No races because we are in IRQs.
-                let current_cpu = CpuId::current_racy().as_usize() as u32;
+                let current_cpu = CpuId::current_racy().into();
                 while let Some(hw_irq_line) = IRQ_CHIP.get().unwrap().claim_interrupt(current_cpu) {
                     call_irq_callback_functions(f, &hw_irq_line, PrivilegeLevel::Kernel);
                 }
diff --git a/ostd/src/boot/smp.rs b/ostd/src/boot/smp.rs
index af7e471a7..7b0020579 100644
--- a/ostd/src/boot/smp.rs
+++ b/ostd/src/boot/smp.rs
@@ -14,6 +14,7 @@ use crate::{
     },
     sync::SpinLock,
     task::Task,
+    util::id_set::Id,
 };
 
 static AP_BOOT_INFO: Once<ApBootInfo> = Once::new();
diff --git a/ostd/src/cpu/id.rs b/ostd/src/cpu/id.rs
index 5bae909d4..5b9066153 100644
--- a/ostd/src/cpu/id.rs
+++ b/ostd/src/cpu/id.rs
@@ -36,11 +36,6 @@ impl CpuId {
         // SAFETY: There is at least one CPU.
         Self(bsp_raw_cpu_id)
     }
-
-    /// Converts the CPU ID to an `usize`.
-    pub const fn as_usize(self) -> usize {
-        self.0 as usize
-    }
 }
 
 impl From<CpuId> for u32 {
diff --git a/ostd/src/cpu/local/dyn_cpu_local.rs b/ostd/src/cpu/local/dyn_cpu_local.rs
index 007732904..73979e21e 100644
--- a/ostd/src/cpu/local/dyn_cpu_local.rs
+++ b/ostd/src/cpu/local/dyn_cpu_local.rs
@@ -11,6 +11,7 @@ use crate::{
     cpu::{all_cpus, num_cpus, CpuId, PinCurrentCpu},
     irq::DisabledLocalIrqGuard,
     mm::{paddr_to_vaddr, FrameAllocOptions, HasPaddr, Segment, Vaddr, PAGE_SIZE},
+    util::id_set::Id,
     Result,
 };
 
diff --git a/ostd/src/cpu/local/mod.rs b/ostd/src/cpu/local/mod.rs
index 43ee3e6f3..8e755677b 100644
--- a/ostd/src/cpu/local/mod.rs
+++ b/ostd/src/cpu/local/mod.rs
@@ -57,6 +57,7 @@ use super::CpuId;
 use crate::{
     irq::DisabledLocalIrqGuard,
     mm::{frame::allocator, paddr_to_vaddr, Paddr, PAGE_SIZE},
+    util::id_set::Id,
 };
 
 /// Dynamically-allocated CPU-local objects.
diff --git a/ostd/src/cpu/local/static_cpu_local.rs b/ostd/src/cpu/local/static_cpu_local.rs
index c17af0a85..120d68446 100644
--- a/ostd/src/cpu/local/static_cpu_local.rs
+++ b/ostd/src/cpu/local/static_cpu_local.rs
@@ -10,7 +10,7 @@
 use core::marker::PhantomData;
 
 use super::{AnyStorage, CpuLocal, __cpu_local_end, __cpu_local_start};
-use crate::{arch, cpu::CpuId, irq::DisabledLocalIrqGuard};
+use crate::{arch, cpu::CpuId, irq::DisabledLocalIrqGuard, util::id_set::Id};
 
 /// Defines a statically-allocated CPU-local variable.
 ///
diff --git a/ostd/src/smp.rs b/ostd/src/smp.rs
index aaee28261..250c29444 100644
--- a/ostd/src/smp.rs
+++ b/ostd/src/smp.rs
@@ -18,6 +18,7 @@ use crate::{
     cpu_local,
     irq::{self, IrqLine},
     sync::SpinLock,
+    util::id_set::Id,
 };
 
 /// Executes a function on other processors.
diff --git a/ostd/src/task/scheduler/fifo_scheduler.rs b/ostd/src/task/scheduler/fifo_scheduler.rs
index d9cd20fbf..9f1b605e9 100644
--- a/ostd/src/task/scheduler/fifo_scheduler.rs
+++ b/ostd/src/task/scheduler/fifo_scheduler.rs
@@ -9,6 +9,7 @@ use crate::{
     cpu::{num_cpus, CpuId, PinCurrentCpu},
     sync::SpinLock,
     task::{disable_preempt, Task},
+    util::id_set::Id,
 };
 
 pub fn init() {
diff --git a/ostd/src/task/scheduler/info.rs b/ostd/src/task/scheduler/info.rs
index 485fd2cea..28a10dd90 100644
--- a/ostd/src/task/scheduler/info.rs
+++ b/ostd/src/task/scheduler/info.rs
@@ -40,7 +40,7 @@ impl AtomicCpuId {
         self.0
             .compare_exchange(
                 Self::NONE,
-                cpu_id.as_usize() as u32,
+                cpu_id.into(),
                 Ordering::Relaxed,
                 Ordering::Relaxed,
             )
@@ -50,7 +50,7 @@ impl AtomicCpuId {
 
     /// Sets the inner value of an `AtomicCpuId` anyway.
     pub fn set_anyway(&self, cpu_id: CpuId) {
-        self.0.store(cpu_id.as_usize() as u32, Ordering::Relaxed);
+        self.0.store(cpu_id.into(), Ordering::Relaxed);
     }
 
     /// Sets the inner value of an `AtomicCpuId` to `AtomicCpuId::NONE`, i.e. makes
diff --git a/ostd/src/util/id_set.rs b/ostd/src/util/id_set.rs
index 0cee0d1d6..bff111758 100644
--- a/ostd/src/util/id_set.rs
+++ b/ostd/src/util/id_set.rs
@@ -23,9 +23,11 @@ use core::{
     fmt::Debug,
     marker::PhantomData,
     mem::size_of,
+    ops::{Bound, Range, RangeFrom, RangeFull, RangeTo, RangeToInclusive},
     sync::atomic::{AtomicU64, Ordering},
 };
 
+use bitvec::{order::Lsb0, view::BitView};
 use smallvec::SmallVec;
 
 use crate::const_assert;
@@ -73,6 +75,11 @@ pub unsafe trait Id: Copy + Clone + Debug + Eq + Into<u32> + PartialEq {
 
     /// The number of unique IDs representable by this type.
     fn cardinality() -> u32;
+
+    /// Returns the [`Id`]'s corresponding [`u32`] value as a [`usize`].
+    fn as_usize(self) -> usize {
+        Into::<u32>::into(self) as usize
+    }
 }
 
 /// A set of IDs.
@@ -241,30 +248,87 @@ impl<I: Id> IdSet<I> {
         self.bits.fill(0);
     }
 
-    /// Iterates over the IDs in the set.
+    /// Iterates over all IDs in the set.
     ///
-    /// The order of the iteration is guaranteed to be in ascending order.
+    /// The iteration is guaranteed to be in ascending order.
+    #[inline]
     pub fn iter(&self) -> impl Iterator<Item = I> + '_ {
-        let max_raw_id = I::cardinality() as usize;
-        self.bits
-            .iter()
-            .enumerate()
-            .flat_map(move |(part_idx, &part)| {
-                (0..BITS_PER_PART).filter_map(move |bit_idx| {
-                    if (part & (1 << bit_idx)) != 0 {
-                        let raw_id = part_idx * BITS_PER_PART + bit_idx;
-                        debug_assert!(raw_id < max_raw_id);
-                        // SAFETY: All bit 1s in the bitmap must be a valid ID.
-                        let id = unsafe { I::new_unchecked(raw_id as u32) };
-                        Some(id)
-                    } else {
-                        None
-                    }
-                })
+        self.iter_in(..)
+    }
+
+    /// Iterates over the IDs in the set within the specified range.
+    ///
+    /// The iteration is guaranteed to be in ascending order.
+    /// Only IDs that are both in the set and within the specified range will be returned.
+    pub fn iter_in<S: IdSetSlicer<I>>(&self, slicer: S) -> impl Iterator<Item = I> + '_ {
+        let (start, end) = slicer.to_range_bounds();
+
+        self.bits.view_bits::<Lsb0>()[start..end]
+            .iter_ones()
+            .map(move |offset| {
+                // SAFETY: `offset` is relative to the slice `[start..end]`,
+                // therefore `start + offset` is the absolute index of the bit.
+                // Since `offset` only iterates over relative positions of bit 1s, the
+                // resulting absolute index must refer to an active bit in `self.bits`.
+                unsafe { I::new_unchecked((start + offset) as u32) }
             })
     }
 }
 
+/// A trait that unifies all types that slice a portion of [`IdSet`].
+pub trait IdSetSlicer<I: Id> {
+    /// Converts the index type to inclusive start and exclusive end bounds.
+    ///
+    /// Returns `(start, end)` where:
+    /// - `start`: inclusive lower bound
+    /// - `end`: exclusive upper bound
+    fn to_range_bounds(self) -> (usize, usize);
+}
+
+// In the following implementations of `IdSetSlicer`, the `Id` values are upcast
+// from `u32` to `usize`. So adding one is guaranteed to *not* overflow.
+impl<I: Id> IdSetSlicer<I> for RangeTo<I> {
+    fn to_range_bounds(self) -> (usize, usize) {
+        (0, self.end.as_usize())
+    }
+}
+impl<I: Id> IdSetSlicer<I> for RangeFrom<I> {
+    fn to_range_bounds(self) -> (usize, usize) {
+        (self.start.as_usize(), I::cardinality() as usize)
+    }
+}
+impl<I: Id> IdSetSlicer<I> for Range<I> {
+    fn to_range_bounds(self) -> (usize, usize) {
+        (self.start.as_usize(), self.end.as_usize())
+    }
+}
+impl<I: Id> IdSetSlicer<I> for RangeFull {
+    fn to_range_bounds(self) -> (usize, usize) {
+        (0, I::cardinality() as usize)
+    }
+}
+impl<I: Id> IdSetSlicer<I> for RangeToInclusive<I> {
+    fn to_range_bounds(self) -> (usize, usize) {
+        (0, self.end.as_usize() + 1)
+    }
+}
+impl<I: Id> IdSetSlicer<I> for (Bound<I>, Bound<I>) {
+    fn to_range_bounds(self) -> (usize, usize) {
+        let (start_bound, end_bound) = self;
+        let start = match start_bound {
+            Bound::Included(id) => id.as_usize(),
+            Bound::Excluded(id) => id.as_usize() + 1,
+            Bound::Unbounded => 0,
+        };
+        let end = match end_bound {
+            Bound::Included(id) => id.as_usize() + 1,
+            Bound::Excluded(id) => id.as_usize(),
+            Bound::Unbounded => I::cardinality() as usize,
+        };
+        (start, end)
+    }
+}
+
 impl<I: Id> From<I> for IdSet<I> {
     fn from(id: I) -> Self {
         let mut set = Self::new_empty();
@@ -648,4 +712,228 @@
         assert!(set.is_empty());
         assert_eq!(set.count(), 0);
     }
+
+    #[ktest]
+    fn iter_in_range() {
+        type TestId = MockId<7>;
+        let mut set: IdSet<TestId> = IdSet::new_empty();
+        set.add(TestId::new(0));
+        set.add(TestId::new(1));
+        set.add(TestId::new(2));
+        set.add(TestId::new(5));
+        set.add(TestId::new(6));
+
+        let collected_ids: Vec<TestId> = set.iter_in(TestId::new(1)..TestId::new(5)).collect();
+        assert_eq!(collected_ids, vec![TestId::new(1), TestId::new(2)],);
+    }
+
+    #[ktest]
+    fn iter_in_range_to() {
+        type TestId = MockId<7>;
+        let mut set: IdSet<TestId> = IdSet::new_empty();
+        set.add(TestId::new(0));
+        set.add(TestId::new(1));
+        set.add(TestId::new(2));
+        set.add(TestId::new(5));
+        set.add(TestId::new(6));
+
+        let collected_ids: Vec<TestId> = set.iter_in(..TestId::new(5)).collect();
+        assert_eq!(
+            collected_ids,
+            vec![TestId::new(0), TestId::new(1), TestId::new(2)],
+        );
+    }
+
+    #[ktest]
+    fn iter_in_range_to_inclusive() {
+        type TestId = MockId<7>;
+        let mut set: IdSet<TestId> = IdSet::new_empty();
+        set.add(TestId::new(0));
+        set.add(TestId::new(1));
+        set.add(TestId::new(2));
+        set.add(TestId::new(5));
+        set.add(TestId::new(6));
+
+        let collected_ids: Vec<TestId> = set.iter_in(..=TestId::new(5)).collect();
+        assert_eq!(
+            collected_ids,
+            vec![
+                TestId::new(0),
+                TestId::new(1),
+                TestId::new(2),
+                TestId::new(5)
+            ],
+        );
+    }
+
+    #[ktest]
+    fn iter_in_range_from() {
+        type TestId = MockId<7>;
+        let mut set: IdSet<TestId> = IdSet::new_empty();
+        set.add(TestId::new(0));
+        set.add(TestId::new(1));
+        set.add(TestId::new(2));
+        set.add(TestId::new(5));
+        set.add(TestId::new(6));
+
+        let collected_ids: Vec<TestId> = set.iter_in(TestId::new(2)..).collect();
+        assert_eq!(
+            collected_ids,
+            vec![TestId::new(2), TestId::new(5), TestId::new(6)],
+        );
+    }
+
+    #[ktest]
+    fn iter_in_range_full() {
+        type TestId = MockId<7>;
+        let mut set: IdSet<TestId> = IdSet::new_empty();
+        set.add(TestId::new(0));
+        set.add(TestId::new(1));
+        set.add(TestId::new(2));
+        set.add(TestId::new(5));
+        set.add(TestId::new(6));
+
+        let collected_ids: Vec<TestId> = set.iter_in(..).collect();
+        assert_eq!(
+            collected_ids,
+            vec![
+                TestId::new(0),
+                TestId::new(1),
+                TestId::new(2),
+                TestId::new(5),
+                TestId::new(6)
+            ],
+        );
+    }
+
+    #[ktest]
+    fn iter_in_bound_tuple_inclusive_exclusive() {
+        type TestId = MockId<7>;
+        let mut set: IdSet<TestId> = IdSet::new_empty();
+        set.add(TestId::new(0));
+        set.add(TestId::new(1));
+        set.add(TestId::new(2));
+        set.add(TestId::new(5));
+        set.add(TestId::new(6));
+
+        let collected_ids: Vec<TestId> = set
+            .iter_in((
+                Bound::Included(TestId::new(1)),
+                Bound::Excluded(TestId::new(5)),
+            ))
+            .collect();
+        assert_eq!(collected_ids, vec![TestId::new(1), TestId::new(2)],);
+    }
+
+    #[ktest]
+    fn iter_in_bound_tuple_exclusive_inclusive() {
+        type TestId = MockId<7>;
+        let mut set: IdSet<TestId> = IdSet::new_empty();
+        set.add(TestId::new(0));
+        set.add(TestId::new(1));
+        set.add(TestId::new(2));
+        set.add(TestId::new(5));
+        set.add(TestId::new(6));
+
+        let collected_ids: Vec<TestId> = set
+            .iter_in((
+                Bound::Excluded(TestId::new(1)),
+                Bound::Included(TestId::new(5)),
+            ))
+            .collect();
+        assert_eq!(collected_ids, vec![TestId::new(2), TestId::new(5)],);
+    }
+
+    #[ktest]
+    fn iter_in_unbounded_bounds() {
+        type TestId = MockId<7>;
+        let mut set: IdSet<TestId> = IdSet::new_empty();
+        set.add(TestId::new(0));
+        set.add(TestId::new(1));
+        set.add(TestId::new(2));
+        set.add(TestId::new(5));
+        set.add(TestId::new(6));
+
+        let collected_ids: Vec<TestId> = set
+            .iter_in((Bound::Unbounded::<TestId>, Bound::Unbounded::<TestId>))
+            .collect();
+        assert_eq!(
+            collected_ids,
+            vec![
+                TestId::new(0),
+                TestId::new(1),
+                TestId::new(2),
+                TestId::new(5),
+                TestId::new(6)
+            ],
+        );
+    }
+
+    #[ktest]
+    fn iter_in_half_unbounded() {
+        type TestId = MockId<7>;
+        let mut set: IdSet<TestId> = IdSet::new_empty();
+        set.add(TestId::new(0));
+        set.add(TestId::new(1));
+        set.add(TestId::new(2));
+        set.add(TestId::new(5));
+        set.add(TestId::new(6));
+
+        let collected_ids: Vec<TestId> = set
+            .iter_in((Bound::Included(TestId::new(2)), Bound::Unbounded::<TestId>))
+            .collect();
+        assert_eq!(
+            collected_ids,
+            vec![TestId::new(2), TestId::new(5), TestId::new(6)],
+        );
+
+        let collected_ids: Vec<TestId> = set
+            .iter_in((Bound::Unbounded::<TestId>, Bound::Included(TestId::new(2))))
+            .collect();
+        assert_eq!(
+            collected_ids,
+            vec![TestId::new(0), TestId::new(1), TestId::new(2)],
+        );
+    }
+
+    #[ktest]
+    fn iter_in_range_starts_after_last() {
+        type TestId = MockId<7>;
+        let mut set: IdSet<TestId> = IdSet::new_empty();
+        set.add(TestId::new(0));
+        set.add(TestId::new(1));
+        set.add(TestId::new(2));
+
+        let collected_ids: Vec<TestId> = set.iter_in(TestId::new(3)..).collect();
+        assert_eq!(collected_ids, vec![],);
+    }
+
+    #[ktest]
+    fn iter_in_range_ends_after_last() {
+        type TestId = MockId<7>;
+        let mut set: IdSet<TestId> = IdSet::new_empty();
+        set.add(TestId::new(0));
+        set.add(TestId::new(1));
+        set.add(TestId::new(2));
+
+        let collected_ids: Vec<TestId> = set.iter_in(..TestId::new(3)).collect();
+        assert_eq!(
+            collected_ids,
+            vec![TestId::new(0), TestId::new(1), TestId::new(2)],
+        );
+    }
+
+    #[ktest]
+    fn iter_in_range_next_part() {
+        type TestId = MockId<{ InnerPart::BITS }>;
+        let last_id = TestId::new(InnerPart::BITS - 1);
+
+        let mut set: IdSet<TestId> = IdSet::new_empty();
+        set.add(last_id);
+
+        let collected_ids: Vec<TestId> = set
+            .iter_in((Bound::Excluded(last_id), Bound::Included(last_id)))
+            .collect();
+        assert_eq!(collected_ids, vec![],);
+    }
 }
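
The short, self-contained sketch below is not part of the patch; `Cpu` and `CpuMask` are hypothetical stand-ins for `CpuId` and `CpuSet`/`IdSet`, with the bitmap reduced to a single `u64`. It only models the behavior the new APIs are intended to provide: `iter_in` yields the IDs present in the set whose raw values fall in a half-open range, in ascending order, and `cycle_after` chains two such slices so that iteration starts just after the last chosen CPU, wraps around, and yields that CPU last.

// Standalone model of the new `iter_in` + `cycle_after` behavior.
// `Cpu` and `CpuMask` are hypothetical stand-ins, not the real ostd types.

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Cpu(u32);

#[derive(Clone, Copy)]
struct CpuMask(u64);

impl CpuMask {
    // Yields the set bits whose indices fall in `[start, end)`, in ascending
    // order, mirroring `IdSet::iter_in` over a half-open range.
    fn iter_in(&self, start: u32, end: u32) -> impl Iterator<Item = Cpu> + '_ {
        let bits = self.0;
        (start..end).filter(move |i| bits & (1 << i) != 0).map(Cpu)
    }

    // Cycles through the mask starting *after* `cpu` and wrapping around, so
    // that `cpu` itself (if present) is yielded last, mirroring
    // `ClassScheduler::cycle_after`.
    fn cycle_after(&self, cpu: Cpu, num_cpus: u32) -> impl Iterator<Item = Cpu> + '_ {
        self.iter_in(cpu.0 + 1, num_cpus).chain(self.iter_in(0, cpu.0 + 1))
    }
}

fn main() {
    let affinity = CpuMask(0b0110_0111); // CPUs {0, 1, 2, 5, 6}

    // Round-robin pick order when CPU 2 was chosen last time.
    let order: Vec<Cpu> = affinity.cycle_after(Cpu(2), 8).collect();
    assert_eq!(order, [Cpu(5), Cpu(6), Cpu(0), Cpu(1), Cpu(2)]);
    println!("{order:?}");
}

Chaining the `(Excluded(cpu), Unbounded)` slice with the `..=cpu` slice is what keeps the scheduler from repeatedly picking the same CPU when several candidates are equally idle: on a load tie, the first candidate visited after the previous pick wins.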