Refactor IoMem acquisition to use appropriate cache policies across multiple components

This commit is contained in:
Zhe Tang 2025-11-14 02:41:30 +00:00 committed by Tate, Hongliang Tian
parent fb1cab9951
commit 70eda539df
6 changed files with 76 additions and 22 deletions

View File

@@ -5,7 +5,7 @@ use alloc::{sync::Arc, vec::Vec};
use ostd::{
boot::boot_info,
io::IoMem,
mm::{HasSize, VmIo},
mm::{CachePolicy, HasSize, VmIo},
sync::Mutex,
Error, Result,
};
@@ -96,7 +96,14 @@ pub(crate) fn init() {
let fb_size = framebuffer_arg.height.checked_mul(line_size).unwrap();
let fb_base = framebuffer_arg.address;
let io_mem = IoMem::acquire(fb_base..fb_base.checked_add(fb_size).unwrap()).unwrap();
// Use write-combining for framebuffer to enable faster write operations.
// Write-combining allows the CPU to combine multiple writes into fewer bus transactions,
// which is ideal for framebuffer access patterns (sequential writes).
let io_mem = IoMem::acquire_with_cache_policy(
fb_base..fb_base.checked_add(fb_size).unwrap(),
CachePolicy::WriteCombining,
)
.unwrap();
let default_cmap = FbCmap {
entries: Vec::new(),

View File

@@ -229,6 +229,8 @@ impl MemoryBar {
/// Grants I/O memory access.
///
/// The BAR's MMIO range `[base, base + size)` is reserved lazily on the
/// first call; `call_once` guarantees the reservation happens at most once
/// and the cached `IoMem` is returned on every subsequent call.
pub fn io_mem(&self) -> &IoMem {
    self.io_memory.call_once(|| {
        // Use the `Uncacheable` cache policy for PCI device BARs by default.
        // Device-specific drivers may remap with different cache policies if needed.
        //
        // Compute the exclusive end of the range with `checked_add` so an
        // overflowing `base + size` panics loudly instead of silently
        // wrapping in release builds, matching the `checked_add(..).unwrap()`
        // pattern used for other I/O-memory ranges in this codebase.
        let end = self.base.checked_add(self.size).unwrap();
        IoMem::acquire((self.base as usize)..(end as usize)).unwrap()
    })
}

View File

@@ -319,7 +319,7 @@ impl Plic {
Self {
phandle,
io_mem: io_mem_builder.reserve(range),
io_mem: io_mem_builder.reserve(range, crate::mm::CachePolicy::Uncacheable),
hart_to_target_mapping,
interrupt_number_mappings: (0..num_interrupt_sources)
.map(|_| None)

View File

@@ -178,7 +178,10 @@ impl IoApicAccess {
///
/// The caller must ensure that the base address is a valid I/O APIC base address.
pub(self) unsafe fn new(base_address: usize, io_mem_builder: &IoMemAllocatorBuilder) -> Self {
let io_mem = io_mem_builder.reserve(base_address..(base_address + Self::MMIO_SIZE));
let io_mem = io_mem_builder.reserve(
base_address..(base_address + Self::MMIO_SIZE),
crate::mm::CachePolicy::Uncacheable,
);
if_tdx_enabled!({
assert_eq!(

View File

@@ -359,7 +359,10 @@ pub fn init(io_mem_builder: &IoMemAllocatorBuilder) -> Result<(), ApicInitError>
log::info!("xAPIC found!");
// SAFETY: xAPIC is present.
let base_address = unsafe { xapic::read_xapic_base_address() };
let io_mem = io_mem_builder.reserve(base_address..(base_address + xapic::XAPIC_MMIO_SIZE));
let io_mem = io_mem_builder.reserve(
base_address..(base_address + xapic::XAPIC_MMIO_SIZE),
crate::mm::CachePolicy::Uncacheable,
);
APIC_TYPE.call_once(|| ApicType::XApic(io_mem));
Ok(())
} else {

View File

@@ -179,7 +179,10 @@ mod test {
use alloc::vec;
use super::{IoMemAllocator, IoMemAllocatorBuilder};
use crate::{mm::PAGE_SIZE, prelude::ktest};
use crate::{
mm::{CachePolicy, PAGE_SIZE},
prelude::ktest,
};
#[expect(clippy::reversed_empty_ranges)]
#[expect(clippy::single_range_in_vec_init)]
@@ -189,18 +192,36 @@ mod test {
let allocator =
unsafe { IoMemAllocator::new(IoMemAllocatorBuilder::new(range).allocators) };
assert!(allocator.acquire(0..0).is_none());
assert!(allocator.acquire(usize::MAX..0).is_none());
assert!(allocator.acquire(0..0, CachePolicy::Uncacheable).is_none());
assert!(allocator
.acquire(usize::MAX..0, CachePolicy::Uncacheable)
.is_none());
assert!(allocator.acquire(0x4000_0000..0x4000_0000).is_none());
assert!(allocator.acquire(0x4000_1000..0x4000_1000).is_none());
assert!(allocator.acquire(0x41ff_0000..0x41ff_0000).is_none());
assert!(allocator.acquire(0x4200_0000..0x4200_0000).is_none());
assert!(allocator
.acquire(0x4000_0000..0x4000_0000, CachePolicy::Uncacheable)
.is_none());
assert!(allocator
.acquire(0x4000_1000..0x4000_1000, CachePolicy::Uncacheable)
.is_none());
assert!(allocator
.acquire(0x41ff_0000..0x41ff_0000, CachePolicy::Uncacheable)
.is_none());
assert!(allocator
.acquire(0x4200_0000..0x4200_0000, CachePolicy::Uncacheable)
.is_none());
assert!(allocator.acquire(0x4000_1000..0x4000_0000).is_none());
assert!(allocator.acquire(0x4000_2000..0x4000_1000).is_none());
assert!(allocator.acquire(0x41ff_f000..0x41ff_e000).is_none());
assert!(allocator.acquire(0x4200_0000..0x41ff_f000).is_none());
assert!(allocator
.acquire(0x4000_1000..0x4000_0000, CachePolicy::Uncacheable)
.is_none());
assert!(allocator
.acquire(0x4000_2000..0x4000_1000, CachePolicy::Uncacheable)
.is_none());
assert!(allocator
.acquire(0x41ff_f000..0x41ff_e000, CachePolicy::Uncacheable)
.is_none());
assert!(allocator
.acquire(0x4200_0000..0x41ff_f000, CachePolicy::Uncacheable)
.is_none());
}
#[ktest]
@@ -216,24 +237,42 @@ mod test {
unsafe { IoMemAllocator::new(IoMemAllocatorBuilder::new(range).allocators) };
assert!(allocator
.acquire((io_mem_region_a.start - 1)..io_mem_region_a.start)
.acquire(
(io_mem_region_a.start - 1)..io_mem_region_a.start,
CachePolicy::Uncacheable
)
.is_none());
assert!(allocator
.acquire(io_mem_region_a.start..(io_mem_region_a.start + 1))
.acquire(
io_mem_region_a.start..(io_mem_region_a.start + 1),
CachePolicy::Uncacheable
)
.is_some());
assert!(allocator
.acquire((io_mem_region_a.end + 1)..(io_mem_region_b.start - 1))
.acquire(
(io_mem_region_a.end + 1)..(io_mem_region_b.start - 1),
CachePolicy::Uncacheable
)
.is_none());
assert!(allocator
.acquire((io_mem_region_a.end - 1)..(io_mem_region_b.start + 1))
.acquire(
(io_mem_region_a.end - 1)..(io_mem_region_b.start + 1),
CachePolicy::Uncacheable
)
.is_none());
assert!(allocator
.acquire((io_mem_region_a.end - 1)..io_mem_region_a.end)
.acquire(
(io_mem_region_a.end - 1)..io_mem_region_a.end,
CachePolicy::Uncacheable
)
.is_some());
assert!(allocator
.acquire(io_mem_region_a.end..(io_mem_region_a.end + 1))
.acquire(
io_mem_region_a.end..(io_mem_region_a.end + 1),
CachePolicy::Uncacheable
)
.is_none());
}
}