2024-05-15 05:41:30 +00:00
|
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
|
|
|
|
|
|
//! Because the page table implementation requires metadata initialized
|
|
|
|
|
//! and mapped, the boot page table is needed to do early stage page table setup
|
|
|
|
|
//! in order to initialize the running phase page tables.
|
|
|
|
|
|
|
|
|
|
use alloc::vec::Vec;
|
2024-08-22 10:48:33 +00:00
|
|
|
use core::{
|
|
|
|
|
result::Result,
|
|
|
|
|
sync::atomic::{AtomicU32, Ordering},
|
|
|
|
|
};
|
2024-05-15 05:41:30 +00:00
|
|
|
|
|
|
|
|
use super::{pte_index, PageTableEntryTrait};
|
2024-05-26 16:25:59 +00:00
|
|
|
use crate::{
|
|
|
|
|
arch::mm::{PageTableEntry, PagingConsts},
|
2024-08-22 10:48:33 +00:00
|
|
|
cpu::num_cpus,
|
2024-10-11 10:12:48 +00:00
|
|
|
cpu_local_cell,
|
2024-05-26 17:53:44 +00:00
|
|
|
mm::{
|
2024-12-25 14:53:24 +00:00
|
|
|
frame::allocator::FRAME_ALLOCATOR, nr_subpage_per_huge, paddr_to_vaddr, Paddr,
|
|
|
|
|
PageProperty, PagingConstsTrait, Vaddr, PAGE_SIZE,
|
2024-05-26 16:25:59 +00:00
|
|
|
},
|
2024-08-22 10:48:33 +00:00
|
|
|
sync::SpinLock,
|
2024-05-15 05:41:30 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
/// The index of a physical page frame, i.e., a physical address divided by
/// the base page size.
type FrameNumber = usize;
|
|
|
|
|
|
2024-08-22 10:48:33 +00:00
|
|
|
/// The accessor to the boot page table singleton [`BootPageTable`].
///
/// The user should provide a closure to access the boot page table. The
/// function will acquire the lock and call the closure with a mutable
/// reference to the boot page table as the argument.
///
/// The boot page table will be dropped when there's no CPU activating it.
/// This function will return an [`Err`] if the boot page table is dropped.
pub(crate) fn with_borrow<F, R>(f: F) -> Result<R, ()>
where
    F: FnOnce(&mut BootPageTable) -> R,
{
    let mut boot_pt = BOOT_PAGE_TABLE.lock();

    // After this CPU has dismissed the boot page table it must never touch it
    // again: the table may already have been dropped by the last dismisser.
    if IS_DISMISSED.load() {
        return Err(());
    }

    // Lazy initialization: the singleton is built from the page table that is
    // currently active when the first borrow happens.
    if boot_pt.is_none() {
        // SAFETY: This function is called only once.
        *boot_pt = Some(unsafe { BootPageTable::from_current_pt() });
    }

    // The lock is held across the closure call, so the closure gets exclusive
    // access to the boot page table.
    let r = f(boot_pt.as_mut().unwrap());

    Ok(r)
}
|
|
|
|
|
|
|
|
|
|
/// Dismiss the boot page table.
///
/// By calling it on a CPU, the caller claims that the boot page table is no
/// longer needed on this CPU.
///
/// # Safety
///
/// The caller should ensure that:
/// - another legitimate page table is activated on this CPU;
/// - this function should be called only once per CPU;
/// - no [`with_borrow`] calls are performed on this CPU after this dismissal;
/// - no [`with_borrow`] calls are performed on this CPU after the activation of
///   another page table and before this dismissal.
pub(crate) unsafe fn dismiss() {
    // Mark this CPU so that later `with_borrow` calls on it return `Err`.
    IS_DISMISSED.store(true);
    // The CPU that performs the last dismissal drops the boot page table,
    // returning its tracked frames to the allocator (see `Drop`).
    if DISMISS_COUNT.fetch_add(1, Ordering::SeqCst) as usize == num_cpus() - 1 {
        BOOT_PAGE_TABLE.lock().take();
    }
}
|
|
|
|
|
|
|
|
|
|
/// The boot page table singleton instance.
static BOOT_PAGE_TABLE: SpinLock<Option<BootPageTable>> = SpinLock::new(None);
/// The number of CPUs that have dismissed the boot page table.
///
/// If it reaches the number of CPUs, the boot page table will be dropped.
static DISMISS_COUNT: AtomicU32 = AtomicU32::new(0);
cpu_local_cell! {
    /// If the boot page table is dismissed on this CPU.
    static IS_DISMISSED: bool = false;
}
|
2024-08-22 10:48:33 +00:00
|
|
|
|
|
|
|
|
/// A simple boot page table singleton for boot stage mapping management.
///
/// If applicable, the boot page table could track the lifetime of page table
/// frames that are set up by the firmware, loader or the setup code.
pub struct BootPageTable<
    E: PageTableEntryTrait = PageTableEntry,
    C: PagingConstsTrait = PagingConsts,
> {
    // The frame number of the root page table frame.
    root_pt: FrameNumber,
    // The frames allocated for this page table are not tracked with
    // metadata [`crate::mm::frame::meta`]. Here is a record of it
    // for deallocation.
    frames: Vec<FrameNumber>,
    // Binds the generic parameters to the type; they are only used by methods.
    _pretend_to_use: core::marker::PhantomData<(E, C)>,
}
|
|
|
|
|
|
|
|
|
|
impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
    /// Creates a new boot page table from the current page table root
    /// physical address.
    ///
    /// # Safety
    ///
    /// This function should be called only once in the initialization phase.
    /// Otherwise, It would lead to double-drop of the page table frames set up
    /// by the firmware, loader or the setup code.
    unsafe fn from_current_pt() -> Self {
        let root_paddr = crate::arch::mm::current_page_table_paddr();
        Self {
            root_pt: root_paddr / C::BASE_PAGE_SIZE,
            // Frames set up before this call are not recorded here, so only
            // frames allocated via `alloc_frame` are deallocated on drop.
            frames: Vec::new(),
            _pretend_to_use: core::marker::PhantomData,
        }
    }

    /// Returns the root physical address of the boot page table.
    pub(crate) fn root_address(&self) -> Paddr {
        self.root_pt * C::BASE_PAGE_SIZE
    }

    /// Maps a base page to a frame.
    ///
    /// # Panics
    ///
    /// This function will panic if the page is already mapped.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it can cause undefined behavior if the caller
    /// maps a page in the kernel address space.
    pub unsafe fn map_base_page(&mut self, from: Vaddr, to: FrameNumber, prop: PageProperty) {
        let mut pt = self.root_pt;
        let mut level = C::NR_LEVELS;
        // Walk to the last level of the page table.
        while level > 1 {
            let index = pte_index::<C>(from, level);
            let pte_ptr = unsafe { (paddr_to_vaddr(pt * C::BASE_PAGE_SIZE) as *mut E).add(index) };
            let pte = unsafe { pte_ptr.read() };
            pt = if !pte.is_present() {
                // The child table is missing; allocate a zeroed frame for it.
                let frame = self.alloc_frame();
                unsafe { pte_ptr.write(E::new_pt(frame * C::BASE_PAGE_SIZE)) };
                frame
            } else if pte.is_last(level) {
                // A present PTE that is terminal above level 1 is a huge page.
                panic!("mapping an already mapped huge page in the boot page table");
            } else {
                // Descend into the existing child page table.
                pte.paddr() / C::BASE_PAGE_SIZE
            };
            level -= 1;
        }
        // Map the page in the last level page table.
        let index = pte_index::<C>(from, 1);
        let pte_ptr = unsafe { (paddr_to_vaddr(pt * C::BASE_PAGE_SIZE) as *mut E).add(index) };
        let pte = unsafe { pte_ptr.read() };
        if pte.is_present() {
            panic!("mapping an already mapped page in the boot page table");
        }
        unsafe { pte_ptr.write(E::new_page(to * C::BASE_PAGE_SIZE, 1, prop)) };
    }

    /// Set protections of a base page mapping.
    ///
    /// This function may split a huge page into base pages, causing page allocations
    /// if the original mapping is a huge page.
    ///
    /// # Panics
    ///
    /// This function will panic if the page is not mapped.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it can cause undefined behavior if the caller
    /// protects a page in the kernel address space.
    pub unsafe fn protect_base_page(
        &mut self,
        virt_addr: Vaddr,
        mut op: impl FnMut(&mut PageProperty),
    ) {
        let mut pt = self.root_pt;
        let mut level = C::NR_LEVELS;
        // Walk to the last level of the page table.
        while level > 1 {
            let index = pte_index::<C>(virt_addr, level);
            let pte_ptr = unsafe { (paddr_to_vaddr(pt * C::BASE_PAGE_SIZE) as *mut E).add(index) };
            let pte = unsafe { pte_ptr.read() };
            pt = if !pte.is_present() {
                panic!("protecting an unmapped page in the boot page table");
            } else if pte.is_last(level) {
                // Split the huge page.
                let frame = self.alloc_frame();
                let huge_pa = pte.paddr();
                // Fill the new child table with entries that together cover
                // exactly the range of the original huge page, keeping its
                // original properties.
                for i in 0..nr_subpage_per_huge::<C>() {
                    let nxt_ptr =
                        unsafe { (paddr_to_vaddr(frame * C::BASE_PAGE_SIZE) as *mut E).add(i) };
                    unsafe {
                        nxt_ptr.write(E::new_page(
                            huge_pa + i * C::BASE_PAGE_SIZE,
                            level - 1,
                            pte.prop(),
                        ))
                    };
                }
                // Replace the huge-page PTE with a pointer to the new table.
                unsafe { pte_ptr.write(E::new_pt(frame * C::BASE_PAGE_SIZE)) };
                frame
            } else {
                pte.paddr() / C::BASE_PAGE_SIZE
            };
            level -= 1;
        }
        // Do protection in the last level page table.
        let index = pte_index::<C>(virt_addr, 1);
        let pte_ptr = unsafe { (paddr_to_vaddr(pt * C::BASE_PAGE_SIZE) as *mut E).add(index) };
        let pte = unsafe { pte_ptr.read() };
        if !pte.is_present() {
            panic!("protecting an unmapped page in the boot page table");
        }
        // Rewrite the PTE with the same physical address but the property
        // transformed by the caller-provided closure.
        let mut prop = pte.prop();
        op(&mut prop);
        unsafe { pte_ptr.write(E::new_page(pte.paddr(), 1, prop)) };
    }

    /// Allocates a zeroed frame and records it for deallocation on drop.
    fn alloc_frame(&mut self) -> FrameNumber {
        let frame = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap();
        self.frames.push(frame);
        // Zero it out.
        let vaddr = paddr_to_vaddr(frame * PAGE_SIZE) as *mut u8;
        unsafe { core::ptr::write_bytes(vaddr, 0, PAGE_SIZE) };
        frame
    }
}
|
|
|
|
|
|
|
|
|
|
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for BootPageTable<E, C> {
|
|
|
|
|
fn drop(&mut self) {
|
2024-06-12 08:55:59 +00:00
|
|
|
for frame in &self.frames {
|
2024-12-25 14:53:24 +00:00
|
|
|
FRAME_ALLOCATOR.get().unwrap().lock().dealloc(*frame, 1);
|
2024-06-12 08:55:59 +00:00
|
|
|
}
|
2024-05-15 05:41:30 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2024-06-21 05:51:13 +00:00
|
|
|
#[cfg(ktest)]
|
|
|
|
|
use crate::prelude::*;
|
|
|
|
|
|
2024-05-15 05:41:30 +00:00
|
|
|
#[cfg(ktest)]
#[ktest]
fn test_boot_pt_map_protect() {
    use super::page_walk;
    use crate::{
        arch::mm::{PageTableEntry, PagingConsts},
        mm::{CachePolicy, FrameAllocOptions, PageFlags},
    };

    // Build a boot page table over a freshly allocated, empty root frame
    // instead of the CPU's current one, so the test is self-contained.
    let root_frame = FrameAllocOptions::new().alloc_frame().unwrap();
    let root_paddr = root_frame.start_paddr();

    let mut boot_pt = BootPageTable::<PageTableEntry, PagingConsts> {
        root_pt: root_paddr / PagingConsts::BASE_PAGE_SIZE,
        frames: Vec::new(),
        _pretend_to_use: core::marker::PhantomData,
    };

    // Map a base page, then verify via a software page walk that an address
    // inside the page (offset 1) resolves to the expected frame and property.
    let from1 = 0x1000;
    let to1 = 0x2;
    let prop1 = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
    unsafe { boot_pt.map_base_page(from1, to1, prop1) };
    assert_eq!(
        unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from1 + 1) },
        Some((to1 * PAGE_SIZE + 1, prop1))
    );
    // Change the flags to RX and verify the walk reflects the new property
    // while the mapped frame stays the same.
    unsafe { boot_pt.protect_base_page(from1, |prop| prop.flags = PageFlags::RX) };
    assert_eq!(
        unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from1 + 1) },
        Some((
            to1 * PAGE_SIZE + 1,
            PageProperty::new(PageFlags::RX, CachePolicy::Writeback)
        ))
    );

    // Repeat with a second page and different flags/cache policy, checking at
    // offset 2 within the page.
    let from2 = 0x2000;
    let to2 = 0x3;
    let prop2 = PageProperty::new(PageFlags::RX, CachePolicy::Uncacheable);
    unsafe { boot_pt.map_base_page(from2, to2, prop2) };
    assert_eq!(
        unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from2 + 2) },
        Some((to2 * PAGE_SIZE + 2, prop2))
    );
    unsafe { boot_pt.protect_base_page(from2, |prop| prop.flags = PageFlags::RW) };
    assert_eq!(
        unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from2 + 2) },
        Some((
            to2 * PAGE_SIZE + 2,
            PageProperty::new(PageFlags::RW, CachePolicy::Uncacheable)
        ))
    );
}
|