// SPDX-License-Identifier: MPL-2.0
#![allow(dead_code)]

//! Kernel memory space management.
//!
//! The kernel memory space is currently managed as follows, if the
//! address width is 48 bits (with 47 bits kernel space).
//!
//! ```text
//! +-+ <- the highest used address (0xffff_ffff_ffff_0000)
//! | | For the kernel code, 1 GiB. Mapped frames are untracked.
//! +-+ <- 0xffff_ffff_8000_0000
//! | |
//! | | Unused hole.
//! +-+ <- 0xffff_e100_0000_0000
//! | | For frame metadata, 1 TiB. Mapped frames are untracked.
//! +-+ <- 0xffff_e000_0000_0000
//! | |
//! | | For vm alloc/io mappings, 32 TiB.
//! | | Mapped frames are tracked with handles.
//! | |
//! +-+ <- the middle of the higher half (0xffff_c000_0000_0000)
//! | |
//! | |
//! | |
//! | | For linear mappings, 64 TiB.
//! | | Mapped physical addresses are untracked.
//! | |
//! | |
//! | |
//! +-+ <- the base of high canonical address (0xffff_8000_0000_0000)
//! ```
//!
//! If the address width is (according to [`crate::arch::mm::PagingConsts`])
//! 39 bits or 57 bits, the memory space just adjusts proportionally.
use alloc::vec::Vec;
|
2024-06-12 08:55:59 +00:00
|
|
|
use core::{mem::ManuallyDrop, ops::Range};
|
2024-05-05 14:51:01 +00:00
|
|
|
|
2024-03-25 05:25:32 +00:00
|
|
|
use align_ext::AlignExt;
|
2024-05-26 16:25:59 +00:00
|
|
|
use log::info;
|
2024-03-25 05:25:32 +00:00
|
|
|
use spin::Once;
|
|
|
|
|
|
2024-05-04 03:02:49 +00:00
|
|
|
use super::{
|
2024-05-15 05:41:30 +00:00
|
|
|
nr_subpage_per_huge,
|
2024-05-26 16:25:59 +00:00
|
|
|
page::{
|
|
|
|
|
meta::{mapping, KernelMeta},
|
|
|
|
|
Page,
|
|
|
|
|
},
|
2024-05-15 05:41:30 +00:00
|
|
|
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
|
|
|
|
|
page_table::{boot_pt::BootPageTable, KernelMode, PageTable},
|
2024-05-26 16:25:59 +00:00
|
|
|
MemoryRegionType, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE,
|
2024-03-25 05:25:32 +00:00
|
|
|
};
|
2024-06-12 08:55:59 +00:00
|
|
|
use crate::{
|
|
|
|
|
arch::mm::{PageTableEntry, PagingConsts},
|
|
|
|
|
sync::SpinLock,
|
|
|
|
|
};
|
2024-03-25 05:25:32 +00:00
|
|
|
|
2024-05-15 05:41:30 +00:00
|
|
|
/// The shortest supported address width is 39 bits. And the literal
|
|
|
|
|
/// values are written for 48 bits address width. Adjust the values
|
|
|
|
|
/// by arithmetic left shift.
|
|
|
|
|
const ADDR_WIDTH_SHIFT: isize = PagingConsts::ADDRESS_WIDTH as isize - 48;
|
2024-03-25 05:25:32 +00:00
|
|
|
|
2024-05-15 05:41:30 +00:00
|
|
|
/// Start of the kernel address space.
|
|
|
|
|
/// This is the _lowest_ address of the x86-64's _high_ canonical addresses.
|
|
|
|
|
pub const KERNEL_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000 << ADDR_WIDTH_SHIFT;
|
|
|
|
|
/// End of the kernel address space (non inclusive).
|
|
|
|
|
pub const KERNEL_END_VADDR: Vaddr = 0xffff_ffff_ffff_0000 << ADDR_WIDTH_SHIFT;
|
2024-05-05 14:51:01 +00:00
|
|
|
|
2024-03-25 05:25:32 +00:00
|
|
|
/// The kernel code is linear mapped to this address.
|
|
|
|
|
///
|
|
|
|
|
/// FIXME: This offset should be randomly chosen by the loader or the
|
|
|
|
|
/// boot compatibility layer. But we disabled it because the framework
|
|
|
|
|
/// doesn't support relocatable kernel yet.
|
2024-05-15 05:41:30 +00:00
|
|
|
pub fn kernel_loaded_offset() -> usize {
|
|
|
|
|
KERNEL_CODE_BASE_VADDR
|
2024-03-25 05:25:32 +00:00
|
|
|
}
|
2024-05-15 05:41:30 +00:00
|
|
|
|
|
|
|
|
const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_8000_0000 << ADDR_WIDTH_SHIFT;
|
|
|
|
|
|
2024-05-26 16:25:59 +00:00
|
|
|
const FRAME_METADATA_CAP_VADDR: Vaddr = 0xffff_e100_0000_0000 << ADDR_WIDTH_SHIFT;
|
|
|
|
|
const FRAME_METADATA_BASE_VADDR: Vaddr = 0xffff_e000_0000_0000 << ADDR_WIDTH_SHIFT;
|
2024-05-26 17:53:44 +00:00
|
|
|
pub(in crate::mm) const FRAME_METADATA_RANGE: Range<Vaddr> =
|
2024-05-26 16:25:59 +00:00
|
|
|
FRAME_METADATA_BASE_VADDR..FRAME_METADATA_CAP_VADDR;
|
2024-05-15 05:41:30 +00:00
|
|
|
|
|
|
|
|
const VMALLOC_BASE_VADDR: Vaddr = 0xffff_c000_0000_0000 << ADDR_WIDTH_SHIFT;
|
2024-05-26 16:25:59 +00:00
|
|
|
pub const VMALLOC_VADDR_RANGE: Range<Vaddr> = VMALLOC_BASE_VADDR..FRAME_METADATA_BASE_VADDR;
|
2024-05-15 05:41:30 +00:00
|
|
|
|
|
|
|
|
/// The base address of the linear mapping of all physical
|
|
|
|
|
/// memory in the kernel address space.
|
|
|
|
|
pub const LINEAR_MAPPING_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000 << ADDR_WIDTH_SHIFT;
|
|
|
|
|
pub const LINEAR_MAPPING_VADDR_RANGE: Range<Vaddr> = LINEAR_MAPPING_BASE_VADDR..VMALLOC_BASE_VADDR;
|
2024-03-25 05:25:32 +00:00
|
|
|
|
|
|
|
|
/// Convert physical address to virtual address using offset, only available inside aster-frame
|
2024-05-15 05:41:30 +00:00
|
|
|
pub fn paddr_to_vaddr(pa: Paddr) -> usize {
|
|
|
|
|
debug_assert!(pa < VMALLOC_BASE_VADDR - LINEAR_MAPPING_BASE_VADDR);
|
2024-03-25 05:25:32 +00:00
|
|
|
pa + LINEAR_MAPPING_BASE_VADDR
|
|
|
|
|
}
|
|
|
|
|
|
2024-06-12 08:55:59 +00:00
|
|
|
/// The boot page table instance.
|
|
|
|
|
///
|
|
|
|
|
/// It is used in the initialization phase before [`KERNEL_PAGE_TABLE`] is activated.
|
|
|
|
|
/// Since we want dropping the boot page table unsafe, it is wrapped in a [`ManuallyDrop`].
|
|
|
|
|
pub static BOOT_PAGE_TABLE: SpinLock<Option<ManuallyDrop<BootPageTable>>> = SpinLock::new(None);
|
|
|
|
|
|
|
|
|
|
/// The kernel page table instance.
|
|
|
|
|
///
|
|
|
|
|
/// It manages the kernel mapping of all address spaces by sharing the kernel part. And it
|
|
|
|
|
/// is unlikely to be activated.
|
2024-05-04 03:02:49 +00:00
|
|
|
pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelMode, PageTableEntry, PagingConsts>> =
|
2024-04-22 07:05:50 +00:00
|
|
|
Once::new();
|
2024-03-25 05:25:32 +00:00
|
|
|
|
2024-06-12 08:55:59 +00:00
|
|
|
/// Initializes the boot page table.
|
|
|
|
|
pub(crate) fn init_boot_page_table() {
|
|
|
|
|
let boot_pt = BootPageTable::from_current_pt();
|
|
|
|
|
*BOOT_PAGE_TABLE.lock() = Some(ManuallyDrop::new(boot_pt));
|
|
|
|
|
}
|
|
|
|
|
|
2024-05-30 11:25:58 +00:00
|
|
|
/// Initializes the kernel page table.
|
2024-03-25 05:25:32 +00:00
|
|
|
///
|
|
|
|
|
/// This function should be called after:
|
|
|
|
|
/// - the page allocator and the heap allocator are initialized;
|
|
|
|
|
/// - the memory regions are initialized.
|
|
|
|
|
///
|
|
|
|
|
/// This function should be called before:
|
|
|
|
|
/// - any initializer that modifies the kernel page table.
|
2024-06-12 07:28:38 +00:00
|
|
|
pub fn init_kernel_page_table(meta_pages: Vec<Range<Paddr>>) {
|
2024-05-26 16:25:59 +00:00
|
|
|
info!("Initializing the kernel page table");
|
2024-05-15 05:41:30 +00:00
|
|
|
|
2024-05-26 16:25:59 +00:00
|
|
|
let regions = crate::boot::memory_regions();
|
|
|
|
|
let phys_mem_cap = regions.iter().map(|r| r.base() + r.len()).max().unwrap();
|
2024-05-15 05:41:30 +00:00
|
|
|
|
2024-05-30 11:25:58 +00:00
|
|
|
// Start to initialize the kernel page table.
|
2024-05-15 05:41:30 +00:00
|
|
|
let kpt = PageTable::<KernelMode>::empty();
|
|
|
|
|
|
|
|
|
|
// Make shared the page tables mapped by the root table in the kernel space.
|
|
|
|
|
{
|
|
|
|
|
let pte_index_max = nr_subpage_per_huge::<PagingConsts>();
|
|
|
|
|
kpt.make_shared_tables(pte_index_max / 2..pte_index_max);
|
|
|
|
|
}
|
2024-05-05 12:51:38 +00:00
|
|
|
|
2024-03-25 05:25:32 +00:00
|
|
|
// Do linear mappings for the kernel.
|
2024-05-05 12:51:38 +00:00
|
|
|
{
|
2024-05-15 05:41:30 +00:00
|
|
|
let from = LINEAR_MAPPING_BASE_VADDR..LINEAR_MAPPING_BASE_VADDR + phys_mem_cap;
|
|
|
|
|
let to = 0..phys_mem_cap;
|
2024-05-05 12:51:38 +00:00
|
|
|
let prop = PageProperty {
|
|
|
|
|
flags: PageFlags::RW,
|
|
|
|
|
cache: CachePolicy::Writeback,
|
|
|
|
|
priv_flags: PrivilegedPageFlags::GLOBAL,
|
|
|
|
|
};
|
2024-05-21 12:07:26 +00:00
|
|
|
// SAFETY: we are doing the linear mapping for the kernel.
|
2024-05-05 12:51:38 +00:00
|
|
|
unsafe {
|
|
|
|
|
kpt.map(&from, &to, prop).unwrap();
|
2024-03-25 05:25:32 +00:00
|
|
|
}
|
|
|
|
|
}
|
2024-05-05 12:51:38 +00:00
|
|
|
|
2024-05-15 05:41:30 +00:00
|
|
|
// Map the metadata pages.
|
|
|
|
|
{
|
2024-05-26 16:25:59 +00:00
|
|
|
let start_va = mapping::page_to_meta::<PagingConsts>(0);
|
|
|
|
|
let from = start_va..start_va + meta_pages.len() * PAGE_SIZE;
|
2024-05-15 05:41:30 +00:00
|
|
|
let prop = PageProperty {
|
|
|
|
|
flags: PageFlags::RW,
|
|
|
|
|
cache: CachePolicy::Writeback,
|
|
|
|
|
priv_flags: PrivilegedPageFlags::GLOBAL,
|
|
|
|
|
};
|
|
|
|
|
let mut cursor = kpt.cursor_mut(&from).unwrap();
|
2024-05-26 16:25:59 +00:00
|
|
|
for frame in meta_pages {
|
2024-05-16 16:42:36 +00:00
|
|
|
// SAFETY: we are doing the metadata mappings for the kernel.
|
2024-05-15 05:41:30 +00:00
|
|
|
unsafe {
|
2024-05-26 16:25:59 +00:00
|
|
|
cursor.map_pa(&frame, prop);
|
2024-05-15 05:41:30 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2024-03-25 05:25:32 +00:00
|
|
|
// Map for the I/O area.
|
|
|
|
|
// TODO: we need to have an allocator to allocate kernel space for
|
|
|
|
|
// the I/O areas, rather than doing it using the linear mappings.
|
2024-05-05 12:51:38 +00:00
|
|
|
{
|
|
|
|
|
let to = 0x8_0000_0000..0x9_0000_0000;
|
|
|
|
|
let from = LINEAR_MAPPING_BASE_VADDR + to.start..LINEAR_MAPPING_BASE_VADDR + to.end;
|
|
|
|
|
let prop = PageProperty {
|
|
|
|
|
flags: PageFlags::RW,
|
|
|
|
|
cache: CachePolicy::Uncacheable,
|
|
|
|
|
priv_flags: PrivilegedPageFlags::GLOBAL,
|
|
|
|
|
};
|
2024-05-21 12:07:26 +00:00
|
|
|
// SAFETY: we are doing I/O mappings for the kernel.
|
2024-05-05 12:51:38 +00:00
|
|
|
unsafe {
|
|
|
|
|
kpt.map(&from, &to, prop).unwrap();
|
|
|
|
|
}
|
2024-03-25 05:25:32 +00:00
|
|
|
}
|
2024-05-05 12:51:38 +00:00
|
|
|
|
2024-03-25 05:25:32 +00:00
|
|
|
// Map for the kernel code itself.
|
|
|
|
|
// TODO: set separated permissions for each segments in the kernel.
|
2024-05-05 12:51:38 +00:00
|
|
|
{
|
|
|
|
|
let region = regions
|
|
|
|
|
.iter()
|
|
|
|
|
.find(|r| r.typ() == MemoryRegionType::Kernel)
|
|
|
|
|
.unwrap();
|
|
|
|
|
let offset = kernel_loaded_offset();
|
|
|
|
|
let to =
|
|
|
|
|
region.base().align_down(PAGE_SIZE)..(region.base() + region.len()).align_up(PAGE_SIZE);
|
|
|
|
|
let from = to.start + offset..to.end + offset;
|
|
|
|
|
let prop = PageProperty {
|
|
|
|
|
flags: PageFlags::RWX,
|
|
|
|
|
cache: CachePolicy::Writeback,
|
|
|
|
|
priv_flags: PrivilegedPageFlags::GLOBAL,
|
|
|
|
|
};
|
2024-05-16 16:42:36 +00:00
|
|
|
let mut cursor = kpt.cursor_mut(&from).unwrap();
|
|
|
|
|
for frame_paddr in to.step_by(PAGE_SIZE) {
|
2024-05-27 09:10:09 +00:00
|
|
|
let page = Page::<KernelMeta>::from_unused(frame_paddr);
|
|
|
|
|
let paddr = page.into_raw();
|
2024-05-16 16:42:36 +00:00
|
|
|
// SAFETY: we are doing mappings for the kernel.
|
|
|
|
|
unsafe {
|
2024-05-26 16:25:59 +00:00
|
|
|
cursor.map_pa(&(paddr..paddr + PAGE_SIZE), prop);
|
2024-05-16 16:42:36 +00:00
|
|
|
}
|
2024-05-05 12:51:38 +00:00
|
|
|
}
|
2024-03-25 05:25:32 +00:00
|
|
|
}
|
2024-05-05 12:51:38 +00:00
|
|
|
|
2024-06-12 07:28:38 +00:00
|
|
|
KERNEL_PAGE_TABLE.call_once(|| kpt);
|
|
|
|
|
}
|
|
|
|
|
|
2024-06-12 08:55:59 +00:00
|
|
|
pub fn activate_kernel_page_table() {
|
2024-06-12 07:28:38 +00:00
|
|
|
let kpt = KERNEL_PAGE_TABLE
|
|
|
|
|
.get()
|
|
|
|
|
.expect("The kernel page table is not initialized yet");
|
2024-05-16 16:42:36 +00:00
|
|
|
// SAFETY: the kernel page table is initialized properly.
|
2024-05-15 05:41:30 +00:00
|
|
|
unsafe {
|
2024-06-03 11:28:31 +00:00
|
|
|
kpt.first_activate_unchecked();
|
2024-05-15 05:41:30 +00:00
|
|
|
crate::arch::mm::tlb_flush_all_including_global();
|
|
|
|
|
}
|
2024-06-12 08:55:59 +00:00
|
|
|
|
|
|
|
|
// SAFETY: the boot page table is OK to be dropped now since
|
2024-06-03 11:28:31 +00:00
|
|
|
// the kernel page table is activated.
|
2024-06-12 08:55:59 +00:00
|
|
|
let mut boot_pt = BOOT_PAGE_TABLE.lock().take().unwrap();
|
|
|
|
|
unsafe { ManuallyDrop::drop(&mut boot_pt) };
|
2024-05-15 05:41:30 +00:00
|
|
|
}
|