Introduce kernel virtual memory allocation for kernel stack

Ni Shirong 2024-09-24 06:09:15 +00:00 committed by Tate, Hongliang Tian
parent 03906513aa
commit 29eb37c07c
8 changed files with 270 additions and 53 deletions
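
This commit moves the kernel stack off the linear mapping and onto pages mapped into a dedicated, tracked kernel virtual area (Kva), leaving unmapped guard pages around the stack. Condensed from the kernel_stack.rs changes below, a sketch of the in-kernel call path (not runnable on its own):

    let mut kva = Kva::new(KERNEL_STACK_SIZE + 4 * PAGE_SIZE); // reserve virtual space, including guard gaps
    let mapped_start = kva.range().start + 2 * PAGE_SIZE;
    let mapped_end = mapped_start + KERNEL_STACK_SIZE;
    let pages = allocator::alloc(KERNEL_STACK_SIZE, |_| KernelStackMeta::default()).unwrap();
    // SAFETY: the freshly reserved range is not used by anything else.
    unsafe { kva.map_pages(mapped_start..mapped_end, pages) }; // the remaining 4 pages stay unmapped as guards
    // `mapped_end` serves as the stack top (see end_vaddr/set_stack_pointer below);
    // dropping `kva` unmaps the pages and returns the range to KVA_ALLOCATOR.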


@ -199,6 +199,7 @@ impl From<ostd::Error> for Error {
ostd::Error::PageFault => Error::new(Errno::EFAULT),
ostd::Error::Overflow => Error::new(Errno::EOVERFLOW),
ostd::Error::MapAlreadyMappedVaddr => Error::new(Errno::EINVAL),
ostd::Error::KvaAllocError => Error::new(Errno::ENOMEM),
}
}
}


@ -21,6 +21,8 @@ pub enum Error {
Overflow,
/// Memory mapping already exists for the given virtual address.
MapAlreadyMappedVaddr,
/// Error when allocating kernel virtual memory.
KvaAllocError,
}
impl From<PageTableError> for Error {

ostd/src/mm/kspace/kva.rs (new file, 213 lines)

@ -0,0 +1,213 @@
// SPDX-License-Identifier: MPL-2.0
//! Kernel virtual memory allocation
use alloc::{collections::BTreeMap, vec::Vec};
use core::ops::{DerefMut, Range};
use align_ext::AlignExt;
use super::{KERNEL_PAGE_TABLE, TRACKED_MAPPED_PAGES_RANGE};
use crate::{
arch::mm::tlb_flush_addr_range,
mm::{
page::{
meta::{PageMeta, PageUsage},
Page,
},
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
page_table::PageTableItem,
Vaddr, PAGE_SIZE,
},
sync::SpinLock,
Error, Result,
};
pub(crate) use lazy_static::lazy_static;
pub struct KvaFreeNode {
block: Range<Vaddr>,
}
impl KvaFreeNode {
pub(crate) const fn new(range: Range<Vaddr>) -> Self {
Self { block: range }
}
}
pub struct VirtAddrAllocator {
freelist: BTreeMap<Vaddr, KvaFreeNode>,
}
impl VirtAddrAllocator {
fn new(range: Range<Vaddr>) -> Self {
let mut freelist: BTreeMap<Vaddr, KvaFreeNode> = BTreeMap::new();
freelist.insert(range.start, KvaFreeNode::new(range));
Self { freelist }
}
/// Allocates a kernel virtual area.
///
/// This is currently implemented with a simple FIRST-FIT algorithm.
fn alloc(&mut self, size: usize) -> Result<Range<Vaddr>> {
let mut allocate_range = None;
let mut to_remove = None;
for (key, value) in self.freelist.iter() {
if value.block.end - value.block.start >= size {
allocate_range = Some((value.block.end - size)..value.block.end);
to_remove = Some(*key);
break;
}
}
if let Some(key) = to_remove {
if let Some(freenode) = self.freelist.get_mut(&key) {
if freenode.block.end - size == freenode.block.start {
self.freelist.remove(&key);
} else {
freenode.block.end -= size;
}
}
}
if let Some(range) = allocate_range {
Ok(range)
} else {
Err(Error::KvaAllocError)
}
}
/// Frees a kernel virtual area.
fn free(&mut self, range: Range<Vaddr>) {
// 1. Get the previous free block and check whether it is contiguous with this one:
//    - if contiguous, merge this area into that free block;
//    - if not, insert this area as a new free block.
// 2. Check whether the merged block is contiguous with the next free block; if so, merge them too.
self.freelist.insert(range.start, KvaFreeNode::new(range));
todo!();
}
}
lazy_static! {
pub static ref KVA_ALLOCATOR: SpinLock<VirtAddrAllocator> = SpinLock::new(VirtAddrAllocator::new(TRACKED_MAPPED_PAGES_RANGE));
}
#[derive(Debug)]
pub struct Kva(Range<Vaddr>);
impl Kva {
// static KVA_FREELIST_2: SpinLock<BTreeMap<Vaddr, KvaFreeNode>> = SpinLock::new(BTreeMap::new());
pub fn new(size: usize) -> Self {
let mut lock_guard = KVA_ALLOCATOR.lock();
let range = lock_guard.deref_mut().alloc(size).unwrap();
Kva(range)
}
pub fn start(&self) -> Vaddr {
self.0.start
}
pub fn end(&self) -> Vaddr {
self.0.end
}
pub fn range(&self) -> Range<Vaddr> {
self.0.start..self.0.end
}
/// Maps pages into the kernel virtual area.
///
/// # Safety
///
/// The caller must ensure that neither the mapped pages nor the range to be used
/// violates the memory safety of kernel objects.
pub unsafe fn map_pages<T: PageMeta>(&mut self, range: Range<Vaddr>, pages: Vec<Page<T>>) {
assert!(range.len() == pages.len() * PAGE_SIZE);
assert!(self.start() <= range.start && self.end() >= range.end);
let page_table = KERNEL_PAGE_TABLE.get().unwrap();
let prop = PageProperty {
flags: PageFlags::RW,
cache: CachePolicy::Writeback,
priv_flags: PrivilegedPageFlags::GLOBAL,
};
let mut cursor = page_table.cursor_mut(&range).unwrap();
for page in pages.into_iter() {
cursor.map(page.into(), prop);
}
tlb_flush_addr_range(&range);
}
/// Returns the usage type of the page mapped at the given virtual address `addr`.
///
/// # Panics
///
/// This function panics if:
/// * the address is not mapped (`NotMapped`);
/// * the address is mapped to a `MappedUntracked` page.
pub fn get_page_type(&self, addr: Vaddr) -> PageUsage {
assert!(self.start() <= addr && self.end() >= addr);
let start = addr.align_down(PAGE_SIZE);
let vaddr = start..start + PAGE_SIZE;
let page_table = KERNEL_PAGE_TABLE.get().unwrap();
let mut cursor = page_table.cursor(&vaddr).unwrap();
let query_result = cursor.query().unwrap();
match query_result {
PageTableItem::Mapped {
va: _,
page,
prop: _,
} => page.usage(),
_ => {
panic!(
"Unexpected query result: Expected 'Mapped', found '{:?}'",
query_result
);
}
}
}
/// Gets the mapped page at the given virtual address `addr`.
///
/// # Panics
///
/// This function panics if:
/// * the provided page type doesn't match the actual mapped one;
/// * the address is not mapped (`NotMapped`);
/// * the address is mapped to a `MappedUntracked` page.
pub fn get_page<T: PageMeta>(&self, addr: Vaddr) -> Result<Page<T>> {
assert!(self.start() <= addr && self.end() >= addr);
let start = addr.align_down(PAGE_SIZE);
let vaddr = start..start + PAGE_SIZE;
let page_table = KERNEL_PAGE_TABLE.get().unwrap();
let mut cursor = page_table.cursor(&vaddr).unwrap();
let query_result = cursor.query().unwrap();
match query_result {
PageTableItem::Mapped {
va: _,
page,
prop: _,
} => {
let result = Page::<T>::try_from(page);
if let Ok(page) = result {
Ok(page)
} else {
panic!("the provided page type doesn't match the actual mapped one");
}
}
_ => {
panic!(
"Unexpected query result: Expected 'Mapped', found '{:?}'",
query_result
);
}
}
}
}
impl Drop for Kva {
fn drop(&mut self) {
// 1. unmap all mapped pages.
let page_table = KERNEL_PAGE_TABLE.get().unwrap();
let range = self.start()..self.end();
let mut cursor = page_table.cursor_mut(&range).unwrap();
unsafe {
cursor.unmap(range.len());
}
tlb_flush_addr_range(&range);
// 2. free the virtual block
let mut lock_guard = KVA_ALLOCATOR.lock();
lock_guard.deref_mut().free(range);
}
}
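
For reference, a minimal, self-contained model of the allocator above: alloc is first-fit and carves the allocation from the high end of the first sufficiently large free block, and free coalesces with adjacent free blocks (the part kva.rs still leaves as a todo!()). This is an illustration in plain std Rust, not the OSTD implementation:

    use std::collections::BTreeMap;
    use std::ops::Range;

    /// Free blocks keyed by start address, mirroring `VirtAddrAllocator`'s freelist.
    struct Model {
        freelist: BTreeMap<usize, Range<usize>>,
    }

    impl Model {
        fn new(range: Range<usize>) -> Self {
            let mut freelist = BTreeMap::new();
            freelist.insert(range.start, range);
            Self { freelist }
        }

        /// First fit: take `size` bytes from the end of the first block that is large enough.
        fn alloc(&mut self, size: usize) -> Option<Range<usize>> {
            let key = *self.freelist.iter().find(|(_, b)| b.end - b.start >= size)?.0;
            let block = self.freelist.get_mut(&key).unwrap();
            let allocated = (block.end - size)..block.end;
            block.end -= size;
            if block.start == block.end {
                self.freelist.remove(&key);
            }
            Some(allocated)
        }

        /// Free with coalescing: merge with the previous and next free blocks when contiguous.
        fn free(&mut self, mut range: Range<usize>) {
            // Merge with the previous block if it ends exactly where `range` starts.
            let prev_key = self
                .freelist
                .range(..range.start)
                .next_back()
                .filter(|(_, prev)| prev.end == range.start)
                .map(|(&k, _)| k);
            if let Some(k) = prev_key {
                range.start = k;
                self.freelist.remove(&k);
            }
            // Merge with the next block if it starts exactly where `range` ends.
            if let Some(next) = self.freelist.remove(&range.end) {
                range.end = next.end;
            }
            self.freelist.insert(range.start, range);
        }
    }

    fn main() {
        let mut m = Model::new(0x1000..0x9000);
        let a = m.alloc(0x2000).unwrap(); // 0x7000..0x9000, carved from the block's end
        let b = m.alloc(0x1000).unwrap(); // 0x6000..0x7000
        m.free(a);
        m.free(b); // coalesces everything back into a single 0x1000..0x9000 block
        assert_eq!(m.freelist.len(), 1);
    }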


@ -17,17 +17,17 @@
//! +-+ <- 0xffff_ffff_8000_0000
//! | |
//! | | Unused hole.
//! +-+ <- 0xffff_ff00_0000_0000
//! | | For frame metadata, 1 TiB.
//! | | Mapped frames are tracked with handles.
//! +-+ <- 0xffff_fe00_0000_0000
//! | | For vm alloc/io mappings, 1 TiB.
//! | | Mapped frames are tracked with handles.
//! +-+ <- 0xffff_fd00_0000_0000
//! +-+ <- 0xffff_e100_0000_0000
//! | | For frame metadata, 1 TiB. Mapped frames are untracked.
//! +-+ <- 0xffff_e000_0000_0000
//! | | For [`kva::Kva`], 16 TiB. Mapped pages are tracked with handles.
//! +-+ <- 0xffff_d000_0000_0000
//! | | For [`kva::Kva`], 16 TiB. Mapped pages are untracked.
//! +-+ <- the middle of the higher half (0xffff_c000_0000_0000)
//! | |
//! | |
//! | |
//! | | For linear mappings.
//! | | For linear mappings, 64 TiB.
//! | | Mapped physical addresses are untracked.
//! | |
//! | |
@ -38,6 +38,8 @@
//! If the address width is (according to [`crate::arch::mm::PagingConsts`])
//! 39 bits or 57 bits, the memory space just adjusts proportionally.
pub(crate) mod kva;
use alloc::vec::Vec;
use core::ops::Range;
@ -85,13 +87,17 @@ const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_8000_0000 << ADDR_WIDTH_SHIFT;
#[cfg(target_arch = "riscv64")]
const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_0000_0000 << ADDR_WIDTH_SHIFT;
const FRAME_METADATA_CAP_VADDR: Vaddr = 0xffff_ff00_0000_0000 << ADDR_WIDTH_SHIFT;
const FRAME_METADATA_BASE_VADDR: Vaddr = 0xffff_fe00_0000_0000 << ADDR_WIDTH_SHIFT;
const FRAME_METADATA_CAP_VADDR: Vaddr = 0xffff_e100_0000_0000 << ADDR_WIDTH_SHIFT;
const FRAME_METADATA_BASE_VADDR: Vaddr = 0xffff_e000_0000_0000 << ADDR_WIDTH_SHIFT;
pub(in crate::mm) const FRAME_METADATA_RANGE: Range<Vaddr> =
FRAME_METADATA_BASE_VADDR..FRAME_METADATA_CAP_VADDR;
const VMALLOC_BASE_VADDR: Vaddr = 0xffff_fd00_0000_0000 << ADDR_WIDTH_SHIFT;
pub const VMALLOC_VADDR_RANGE: Range<Vaddr> = VMALLOC_BASE_VADDR..FRAME_METADATA_BASE_VADDR;
const TRACKED_MAPPED_PAGES_BASE_VADDR: Vaddr = 0xffff_d000_0000_0000 << ADDR_WIDTH_SHIFT;
pub const TRACKED_MAPPED_PAGES_RANGE: Range<Vaddr> =
TRACKED_MAPPED_PAGES_BASE_VADDR..FRAME_METADATA_BASE_VADDR;
const VMALLOC_BASE_VADDR: Vaddr = 0xffff_c000_0000_0000 << ADDR_WIDTH_SHIFT;
pub const VMALLOC_VADDR_RANGE: Range<Vaddr> = VMALLOC_BASE_VADDR..TRACKED_MAPPED_PAGES_BASE_VADDR;
/// The base address of the linear mapping of all physical
/// memory in the kernel address space.
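
A quick arithmetic check of the sizes stated in the updated layout comment above, assuming the default 48-bit address width (so ADDR_WIDTH_SHIFT contributes nothing) and a linear-mapping base at the bottom of the higher half (0xffff_8000_0000_0000); illustration only:

    fn main() {
        const TIB: usize = 1 << 40;
        // Frame metadata, 1 TiB: 0xffff_e000_0000_0000..0xffff_e100_0000_0000
        assert_eq!((0xffff_e100_0000_0000usize - 0xffff_e000_0000_0000) / TIB, 1);
        // Tracked Kva pages (TRACKED_MAPPED_PAGES_RANGE), 16 TiB: 0xffff_d000_0000_0000..0xffff_e000_0000_0000
        assert_eq!((0xffff_e000_0000_0000usize - 0xffff_d000_0000_0000) / TIB, 16);
        // Untracked vm alloc/io mappings (VMALLOC_VADDR_RANGE), 16 TiB: 0xffff_c000_0000_0000..0xffff_d000_0000_0000
        assert_eq!((0xffff_d000_0000_0000usize - 0xffff_c000_0000_0000) / TIB, 16);
        // Linear mapping, 64 TiB, below the middle of the higher half.
        assert_eq!((0xffff_c000_0000_0000usize - 0xffff_8000_0000_0000) / TIB, 64);
    }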


@ -80,6 +80,9 @@ pub enum PageUsage {
Meta = 65,
/// The page stores the kernel such as kernel code, data, etc.
Kernel = 66,
/// The page stores data for kernel stack.
KernelStack = 67,
}
#[repr(C)]
@ -268,6 +271,16 @@ impl PageMeta for KernelMeta {
}
}
#[derive(Debug, Default)]
#[repr(C)]
pub struct KernelStackMeta {}
impl Sealed for KernelStackMeta {}
impl PageMeta for KernelStackMeta {
const USAGE: PageUsage = PageUsage::KernelStack;
fn on_drop(_page: &mut Page<Self>) {}
}
// ======== End of all the specific metadata structures definitions ===========
/// Initializes the metadata of all physical pages.


@ -318,6 +318,9 @@ impl Drop for DynPage {
PageUsage::PageTable => {
meta::drop_as_last::<meta::PageTablePageMeta>(self.ptr);
}
PageUsage::KernelStack => {
meta::drop_as_last::<meta::KernelStackMeta>(self.ptr);
}
// The following pages don't have metadata and can't be dropped.
PageUsage::Unused
| PageUsage::Reserved


@ -1,7 +1,11 @@
// SPDX-License-Identifier: MPL-2.0
use crate::{
mm::{kspace::KERNEL_PAGE_TABLE, FrameAllocOptions, Paddr, PageFlags, Segment, PAGE_SIZE},
mm::{
kspace::kva::Kva,
page::{allocator, meta::KernelStackMeta},
PAGE_SIZE,
},
prelude::*,
};
@ -19,59 +23,35 @@ pub static STACK_SIZE_IN_PAGES: u32 = parse_u32_or_default(
/// The default kernel stack size of a task, specified in pages.
pub const DEFAULT_STACK_SIZE_IN_PAGES: u32 = 128;
pub static KERNEL_STACK_SIZE: usize = STACK_SIZE_IN_PAGES as usize * PAGE_SIZE;
#[derive(Debug)]
pub struct KernelStack {
segment: Segment,
kva: Kva,
end_vaddr: Vaddr,
has_guard_page: bool,
}
impl KernelStack {
/// Generates a kernel stack with a guard page.
/// An additional page is allocated and regarded as a guard page, which should not be accessed.
pub fn new_with_guard_page() -> Result<Self> {
let stack_segment =
FrameAllocOptions::new(STACK_SIZE_IN_PAGES as usize + 1).alloc_contiguous()?;
// FIXME: modifying the linear mapping is bad.
let page_table = KERNEL_PAGE_TABLE.get().unwrap();
let guard_page_vaddr = {
let guard_page_paddr = stack_segment.start_paddr();
crate::mm::paddr_to_vaddr(guard_page_paddr)
};
// SAFETY: the segment allocated is not used by others so we can protect it.
let mut new_kva = Kva::new(KERNEL_STACK_SIZE + 4 * PAGE_SIZE);
let mapped_start = new_kva.range().start + 2 * PAGE_SIZE;
let mapped_end = mapped_start + KERNEL_STACK_SIZE;
let pages = allocator::alloc(KERNEL_STACK_SIZE, |_| KernelStackMeta::default()).unwrap();
unsafe {
let vaddr_range = guard_page_vaddr..guard_page_vaddr + PAGE_SIZE;
page_table
.protect_flush_tlb(&vaddr_range, |p| p.flags -= PageFlags::RW)
.unwrap();
new_kva.map_pages(mapped_start..mapped_end, pages);
}
Ok(Self {
segment: stack_segment,
kva: new_kva,
end_vaddr: mapped_end,
has_guard_page: true,
})
}
pub fn end_paddr(&self) -> Paddr {
self.segment.end_paddr()
}
}
impl Drop for KernelStack {
fn drop(&mut self) {
if self.has_guard_page {
// FIXME: modifying the linear mapping is bad.
let page_table = KERNEL_PAGE_TABLE.get().unwrap();
let guard_page_vaddr = {
let guard_page_paddr = self.segment.start_paddr();
crate::mm::paddr_to_vaddr(guard_page_paddr)
};
// SAFETY: the segment allocated is not used by others so we can protect it.
unsafe {
let vaddr_range = guard_page_vaddr..guard_page_vaddr + PAGE_SIZE;
page_table
.protect_flush_tlb(&vaddr_range, |p| p.flags |= PageFlags::RW)
.unwrap();
}
}
pub fn end_vaddr(&self) -> Vaddr {
self.end_vaddr
}
}
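
The guard pages in new_with_guard_page come from the 4 extra pages of virtual space that are reserved but never mapped: two below the stack bottom (so an overflow faults instead of corrupting other memory) and two above the stack top. A standalone sketch of the arithmetic, with a hypothetical Kva start address:

    fn main() {
        const PAGE_SIZE: usize = 4096;
        const KERNEL_STACK_SIZE: usize = 128 * PAGE_SIZE; // DEFAULT_STACK_SIZE_IN_PAGES pages

        let kva_start = 0xffff_d000_0000_0000usize; // hypothetical range start returned by Kva::new
        let kva_end = kva_start + KERNEL_STACK_SIZE + 4 * PAGE_SIZE;
        let mapped_start = kva_start + 2 * PAGE_SIZE;
        let mapped_end = mapped_start + KERNEL_STACK_SIZE; // end_vaddr, the stack top

        // Two unmapped guard pages below the stack bottom ...
        assert_eq!(mapped_start - kva_start, 2 * PAGE_SIZE);
        // ... and two above the stack top.
        assert_eq!(kva_end - mapped_end, 2 * PAGE_SIZE);
    }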


@ -178,8 +178,7 @@ impl TaskOptions {
// to at least 16 bytes. And a larger alignment is needed if larger arguments
// are passed to the function. The `kernel_task_entry` function does not
// have any arguments, so we only need to align the stack pointer to 16 bytes.
ctx.get_mut()
.set_stack_pointer(crate::mm::paddr_to_vaddr(kstack.end_paddr() - 16));
ctx.get_mut().set_stack_pointer(kstack.end_vaddr() - 16);
let new_task = Task {
func: SyncUnsafeCell::new(self.func),
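
The 16-byte alignment noted in the comment above holds because end_vaddr() is page-aligned (the Kva range and KERNEL_STACK_SIZE are both multiples of PAGE_SIZE); a trivial check with a hypothetical stack top:

    fn main() {
        const PAGE_SIZE: usize = 4096;
        let end_vaddr = 0xffff_d000_0020_0000usize; // hypothetical, page-aligned stack top
        assert_eq!(end_vaddr % PAGE_SIZE, 0);
        let sp = end_vaddr - 16; // initial stack pointer for kernel_task_entry
        assert_eq!(sp % 16, 0);
    }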