finish virtual memory and part of task

Yuke Peng 2022-08-23 02:50:07 -07:00
parent 2f9dd04259
commit cecf2dac98
37 changed files with 1550 additions and 101 deletions

src/Cargo.lock (generated, 47 lines changed)

@ -91,7 +91,8 @@ dependencies = [
"buddy_system_allocator",
"font8x8",
"lazy_static",
"spin 0.5.2",
"linked_list_allocator",
"spin 0.9.4",
"volatile",
"x86_64",
]
@ -102,6 +103,7 @@ version = "0.1.0"
dependencies = [
"intrusive-collections",
"kxos-frame",
"spin 0.9.4",
"xmas-elf",
]
@ -114,6 +116,15 @@ dependencies = [
"spin 0.5.2",
]
[[package]]
name = "linked_list_allocator"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "549ce1740e46b291953c4340adcd74c59bcf4308f4cac050fd33ba91b7168f4a"
dependencies = [
"spinning_top",
]
[[package]]
name = "locate-cargo-manifest"
version = "0.2.2"
@ -123,6 +134,16 @@ dependencies = [
"json",
]
[[package]]
name = "lock_api"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "memoffset"
version = "0.5.6"
@ -138,6 +159,12 @@ version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8"
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "spin"
version = "0.5.2"
@ -150,6 +177,24 @@ version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13287b4da9d1207a4f4929ac390916d64eacfe236a487e9a9f5b3be392be5162"
[[package]]
name = "spin"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09"
dependencies = [
"lock_api",
]
[[package]]
name = "spinning_top"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75adad84ee84b521fb2cca2d4fd0f1dab1d8d026bda3c5bea4ca63b5f9f9293c"
dependencies = [
"lock_api",
]
[[package]]
name = "volatile"
version = "0.4.5"


@ -20,4 +20,8 @@ members = [
panic = "abort"
[profile.release]
panic = "abort"
panic = "abort"
[package.metadata.bootloader]
map-physical-memory = true
physical-memory-offset = "0xFFFF800000000000"


@ -8,12 +8,15 @@ edition = "2021"
[dependencies]
bitflags = "1.3"
x86_64 = "0.14.2"
spin = "0.5.2"
spin = "0.9.4"
volatile = {version="0.4.5", features = ["unstable"] }
buddy_system_allocator = "0.6"
bootloader = "0.10.12"
linked_list_allocator = "0.9.0"
bootloader = {version="0.10.12"}
font8x8 = { version = "0.2.5", default-features = false, features = ["unicode"]}
[dependencies.lazy_static]
version = "1.0"
features = ["spin_no_std"]


@ -1,5 +0,0 @@
mod buddy_system_allocator;
pub fn init() {
buddy_system_allocator::init_heap();
}


@ -4,6 +4,10 @@ pub const USER_STACK_SIZE: usize = 4096 * 2;
pub const KERNEL_STACK_SIZE: usize = 4096 * 2;
pub const KERNEL_HEAP_SIZE: usize = 0x20_0000;
pub const KERNEL_OFFSET: usize = 0xffffff00_00000000;
pub const PHYS_OFFSET: usize = 0xFFFF800000000000;
pub const ENTRY_COUNT: usize = 512;
pub const PAGE_SIZE: usize = 0x1000;
pub const PAGE_SIZE_BITS: usize = 0xc;
pub const MEM_START: usize = 0x8000_0000;
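Together with the bootloader metadata above (map-physical-memory at offset 0xFFFF800000000000), these constants give the kernel a direct map of all physical memory. A minimal sketch of the translation they enable, using the phys_to_virt/virt_to_phys helpers from the new mm::address module further down (illustrative only, assuming it runs inside kxos-frame):

// Any physical address is reachable at PHYS_OFFSET + pa through the direct map.
use crate::config::{PAGE_SIZE, PAGE_SIZE_BITS, PHYS_OFFSET};
use crate::mm::address::{phys_to_virt, virt_to_phys};

fn direct_map_example() {
    let pa: usize = 0x10_0000; // an arbitrary physical address (1 MiB)
    let va = phys_to_virt(pa);
    assert_eq!(va, PHYS_OFFSET + pa);
    assert_eq!(virt_to_phys(va), pa);
    assert_eq!(PAGE_SIZE, 1 << PAGE_SIZE_BITS); // 0x1000, i.e. 4 KiB pages
}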


@ -68,7 +68,8 @@ impl FpRegs {
//let buf = Aligned(unsafe { MaybeUninit::uninit().assume_init() });
//let is_valid = false;
//Self { buf, is_valid }
todo!("import aligned")
Self{is_valid:false}
// todo!("import aligned")
}
/// Save CPU's current floating pointer states into this instance.


@ -1,6 +1,5 @@
use crate::prelude::*;
use alloc::collections::{linked_list::CursorMut, LinkedList};
use lazy_static::lazy_static;
use spin::Mutex;
use x86_64::{


@ -4,12 +4,10 @@ pub mod framebuffer;
mod io_port;
mod irq;
use bootloader::BootInfo;
pub use self::io_port::IoPort;
pub use self::irq::{InterruptInformation, IrqCallbackHandle, IrqLine};
pub fn init(boot_info: &'static mut BootInfo) {
framebuffer::init(boot_info.framebuffer.as_mut().unwrap());
pub fn init(framebuffer: &'static mut bootloader::boot_info::FrameBuffer) {
framebuffer::init(framebuffer);
irq::init();
}


@ -4,50 +4,80 @@
#![allow(unused_variables)]
#![feature(negative_impls)]
#![feature(abi_x86_interrupt)]
#![feature(alloc_error_handler)]
#![feature(fn_traits)]
#![feature(linked_list_cursors)]
#![feature(const_maybe_uninit_zeroed)]
#![feature(alloc_error_handler)]
#![feature(core_intrinsics)]
extern crate alloc;
mod allocator;
pub mod config;
pub mod cpu;
pub mod device;
mod error;
pub mod log;
pub mod mm;
pub mod prelude;
pub mod sync;
pub mod task;
pub mod timer;
pub mod trap;
pub mod user;
mod util;
pub mod vm;
pub use self::error::Error;
use alloc::sync::Arc;
use bootloader::BootInfo;
use device::{InterruptInformation, IrqCallbackHandle, IrqLine};
use core::mem;
static mut STORE: Option<IrqCallbackHandle> = None;
pub use self::error::Error;
pub use self::sync::up::UPSafeCell;
use alloc::{boxed::Box, sync::Arc};
use bootloader::{boot_info::MemoryRegionKind, BootInfo};
use device::{InterruptInformation, IrqLine};
pub fn init(boot_info: &'static mut BootInfo) {
allocator::init();
device::init(boot_info);
device::init(boot_info.framebuffer.as_mut().unwrap());
device::framebuffer::WRITER.lock().as_mut().unwrap().clear();
println!(
"heap_value at {:x}",
boot_info.physical_memory_offset.into_option().unwrap()
);
let mut memory_init = false;
// memory
for region in boot_info.memory_regions.iter() {
if region.kind == MemoryRegionKind::Usable {
let start: u64 = region.start;
let size: u64 = region.end - region.start;
println!(
"[kernel] physical frames start = {:x}, size = {:x}",
start, size
);
mm::init(start, size);
memory_init = true;
break;
}
}
if !memory_init {
panic!("memory init failed");
}
// breakpoint
let breakpoint_irq: Arc<&IrqLine>;
unsafe {
breakpoint_irq = IrqLine::acquire(3);
}
let a = breakpoint_irq.on_active(breakpoint_handler);
let b = breakpoint_irq.on_active(breakpoint_handler);
unsafe {
STORE = Some(a);
}
x86_64::instructions::interrupts::int3(); // new
x86_64::instructions::interrupts::int3(); // breakpoint
let heap_value = Box::new(41);
println!("test");
println!("heap_value at {:p}", heap_value);
}
fn breakpoint_handler(interrupt_information: InterruptInformation) {
println!("EXCEPTION: BREAKPOINT\n{:#?}", interrupt_information);
}
#[inline(always)]
pub const fn zero<T>() -> T {
unsafe { mem::MaybeUninit::zeroed().assume_init() }
}


@ -0,0 +1,29 @@
ENTRY(_start)
KERNEL_BEGIN = 0xffffff0000000000;
SECTIONS {
. = KERNEL_BEGIN;
.rodata ALIGN(4K): {
*(.rodata .rodata.*)
}
.text ALIGN(4K): {
*(.text .text.*)
}
.data ALIGN(4K): {
*(.data .data.*)
*(.sdata .sdata.*)
}
.got ALIGN(4K): {
*(.got .got.*)
}
.bss ALIGN(4K): {
*(.bss .bss.*)
*(.sbss .sbss.*)
}
}


@ -1,11 +1,18 @@
use core::fmt::Arguments;
use crate::device::framebuffer::WRITER;
/// Print log message
/// This function should *NOT* be directly called.
/// Instead, print logs with macros.
#[doc(hidden)]
pub fn log_print(args: Arguments) {
todo!()
use core::fmt::Write;
use x86_64::instructions::interrupts;
interrupts::without_interrupts(|| {
WRITER.lock().as_mut().unwrap().write_fmt(args).unwrap();
});
}
/// This macro should not be directly called.


@ -0,0 +1,247 @@
use core::ops::{Add, AddAssign, Sub, SubAssign};
use alloc::fmt;
use crate::config::{PAGE_SIZE, PHYS_OFFSET};
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
#[repr(transparent)]
pub struct PhysAddr(pub usize);
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
#[repr(transparent)]
pub struct VirtAddr(pub usize);
pub const fn phys_to_virt(pa: usize) -> usize {
pa + PHYS_OFFSET
}
pub const fn virt_to_phys(va: usize) -> usize {
va - PHYS_OFFSET
}
impl PhysAddr {
pub const fn kvaddr(self) -> VirtAddr {
VirtAddr(phys_to_virt(self.0))
}
pub const fn align_down(self) -> Self {
Self(align_down(self.0))
}
pub const fn align_up(self) -> Self {
Self(align_up(self.0))
}
pub const fn page_offset(self) -> usize {
page_offset(self.0)
}
pub const fn is_aligned(self) -> bool {
is_aligned(self.0)
}
}
impl fmt::Debug for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("PhysAddr")
.field(&format_args!("{:#x}", self.0))
.finish()
}
}
impl fmt::Binary for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Binary::fmt(&self.0, f)
}
}
impl fmt::LowerHex for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::LowerHex::fmt(&self.0, f)
}
}
impl fmt::Octal for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Octal::fmt(&self.0, f)
}
}
impl fmt::UpperHex for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::UpperHex::fmt(&self.0, f)
}
}
impl fmt::Pointer for PhysAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&(self.0 as *const ()), f)
}
}
impl Add<usize> for PhysAddr {
type Output = Self;
#[inline]
fn add(self, rhs: usize) -> Self::Output {
PhysAddr(self.0 + rhs)
}
}
impl AddAssign<usize> for PhysAddr {
#[inline]
fn add_assign(&mut self, rhs: usize) {
*self = *self + rhs;
}
}
impl Sub<usize> for PhysAddr {
type Output = Self;
#[inline]
fn sub(self, rhs: usize) -> Self::Output {
PhysAddr(self.0 - rhs)
}
}
impl SubAssign<usize> for PhysAddr {
#[inline]
fn sub_assign(&mut self, rhs: usize) {
*self = *self - rhs;
}
}
impl Sub<PhysAddr> for PhysAddr {
type Output = u64;
#[inline]
fn sub(self, rhs: PhysAddr) -> Self::Output {
self.0.checked_sub(rhs.0).unwrap().try_into().unwrap()
}
}
impl VirtAddr {
pub const fn as_ptr(self) -> *mut u8 {
self.0 as _
}
pub const fn align_down(self) -> Self {
Self(align_down(self.0))
}
pub const fn align_up(self) -> Self {
Self(align_up(self.0))
}
pub const fn page_offset(self) -> usize {
page_offset(self.0)
}
pub const fn is_aligned(self) -> bool {
is_aligned(self.0)
}
}
impl VirtAddr {
pub fn get_bytes_array(&self) -> &'static mut [u8] {
unsafe { core::slice::from_raw_parts_mut(self.0 as *mut u8, 4096) }
}
pub fn get_ref<T>(&self) -> &'static T {
unsafe { (self.0 as *const T).as_ref().unwrap() }
}
pub fn get_mut<T>(&self) -> &'static mut T {
unsafe { (self.0 as *mut T).as_mut().unwrap() }
}
}
impl fmt::Debug for VirtAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("VirtAddr")
.field(&format_args!("{:#x}", self.0))
.finish()
}
}
impl fmt::Binary for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Binary::fmt(&self.0, f)
}
}
impl fmt::LowerHex for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::LowerHex::fmt(&self.0, f)
}
}
impl fmt::Octal for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Octal::fmt(&self.0, f)
}
}
impl fmt::UpperHex for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::UpperHex::fmt(&self.0, f)
}
}
impl fmt::Pointer for VirtAddr {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&(self.0 as *const ()), f)
}
}
impl Add<usize> for VirtAddr {
type Output = Self;
#[inline]
fn add(self, rhs: usize) -> Self::Output {
VirtAddr(self.0 + rhs)
}
}
impl AddAssign<usize> for VirtAddr {
#[inline]
fn add_assign(&mut self, rhs: usize) {
*self = *self + rhs;
}
}
impl Sub<usize> for VirtAddr {
type Output = Self;
#[inline]
fn sub(self, rhs: usize) -> Self::Output {
VirtAddr(self.0 - rhs)
}
}
impl SubAssign<usize> for VirtAddr {
#[inline]
fn sub_assign(&mut self, rhs: usize) {
*self = *self - rhs;
}
}
impl Sub<VirtAddr> for VirtAddr {
type Output = u64;
#[inline]
fn sub(self, rhs: VirtAddr) -> Self::Output {
self.0.checked_sub(rhs.0).unwrap().try_into().unwrap()
}
}
pub const fn align_down(p: usize) -> usize {
p & !(PAGE_SIZE - 1)
}
pub const fn align_up(p: usize) -> usize {
(p + PAGE_SIZE - 1) & !(PAGE_SIZE - 1)
}
pub const fn page_offset(p: usize) -> usize {
p & (PAGE_SIZE - 1)
}
pub const fn is_aligned(p: usize) -> bool {
page_offset(p) == 0
}
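The alignment helpers are plain bit arithmetic over PAGE_SIZE; a short worked example of what they return for a sample address (the values follow directly from the definitions above):

fn alignment_example() {
    // PAGE_SIZE = 0x1000
    assert_eq!(align_down(0x1234), 0x1000);
    assert_eq!(align_up(0x1234), 0x2000);
    assert_eq!(page_offset(0x1234), 0x234);
    assert!(is_aligned(0x2000) && !is_aligned(0x2001));

    // The same helpers are exposed on the typed wrappers.
    let va = VirtAddr(0x7fff_1234);
    assert_eq!(va.align_down(), VirtAddr(0x7fff_1000));
    assert_eq!(va.page_offset(), 0x234);
}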


@ -0,0 +1,94 @@
use alloc::vec::Vec;
use crate::{config::PAGE_SIZE, UPSafeCell};
use super::address::PhysAddr;
use lazy_static::lazy_static;
lazy_static! {
pub static ref FRAME_ALLOCATOR: UPSafeCell<FreeListAllocator> = unsafe {
UPSafeCell::new(FreeListAllocator {
current: 0,
end: 0,
free_list: Vec::new(),
})
};
}
trait FrameAllocator {
fn alloc(&mut self) -> Option<usize>;
fn dealloc(&mut self, value: usize);
}
pub struct FreeListAllocator {
current: usize,
end: usize,
free_list: Vec<usize>,
}
impl FreeListAllocator {
fn alloc(&mut self) -> Option<usize> {
let mut ret = 0;
if let Some(x) = self.free_list.pop() {
ret = x;
} else if self.current < self.end {
ret = self.current;
self.current += PAGE_SIZE;
};
Some(ret)
}
fn dealloc(&mut self, value: usize) {
assert!(!self.free_list.contains(&value));
self.free_list.push(value);
}
}
#[derive(Debug, Clone)]
// #[repr(transparent)]
pub struct PhysFrame {
start_pa: usize,
}
impl PhysFrame {
pub const fn start_pa(&self) -> PhysAddr {
PhysAddr(self.start_pa)
}
pub fn alloc() -> Option<Self> {
FRAME_ALLOCATOR
.exclusive_access()
.alloc()
.map(|pa| Self { start_pa: pa })
}
pub fn dealloc(pa: usize) {
FRAME_ALLOCATOR.exclusive_access().dealloc(pa)
}
pub fn alloc_zero() -> Option<Self> {
let mut f = Self::alloc()?;
f.zero();
Some(f)
}
pub fn zero(&mut self) {
unsafe { core::ptr::write_bytes(self.start_pa().kvaddr().as_ptr(), 0, PAGE_SIZE) }
}
pub fn as_slice(&self) -> &mut [u8] {
unsafe { core::slice::from_raw_parts_mut(self.start_pa().kvaddr().as_ptr(), PAGE_SIZE) }
}
}
impl Drop for PhysFrame {
fn drop(&mut self) {
FRAME_ALLOCATOR.exclusive_access().dealloc(self.start_pa);
}
}
pub(crate) fn init(start: usize, size: usize) {
FRAME_ALLOCATOR.exclusive_access().current = start;
FRAME_ALLOCATOR.exclusive_access().end = start + size;
}
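FreeListAllocator serves recycled frames from free_list first and otherwise bumps current by PAGE_SIZE until it reaches end. A small usage sketch of the PhysFrame handle built on top of it (it assumes mm::init has already given the allocator its range):

fn frame_example() {
    // A zeroed frame; it is handed back to the allocator when dropped.
    let frame = PhysFrame::alloc_zero().expect("no physical frames left");
    assert!(frame.start_pa().is_aligned());

    // The frame's memory is reachable through the kernel direct map.
    let bytes = frame.as_slice();
    assert!(bytes.iter().all(|&b| b == 0));
} // dropping `frame` pushes its start address onto the free list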


@ -11,7 +11,7 @@ pub fn handle_alloc_error(layout: core::alloc::Layout) -> ! {
static mut HEAP_SPACE: [u8; KERNEL_HEAP_SIZE] = [0; KERNEL_HEAP_SIZE];
pub fn init_heap() {
pub fn init() {
unsafe {
HEAP_ALLOCATOR
.lock()


@ -0,0 +1,281 @@
use super::{page_table::PageTable, *};
use crate::prelude::*;
use crate::{
config::PAGE_SIZE,
mm::address::{is_aligned},
vm::{VmFrame, VmFrameVec},
*,
};
use alloc::{
collections::{btree_map::Entry, BTreeMap}};
use core::fmt;
use x86_64::registers::control::Cr3Flags;
// use xmas_elf::{program::{SegmentData, Type}, {header, ElfFile}};
pub const USTACK_SIZE: usize = 4096 * 4;
pub const USTACK_TOP: usize = 0x8000_0000_0000;
pub struct MapArea {
/// flags
pub flags: PTFlags,
/// the mapping from each virtual address to its backing physical frame
pub mapper: BTreeMap<VirtAddr, VmFrame>,
}
pub struct MemorySet {
pub pt: PageTable,
/// the mapped area, if any
area: Option<MapArea>,
}
impl MapArea {
pub fn mapped_size(&self) -> usize {
self.mapper.len()
}
/// Maps the virtual address range to the given physical frames.
pub fn new(
start_va: VirtAddr,
size: usize,
flags: PTFlags,
physical_frames: VmFrameVec,
) -> Self {
assert!(
start_va.is_aligned()
&& is_aligned(size)
&& physical_frames.len() == (size / PAGE_SIZE)
);
let mut map_area = Self {
flags,
mapper: BTreeMap::new(),
};
let mut current_va = start_va.clone();
let page_size = size / PAGE_SIZE;
let mut phy_frame_iter = physical_frames.iter();
for i in 0..page_size {
let vm_frame = phy_frame_iter.next().unwrap();
map_area.map_with_physical_address(current_va, vm_frame.clone());
current_va+=PAGE_SIZE;
}
map_area
}
pub fn map_with_physical_address(&mut self, va: VirtAddr, pa: VmFrame) -> PhysAddr {
assert!(va.is_aligned());
match self.mapper.entry(va) {
Entry::Occupied(e) => panic!("already mapped an input physical address"),
Entry::Vacant(e) => e.insert(pa).physical_frame.exclusive_access().start_pa(),
}
}
pub fn map(&mut self, va: VirtAddr) -> PhysAddr {
assert!(va.is_aligned());
match self.mapper.entry(va) {
Entry::Occupied(e) => e.get().physical_frame.exclusive_access().start_pa(),
Entry::Vacant(e) => e
.insert(VmFrame::alloc_zero().unwrap())
.physical_frame
.exclusive_access()
.start_pa(),
}
}
pub fn unmap(&mut self, va: VirtAddr) -> Option<VmFrame> {
self.mapper.remove(&va)
}
pub fn write_data(&mut self, offset: usize, data: &[u8]) {
let mut start = offset;
let mut remain = data.len();
let mut processed = 0;
for (va, pa) in self.mapper.iter_mut() {
if start >= PAGE_SIZE {
start -= PAGE_SIZE;
} else {
let copy_len = (PAGE_SIZE - start).min(remain);
let src = &data[processed..processed + copy_len];
let dst = &mut pa.start_pa().kvaddr().get_bytes_array()[start..src.len() + start];
dst.copy_from_slice(src);
processed += copy_len;
remain -= copy_len;
start = 0;
if remain == 0 {
return;
}
}
}
}
pub fn read_data(&self, offset: usize, data: &mut [u8]) {
let mut start = offset;
let mut remain = data.len();
let mut processed = 0;
for (va, pa) in self.mapper.iter() {
if start >= PAGE_SIZE {
start -= PAGE_SIZE;
} else {
let copy_len = (PAGE_SIZE - start).min(remain);
let src = &mut data[processed..processed + copy_len];
let dst = &mut pa.start_pa().kvaddr().get_bytes_array()[start..src.len() + start];
src.copy_from_slice(dst);
processed += copy_len;
remain -= copy_len;
start = 0;
if remain == 0 {
return;
}
}
}
}
}
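write_data and read_data walk the mapped pages in order, skipping whole pages while start >= PAGE_SIZE and then copying at most PAGE_SIZE - start bytes per page. The same arithmetic, sketched over plain buffers so the boundary handling is easy to follow (a hypothetical helper, not a kernel type):

const PAGE_SIZE: usize = 0x1000;

fn write_across_pages(pages: &mut [[u8; PAGE_SIZE]], offset: usize, data: &[u8]) {
    let mut start = offset;
    let mut processed = 0;
    let mut remain = data.len();
    for page in pages.iter_mut() {
        if start >= PAGE_SIZE {
            start -= PAGE_SIZE; // skip pages that lie entirely before the offset
        } else {
            let copy_len = (PAGE_SIZE - start).min(remain);
            page[start..start + copy_len]
                .copy_from_slice(&data[processed..processed + copy_len]);
            processed += copy_len;
            remain -= copy_len;
            start = 0;
            if remain == 0 {
                return;
            }
        }
    }
}

// Writing 8 bytes at offset 4100 lands entirely in bytes 4..12 of the second page;
// writing 8 bytes at offset 4090 is split: 6 bytes at the end of the first page,
// then 2 bytes at the start of the second.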
impl Clone for MapArea {
fn clone(&self) -> Self {
let mut mapper = BTreeMap::new();
for (&va, old) in &self.mapper {
let new = VmFrame::alloc().unwrap();
new.physical_frame
.exclusive_access()
.as_slice()
.copy_from_slice(old.physical_frame.exclusive_access().as_slice());
mapper.insert(va, new);
}
Self {
flags: self.flags,
mapper,
}
}
}
impl MemorySet {
pub fn new(area: MapArea) -> Self {
let mut pt = PageTable::new();
pt.map_area(&area);
Self {
pt,
area: Some(area),
}
}
pub fn zero() -> Self {
Self {
pt: PageTable::new(),
area: None,
}
}
pub fn unmap(&mut self, va: VirtAddr) -> Result<()> {
if self.area.is_none() {
Err(Error::InvalidArgs)
} else {
self.area.take().unwrap().unmap(va);
Ok(())
}
}
pub fn clear(&mut self) {
self.pt.unmap_area(&self.area.take().unwrap());
self.area = None;
}
pub fn activate(&self) {
unsafe {
x86_64::registers::control::Cr3::write(
x86_64::structures::paging::PhysFrame::from_start_address(x86_64::PhysAddr::new(
self.pt.root_pa.0 as u64,
))
.unwrap(),
Cr3Flags::empty(),
);
}
}
pub fn write_bytes(&mut self, offset: usize, data: &[u8]) -> Result<()> {
if self.area.is_none() {
Err(Error::InvalidArgs)
} else {
self.area.take().unwrap().write_data(offset, data);
Ok(())
}
}
pub fn read_bytes(&self, offset: usize, data: &mut [u8]) -> Result<()> {
if self.area.is_none() {
Err(Error::InvalidArgs)
} else {
self.area.as_ref().unwrap().read_data(offset, data);
Ok(())
}
}
}
impl Clone for MemorySet {
fn clone(&self) -> Self {
if self.area.is_none() {
Self::zero()
} else {
Self::new(self.area.clone().unwrap())
}
}
}
impl Drop for MemorySet {
fn drop(&mut self) {
self.clear();
}
}
impl fmt::Debug for MapArea {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("MapArea")
.field("flags", &self.flags)
.field("mapped area", &self.mapper)
.finish()
}
}
impl fmt::Debug for MemorySet {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("MemorySet")
.field("areas", &self.area)
.field("page_table_root", &self.pt.root_pa)
.finish()
}
}
// pub fn load_app(elf_data: &[u8]) -> (usize, MemorySet) {
// let elf = ElfFile::new(elf_data).expect("invalid ELF file");
// assert_eq!(elf.header.pt1.class(), header::Class::SixtyFour, "64-bit ELF required");
// assert_eq!(elf.header.pt2.type_().as_type(), header::Type::Executable, "ELF is not an executable object");
// assert_eq!(elf.header.pt2.machine().as_machine(), header::Machine::X86_64, "invalid ELF arch");
// let mut ms = MemorySet::new();
// for ph in elf.program_iter() {
// if ph.get_type() != Ok(Type::Load) {
// continue;
// }
// let va = VirtAddr(ph.virtual_addr() as _);
// let offset = va.page_offset();
// let area_start = va.align_down();
// let area_end = VirtAddr((ph.virtual_addr() + ph.mem_size()) as _).align_up();
// let data = match ph.get_data(&elf).unwrap() {
// SegmentData::Undefined(data) => data,
// _ => panic!("failed to get ELF segment data"),
// };
// let mut flags = PTFlags::PRESENT | PTFlags::USER;
// if ph.flags().is_write() {
// flags |= PTFlags::WRITABLE;
// }
// let mut area = MapArea::new(area_start, area_end.0 - area_start.0, flags);
// area.write_data(offset, data);
// ms.insert(area);
// }
// ms.insert(MapArea::new(VirtAddr(USTACK_TOP - USTACK_SIZE), USTACK_SIZE,
// PTFlags::PRESENT | PTFlags::WRITABLE | PTFlags::USER));
// (elf.header.pt2.entry_point() as usize, ms)
// }


@ -0,0 +1,38 @@
//! memory management.
pub mod address;
mod frame_allocator;
mod heap_allocator;
mod memory_set;
mod page_table;
use address::PhysAddr;
use address::VirtAddr;
pub use self::{frame_allocator::*, memory_set::*, page_table::*};
bitflags::bitflags! {
/// Possible flags for a page table entry.
pub struct PTFlags: usize {
/// Specifies whether the mapped frame or page table is loaded in memory.
const PRESENT = 1;
/// Controls whether writes to the mapped frames are allowed.
const WRITABLE = 1 << 1;
/// Controls whether accesses from userspace (i.e. ring 3) are permitted.
const USER = 1 << 2;
/// If this bit is set, a “write-through” policy is used for the cache, else a “write-back”
/// policy is used.
const WRITE_THROUGH = 1 << 3;
/// Disables caching for the pointed entry, if it is cacheable.
const NO_CACHE = 1 << 4;
/// Indicates that the mapping is present in all address spaces, so it isn't flushed from
/// the TLB on an address space switch.
const GLOBAL = 1 << 8;
}
}
pub fn init(start: u64, size: u64) {
heap_allocator::init();
frame_allocator::init(start as usize, size as usize);
page_table::init();
}


@ -0,0 +1,192 @@
use alloc::{vec, vec::Vec};
use super::{memory_set::MapArea, *};
use crate::{
config::{ENTRY_COUNT, KERNEL_OFFSET, PAGE_SIZE, PHYS_OFFSET},
vm::VmFrame,
*,
};
use core::fmt;
static KERNEL_PTE: UPSafeCell<PageTableEntry> = zero();
static PHYS_PTE: UPSafeCell<PageTableEntry> = zero();
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct PageTableEntry(usize);
impl PageTableEntry {
const PHYS_ADDR_MASK: usize = !(PAGE_SIZE - 1);
pub const fn new_page(pa: PhysAddr, flags: PTFlags) -> Self {
Self((pa.0 & Self::PHYS_ADDR_MASK) | flags.bits)
}
const fn pa(self) -> PhysAddr {
PhysAddr(self.0 as usize & Self::PHYS_ADDR_MASK)
}
const fn flags(self) -> PTFlags {
PTFlags::from_bits_truncate(self.0)
}
const fn is_unused(self) -> bool {
self.0 == 0
}
const fn is_present(self) -> bool {
(self.0 & PTFlags::PRESENT.bits) != 0
}
}
impl fmt::Debug for PageTableEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut f = f.debug_struct("PageTableEntry");
f.field("raw", &self.0)
.field("pa", &self.pa())
.field("flags", &self.flags())
.finish()
}
}
pub struct PageTable {
pub root_pa: PhysAddr,
/// stores all the physical frames used by the page table itself, e.g. the frame holding the root table at `root_pa`
tables: Vec<VmFrame>,
}
impl PageTable {
pub fn new() -> Self {
let root_frame = VmFrame::alloc_zero().unwrap();
let p4 = table_of(root_frame.start_pa());
p4[p4_index(VirtAddr(KERNEL_OFFSET))] = *KERNEL_PTE.exclusive_access();
p4[p4_index(VirtAddr(PHYS_OFFSET))] = *PHYS_PTE.exclusive_access();
Self {
root_pa: root_frame.start_pa(),
tables: vec![root_frame],
}
}
pub fn map(&mut self, va: VirtAddr, pa: PhysAddr, flags: PTFlags) {
let entry = self.get_entry_or_create(va).unwrap();
if !entry.is_unused() {
panic!("{:#x?} is mapped before mapping", va);
}
*entry = PageTableEntry::new_page(pa.align_down(), flags);
}
pub fn unmap(&mut self, va: VirtAddr) {
let entry = get_entry(self.root_pa, va).unwrap();
if entry.is_unused() {
panic!("{:#x?} is invalid before unmapping", va);
}
entry.0 = 0;
}
pub fn map_area(&mut self, area: &MapArea) {
println!("frame test");
for (va, pa) in area.mapper.iter() {
assert!(pa.start_pa().0 < PHYS_OFFSET);
self.map(*va, pa.start_pa(), area.flags);
}
}
pub fn unmap_area(&mut self, area: &MapArea) {
for (va, _) in area.mapper.iter() {
self.unmap(*va);
}
}
}
impl PageTable {
fn alloc_table(&mut self) -> PhysAddr {
let frame = VmFrame::alloc_zero().unwrap();
let pa = frame.start_pa();
self.tables.push(frame);
pa
}
fn get_entry_or_create(&mut self, va: VirtAddr) -> Option<&mut PageTableEntry> {
let p4 = table_of(self.root_pa);
let p4e = &mut p4[p4_index(va)];
let p3 = next_table_or_create(p4e, || self.alloc_table())?;
let p3e = &mut p3[p3_index(va)];
let p2 = next_table_or_create(p3e, || self.alloc_table())?;
let p2e = &mut p2[p2_index(va)];
let p1 = next_table_or_create(p2e, || self.alloc_table())?;
let p1e = &mut p1[p1_index(va)];
Some(p1e)
}
}
const fn p4_index(va: VirtAddr) -> usize {
(va.0 >> (12 + 27)) & (ENTRY_COUNT - 1)
}
const fn p3_index(va: VirtAddr) -> usize {
(va.0 >> (12 + 18)) & (ENTRY_COUNT - 1)
}
const fn p2_index(va: VirtAddr) -> usize {
(va.0 >> (12 + 9)) & (ENTRY_COUNT - 1)
}
const fn p1_index(va: VirtAddr) -> usize {
(va.0 >> 12) & (ENTRY_COUNT - 1)
}
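Each of these functions extracts one 9-bit level index (ENTRY_COUNT = 512) from the virtual address; the low 12 bits are the in-page offset. A worked check against the two constants used in PageTable::new (the values follow from the shifts above; since the index helpers are module-private, this would live inside page_table.rs):

fn index_example() {
    // KERNEL_OFFSET = 0xffff_ff00_0000_0000 lives in P4 slot 510.
    assert_eq!(p4_index(VirtAddr(KERNEL_OFFSET)), 510);
    // PHYS_OFFSET = 0xffff_8000_0000_0000 (the direct map) lives in P4 slot 256.
    assert_eq!(p4_index(VirtAddr(PHYS_OFFSET)), 256);

    // Lower-level indices select the P3/P2/P1 entries within that slot.
    let va = VirtAddr(PHYS_OFFSET + 0x4020_1000);
    assert_eq!(p3_index(va), 1);
    assert_eq!(p2_index(va), 1);
    assert_eq!(p1_index(va), 1);
}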
pub fn query(root_pa: PhysAddr, va: VirtAddr) -> Option<(PhysAddr, PTFlags)> {
let entry = get_entry(root_pa, va)?;
if entry.is_unused() {
return None;
}
let off = va.page_offset();
Some((PhysAddr(entry.pa().0 + off), entry.flags()))
}
fn get_entry(root_pa: PhysAddr, va: VirtAddr) -> Option<&'static mut PageTableEntry> {
let p4 = table_of(root_pa);
let p4e = &mut p4[p4_index(va)];
let p3 = next_table(p4e)?;
let p3e = &mut p3[p3_index(va)];
let p2 = next_table(p3e)?;
let p2e = &mut p2[p2_index(va)];
let p1 = next_table(p2e)?;
let p1e = &mut p1[p1_index(va)];
Some(p1e)
}
fn table_of<'a>(pa: PhysAddr) -> &'a mut [PageTableEntry] {
let ptr = pa.kvaddr().as_ptr() as *mut _;
unsafe { core::slice::from_raw_parts_mut(ptr, ENTRY_COUNT) }
}
fn next_table<'a>(entry: &PageTableEntry) -> Option<&'a mut [PageTableEntry]> {
if entry.is_present() {
Some(table_of(entry.pa()))
} else {
None
}
}
fn next_table_or_create<'a>(
entry: &mut PageTableEntry,
mut alloc: impl FnMut() -> PhysAddr,
) -> Option<&'a mut [PageTableEntry]> {
if entry.is_unused() {
let pa = alloc();
*entry = PageTableEntry::new_page(pa, PTFlags::PRESENT | PTFlags::WRITABLE | PTFlags::USER);
Some(table_of(pa))
} else {
next_table(entry)
}
}
pub(crate) fn init() {
let (cr3, _) = x86_64::registers::control::Cr3::read();
let p4 = table_of(PhysAddr(cr3.start_address().as_u64() as usize));
*KERNEL_PTE.exclusive_access() = p4[p4_index(VirtAddr(KERNEL_OFFSET))];
*PHYS_PTE.exclusive_access() = p4[p4_index(VirtAddr(PHYS_OFFSET))];
println!("kernel_pte:{:?}", p4[p4_index(VirtAddr(KERNEL_OFFSET))]);
println!("PHYS_PTE:{:?}", p4[p4_index(VirtAddr(PHYS_OFFSET))]);
// Cancel mapping in lowest addresses.
// p4[0].0 = 0;
}


@ -1,4 +1,5 @@
mod spin;
pub mod up;
mod wait;
pub use self::spin::{SpinLock, SpinLockGuard};


@ -0,0 +1,32 @@
use core::{
cell::{RefCell, RefMut},};
#[derive(Debug)]
/// Wrap a static data structure inside it so that we are
/// able to access it without any `unsafe`.
///
/// It should only be used on a uniprocessor.
///
/// In order to get mutable reference of inner data, call
/// `exclusive_access`.
pub struct UPSafeCell<T> {
/// inner data
inner: RefCell<T>,
}
unsafe impl<T> Sync for UPSafeCell<T> {}
impl<T> UPSafeCell<T> {
/// The user is responsible for guaranteeing that the inner struct is only
/// used on a uniprocessor.
pub unsafe fn new(value: T) -> Self {
Self {
inner: RefCell::new(value),
}
}
/// Panics if the data has already been borrowed.
pub fn exclusive_access(&self) -> RefMut<'_, T> {
self.inner.borrow_mut()
}
}
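A minimal sketch of the intended usage, following the same lazy_static + UPSafeCell pattern used for FRAME_ALLOCATOR and PROCESSOR elsewhere in this commit (the Counter type is only for illustration):

use lazy_static::lazy_static;

struct Counter {
    value: usize,
}

lazy_static! {
    // Safety: the kernel currently runs on a single processor.
    static ref COUNTER: UPSafeCell<Counter> =
        unsafe { UPSafeCell::new(Counter { value: 0 }) };
}

fn bump() -> usize {
    // exclusive_access() panics if the cell is already borrowed somewhere else.
    let mut counter = COUNTER.exclusive_access();
    counter.value += 1;
    counter.value
}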


@ -1,5 +1,6 @@
//! Tasks are the unit of code execution.
mod processor;
mod scheduler;
#[allow(clippy::module_inception)]
mod task;


@ -0,0 +1,73 @@
use super::{
task::{context_switch, TaskContext},
Task, scheduler::{fetch_task, GLOBAL_SCHEDULER}, TaskStatus,
};
use crate::UPSafeCell;
use alloc:: sync::Arc;
use lazy_static::*;
pub struct Processor {
current: Option<Arc<Task>>,
idle_task_cx: TaskContext,
}
impl Processor {
pub fn new() -> Self {
Self {
current: None,
idle_task_cx: TaskContext::default(),
}
}
fn get_idle_task_cx_ptr(&mut self) -> *mut TaskContext {
&mut self.idle_task_cx as *mut _
}
pub fn take_current(&mut self) -> Option<Arc<Task>> {
self.current.take()
}
pub fn current(&self) -> Option<Arc<Task>> {
self.current.as_ref().map(Arc::clone)
}
pub fn set_current_task(&mut self, task: Arc<Task>) {
self.current = Some(task.clone());
}
}
lazy_static! {
pub static ref PROCESSOR: UPSafeCell<Processor> = unsafe { UPSafeCell::new(Processor::new()) };
}
pub fn take_current_task() -> Option<Arc<Task>> {
PROCESSOR.exclusive_access().take_current()
}
pub fn current_task() -> Option<Arc<Task>> {
PROCESSOR.exclusive_access().current()
}
/// Switches to another task chosen by `GLOBAL_SCHEDULER`.
///
/// If there is no current task, the idle task context is used as the switched-out context.
///
/// If the current task has exited, it is not put back into the scheduler.
///
/// Finally, the context switch from the current task to the next one is performed.
pub fn schedule() {
let next_task = fetch_task().expect("no more task found");
let current_task_option = current_task();
let next_task_cx_ptr = &next_task.inner_exclusive_access().ctx as *const TaskContext;
let current_task: Arc<Task>;
let current_task_cx_ptr = if current_task_option.is_none(){
PROCESSOR.exclusive_access().get_idle_task_cx_ptr()
}else{
current_task = current_task_option.unwrap();
if current_task.status() != TaskStatus::Exited{
GLOBAL_SCHEDULER.exclusive_access().enqueue(current_task.clone());
}
&mut current_task.inner_exclusive_access().ctx as *mut TaskContext
};
// change the current task to the next task
PROCESSOR.exclusive_access().current = Some(next_task.clone());
unsafe {
context_switch(current_task_cx_ptr, next_task_cx_ptr);
}
}


@ -1,19 +1,59 @@
use crate::prelude::*;
use crate::task::Task;
use crate::{prelude::*, UPSafeCell};
use lazy_static::lazy_static;
use super::processor::current_task;
use super::task::{context_switch, TaskContext};
lazy_static! {
pub static ref GLOBAL_SCHEDULER: UPSafeCell<GlobalScheduler> =
unsafe { UPSafeCell::new(GlobalScheduler { scheduler: None }) };
}
/// A scheduler for tasks.
///
/// An implementation of scheduler can attach scheduler-related information
/// with the `TypeMap` returned from `task.data()`.
pub trait Scheduler {
pub trait Scheduler: Sync + Send {
fn enqueue(&self, task: Arc<Task>);
fn dequeue(&self) -> Option<Arc<Task>>;
}
pub struct GlobalScheduler {
scheduler: Option<&'static dyn Scheduler>,
}
impl GlobalScheduler {
pub fn new() -> Self {
Self { scheduler: None }
}
/// Dequeues a task using the scheduler.
/// Requires that the scheduler has been set.
pub fn dequeue(&mut self) -> Option<Arc<Task>> {
self.scheduler.take().unwrap().dequeue()
}
/// Enqueues a task using the scheduler.
/// Requires that the scheduler has been set.
pub fn enqueue(&mut self, task: Arc<Task>) {
self.scheduler.take().unwrap().enqueue(task)
}
}
/// Set the global task scheduler.
///
/// This must be called before invoking `Task::spawn`.
pub fn set_scheduler(scheduler: &'static dyn Scheduler) {
todo!()
GLOBAL_SCHEDULER.exclusive_access().scheduler = Some(scheduler);
}
pub fn fetch_task() -> Option<Arc<Task>> {
GLOBAL_SCHEDULER.exclusive_access().dequeue()
}
pub fn add_task(task: Arc<Task>) {
GLOBAL_SCHEDULER.exclusive_access().enqueue(task);
}
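A sketch of what a Scheduler implementation and its installation look like; it mirrors the SpinMutex-backed FifoScheduler added to kxos-std further down, shortened here for illustration (SimpleFifo and install_scheduler are hypothetical names):

use alloc::{boxed::Box, collections::VecDeque, sync::Arc};
use spin::mutex::SpinMutex;

struct SimpleFifo {
    tasks: SpinMutex<VecDeque<Arc<Task>>>,
}

impl Scheduler for SimpleFifo {
    fn enqueue(&self, task: Arc<Task>) {
        self.tasks.lock().push_back(task);
    }

    fn dequeue(&self) -> Option<Arc<Task>> {
        self.tasks.lock().pop_front()
    }
}

fn install_scheduler() {
    // set_scheduler wants a &'static dyn Scheduler; leaking a Box is one simple way.
    let fifo: &'static SimpleFifo = Box::leak(Box::new(SimpleFifo {
        tasks: SpinMutex::new(VecDeque::new()),
    }));
    set_scheduler(fifo);
    // From this point on, Task::spawn and schedule() can fetch tasks from it.
}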


@ -0,0 +1,24 @@
.text
.global context_switch
context_switch: # (cur: *mut TaskContext, nxt: *TaskContext)
# Save cur's register
mov rax, [rsp] # return address
mov [rdi + 56], rax # 56 = offsetof(Context, rip)
mov [rdi + 0], rsp
mov [rdi + 8], rbx
mov [rdi + 16], rbp
mov [rdi + 24], r12
mov [rdi + 32], r13
mov [rdi + 40], r14
mov [rdi + 48], r15
# Restore nxt's registers
mov rsp, [rsi + 0]
mov rbx, [rsi + 8]
mov rbp, [rsi + 16]
mov r12, [rsi + 24]
mov r13, [rsi + 32]
mov r14, [rsi + 40]
mov r15, [rsi + 48]
mov rax, [rsi + 56] # restore return address
mov [rsp], rax # for stack balance, must use mov instead of push
ret
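The hard-coded offsets in switch.S rely on TaskContext being repr(C): seven callee-saved registers of 8 bytes each, followed by rip at byte offset 56. A small sanity check that could be written against the definitions in task.rs below:

fn context_layout_check() {
    use core::mem::size_of;
    assert_eq!(size_of::<CalleeRegs>(), 7 * 8); // rsp, rbx, rbp, r12..r15
    assert_eq!(size_of::<TaskContext>(), 8 * 8); // CalleeRegs + rip

    // rip must sit at offset 56, matching `mov [rdi + 56], rax` above.
    let ctx = TaskContext::default();
    let base = &ctx as *const TaskContext as usize;
    let rip_addr = &ctx.rip as *const usize as usize;
    assert_eq!(rip_addr - base, 56);
}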


@ -1,17 +1,69 @@
use crate::prelude::*;
use core::cell::RefMut;
use core::intrinsics::unreachable;
use core::mem::size_of;
use crate::trap::CalleeRegs;
use crate::user::UserSpace;
use crate::{prelude::*, UPSafeCell, println};
use super::processor::{current_task, schedule};
use super::scheduler::add_task;
core::arch::global_asm!(include_str!("switch.S"));
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub struct TaskContext {
pub regs: CalleeRegs,
pub rip: usize,
}
extern "C" {
pub fn context_switch(cur: *mut TaskContext, nxt: *const TaskContext);
}
/// 8*PAGE_SIZE
#[cfg(debug_assertions)]
pub const TASK_SIZE: usize = 32768;
/// 2*PAGE_SIZE
#[cfg(not(debug_assertions))]
pub const TASK_SIZE: usize = 8192;
#[cfg(debug_assertions)]
#[repr(align(32768))]
struct TaskAlign;
#[cfg(not(debug_assertions))]
#[repr(C, align(8192))]
struct TaskAlign;
pub const KERNEL_STACK_SIZE: usize =
TASK_SIZE - size_of::<Box<dyn Fn()>>() - size_of::<Box<dyn Any + Send + Sync>>() - size_of::<Option<Arc<UserSpace>>>()
- size_of::<UPSafeCell<TaskInner>>() - size_of::<usize>();
/// A task that executes a function to the end.
pub struct Task {
func: Box<dyn FnOnce()>,
_align: TaskAlign,
func: Box<dyn Fn() + Send + Sync>,
data: Box<dyn Any + Send + Sync>,
user_space: Option<UserSpace>,
user_space: Option<Arc<UserSpace>>,
task_inner: UPSafeCell<TaskInner>,
exit_code: usize,
kstack: [u8; KERNEL_STACK_SIZE],
}
pub struct TaskInner {
pub task_status: TaskStatus,
pub ctx: TaskContext,
}
impl Task {
/// Gets the current task.
pub fn current() -> Arc<Task> {
todo!()
current_task().unwrap()
}
/// Gets exclusive access to the inner task state.
pub fn inner_exclusive_access(&self) -> RefMut<'_, TaskInner> {
self.task_inner.exclusive_access()
}
/// Yields execution so that another task may be scheduled.
@ -33,32 +85,71 @@ impl Task {
user_space: Option<Arc<UserSpace>>,
) -> Result<Arc<Self>>
where
F: FnOnce(),
F: Fn() + Send + Sync + 'static,
T: Any + Send + Sync,
{
todo!()
/// Every task enters through this function;
/// it is meant to execute the `task_fn` stored in the `Task`.
fn kernel_task_entry(){
let current_task = current_task().expect("no current task, it should have current task in kernel task entry");
current_task.func.call(())
}
let result = Self {
func: Box::new(task_fn),
data: Box::new(task_data),
user_space,
task_inner: unsafe {
UPSafeCell::new(TaskInner {
task_status: TaskStatus::Runnable,
ctx: TaskContext::default(),
})
},
_align: TaskAlign,
exit_code:0,
kstack: [0; KERNEL_STACK_SIZE],
};
result.task_inner.exclusive_access().ctx.rip = kernel_task_entry as usize;
let arc_self = Arc::new(result);
add_task(arc_self.clone());
schedule();
Ok(arc_self)
}
/// Returns the task status.
pub fn status(&self) -> TaskStatus {
todo!()
self.task_inner.exclusive_access().task_status
}
/// Returns the task data.
pub fn data(&self) -> &dyn Any {
todo!()
&self.data
}
/// Returns the user space of this task, if it has.
pub fn user_space(&self) -> Option<&Arc<UserSpace>> {
todo!()
if self.user_space.is_some() {
Some(self.user_space.as_ref().unwrap())
} else {
None
}
}
pub fn exit(&self)->!{
self.inner_exclusive_access().task_status = TaskStatus::Exited;
schedule();
unreachable!()
}
}
#[derive(Clone, Copy,PartialEq, Eq, PartialOrd, Ord)]
/// The status of a task.
pub enum TaskStatus {
/// The task is runnable.
Runnable,
/// The task is running.
Running,
/// The task is sleeping.
Sleeping,
/// The task has exited.


@ -0,0 +1,46 @@
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub struct CallerRegs {
pub rax: usize,
pub rcx: usize,
pub rdx: usize,
pub rsi: usize,
pub rdi: usize,
pub r8: usize,
pub r9: usize,
pub r10: usize,
pub r11: usize,
}
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub struct CalleeRegs {
pub rsp: usize,
pub rbx: usize,
pub rbp: usize,
pub r12: usize,
pub r13: usize,
pub r14: usize,
pub r15: usize,
}
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub struct SyscallFrame {
pub caller: CallerRegs,
pub callee: CalleeRegs,
}
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub struct TrapFrame {
pub regs: CallerRegs,
pub id: usize,
pub err: usize,
// Pushed by CPU
pub rip: usize,
pub cs: usize,
pub rflags: usize,
pub rsp: usize,
pub ss: usize,
}


@ -1,15 +1,20 @@
//! User space.
use crate::cpu::CpuContext;
use crate::prelude::*;
use crate::task::Task;
use crate::vm::VmSpace;
use crate::{prelude::*};
/// A user space.
///
/// Each user space has a VM address space and allows a task to execute in
/// user mode.
pub struct UserSpace {}
pub struct UserSpace {
/// vm space
vm_space: VmSpace,
/// cpu context before entering user space
cpu_ctx: CpuContext,
}
impl UserSpace {
/// Creates a new instance.
@ -17,12 +22,15 @@ impl UserSpace {
/// Each instance maintains a VM address space and the CPU state to enable
/// execution in the user space.
pub fn new(vm_space: VmSpace, cpu_ctx: CpuContext) -> Self {
todo!()
Self {
vm_space: vm_space,
cpu_ctx: cpu_ctx,
}
}
/// Returns the VM address space.
pub fn vm_space(&self) -> &VmSpace {
todo!()
&self.vm_space
}
/// Returns the user mode that is bound to the current task and user space.
@ -34,7 +42,7 @@ impl UserSpace {
/// This method is intended to only allow each task to have at most one
/// instance of `UserMode` initiated. If this method is called again before
/// the first instance for the current task is dropped, then the method
/// panics.
/// panics.
pub fn user_mode(&self) -> UserMode<'_> {
todo!()
}


@ -1,9 +1,11 @@
use core::iter::Iterator;
use crate::prelude::*;
use crate::{config::PAGE_SIZE, mm::address::PhysAddr, prelude::*, Error, UPSafeCell};
use super::VmIo;
use crate::mm::PhysFrame;
/// A collection of page frames (physical memory pages).
///
/// For the most parts, `VmFrameVec` is like `Vec<VmFrame>`. But the
@ -24,91 +26,154 @@ impl VmFrameVec {
///
/// For more information, see `VmAllocOptions`.
pub fn allocate(options: &VmAllocOptions) -> Result<Self> {
todo!()
let page_size = options.page_size;
let mut frame_list = Vec::new();
for i in 0..page_size {
let vm_frame = VmFrame::alloc();
if vm_frame.is_none() {
return Err(Error::NoMemory);
}
frame_list.push(vm_frame.unwrap());
}
Ok(Self(frame_list))
}
/// Pushes a new frame to the collection.
pub fn push(&mut self, new_frame: VmFrame) {
todo!()
self.0.push(new_frame);
}
/// Pops a frame from the collection.
pub fn pop(&mut self) -> Option<VmFrame> {
todo!()
self.0.pop()
}
/// Removes a frame at a position.
pub fn remove(&mut self, at: usize) -> VmFrame {
todo!()
self.0.remove(at)
}
/// Append some frames.
pub fn append(&mut self, more: VmFrameVec) -> Result<()> {
todo!()
pub fn append(&mut self, more: &mut VmFrameVec) -> Result<()> {
self.0.append(&mut more.0);
Ok(())
}
/// Truncate some frames.
///
/// If `new_len >= self.len()`, then this method has no effect.
pub fn truncate(&mut self, new_len: usize) {
todo!()
if new_len >= self.0.len() {
return;
}
self.0.truncate(new_len)
}
/// Returns an iterator
pub fn iter(&self) -> VmFrameVecIter<'_> {
todo!()
pub fn iter(&self) -> core::slice::Iter<'_, VmFrame> {
self.0.iter()
}
/// Returns the number of frames.
pub fn len(&self) -> usize {
todo!()
self.0.len()
}
/// Returns whether the frame collection is empty.
pub fn is_empty(&self) -> bool {
todo!()
self.0.is_empty()
}
/// Returns the number of bytes.
///
/// This method is equivalent to `self.len() * PAGE_SIZE`.
pub fn nbytes(&self) -> usize {
todo!()
self.0.len() * PAGE_SIZE
}
}
impl VmIo for VmFrameVec {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
todo!()
let mut start = offset;
let mut remain = buf.len();
let mut processed = 0;
for pa in self.0.iter() {
if start >= PAGE_SIZE {
start -= PAGE_SIZE;
} else {
let copy_len = (PAGE_SIZE - start).min(remain);
let src = &mut buf[processed..processed + copy_len];
let dst = &mut pa.start_pa().kvaddr().get_bytes_array()[start..src.len() + start];
src.copy_from_slice(dst);
processed += copy_len;
remain -= copy_len;
start = 0;
if remain == 0 {
break;
}
}
}
Ok(())
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
todo!()
fn write_bytes(&mut self, offset: usize, buf: &[u8]) -> Result<()> {
let mut start = offset;
let mut remain = buf.len();
let mut processed = 0;
for pa in self.0.iter_mut() {
if start >= PAGE_SIZE {
start -= PAGE_SIZE;
} else {
let copy_len = (PAGE_SIZE - start).min(remain);
let src = &buf[processed..processed + copy_len];
let dst = &mut pa.start_pa().kvaddr().get_bytes_array()[start..src.len() + start];
dst.copy_from_slice(src);
processed += copy_len;
remain -= copy_len;
start = 0;
if remain == 0 {
break;
}
}
}
Ok(())
}
}
/// An iterator for frames.
pub struct VmFrameVecIter<'a> {
frames: &'a VmFrameVec,
current: usize,
// more...
}
impl<'a> VmFrameVecIter<'a> {
pub fn new(frames: &'a VmFrameVec) -> Self {
Self { frames, current: 0 }
}
}
impl<'a> Iterator for VmFrameVecIter<'a> {
type Item = &'a VmFrame;
fn next(&mut self) -> Option<Self::Item> {
todo!()
if self.current >= self.frames.0.len() {
return None;
}
Some(self.frames.0.get(self.current).unwrap())
}
}
/// Options for allocating physical memory pages (or frames).
/// See `VmFrameVec::alloc`.
pub struct VmAllocOptions {}
pub struct VmAllocOptions {
page_size: usize,
}
impl VmAllocOptions {
/// Creates new options for allocating the specified number of frames.
pub fn new(len: usize) -> Self {
todo!()
Self { page_size: len }
}
/// Sets the physical address of the first frame.
@ -140,6 +205,7 @@ impl VmAllocOptions {
}
}
#[derive(Debug)]
/// A handle to a page frame.
///
/// An instance of `VmFrame` is a handle to a page frame (a physical memory
@ -150,7 +216,9 @@ impl VmAllocOptions {
/// when all instances of `VmFrame` that refer to the
/// same page frame are dropped, the page frame will be freed.
/// Free page frames are allocated in bulk by `VmFrameVec::allocate`.
pub struct VmFrame {}
pub struct VmFrame {
pub physical_frame: UPSafeCell<Arc<PhysFrame>>,
}
impl VmFrame {
/// Creates a new VmFrame.
@ -158,13 +226,41 @@ impl VmFrame {
/// # Safety
///
/// The given physical address must be valid for use.
pub(crate) unsafe fn new(paddr: Paddr) -> Self {
todo!()
pub(crate) unsafe fn new(physical_frame: PhysFrame) -> Self {
Self {
physical_frame: UPSafeCell::new(Arc::new(physical_frame)),
}
}
/// Allocate a new VmFrame
pub(crate) fn alloc() -> Option<Self> {
let phys = PhysFrame::alloc();
if phys.is_none() {
return None;
}
Some(Self {
physical_frame: unsafe { UPSafeCell::new(Arc::new(phys.unwrap())) },
})
}
/// Allocate a new VmFrame filled with zero
pub(crate) fn alloc_zero() -> Option<Self> {
let phys = PhysFrame::alloc_zero();
if phys.is_none() {
return None;
}
Some(Self {
physical_frame: unsafe { UPSafeCell::new(Arc::new(phys.unwrap())) },
})
}
/// Returns the physical address of the page frame.
pub fn paddr(&self) -> Paddr {
todo!()
self.physical_frame.exclusive_access().start_pa().0
}
pub fn start_pa(&self) -> PhysAddr {
self.physical_frame.exclusive_access().start_pa()
}
/// Returns whether the page frame is accessible by DMA.
@ -178,12 +274,16 @@ impl VmFrame {
impl Clone for VmFrame {
fn clone(&self) -> Self {
todo!("inc ref cnt")
Self {
physical_frame: unsafe {
UPSafeCell::new(self.physical_frame.exclusive_access().clone())
},
}
}
}
impl Drop for VmFrame {
fn drop(&mut self) {
todo!("dec ref cnt and if zero, free the page frame")
drop(self.physical_frame.exclusive_access())
}
}
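A short usage sketch of the now-implemented VmFrameVec together with the VmIo offset-based interface (it assumes the frame allocator has already been initialised by mm::init):

fn frame_vec_example() -> Result<()> {
    // Ask for two page frames.
    let options = VmAllocOptions::new(2);
    let mut frames = VmFrameVec::allocate(&options)?;
    assert_eq!(frames.len(), 2);
    assert_eq!(frames.nbytes(), 2 * PAGE_SIZE);

    // Write 8 bytes straddling the boundary between the two frames...
    let data = [1u8, 2, 3, 4, 5, 6, 7, 8];
    frames.write_bytes(PAGE_SIZE - 4, &data)?;

    // ...and read them back through the same offsets.
    let mut out = [0u8; 8];
    frames.read_bytes(PAGE_SIZE - 4, &mut out)?;
    assert_eq!(out, data);
    Ok(())
}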


@ -44,10 +44,10 @@ pub trait VmIo: Send + Sync {
/// On success, the input `buf` must be written to the VM object entirely.
/// If, for any reason, the input data can only be written partially,
/// then the method shall return an error.
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()>;
fn write_bytes(&mut self, offset: usize, buf: &[u8]) -> Result<()>;
/// Write a value of a specified type at a specified offset.
fn write_val<T: Pod>(&self, offset: usize, new_val: &T) -> Result<()> {
fn write_val<T: Pod>(&mut self, offset: usize, new_val: &T) -> Result<()> {
self.write_bytes(offset, new_val.as_bytes())?;
Ok(())
}
@ -57,7 +57,7 @@ pub trait VmIo: Send + Sync {
/// # No short write
///
/// Similar to `write_bytes`.
fn write_slice<T: Pod>(&self, offset: usize, slice: &[T]) -> Result<()> {
fn write_slice<T: Pod>(&mut self, offset: usize, slice: &[T]) -> Result<()> {
let buf = unsafe { core::mem::transmute(slice) };
self.write_bytes(offset, buf)
}


@ -6,9 +6,6 @@ pub type Vaddr = usize;
/// Physical addresses.
pub type Paddr = usize;
/// The size of a VM page or a page frame.
pub const PAGE_SIZE: usize = 0x1000; // 4KB
mod frame;
mod io;
mod pod;


@ -1,8 +1,12 @@
use crate::config::PAGE_SIZE;
use crate::{UPSafeCell, println};
use bitflags::bitflags;
use core::ops::Range;
use crate::prelude::*;
use crate::mm::address::{is_aligned, VirtAddr};
use crate::mm::{MapArea, MemorySet, PTFlags};
use crate::vm::VmFrameVec;
use crate::{prelude::*, Error};
use super::VmIo;
@ -17,20 +21,50 @@ use super::VmIo;
/// A newly-created `VmSpace` is not backed by any physical memory pages.
/// To provide memory pages for a `VmSpace`, one can allocate and map
/// physical memory (`VmFrames`) to the `VmSpace`.
pub struct VmSpace {}
pub struct VmSpace {
memory_set: UPSafeCell<MemorySet>,
}
impl VmSpace {
/// Creates a new VM address space.
pub fn new() -> Self {
todo!()
Self {
memory_set: unsafe { UPSafeCell::new(MemorySet::zero()) },
}
}
pub fn activate(&self) {
self.memory_set.exclusive_access().activate();
}
/// Maps some physical memory pages into the VM space according to the given
/// options, returning the address where the mapping is created.
///
/// The frames passed in `frames` are consumed (moved) by this call.
///
/// For more information, see `VmMapOptions`.
pub fn map(&self, frames: VmFrameVec, options: &VmMapOptions) -> Result<Vaddr> {
todo!()
let mut flags = PTFlags::PRESENT;
if options.perm.contains(VmPerm::W) {
flags.insert(PTFlags::WRITABLE);
}
if options.perm.contains(VmPerm::U) {
flags.insert(PTFlags::USER);
}
if options.addr.is_none() {
return Err(Error::InvalidArgs);
}
self.memory_set
.exclusive_access()
.pt
.map_area(&mut MapArea::new(
VirtAddr(options.addr.unwrap()),
frames.len()*PAGE_SIZE,
flags,
frames,
));
Ok(options.addr.unwrap())
}
/// Unmaps the physical memory pages within the VM address range.
@ -38,7 +72,18 @@ impl VmSpace {
/// The range is allowed to contain gaps, where no physical memory pages
/// are mapped.
pub fn unmap(&self, range: &Range<Vaddr>) -> Result<()> {
todo!()
assert!(is_aligned(range.start) && is_aligned(range.end));
let mut start_va = VirtAddr(range.start);
let page_size = (range.end - range.start) / PAGE_SIZE;
let mut inner = self.memory_set.exclusive_access();
for i in 0..page_size {
let res = inner.unmap(start_va);
if res.is_err() {
return res;
}
start_va += PAGE_SIZE;
}
Ok(())
}
/// Update the VM protection permissions within the VM address range.
@ -58,22 +103,30 @@ impl Default for VmSpace {
impl VmIo for VmSpace {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
todo!()
self.memory_set.exclusive_access().read_bytes(offset, buf)
}
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
todo!()
fn write_bytes(&mut self, offset: usize, buf: &[u8]) -> Result<()> {
self.memory_set.exclusive_access().write_bytes(offset, buf)
}
}
/// Options for mapping physical memory pages into a VM address space.
/// See `VmSpace::map`.
pub struct VmMapOptions {}
pub struct VmMapOptions {
/// start virtual address
addr: Option<Vaddr>,
/// permission
perm: VmPerm,
}
impl VmMapOptions {
/// Creates the default options.
pub fn new() -> Self {
todo!()
Self {
addr: None,
perm: VmPerm::empty(),
}
}
/// Sets the alignment of the address of the mapping.
@ -91,14 +144,19 @@ impl VmMapOptions {
///
/// The default value of this option is read-only.
pub fn perm(&mut self, perm: VmPerm) -> &mut Self {
todo!()
self.perm = perm;
self
}
/// Sets the address of the new mapping.
///
/// The default value of this option is `None`.
pub fn addr(&mut self, addr: Option<Vaddr>) -> &mut Self {
todo!()
if addr == None {
return self;
}
self.addr = Some(addr.unwrap());
self
}
/// Sets whether the mapping can overwrite any existing mappings.
@ -126,6 +184,8 @@ bitflags! {
const W = 0b00000010;
/// Executable.
const X = 0b00000100;
/// User
const U = 0b00001000;
/// Readable + writable.
const RW = Self::R.bits | Self::W.bits;
/// Readable + executable.
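A usage sketch of the mapping path implemented above: allocate frames, describe the mapping with VmMapOptions, then install it at a fixed address. This mirrors what VmPageRange::map_zeroed in kxos-std does further down; the address and permissions here are only illustrative:

fn map_example(vm_space: &VmSpace) -> Result<Vaddr> {
    // One page of backing memory.
    let frames = VmFrameVec::allocate(&VmAllocOptions::new(1))?;

    // VmSpace::map currently requires an explicit, page-aligned address.
    let mut options = VmMapOptions::new();
    options.addr(Some(0x1000_0000)).perm(VmPerm::RW | VmPerm::U);

    // Install the mapping; the frames are consumed by the call.
    let va = vm_space.map(frames, &options)?;
    Ok(va)
}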


@ -15,3 +15,4 @@ xmas-elf = "0.8.0"
# data-structures
intrusive-collections = "0.9.4"
spin = "0.9.4"


@ -130,7 +130,7 @@ impl<'a> ElfLoadInfo<'a> {
// allocate frames
let page_number = vm_page_range.len();
let options = VmAllocOptions::new(page_number);
let frames = VmFrameVec::allocate(&options)?;
let mut frames = VmFrameVec::allocate(&options)?;
for segment in self.segments.iter().filter(|segment| segment.is_loadable()) {
let start_address = segment.start_address();


@ -2,8 +2,9 @@
use core::ops::Range;
use alloc::vec;
use kxos_frame::vm::{
Vaddr, VmAllocOptions, VmFrameVec, VmIo, VmMapOptions, VmPerm, VmSpace, PAGE_SIZE,
use kxos_frame::{
config::PAGE_SIZE,
vm::{Vaddr, VmAllocOptions, VmFrameVec, VmIo, VmMapOptions, VmPerm, VmSpace},
};
/// A set of **CONTIGUOUS** virtual pages in VmSpace
@ -54,7 +55,7 @@ impl<'a> VmPageRange<'a> {
/// map self to a set of zeroed frames
pub fn map_zeroed(&mut self, vm_space: &'a VmSpace, vm_perm: VmPerm) {
let options = VmAllocOptions::new(self.len());
let frames = VmFrameVec::allocate(&options).expect("allocate frame error");
let mut frames = VmFrameVec::allocate(&options).expect("allocate frame error");
let buffer = vec![0u8; self.nbytes()];
frames.write_bytes(0, &buffer).expect("write zero failed");
self.map_to(vm_space, frames, vm_perm)


@ -1,17 +1,17 @@
use alloc::{boxed::Box, collections::VecDeque, sync::Arc};
use kxos_frame::sync::SpinLock;
use kxos_frame::task::{set_scheduler, Scheduler, Task};
use spin::mutex::SpinMutex;
pub const TASK_INIT_CAPABILITY: usize = 16;
pub struct FifoScheduler {
tasks: SpinLock<VecDeque<Arc<Task>>>,
tasks: SpinMutex<VecDeque<Arc<Task>>>,
}
impl FifoScheduler {
pub fn new() -> Self {
Self {
tasks: SpinLock::new(VecDeque::with_capacity(TASK_INIT_CAPABILITY)),
tasks: SpinMutex::new(VecDeque::with_capacity(TASK_INIT_CAPABILITY)),
}
}
}


@ -3,7 +3,7 @@ use kxos_frame::{
cpu::CpuContext,
task::Task,
user::{UserEvent, UserSpace},
vm::VmSpace,
vm::VmSpace, println,
};
use crate::{memory::load_elf_to_vm_space, syscall::syscall_handler};
@ -34,6 +34,8 @@ pub fn spawn_user_task_from_elf(elf_file_content: &[u8]) -> Arc<Task> {
}
}
// QEMU crashes when entering the task spawn function.
println!("[kxos std]:before entering task spawn");
// FIXME: set the correct type when task has no data
Task::spawn(user_task_entry, None::<u8>, Some(user_space)).expect("spawn user task failed.")
}


@ -2,7 +2,7 @@
#![no_main]
#![feature(custom_test_frameworks)]
#![forbid(unsafe_code)]
// #![feature(default_alloc_error_handler)]
extern crate kxos_frame;
use bootloader::{entry_point, BootInfo};


@ -9,6 +9,11 @@
"executables": true,
"linker-flavor": "ld.lld",
"linker": "rust-lld",
"pre-link-args": {
"ld.lld": [
"-Tkxos-frame/src/linker.ld"
]
},
"panic-strategy": "abort",
"disable-redzone": true,
"features": "-mmx,-sse,+soft-float"