load elf file content; init first process

This commit is contained in:
jiang jianfeng 2022-08-17 14:48:01 +08:00
parent 2142d69a60
commit 40f5b81252
20 changed files with 783 additions and 13 deletions

45
src/Cargo.lock generated
View File

@ -2,6 +2,12 @@
# It is not intended for manual editing.
version = 3
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bit_field"
version = "0.10.1"
@ -44,6 +50,15 @@ version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e63201c624b8c8883921b1a1accc8916c4fa9dbfb15d122b26e4dde945b86bbf"
[[package]]
name = "intrusive-collections"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfe531a7789d7120f3e17d4f3f2cd95f54418ba7354f60b7b622b6644a07888a"
dependencies = [
"memoffset",
]
[[package]]
name = "json"
version = "0.12.4"
@ -56,6 +71,7 @@ version = "0.1.0"
dependencies = [
"bootloader",
"kxos-frame",
"kxos-std",
]
[[package]]
@ -83,6 +99,11 @@ dependencies = [
[[package]]
name = "kxos-std"
version = "0.1.0"
dependencies = [
"intrusive-collections",
"kxos-frame",
"xmas-elf",
]
[[package]]
name = "lazy_static"
@ -102,6 +123,15 @@ dependencies = [
"json",
]
[[package]]
name = "memoffset"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa"
dependencies = [
"autocfg",
]
[[package]]
name = "rustversion"
version = "1.0.9"
@ -137,3 +167,18 @@ dependencies = [
"rustversion",
"volatile",
]
[[package]]
name = "xmas-elf"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d29b4d8e7beaceb4e77447ba941a7600d23d0319ab52da0461abea214832d5a"
dependencies = [
"zero",
]
[[package]]
name = "zero"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f1bc8a6b2005884962297587045002d8cfb8dcec9db332f4ca216ddc5de82c5"

View File

@ -6,6 +6,7 @@ edition = "2021"
[dependencies]
bootloader = {version="0.10.12"}
kxos-frame = {path = "kxos-frame"}
kxos-std = {path = "kxos-std"}
[workspace]

View File

@ -20,7 +20,9 @@ pub mod task;
pub mod timer;
pub mod user;
mod util;
pub mod log;
pub mod vm;
pub mod sync;
pub use self::error::Error;
use alloc::sync::Arc;

62
src/kxos-frame/src/log.rs Normal file
View File

@ -0,0 +1,62 @@
use core::fmt::Arguments;
/// Print log message
/// This function should *NOT* be directly called.
/// Instead, print logs with macros.
#[doc(hidden)]
pub fn log_print(args: Arguments) {
    // Backend for all logging macros; not implemented yet.
    // TODO: forward `args` to the console/serial device once one exists.
    todo!()
}
/// This macro should not be directly called.
/// It forwards its format arguments to [`log::log_print`].
#[macro_export]
macro_rules! log_print {
    ($($arg:tt)*) => {
        // Use `$crate` so the path resolves both inside this crate and in
        // downstream crates. The previous `kxos_frame::log::log_print`
        // path fails to resolve within kxos-frame itself.
        ($crate::log::log_print(format_args!($($arg)*)))
    };
}

// NOTE: every level macro below expands to multiple statements, so the
// expansion is wrapped in an extra `{ ... }` block. Without the braces
// the macro is invalid in expression position (e.g. a match arm), and in
// an unbraced `if`-body only the first statement would be conditional.

/// Log a message at the "trace" level.
#[macro_export]
macro_rules! trace {
    ($($arg:tt)*) => {{
        $crate::log_print!("[trace]:");
        $crate::log_print!($($arg)*);
        $crate::log_print!("\n");
    }};
}

/// Log a message at the "debug" level.
#[macro_export]
macro_rules! debug {
    ($($arg:tt)*) => {{
        $crate::log_print!("[debug]:");
        $crate::log_print!($($arg)*);
        $crate::log_print!("\n");
    }};
}

/// Log a message at the "info" level.
#[macro_export]
macro_rules! info {
    ($($arg:tt)*) => {{
        $crate::log_print!("[info]:");
        $crate::log_print!($($arg)*);
        $crate::log_print!("\n");
    }};
}

/// Log a message at the "warn" level.
#[macro_export]
macro_rules! warn {
    ($($arg:tt)*) => {{
        $crate::log_print!("[warn]:");
        $crate::log_print!($($arg)*);
        $crate::log_print!("\n");
    }};
}

/// Log a message at the "error" level.
#[macro_export]
macro_rules! error {
    ($($arg:tt)*) => {{
        $crate::log_print!("[error]:");
        $crate::log_print!($($arg)*);
        $crate::log_print!("\n");
    }};
}

View File

@ -1,3 +1,5 @@
use core::ops::{Deref, DerefMut};
/// A spin lock.
pub struct SpinLock<T: ?Sized> {
val: T,
@ -13,7 +15,7 @@ impl<T> SpinLock<T> {
///
/// This method runs in a busy loop until the lock can be acquired.
/// After acquiring the spin lock, all interrupts are disabled.
pub fn lock(&self) -> SpinLockGuard<'a> {
pub fn lock<'a>(&self) -> SpinLockGuard<'a, T> {
todo!()
}
}
@ -34,6 +36,12 @@ impl<'a, T> Deref for SpinLockGuard<'a, T> {
}
}
impl <'a, T> DerefMut for SpinLockGuard<'a, T> {
    /// Mutably dereference to the value protected by the lock.
    /// Sound because holding the guard proves exclusive access.
    fn deref_mut(&mut self) -> &mut Self::Target {
        todo!()
    }
}
// The guard must not move to another thread/CPU: the lock-acquire path
// documents that interrupts are disabled while it is held, so releasing
// on a different CPU would be unsound. Hence explicitly `!Send`.
impl<'a, T: ?Sized> !Send for SpinLockGuard<'a, T> {}
// Sharing `&SpinLockGuard` only hands out `&T`, so the guard may be
// `Sync` whenever `T` itself is `Sync`.
unsafe impl<T: ?Sized + Sync> Sync for SpinLockGuard<'_, T> {}

View File

@ -1,3 +1,2 @@
mod type_map;
pub use self::type_map::TypeMap;

View File

@ -2,6 +2,8 @@ use core::iter::Iterator;
use crate::prelude::*;
use super::VmIo;
/// A collection of page frames (physical memory pages).
///
/// For the most parts, `VmFrameVec` is like `Vec<VmFrame>`. But the
@ -75,6 +77,16 @@ impl VmFrameVec {
}
}
impl VmIo for VmFrameVec {
    /// Read bytes starting at `offset` within the frame collection into
    /// `buf`. Not implemented yet.
    fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
        todo!()
    }

    /// Write `buf` starting at `offset` within the frame collection.
    /// Not implemented yet.
    fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
        todo!()
    }
}
/// An iterator for frames.
pub struct VmFrameVecIter<'a> {
frames: &'a VmFrameVec,

View File

@ -4,6 +4,8 @@ use core::ops::Range;
use crate::prelude::*;
use crate::vm::VmFrameVec;
use super::VmIo;
/// Virtual memory space.
///
/// A virtual memory space (`VmSpace`) can be created and assigned to a user space so that
@ -54,6 +56,16 @@ impl Default for VmSpace {
}
}
impl VmIo for VmSpace {
    /// Read bytes at virtual address `offset` in this space into `buf`.
    /// Not implemented yet.
    fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
        todo!()
    }

    /// Write `buf` at virtual address `offset` in this space.
    /// Not implemented yet.
    fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
        todo!()
    }
}
/// Options for mapping physical memory pages into a VM address space.
/// See `VmSpace::map`.
pub struct VmMapOptions {}

View File

@ -6,3 +6,12 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
kxos-frame = {path = "../kxos-frame"}
# parse elf file
xmas-elf = "0.8.0"
# goblin = {version= "0.5.3", default-features = false, features = ["elf64"]}
# data-structures
intrusive-collections = "0.9.4"

View File

@ -1,14 +1,28 @@
pub fn add(left: usize, right: usize) -> usize {
left + right
//! The std library of kxos
#![no_std]
#![forbid(unsafe_code)]
#![allow(dead_code)]
#![allow(unused_variables)]
#![feature(const_btree_new)]
use process::Process;
extern crate alloc;
mod memory;
mod process;
mod syscall;
mod util;
/// Initialize the std layer.
/// Installs the global FIFO scheduler; must run before any process is
/// spawned.
pub fn init() {
    process::fifo_scheduler::init();
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() {
let result = add(2, 2);
assert_eq!(result, 4);
}
/// Load the built-in ELF image and spawn the initial user process.
pub fn run_first_process() {
    let elf_file_content = read_elf_content();
    Process::spawn_from_elf(elf_file_content);
}
/// Return the raw bytes of the first user program's ELF image.
/// NOTE(review): the free lifetime parameter in the return type can only
/// be satisfied by `'static` data (e.g. an image embedded in the kernel
/// binary) — confirm that is the intent when this stub is implemented.
fn read_elf_content<'a>() -> &'a [u8]{
    todo!()
}

View File

@ -0,0 +1,230 @@
use core::ops::Range;
use alloc::vec::Vec;
use kxos_frame::{
vm::{Vaddr, VmIo, VmPerm, VmSpace, VmFrameVec, VmAllocOptions},
Error,
};
use xmas_elf::{
header,
program::{self, ProgramHeader, SegmentData},
ElfFile,
};
use super::{user_stack::UserStack, vm_page::VmPageRange};
/// Everything needed to install an ELF image in a fresh address space:
/// the entry point, the loadable segments, and the user stack layout.
pub struct ElfLoadInfo<'a> {
    // Virtual address where execution starts (from the ELF header).
    entry_point: Vaddr,
    // Parsed program segments; only loadable ones are kept.
    segments: Vec<ElfSegment<'a>>,
    // Stack placement for the user process.
    user_stack: UserStack,
}

/// One ELF program segment: its virtual address range and the file bytes
/// that back it.
pub struct ElfSegment<'a> {
    // Virtual address range [start, end) the segment occupies.
    range: Range<Vaddr>,
    // Segment bytes borrowed from the ELF file content.
    data: &'a [u8],
    // Program header type (e.g. Load).
    type_: program::Type,
}
impl<'a> ElfSegment<'a> {
    /// Build an `ElfSegment` from one program header of `elf_file`.
    ///
    /// Fails if the segment type cannot be parsed or the segment data
    /// cannot be read from the file.
    fn parse_elf_segment(
        segment: ProgramHeader<'a>,
        elf_file: &ElfFile<'a>,
    ) -> Result<Self, ElfError> {
        let start = segment.virtual_addr() as Vaddr;
        let end = start + segment.mem_size() as Vaddr;
        // xmas-elf reports parse errors as &'static str messages.
        let type_ = segment.get_type().map_err(ElfError::from)?;
        // Was `ElfError::from("")`: an empty message made this failure
        // impossible to diagnose; attach a meaningful one instead.
        let data = read_segment_data(segment, elf_file)
            .map_err(|_| ElfError::from("failed to read ELF segment data"))?;
        Ok(Self {
            range: start..end,
            type_,
            data,
        })
    }

    /// Whether this segment should be mapped into memory (PT_LOAD).
    pub fn is_loadable(&self) -> bool {
        self.type_ == program::Type::Load
    }

    /// First virtual address covered by the segment.
    pub fn start_address(&self) -> Vaddr {
        self.range.start
    }

    /// One past the last virtual address covered by the segment.
    pub fn end_address(&self) -> Vaddr {
        self.range.end
    }
}
impl<'a> ElfLoadInfo<'a> {
    /// Create an empty load info with room for `capacity` segments.
    fn with_capacity(entry_point: Vaddr, capacity: usize, user_stack: UserStack) -> Self {
        Self {
            entry_point,
            segments: Vec::with_capacity(capacity),
            user_stack,
        }
    }

    /// Record one parsed segment.
    fn add_segment(&mut self, elf_segment: ElfSegment<'a>) {
        self.segments.push(elf_segment);
    }

    /// Parse raw ELF file content: validate the header, then collect all
    /// loadable segments.
    pub fn parse_elf_data(elf_file_content: &'a [u8]) -> Result<Self, ElfError> {
        let elf_file = match ElfFile::new(elf_file_content) {
            Err(error_msg) => return Err(ElfError::from(error_msg)),
            Ok(elf_file) => elf_file,
        };
        check_elf_header(&elf_file)?;
        // init elf load info
        let entry_point = elf_file.header.pt2.entry_point() as Vaddr;
        // FIXME: only contains load segment?
        let segments_count = elf_file.program_iter().count();
        let user_stack = UserStack::new_default_config();
        let mut elf_load_info = ElfLoadInfo::with_capacity(entry_point, segments_count, user_stack);
        // parse each segment, keeping only loadable ones
        for segment in elf_file.program_iter() {
            let elf_segment = ElfSegment::parse_elf_segment(segment, &elf_file)?;
            if elf_segment.is_loadable() {
                elf_load_info.add_segment(elf_segment)
            }
        }
        Ok(elf_load_info)
    }

    /// Compute the page range spanning all loadable segments.
    ///
    /// Returns `ElfError::NoSegment` when there is no loadable segment.
    /// (Previously this `unwrap()`ed the min/max and would panic instead
    /// of reporting the already-existing `NoSegment` error variant.)
    fn vm_page_range(&self) -> Result<VmPageRange, ElfError> {
        let elf_start_address = self
            .segments
            .iter()
            .filter(|segment| segment.is_loadable())
            .map(|segment| segment.start_address())
            .min()
            .ok_or(ElfError::NoSegment)?;
        let elf_end_address = self
            .segments
            .iter()
            .filter(|segment| segment.is_loadable())
            .map(|segment| segment.end_address())
            .max()
            .ok_or(ElfError::NoSegment)?;
        Ok(VmPageRange::new_range(elf_start_address..elf_end_address))
    }

    /// Map `frames` (holding the copied ELF image) into `vm_space` at the
    /// segments' virtual addresses.
    pub fn map_self(&self, vm_space: &VmSpace, frames: VmFrameVec) -> Result<(), ElfError> {
        let mut vm_page_range = self.vm_page_range()?;
        let vm_perm = ElfLoadInfo::perm();
        vm_page_range.map_to(vm_space, frames, vm_perm);
        Ok(())
    }

    /// Allocate physical frames covering all loadable segments and copy
    /// each segment's file bytes into them.
    pub fn copy_elf(&self) -> Result<VmFrameVec, ElfError> {
        let vm_page_range = self.vm_page_range()?;
        // calculate offset of each segment relative to the first page
        let offset = vm_page_range.start_address();
        // allocate frames
        let page_number = vm_page_range.len();
        let options = VmAllocOptions::new(page_number);
        let frames = VmFrameVec::allocate(&options)?;
        for segment in self.segments.iter().filter(|segment| segment.is_loadable()) {
            let start_address = segment.start_address();
            frames.write_bytes(start_address - offset, segment.data)?;
        }
        Ok(frames)
    }

    /// Map the user stack into `vm_space` and zero its pages.
    pub fn map_and_clear_user_stack(&self, vm_space: &VmSpace) {
        self.user_stack.map_and_zeroed(vm_space);
    }

    /// return the perm of elf pages
    /// FIXME: Set the correct permission bit of user pages.
    fn perm() -> VmPerm {
        VmPerm::RX
    }

    /// ELF entry point, as a value suitable for rip.
    pub fn entry_point(&self) -> u64 {
        self.entry_point as u64
    }

    /// Initial user stack pointer, as a value suitable for rsp.
    pub fn user_stack_bottom(&self) -> u64 {
        self.user_stack.stack_bottom as u64
    }
}
/// Validate that the ELF header describes a binary this kernel can run:
/// 64-bit, little-endian, System V ABI, x86_64, and an executable file
/// (not a shared object or relocatable file).
///
/// Returns `ElfError::UnsupportedElfType` on any mismatch. The previous
/// version also `debug_assert_eq!`-ed each property, which made a
/// malformed ELF panic in debug builds before the graceful error could
/// be returned; the asserts are removed so callers always get the error.
fn check_elf_header(elf_file: &ElfFile) -> Result<(), ElfError> {
    let elf_header = elf_file.header;
    // 64bit
    if elf_header.pt1.class() != header::Class::SixtyFour {
        return Err(ElfError::UnsupportedElfType);
    }
    // little endian
    if elf_header.pt1.data() != header::Data::LittleEndian {
        return Err(ElfError::UnsupportedElfType);
    }
    // system V ABI
    if elf_header.pt1.os_abi() != header::OsAbi::SystemV {
        return Err(ElfError::UnsupportedElfType);
    }
    // x86_64 architecture
    if elf_header.pt2.machine().as_machine() != header::Machine::X86_64 {
        return Err(ElfError::UnsupportedElfType);
    }
    // Executable file
    if elf_header.pt2.type_().as_type() != header::Type::Executable {
        return Err(ElfError::UnsupportedElfType);
    }
    Ok(())
}
/// Errors that can occur while parsing or loading an ELF file.
#[derive(Debug)]
pub enum ElfError {
    // An error bubbled up from the kxos-frame layer (e.g. frame
    // allocation or VM I/O failure).
    FrameError(Error),
    // The ELF file contains no loadable segment.
    NoSegment,
    // The file is not a 64-bit little-endian x86_64 System V executable.
    UnsupportedElfType,
    // A parse error carrying a human-readable message.
    WithInfo(&'static str),
}

impl From<&'static str> for ElfError {
    // xmas-elf reports parse errors as `&'static str` messages.
    fn from(error_info: &'static str) -> Self {
        ElfError::WithInfo(error_info)
    }
}

impl From<Error> for ElfError {
    // Allows `?` on kxos-frame results inside the loader.
    fn from(frame_error: Error) -> Self {
        ElfError::FrameError(frame_error)
    }
}
fn read_segment_data<'a>(
segment: ProgramHeader<'a>,
elf_file: &ElfFile<'a>,
) -> Result<&'a [u8], ()> {
match segment.get_data(&elf_file) {
Err(_) => Err(()),
Ok(data) => {
if let SegmentData::Undefined(data) = data {
Ok(data)
} else {
Err(())
}
}
}
}

View File

@ -0,0 +1,22 @@
pub mod elf;
pub mod user_stack;
pub mod vm_page;
use kxos_frame::vm::VmSpace;
use self::elf::{ElfError, ElfLoadInfo};
/// load elf to a given vm_space. this function will
/// 1. read the vaddr of each segment to get all elf pages.
/// 2. allocate physical frames and copy elf data to these frames
/// 3. map frames to the correct vaddr
/// 4. (allocate frames and) map the user stack
pub fn load_elf_to_vm_space<'a>(
    elf_file_content: &'a [u8],
    vm_space: &VmSpace,
) -> Result<ElfLoadInfo<'a>, ElfError> {
    // Parse and validate the ELF image first.
    let elf_load_info = ElfLoadInfo::parse_elf_data(elf_file_content)?;
    // Copy the segment bytes into freshly allocated physical frames...
    let to_frames = elf_load_info.copy_elf()?;
    // ...then map those frames at the segments' virtual addresses.
    elf_load_info.map_self(vm_space, to_frames)?;
    // Finally map a zero-filled user stack.
    elf_load_info.map_and_clear_user_stack(vm_space);
    Ok(elf_load_info)
}

View File

@ -0,0 +1,43 @@
use alloc::sync::Arc;
use kxos_frame::vm::{Vaddr, VmPerm, VmSpace};
use super::vm_page::VmPageRange;
// Default base address of the user stack region.
pub const USER_STACK_BASE: Vaddr = 0x0000_0000_1000_0000;
// Default stack size: 16 pages of 4 KiB = 64 KiB.
pub const USER_STACK_SIZE: usize = 0x1000 * 16; // 64KB

/// Placement and (future) mapping state of a process's user stack.
pub struct UserStack {
    // Lowest address of the stack region; the region mapped is
    // [stack_bottom, stack_bottom + stack_size).
    // NOTE(review): this value is also used to initialize rsp, but the
    // x86 stack grows downward from rsp — growing from the LOW end of
    // the mapped region would immediately fault. Confirm whether rsp
    // should instead start at stack_bottom + stack_size.
    pub stack_bottom: Vaddr,
    // Size of the stack region in bytes.
    stack_size: usize,
    // The address space the stack is mapped into, once mapped.
    vm_space: Option<Arc<VmSpace>>,
}
impl UserStack {
    /// Describe a user stack at a caller-chosen position and size.
    /// The stack is not mapped until `map_and_zeroed` is called.
    pub const fn new(stack_bottom: Vaddr, stack_size: usize) -> Self {
        Self {
            vm_space: None,
            stack_size,
            stack_bottom,
        }
    }

    /// Describe a user stack at the default base with the default size.
    pub const fn new_default_config() -> Self {
        Self::new(USER_STACK_BASE, USER_STACK_SIZE)
    }

    /// Map the stack's pages into `vm_space`, filled with zeroes.
    pub fn map_and_zeroed(&self, vm_space: &VmSpace) {
        let stack_range = self.stack_bottom..(self.stack_bottom + self.stack_size);
        let mut stack_pages = VmPageRange::new_range(stack_range);
        stack_pages.map_zeroed(vm_space, Self::perm());
    }

    /// Permissions for user-stack pages: readable and writable.
    pub const fn perm() -> VmPerm {
        VmPerm::RW
    }
}

View File

@ -0,0 +1,117 @@
//! A Page in virtual address space
use core::ops::Range;
use alloc::vec;
use kxos_frame::vm::{
Vaddr, VmAllocOptions, VmFrameVec, VmIo, VmMapOptions, VmPerm, VmSpace, PAGE_SIZE,
};
/// A set of **CONTINUOUS** virtual pages in VmSpace
pub struct VmPageRange<'a> {
    // First page of the range (inclusive).
    start_page: VmPage,
    // Last page of the range (inclusive).
    end_page: VmPage,
    // Set once the range has been mapped; consumed by `unmap`.
    vm_space: Option<&'a VmSpace>,
}
impl<'a> VmPageRange<'a> {
    /// create a set of pages containing virtual address range [a, b)
    ///
    /// NOTE: the range must be non-empty (`start < end`); an empty range
    /// would underflow on `end - 1`.
    pub const fn new_range(vaddr_range: Range<Vaddr>) -> Self {
        let start_page = VmPage::containing_address(vaddr_range.start);
        let end_page = VmPage::containing_address(vaddr_range.end - 1);
        Self {
            start_page,
            end_page,
            vm_space: None,
        }
    }

    /// returns the page containing the specific vaddr
    pub const fn containing_address(vaddr: Vaddr) -> Self {
        let page = VmPage::containing_address(vaddr);
        Self {
            start_page: page,
            end_page: page,
            vm_space: None,
        }
    }

    /// Start address of the first page in the range.
    pub const fn start_address(&self) -> Vaddr {
        self.start_page.start_address()
    }

    /// the address right after the end page
    pub const fn end_address(&self) -> Vaddr {
        self.end_page.start_address() + PAGE_SIZE
    }

    /// allocate a set of physical frames and map self to frames
    pub fn map(&mut self, vm_space: &'a VmSpace, vm_perm: VmPerm) {
        let options = VmAllocOptions::new(self.len());
        let frames = VmFrameVec::allocate(&options).expect("allocate frame error");
        self.map_to(vm_space, frames, vm_perm);
    }

    /// map self to a set of zeroed frames
    pub fn map_zeroed(&mut self, vm_space: &'a VmSpace, vm_perm: VmPerm) {
        let options = VmAllocOptions::new(self.len());
        let frames = VmFrameVec::allocate(&options).expect("allocate frame error");
        // Zero the frames before mapping so the user never observes
        // stale memory contents.
        let buffer = vec![0u8; self.nbytes()];
        frames.write_bytes(0, &buffer).expect("write zero failed");
        self.map_to(vm_space, frames, vm_perm)
    }

    /// map self to a set of frames
    ///
    /// Panics if the number of frames differs from the number of pages.
    pub fn map_to(&mut self, vm_space: &'a VmSpace, frames: VmFrameVec, vm_perm: VmPerm) {
        assert_eq!(self.len(), frames.len());
        let mut vm_map_options = VmMapOptions::new();
        vm_map_options.addr(Some(self.start_address()));
        vm_map_options.perm(vm_perm);
        vm_space.map(frames, &vm_map_options).expect("map failed");
        // Remember the space so `unmap` knows where to remove the pages.
        self.vm_space = Some(vm_space)
    }

    /// Unmap the range from the space it was mapped into.
    /// A no-op when the range was never mapped.
    pub fn unmap(&mut self) {
        // `take()` both tests for "mapped" and clears the state, replacing
        // the old is_mapped()-then-unwrap() two-step.
        if let Some(vm_space) = self.vm_space.take() {
            vm_space
                .unmap(&(self.start_address()..self.end_address()))
                .expect("unmap failed");
        }
    }

    /// Whether the range is currently mapped.
    pub fn is_mapped(&self) -> bool {
        // Idiomatic replacement for `if let None = … { false } else { true }`.
        self.vm_space.is_some()
    }

    /// return the number of virtual pages
    pub const fn len(&self) -> usize {
        self.end_page.vpn - self.start_page.vpn + 1
    }

    /// Size of the range in bytes.
    pub const fn nbytes(&self) -> usize {
        self.len() * PAGE_SIZE
    }
}
/// A Virtual Page
#[derive(Debug, Clone, Copy)]
pub struct VmPage {
    /// Virtual Page Number
    vpn: usize,
}

impl VmPage {
    // The page that contains `vaddr` (truncating division by PAGE_SIZE).
    const fn containing_address(vaddr: Vaddr) -> Self {
        Self {
            vpn: vaddr / PAGE_SIZE,
        }
    }

    // First virtual address of this page.
    const fn start_address(&self) -> Vaddr {
        self.vpn * PAGE_SIZE
    }
}

View File

@ -0,0 +1,33 @@
use alloc::{boxed::Box, collections::VecDeque, sync::Arc};
use kxos_frame::task::{set_scheduler, Scheduler, Task};
use kxos_frame::sync::SpinLock;
// Initial capacity (in tasks) preallocated for the ready queue.
pub const TASK_INIT_CAPABILITY: usize = 16;

/// A first-in-first-out task scheduler backed by a spinlock-protected
/// queue.
pub struct FifoScheduler {
    // Ready queue; the front holds the next task to run.
    tasks: SpinLock<VecDeque<Arc<Task>>>,
}
impl FifoScheduler {
    /// Create an empty scheduler whose ready queue is preallocated with
    /// `TASK_INIT_CAPABILITY` slots.
    pub fn new() -> Self {
        let ready_queue = VecDeque::with_capacity(TASK_INIT_CAPABILITY);
        Self {
            tasks: SpinLock::new(ready_queue),
        }
    }
}
impl Scheduler for FifoScheduler {
    /// Append a task to the tail of the ready queue.
    fn enqueue(&self, task: Arc<Task>) {
        // `task` is owned here, so push it directly; the previous
        // `task.clone()` bumped the Arc refcount only to immediately drop
        // the original.
        self.tasks.lock().push_back(task);
    }

    /// Take the task at the head of the ready queue, if any.
    fn dequeue(&self) -> Option<Arc<Task>> {
        self.tasks.lock().pop_front()
    }
}
pub fn init() {
let fifo_scheduler = Box::new(FifoScheduler::new());
let scheduler = Box::<FifoScheduler>::leak(fifo_scheduler);
set_scheduler(scheduler);
}

View File

@ -0,0 +1,37 @@
use core::sync::atomic::{AtomicUsize, Ordering};
use alloc::sync::Arc;
// use kxos_frame::{sync::SpinLock, task::Task, user::UserSpace};
use kxos_frame::task::Task;
use self::task::spawn_user_task_from_elf;
pub mod fifo_scheduler;
pub mod task;
// static PROCESSES: SpinLock<BTreeMap<usize, Arc<Process>>> = SpinLock::new(BTreeMap::new());
// Monotonic counter backing pid allocation; see `new_pid`.
static PID_ALLOCATOR: AtomicUsize = AtomicUsize::new(0);

/// Process stands for a set of tasks that shares the same userspace.
/// Currently, we only support one task inside a process.
pub struct Process {
    // Unique id assigned at spawn time.
    pid: usize,
    // The single task of the process.
    task: Arc<Task>,
    // Exit status; stays 0 until the process exits.
    exit_code: i32,
    // user_space: Option<Arc<UserSpace>>,
    // TODO: childs, parent, files,
}
impl Process {
    /// Spawn a new process that runs the given ELF image, assigning it a
    /// fresh pid and a single user task. The exit code starts at 0.
    pub fn spawn_from_elf(elf_file_content: &[u8]) -> Self {
        Self {
            pid: new_pid(),
            task: spawn_user_task_from_elf(elf_file_content),
            exit_code: 0,
        }
    }
}
/// create a new pid for new process
fn new_pid() -> usize {
    // A monotonically increasing ID counter only needs the fetch_add to
    // be atomic, not ordered with other memory operations, so `Relaxed`
    // suffices. The previous `Release` ordering provided no additional
    // guarantee for this use.
    PID_ALLOCATOR.fetch_add(1, Ordering::Relaxed)
}

View File

@ -0,0 +1,39 @@
use alloc::sync::Arc;
use kxos_frame::{cpu::CpuContext, task::Task, user::{UserSpace, UserEvent}, vm::VmSpace};
use crate::{memory::load_elf_to_vm_space, syscall::syscall_handler};
/// Create and spawn a kernel task that runs the given ELF image in user
/// mode.
///
/// The ELF is loaded into a fresh `VmSpace`; the initial CPU context is
/// set up with rip = the ELF entry point and rsp = the stack address
/// reported by the loader.
pub fn spawn_user_task_from_elf(elf_file_content: &[u8]) -> Arc<Task> {
    let vm_space = VmSpace::new();
    let elf_load_info = load_elf_to_vm_space(elf_file_content, &vm_space).expect("Load Elf failed");
    let mut cpu_ctx = CpuContext::default();
    // FIXME: correct regs?
    // set entry point
    cpu_ctx.gp_regs.rip = elf_load_info.entry_point();
    // set user stack
    cpu_ctx.gp_regs.rsp = elf_load_info.user_stack_bottom();
    let user_space = Arc::new(UserSpace::new(vm_space, cpu_ctx));

    // Entry point of the kernel-side task: repeatedly enter user mode
    // and handle whatever event (syscall/fault/exception) brings control
    // back to the kernel. This loop never returns.
    fn user_task_entry() {
        let cur = Task::current();
        let user_space = cur.user_space().expect("user task should have user space");
        let mut user_mode = user_space.user_mode();
        loop {
            let user_event = user_mode.execute();
            let context = user_mode.context_mut();
            handle_user_event(user_event, context);
        }
    }

    // FIXME: set the correct type when task has no data
    Task::spawn(user_task_entry, None::<u8>, Some(user_space)).expect("spawn user task failed.")
}
// Dispatch an event raised while executing in user mode. Only syscalls
// are handled so far; faults and exceptions are still TODO.
fn handle_user_event(user_event: UserEvent, context: &mut CpuContext) {
    match user_event {
        UserEvent::Syscall => syscall_handler(context),
        UserEvent::Fault => todo!(),
        UserEvent::Exception => todo!(),
    }
}

View File

@ -0,0 +1,81 @@
use alloc::vec;
use alloc::{sync::Arc, vec::Vec};
use kxos_frame::Error;
use kxos_frame::cpu::CpuContext;
use kxos_frame::{task::Task, user::UserSpace, vm::VmIo};
use kxos_frame::info;
// Syscall numbers recognized by the dispatcher.
// NOTE(review): 64 (write) and 93 (exit) match the aarch64/riscv Linux
// syscall numbering; on x86_64 Linux these are 1 and 60. Confirm which
// ABI the user programs are built against.
const SYS_WRITE: u64 = 64;
const SYS_EXIT: u64 = 93;

/// A decoded syscall request: the syscall number plus up to six raw
/// register arguments.
pub struct SyscallFrame {
    syscall_number: u64,
    args: [u64; 6],
}
impl SyscallFrame {
    /// Capture the syscall number and arguments from a saved user-mode
    /// CPU context, following the x86_64 syscall register convention:
    /// number in rax; arguments in rdi, rsi, rdx, r10, r8, r9.
    fn new_from_context(context: &CpuContext) -> Self {
        let regs = &context.gp_regs;
        Self {
            syscall_number: regs.rax,
            args: [regs.rdi, regs.rsi, regs.rdx, regs.r10, regs.r8, regs.r9],
        }
    }
}
/// Handle a syscall raised from user mode: decode it from the saved CPU
/// context, dispatch it, and write the return value back into rax.
pub fn syscall_handler(context: &mut CpuContext) {
    let syscall_frame = SyscallFrame::new_from_context(context);
    let syscall_return = syscall_dispatch(syscall_frame.syscall_number, syscall_frame.args);
    // FIXME: set return value?
    context.gp_regs.rax = syscall_return as u64;
}
pub fn syscall_dispatch(syscall_number: u64, args: [u64; 6]) -> isize {
match syscall_number {
SYS_WRITE => sys_write(args[0], args[1], args[2]),
SYS_EXIT => sys_exit(args[0] as _),
_ => panic!("Unsupported syscall number: {}", syscall_number),
}
}
/// write(2): copy `user_buf_len` bytes at user address `user_buf_ptr`
/// out of user memory and log them.
/// NOTE(review): returns 0 on success, but POSIX write returns the
/// number of bytes written — confirm 0 is intended.
pub fn sys_write(fd: u64, user_buf_ptr: u64, user_buf_len: u64) -> isize {
    // only support STDOUT now.
    const STDOUT: u64 = 1;
    if fd == STDOUT {
        let task = Task::current();
        let user_space = task.user_space().expect("No user space attached");
        // Copy the buffer out of user memory before interpreting it.
        let user_buffer = copy_bytes_from_user(user_space, user_buf_ptr as usize, user_buf_len as usize)
            .expect("read user buffer failed");
        let content = alloc::str::from_utf8(user_buffer.as_slice()).expect("Invalid content");
        // TODO: print content
        info!("Message from user mode: {}", content);
        0
    } else {
        panic!("Unsupported fd number {}", fd);
    }
}
/// exit(2): terminate the current task with `exit_code`.
/// Not implemented yet.
pub fn sys_exit(exit_code: i32) -> isize {
    // let current = Task::current();
    // current.exit(exit_code);
    todo!()
}
/// Copy `user_buf_len` bytes starting at user virtual address
/// `user_buf_ptr` out of the user's address space into a kernel-owned
/// buffer.
fn copy_bytes_from_user(
    user_space: &Arc<UserSpace>,
    user_buf_ptr: usize,
    user_buf_len: usize,
) -> Result<Vec<u8>, Error> {
    let mut bytes = vec![0u8; user_buf_len];
    user_space
        .vm_space()
        .read_bytes(user_buf_ptr, &mut bytes)?;
    Ok(bytes)
}

View File

@ -0,0 +1 @@

View File

@ -21,6 +21,9 @@ fn kernel_main(boot_info: &'static mut BootInfo) -> ! {
kxos_frame::init(boot_info);
println!("finish init kxos_frame");
kxos_std::init();
kxos_std::run_first_process();
loop {}
}