Combine PhysFrame and VmFrame into one

This commit is contained in:
Yuke Peng 2023-03-17 19:17:03 -07:00 committed by Tate, Hongliang Tian
parent 8dc7bda147
commit 90a390ecda
8 changed files with 117 additions and 182 deletions

View File

@ -1,4 +1,7 @@
use core::iter::Iterator;
use core::{
iter::Iterator,
ops::{BitAnd, BitOr, Not},
};
use crate::{config::PAGE_SIZE, prelude::*, Error};
use pod::Pod;
@ -6,7 +9,7 @@ use pod::Pod;
use super::{Paddr, VmIo};
use alloc::vec;
use super::frame_allocator::PhysFrame;
use super::frame_allocator;
/// A collection of page frames (physical memory pages).
///
@ -35,7 +38,7 @@ impl VmFrameVec {
if options.paddr.is_some() {
panic!("not support contiguous paddr");
}
let frames = VmFrame::alloc_continuous(options.page_size);
let frames = frame_allocator::alloc_continuous(options.page_size);
if frames.is_none() {
return Err(Error::NoMemory);
}
@ -43,9 +46,9 @@ impl VmFrameVec {
}
for i in 0..page_size {
let vm_frame = if let Some(paddr) = options.paddr {
VmFrame::alloc_with_paddr(paddr + i * PAGE_SIZE)
frame_allocator::alloc_with_paddr(paddr + i * PAGE_SIZE)
} else {
VmFrame::alloc()
frame_allocator::alloc()
};
if vm_frame.is_none() {
return Err(Error::NoMemory);
@ -68,7 +71,7 @@ impl VmFrameVec {
/// get the end pa of the collection
pub fn end_pa(&self) -> Option<Paddr> {
if let Some(frame) = self.0.last() {
Some(frame.paddr() + PAGE_SIZE)
Some(frame.start_pa() + PAGE_SIZE)
} else {
None
}
@ -258,6 +261,12 @@ impl VmAllocOptions {
}
}
bitflags::bitflags! {
    /// Flag bits packed into the high bits of `VmFrame::frame_index`.
    ///
    /// The page-frame number occupies the low bits; `VmFrame::frame_index()`
    /// masks these flag bits off before the index is used.
    pub(crate) struct VmFrameFlags : usize{
        /// Set when the frame came from the frame allocator and must be
        /// returned to it once the last `VmFrame` handle is dropped.
        const NEED_DEALLOC = 1<<63;
    }
}
#[derive(Debug)]
/// A handle to a page frame.
///
@ -270,13 +279,13 @@ impl VmAllocOptions {
/// same page frame are dropped, the page frame will be freed.
/// Free page frames are allocated in bulk by `VmFrameVec::allocate`.
pub struct VmFrame {
pub(crate) physical_frame: Arc<PhysFrame>,
pub(crate) frame_index: Arc<Paddr>,
}
impl Clone for VmFrame {
fn clone(&self) -> Self {
Self {
physical_frame: self.physical_frame.clone(),
frame_index: self.frame_index.clone(),
}
}
}
@ -287,69 +296,20 @@ impl VmFrame {
/// # Safety
///
/// The given physical address must be valid for use.
pub(crate) unsafe fn new(physical_frame: PhysFrame) -> Self {
pub(crate) unsafe fn new(paddr: Paddr, flags: VmFrameFlags) -> Self {
assert_eq!(paddr % PAGE_SIZE, 0);
Self {
physical_frame: Arc::new(physical_frame),
frame_index: Arc::new((paddr / PAGE_SIZE).bitor(flags.bits)),
}
}
/// Allocate a new VmFrame
pub(crate) fn alloc() -> Option<Self> {
let phys = PhysFrame::alloc();
if phys.is_none() {
return None;
}
Some(Self {
physical_frame: Arc::new(phys.unwrap()),
})
}
/// Allocate contiguous VmFrame
pub(crate) fn alloc_continuous(frame_count: usize) -> Option<Vec<Self>> {
let phys = PhysFrame::alloc_continuous_range(frame_count);
if phys.is_none() {
return None;
}
let mut res = Vec::new();
for i in phys.unwrap() {
res.push(Self {
physical_frame: Arc::new(i),
})
}
Some(res)
}
/// Allocate a new VmFrame filled with zero
pub(crate) fn alloc_zero() -> Option<Self> {
let phys = PhysFrame::alloc();
if phys.is_none() {
return None;
}
unsafe {
core::ptr::write_bytes(
super::phys_to_virt(phys.as_ref().unwrap().start_pa()) as *mut u8,
0,
PAGE_SIZE,
)
};
Some(Self {
physical_frame: Arc::new(phys.unwrap()),
})
}
pub(crate) fn alloc_with_paddr(paddr: Paddr) -> Option<Self> {
let phys = PhysFrame::alloc_with_paddr(paddr);
if phys.is_none() {
return None;
}
Some(Self {
physical_frame: Arc::new(phys.unwrap()),
})
}
/// Returns the physical address of the page frame.
pub fn paddr(&self) -> Paddr {
self.physical_frame.start_pa()
/// Returns the starting physical address of this page frame.
///
/// The flag bits are stripped by `frame_index()` first, so the result is
/// the plain frame number scaled by `PAGE_SIZE`.
pub fn start_pa(&self) -> Paddr {
    self.frame_index() * PAGE_SIZE
}

/// Returns the (exclusive) end physical address of this page frame,
/// i.e. `start_pa() + PAGE_SIZE`.
pub fn end_pa(&self) -> Paddr {
    (self.frame_index() + 1) * PAGE_SIZE
}
/// fill the frame with zero
@ -363,14 +323,6 @@ impl VmFrame {
}
}
pub fn start_pa(&self) -> Paddr {
self.physical_frame.start_pa()
}
pub fn end_pa(&self) -> Paddr {
self.physical_frame.end_pa()
}
/// Returns whether the page frame is accessible by DMA.
///
/// In a TEE environment, DMAable pages are untrusted pages shared with
@ -379,6 +331,14 @@ impl VmFrame {
todo!()
}
/// Whether this frame must be given back to the frame allocator on drop.
///
/// True when the `NEED_DEALLOC` flag bit is set in the shared index word
/// (i.e. the frame was produced by `frame_allocator::alloc*`).
fn need_dealloc(&self) -> bool {
    (*self.frame_index & VmFrameFlags::NEED_DEALLOC.bits()) != 0
}

/// The page-frame number with every `VmFrameFlags` bit masked off.
fn frame_index(&self) -> usize {
    (*self.frame_index).bitand(VmFrameFlags::all().bits().not())
}
/// Returns the frame's contents as a byte slice through the kernel's
/// physical-to-virtual mapping.
///
/// # Safety
///
/// NOTE(review): this hands out `&mut [u8]` from `&self`, so two calls can
/// produce aliasing mutable slices — the caller must guarantee exclusive
/// access for the slice's lifetime. It also presumes the linear mapping at
/// `phys_to_virt(start_pa())` is valid for `PAGE_SIZE` bytes — confirm
/// against the paging setup.
pub unsafe fn as_slice(&self) -> &mut [u8] {
    core::slice::from_raw_parts_mut(super::phys_to_virt(self.start_pa()) as *mut u8, PAGE_SIZE)
}
@ -407,15 +367,25 @@ impl VmIo for VmFrame {
/// Read a value of a specified type at a specified offset.
fn read_val<T: Pod>(&self, offset: usize) -> Result<T> {
let paddr = self.paddr() + offset;
let paddr = self.start_pa() + offset;
let val = unsafe { &mut *(super::phys_to_virt(paddr) as *mut T) };
Ok(*val)
}
/// Write a value of a specified type at a specified offset.
fn write_val<T: Pod>(&self, offset: usize, new_val: &T) -> Result<()> {
let paddr = self.paddr() + offset;
let paddr = self.start_pa() + offset;
unsafe { (super::phys_to_virt(paddr) as *mut T).write(*new_val) };
Ok(())
}
}
impl Drop for VmFrame {
    fn drop(&mut self) {
        // Return the physical frame to the allocator only when it is
        // allocator-backed (NEED_DEALLOC) and this handle holds the last
        // `Arc` reference to the shared index word.
        //
        // NOTE(review): testing `Arc::strong_count == 1` before the `Arc`
        // field itself is dropped looks racy if two clones are dropped
        // concurrently — both could observe a count of 2, skip dealloc, and
        // leak the frame. Confirm whether `VmFrame` clones can be dropped
        // from multiple threads.
        if self.need_dealloc() && Arc::strong_count(&self.frame_index) == 1 {
            unsafe {
                // `frame_index()` masks off the flag bits, so the allocator
                // receives the plain frame number it originally handed out.
                frame_allocator::dealloc(self.frame_index());
            }
        }
    }
}

View File

@ -1,90 +1,68 @@
use core::ops::{BitAnd, BitOr, Not};
use alloc::vec::Vec;
use buddy_system_allocator::FrameAllocator;
use limine::{LimineMemmapEntry, LimineMemoryMapEntryType};
use log::info;
use spin::{Mutex, Once};
use crate::{config::PAGE_SIZE, vm::Paddr};
use crate::{config::PAGE_SIZE, vm::Paddr, AlignExt};
use super::{frame::VmFrameFlags, VmFrame};
static FRAME_ALLOCATOR: Once<Mutex<FrameAllocator>> = Once::new();
bitflags::bitflags! {
struct PhysFrameFlags : usize{
const NEED_DEALLOC = 1<<63;
}
/// Allocates one page frame from the global frame allocator.
///
/// Returns `None` when the allocator is exhausted. The frame is tagged
/// `NEED_DEALLOC`, so it is returned to the allocator when the last handle
/// to it is dropped.
pub fn alloc() -> Option<VmFrame> {
    let frame_index = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1)?;
    // SAFETY: the allocator hands out an unused frame index, so the
    // corresponding physical page is valid for use.
    Some(unsafe { VmFrame::new(frame_index * PAGE_SIZE, VmFrameFlags::NEED_DEALLOC) })
}
#[derive(Debug, Clone)]
// #[repr(transparent)]
pub struct PhysFrame {
frame_index: usize,
}
impl PhysFrame {
pub const fn start_pa(&self) -> Paddr {
self.frame_index() * PAGE_SIZE
}
pub const fn end_pa(&self) -> Paddr {
(self.frame_index() + 1) * PAGE_SIZE
}
pub fn alloc() -> Option<Self> {
FRAME_ALLOCATOR
.get()
.unwrap()
.lock()
.alloc(1)
.map(|pa| Self {
frame_index: pa.bitor(PhysFrameFlags::NEED_DEALLOC.bits()),
})
}
pub fn alloc_continuous_range(frame_count: usize) -> Option<Vec<Self>> {
FRAME_ALLOCATOR
.get()
.unwrap()
.lock()
.alloc(frame_count)
.map(|start| {
let mut vector = Vec::new();
pub fn alloc_continuous(frame_count: usize) -> Option<Vec<VmFrame>> {
FRAME_ALLOCATOR
.get()
.unwrap()
.lock()
.alloc(frame_count)
.map(|start| {
let mut vector = Vec::new();
unsafe {
for i in 0..frame_count {
vector.push(Self {
frame_index: (start + i).bitor(PhysFrameFlags::NEED_DEALLOC.bits()),
})
vector.push(VmFrame::new(
(start + i) * PAGE_SIZE,
VmFrameFlags::NEED_DEALLOC,
))
}
vector
})
}
pub fn alloc_with_paddr(paddr: Paddr) -> Option<Self> {
// FIXME: need to check whether the physical address is invalid or not
Some(Self {
frame_index: paddr / PAGE_SIZE,
}
vector
})
}
}
const fn need_dealloc(&self) -> bool {
(self.frame_index & PhysFrameFlags::NEED_DEALLOC.bits()) != 0
}
const fn frame_index(&self) -> usize {
self.frame_index.bitand(PhysFrameFlags::all().bits().not())
/// Builds a `VmFrame` handle for the frame containing `paddr`.
///
/// The address is rounded down to a page boundary. No flags are set, so
/// dropping the handle never returns the frame to the allocator.
// FIXME: need to check whether the physical address is invalid or not
pub fn alloc_with_paddr(paddr: Paddr) -> Option<VmFrame> {
    let start = paddr.align_down(PAGE_SIZE);
    // SAFETY: `start` is page-aligned; validity of the physical address
    // itself is not yet verified (see the FIXME above).
    let frame = unsafe { VmFrame::new(start, VmFrameFlags::empty()) };
    Some(frame)
}
impl Drop for PhysFrame {
fn drop(&mut self) {
if self.need_dealloc() {
FRAME_ALLOCATOR
.get()
.unwrap()
.lock()
.dealloc(self.frame_index, 1);
}
}
/// Allocates a page frame and clears its contents to zero.
///
/// Returns `None` when no frame can be allocated.
pub(crate) fn alloc_zero() -> Option<VmFrame> {
    alloc().map(|frame| {
        frame.zero();
        frame
    })
}
/// Returns a single frame to the global frame allocator.
///
/// # Safety
///
/// The caller must ensure `index` is a frame index previously handed out
/// by this allocator and that no live `VmFrame` still refers to it.
pub(crate) unsafe fn dealloc(index: usize) {
    let mut allocator = FRAME_ALLOCATOR.get().unwrap().lock();
    allocator.dealloc(index, 1);
}
pub(crate) fn init(regions: &Vec<&LimineMemmapEntry>) {

View File

@ -1,13 +1,15 @@
use super::page_table::{PTFlags, PageTable};
use crate::{
config::PAGE_SIZE,
vm::is_aligned,
vm::is_page_aligned,
vm::{VmFrame, VmFrameVec},
};
use crate::{prelude::*, Error};
use alloc::collections::{btree_map::Entry, BTreeMap};
use core::fmt;
use super::frame_allocator;
pub struct MapArea {
pub flags: PTFlags,
pub start_va: Vaddr,
@ -29,7 +31,7 @@ impl MapArea {
pub fn clone(&self) -> Self {
let mut mapper = BTreeMap::new();
for (&va, old) in &self.mapper {
let new = VmFrame::alloc().unwrap();
let new = frame_allocator::alloc().unwrap();
unsafe {
new.as_slice().copy_from_slice(old.as_slice());
}
@ -46,7 +48,7 @@ impl MapArea {
/// This function will map the vitural address to the given physical frames
pub fn new(start_va: Vaddr, size: usize, flags: PTFlags, physical_frames: VmFrameVec) -> Self {
assert!(
is_aligned(start_va) && is_aligned(size) && physical_frames.len() == (size / PAGE_SIZE)
is_page_aligned(start_va) && is_page_aligned(size) && physical_frames.len() == (size / PAGE_SIZE)
);
let mut map_area = Self {
@ -69,23 +71,20 @@ impl MapArea {
}
pub fn map_with_physical_address(&mut self, va: Vaddr, pa: VmFrame) -> Paddr {
assert!(is_aligned(va));
assert!(is_page_aligned(va));
match self.mapper.entry(va) {
Entry::Occupied(e) => panic!("already mapped a input physical address"),
Entry::Vacant(e) => e.insert(pa).physical_frame.start_pa(),
Entry::Vacant(e) => e.insert(pa).start_pa(),
}
}
pub fn map(&mut self, va: Vaddr) -> Paddr {
assert!(is_aligned(va));
assert!(is_page_aligned(va));
match self.mapper.entry(va) {
Entry::Occupied(e) => e.get().physical_frame.start_pa(),
Entry::Vacant(e) => e
.insert(VmFrame::alloc_zero().unwrap())
.physical_frame
.start_pa(),
Entry::Occupied(e) => e.get().start_pa(),
Entry::Vacant(e) => e.insert(frame_allocator::alloc_zero().unwrap()).start_pa(),
}
}

View File

@ -39,20 +39,8 @@ pub const fn virt_to_phys(va: usize) -> usize {
va - PHYS_OFFSET
}
pub const fn align_down(p: usize) -> usize {
p & !(PAGE_SIZE - 1)
}
pub const fn align_up(p: usize) -> usize {
(p + PAGE_SIZE - 1) & !(PAGE_SIZE - 1)
}
pub const fn page_offset(p: usize) -> usize {
p & (PAGE_SIZE - 1)
}
pub const fn is_aligned(p: usize) -> bool {
page_offset(p) == 0
/// Whether `p` sits exactly on a page boundary (no in-page offset bits set).
pub const fn is_page_aligned(p: usize) -> bool {
    let offset_mask = PAGE_SIZE - 1;
    (p & offset_mask) == 0
}
/// Only available inside jinux-frame

View File

@ -1,11 +1,11 @@
use super::{
align_down,
frame_allocator,
memory_set::MapArea,
{Paddr, Vaddr},
};
use crate::{
config::{ENTRY_COUNT, PAGE_SIZE, PHYS_OFFSET},
vm::VmFrame,
vm::VmFrame, AlignExt,
};
use alloc::{collections::BTreeMap, vec, vec::Vec};
use core::{fmt, panic};
@ -81,7 +81,7 @@ pub struct PageTable {
impl PageTable {
pub fn new() -> Self {
let root_frame = VmFrame::alloc_zero().unwrap();
let root_frame = frame_allocator::alloc_zero().unwrap();
let p4 = table_of(root_frame.start_pa());
let map_pte = ALL_MAPPED_PTE.lock();
for (index, pte) in map_pte.iter() {
@ -98,7 +98,7 @@ impl PageTable {
if !entry.is_unused() {
panic!("{:#x?} is mapped before mapping", va);
}
*entry = PageTableEntry::new_page(align_down(pa), flags);
*entry = PageTableEntry::new_page(pa.align_down(PAGE_SIZE), flags);
}
pub fn unmap(&mut self, va: Vaddr) {
@ -137,7 +137,7 @@ impl PageTable {
impl PageTable {
fn alloc_table(&mut self) -> Paddr {
let frame = VmFrame::alloc_zero().unwrap();
let frame = frame_allocator::alloc_zero().unwrap();
let pa = frame.start_pa();
self.tables.push(frame);
pa

View File

@ -6,7 +6,7 @@ use spin::Mutex;
use x86_64::structures::paging::PhysFrame;
use super::VmFrameVec;
use super::{is_aligned, Vaddr};
use super::{is_page_aligned, Vaddr};
use super::{MapArea, MemorySet};
use crate::{prelude::*, Error};
@ -80,7 +80,7 @@ impl VmSpace {
/// The range is allowed to contain gaps, where no physical memory pages
/// are mapped.
pub fn unmap(&self, range: &Range<Vaddr>) -> Result<()> {
assert!(is_aligned(range.start) && is_aligned(range.end));
assert!(is_page_aligned(range.start) && is_page_aligned(range.end));
let mut start_va = range.start;
let page_size = (range.end - range.start) / PAGE_SIZE;
let mut inner = self.memory_set.lock();

View File

@ -96,7 +96,7 @@ impl VirtQueue {
.remove(0);
cfg.write_at(
offset_of!(VitrioPciCommonCfg, queue_desc),
frame.paddr() as u64,
frame.start_pa() as u64,
);
debug!("queue_desc vm frame:{:x?}", frame);
frame_vec.push(frame);
@ -108,7 +108,7 @@ impl VirtQueue {
.remove(0);
cfg.write_at(
offset_of!(VitrioPciCommonCfg, queue_driver),
frame.paddr() as u64,
frame.start_pa() as u64,
);
debug!("queue_driver vm frame:{:x?}", frame);
frame_vec.push(frame);
@ -120,7 +120,7 @@ impl VirtQueue {
.remove(0);
cfg.write_at(
offset_of!(VitrioPciCommonCfg, queue_device),
frame.paddr() as u64,
frame.start_pa() as u64,
);
debug!("queue_device vm frame:{:x?}", frame);
frame_vec.push(frame);

View File

@ -24,7 +24,7 @@ impl<T: Pod> InFramePtr<T> {
options.paddr(Some(page_paddr));
VmFrameVec::allocate(&options)?.remove(0)
};
let offset = paddr - frame.paddr();
let offset = paddr - frame.start_pa();
Ok(Self {
frame,
offset,
@ -49,6 +49,6 @@ impl<T: Pod> InFramePtr<T> {
}
pub fn paddr(&self) -> usize {
self.offset + self.frame.paddr()
self.offset + self.frame.start_pa()
}
}