Use `MaybeUninit` to store metadata vtable
parent 1a5bf7b0ad
commit a2863d6db3

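Before this change, `MetaSlot::vtable_ptr` was declared as `UnsafeCell<PageMetaVtablePtr>`, even though a freshly allocated slot has no metadata written into it yet, so the field claimed to hold a valid `DynMetadata` value while actually being uninitialized. Wrapping the field in `MaybeUninit` makes the uninitialized state explicit: the type no longer asserts validity before initialization, and every read must go through one of the `assume_init_*` methods. The hunks below span two files: the first group defines `MetaSlot`, the second covers the `Page<M>` and `DynPage` handle types. As a primer, here is a minimal, self-contained sketch of the `UnsafeCell<MaybeUninit<T>>` pattern the commit adopts; the `Slot` type and its methods are illustrative stand-ins, not the kernel's API.

use core::{cell::UnsafeCell, mem::MaybeUninit};

struct Slot<T> {
    payload: UnsafeCell<MaybeUninit<T>>,
}

impl<T> Slot<T> {
    const fn new() -> Self {
        Slot {
            // No `T` needs to exist yet; `MaybeUninit` makes that explicit.
            payload: UnsafeCell::new(MaybeUninit::uninit()),
        }
    }

    /// Initialize the payload. The caller must guarantee exclusive access.
    unsafe fn init(&self, value: T) {
        // SAFETY (caller): no other reference to the payload exists.
        let payload = unsafe { &mut *self.payload.get() };
        // `MaybeUninit::write` is a safe method once we hold `&mut`.
        payload.write(value);
    }

    /// Move the payload out, leaving the slot uninitialized again.
    unsafe fn take(&self) -> T {
        // SAFETY (caller): exclusive access, and `init` was called before.
        let payload = unsafe { &mut *self.payload.get() };
        unsafe { payload.assume_init_read() }
    }
}

fn main() {
    let slot: Slot<u32> = Slot::new();
    // SAFETY: we are the only user of `slot`, and we initialize before reading.
    unsafe {
        slot.init(42);
        assert_eq!(slot.take(), 42);
    }
}
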
@@ -38,7 +38,7 @@ pub mod mapping {
 use core::{
     any::Any,
     cell::UnsafeCell,
-    mem::size_of,
+    mem::{size_of, MaybeUninit},
     sync::atomic::{AtomicU32, Ordering},
 };

@@ -78,7 +78,7 @@ pub(in crate::mm) struct MetaSlot {
     /// The reference count of the page.
     pub(super) ref_count: AtomicU32,
     /// The virtual table that indicates the type of the metadata.
-    pub(super) vtable_ptr: UnsafeCell<PageMetaVtablePtr>,
+    pub(super) vtable_ptr: UnsafeCell<MaybeUninit<PageMetaVtablePtr>>,
 }

 type PageMetaVtablePtr = core::ptr::DynMetadata<dyn PageMeta>;

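`PageMetaVtablePtr` is `core::ptr::DynMetadata<dyn PageMeta>`: the vtable half of a `*mut dyn PageMeta` fat pointer, which is what lets the slot reassemble a trait object from a thin pointer later. The sketch below shows that split-and-recombine round trip in isolation. It needs the nightly `ptr_metadata` feature (which the code above already depends on), and the `Meta` type with its `kind` method are hypothetical stand-ins.

#![feature(ptr_metadata)]

use core::ptr::{self, DynMetadata};

trait PageMeta {
    fn kind(&self) -> &'static str;
}

struct Meta;

impl PageMeta for Meta {
    fn kind(&self) -> &'static str {
        "meta"
    }
}

fn main() {
    let value = Meta;
    let obj: &dyn PageMeta = &value;

    // Split the fat pointer: a thin data pointer plus the vtable metadata.
    let data: *const () = (obj as *const dyn PageMeta).cast();
    let vtable: DynMetadata<dyn PageMeta> = ptr::metadata(obj);

    // Recombine them into a usable trait-object pointer.
    let fat: *const dyn PageMeta = ptr::from_raw_parts(data, vtable);
    // SAFETY: `fat` was reassembled from a live `Meta` and its matching vtable.
    assert_eq!(unsafe { (*fat).kind() }, "meta");
}
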
@@ -130,18 +130,31 @@ pub use impl_page_meta;
 /// page should have a last handle to the page, and the page is about to be dropped,
 /// as the metadata slot after this operation becomes uninitialized.
 pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {
-    // This would be guaranteed as a safety requirement.
-    debug_assert_eq!((*ptr).ref_count.load(Ordering::Relaxed), 0);
+    // SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an
+    // immutable reference to it is always safe.
+    let slot = unsafe { &*ptr };
+
+    // This should be guaranteed as a safety requirement.
+    debug_assert_eq!(slot.ref_count.load(Ordering::Relaxed), 0);

     let paddr = mapping::meta_to_page::<PagingConsts>(ptr as Vaddr);

-    let meta_ptr: *mut dyn PageMeta = core::ptr::from_raw_parts_mut(ptr, *(*ptr).vtable_ptr.get());
+    // SAFETY: We have exclusive access to the page metadata.
+    let vtable_ptr = unsafe { &mut *slot.vtable_ptr.get() };
+    // SAFETY: The page metadata is initialized and valid.
+    let vtable_ptr = unsafe { vtable_ptr.assume_init_read() };
+
+    let meta_ptr: *mut dyn PageMeta = core::ptr::from_raw_parts_mut(ptr, vtable_ptr);

-    // Let the custom dropper handle the drop.
-    (*meta_ptr).on_drop(paddr);
-
-    // Drop the metadata.
-    core::ptr::drop_in_place(meta_ptr);
+    // SAFETY: `ptr` points to the metadata storage which is valid to be mutably borrowed under
+    // `vtable_ptr` because the metadata is valid, the vtable is correct, and we have the exclusive
+    // access to the page metadata.
+    unsafe {
+        // Invoke the custom `on_drop` handler.
+        (*meta_ptr).on_drop(paddr);
+        // Drop the page metadata.
+        core::ptr::drop_in_place(meta_ptr);
+    }

     // Deallocate the page.
     // It would return the page to the allocator for further use. This would be done

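In the new drop path, `assume_init_read` moves the `DynMetadata` value out of the slot by bitwise copy, leaving the slot logically uninitialized afterwards, which is exactly what the doc comment means by "the metadata slot after this operation becomes uninitialized". A small sketch of that move-out semantics, using `String` as a stand-in payload with an observable destructor:

use core::mem::MaybeUninit;

fn main() {
    let mut slot: MaybeUninit<String> = MaybeUninit::uninit();
    slot.write(String::from("page metadata"));

    // SAFETY: `slot` was initialized by the `write` above, and it is read out
    // exactly once before the slot is abandoned.
    let value = unsafe { slot.assume_init_read() };
    drop(value); // The destructor runs here, on the moved-out value.

    // `slot` is now logically uninitialized again; reading it once more would
    // duplicate the `String` and double-free its buffer.
}
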
@@ -153,6 +166,7 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {
         .lock()
         .dealloc(paddr / PAGE_SIZE, 1);
 }
+
 /// The metadata of pages that holds metadata of pages.
 #[derive(Debug, Default)]
 pub struct MetaPageMeta {}

@@ -22,7 +22,7 @@ use core::{
     any::Any,
     marker::PhantomData,
     mem::ManuallyDrop,
-    sync::atomic::{AtomicU32, AtomicUsize, Ordering},
+    sync::atomic::{AtomicUsize, Ordering},
 };

 pub use cont_pages::ContPages;

@@ -65,24 +65,25 @@ impl<M: PageMeta> Page<M> {
         let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
         let ptr = vaddr as *const MetaSlot;

-        // SAFETY: The aligned pointer points to a initialized `MetaSlot`.
-        let ref_count = unsafe { &(*ptr).ref_count };
+        // SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an
+        // immutable reference to it is always safe.
+        let slot = unsafe { &*ptr };

-        ref_count
+        slot.ref_count
             .compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed)
             .expect("Page already in use when trying to get a new handle");

-        // SAFETY: The aligned pointer points to a initialized `MetaSlot`.
-        let vtable_ptr = unsafe { (*ptr).vtable_ptr.get() };
-
-        // SAFETY: The pointer is valid and we have the exclusive access.
-        unsafe { vtable_ptr.write(core::ptr::metadata(&metadata as &dyn PageMeta)) };
+        // SAFETY: We have exclusive access to the page metadata.
+        let vtable_ptr = unsafe { &mut *slot.vtable_ptr.get() };
+        vtable_ptr.write(core::ptr::metadata(&metadata as &dyn PageMeta));

-        // Initialize the metadata
-        // SAFETY: The pointer points to the first byte of the `MetaSlot`
-        // structure, and layout ensured enough space for `M`. The original
-        // value does not represent any object that's needed to be dropped.
-        unsafe { (ptr as *mut M).write(metadata) };
+        // SAFETY:
+        // 1. `ptr` points to the first field of `MetaSlot` (guaranteed by `repr(C)`), which is the
+        //    metadata storage.
+        // 2. The size and the alignment of the metadata storage is large enough to hold `M`
+        //    (guaranteed by the safety requirement of the `PageMeta` trait).
+        // 3. We have exclusive access to the metadata storage (guaranteed by the reference count).
+        unsafe { ptr.cast::<M>().cast_mut().write(metadata) };

         Self {
             ptr,

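A side benefit visible in this hunk: once the vtable slot is reached through `&mut MaybeUninit<...>`, the store becomes `MaybeUninit::write`, which is a safe method, so one `unsafe` block drops out of the initialization path. A minimal sketch of that contrast, with `u64` as a stand-in payload:

use core::mem::MaybeUninit;

fn main() {
    let mut slot: MaybeUninit<u64> = MaybeUninit::uninit();

    // New style: writing through `&mut MaybeUninit<u64>` is a safe method call;
    // the borrow checker proves exclusivity.
    let slot_ref: &mut MaybeUninit<u64> = &mut slot;
    slot_ref.write(7);

    // Old style: the equivalent store through a raw pointer needs `unsafe`, and
    // the compiler can no longer help prove exclusivity.
    let raw: *mut u64 = slot.as_mut_ptr();
    // SAFETY: `raw` points to live, exclusively owned storage for a `u64`.
    unsafe { raw.write(7) };

    // SAFETY: initialized by both writes above.
    assert_eq!(unsafe { slot.assume_init() }, 7);
}
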
@@ -149,7 +150,10 @@ impl<M: PageMeta> Page<M> {

     /// Get the metadata of this page.
     pub fn meta(&self) -> &M {
-        unsafe { &*(self.ptr as *const M) }
+        // SAFETY: `self.ptr` points to the metadata storage which is valid to be immutably
+        // borrowed as `M` because the type is correct, it lives under the given lifetime, and no
+        // one will mutably borrow the page metadata after initialization.
+        unsafe { &*self.ptr.cast() }
     }

     /// Get the reference count of the page.

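The new `&*self.ptr.cast()` leans on the two facts the SAFETY comments spell out: `repr(C)` places the metadata storage at offset 0 of `MetaSlot`, and `cast()` infers its target type from context. A sketch of the same pattern with hypothetical types:

#[repr(C)]
struct Slot {
    meta: u32,
    refs: u8,
}

fn meta_of(ptr: *const Slot) -> u32 {
    // `repr(C)` places `meta` at offset 0, so a pointer to the whole slot is
    // also a valid pointer to its first field; `cast()` infers `*const u32`
    // from the annotation on the left-hand side.
    let meta: &u32 = unsafe { &*ptr.cast() };
    *meta
}

fn main() {
    let slot = Slot { meta: 9, refs: 1 };
    assert_eq!(meta_of(&slot), 9);
}
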
@@ -164,17 +168,19 @@ impl<M: PageMeta> Page<M> {
     /// reference count can be changed by other threads at any time including
     /// potentially between calling this method and acting on the result.
     pub fn reference_count(&self) -> u32 {
-        self.ref_count().load(Ordering::Relaxed)
+        self.slot().ref_count.load(Ordering::Relaxed)
     }

-    fn ref_count(&self) -> &AtomicU32 {
-        unsafe { &(*self.ptr).ref_count }
+    fn slot(&self) -> &MetaSlot {
+        // SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an
+        // immutable reference to it is always safe.
+        unsafe { &*self.ptr }
     }
 }

 impl<M: PageMeta> Clone for Page<M> {
     fn clone(&self) -> Self {
-        self.ref_count().fetch_add(1, Ordering::Relaxed);
+        self.slot().ref_count.fetch_add(1, Ordering::Relaxed);
         Self {
             ptr: self.ptr,
             _marker: PhantomData,

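The `slot()` accessor is the recurring refactor in this commit: a single small function owns the raw-pointer dereference together with its SAFETY argument, and every caller then uses plain safe field access instead of repeating `unsafe { &(*self.ptr)... }` at each site. A sketch of that shape with simplified stand-in types:

use core::sync::atomic::{AtomicU32, Ordering};

struct MetaSlot {
    ref_count: AtomicU32,
}

struct Handle {
    ptr: *const MetaSlot,
}

impl Handle {
    fn slot(&self) -> &MetaSlot {
        // SAFETY: in this sketch, `ptr` always comes from a live `MetaSlot`
        // that is never mutably borrowed.
        unsafe { &*self.ptr }
    }

    fn reference_count(&self) -> u32 {
        // Callers stay entirely in safe code.
        self.slot().ref_count.load(Ordering::Relaxed)
    }
}

fn main() {
    let slot = MetaSlot {
        ref_count: AtomicU32::new(1),
    };
    let handle = Handle { ptr: &slot };
    assert_eq!(handle.reference_count(), 1);
}
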
@@ -184,7 +190,7 @@ impl<M: PageMeta> Clone for Page<M> {

 impl<M: PageMeta> Drop for Page<M> {
     fn drop(&mut self) {
-        let last_ref_cnt = self.ref_count().fetch_sub(1, Ordering::Release);
+        let last_ref_cnt = self.slot().ref_count.fetch_sub(1, Ordering::Release);
         debug_assert!(last_ref_cnt > 0);
         if last_ref_cnt == 1 {
             // A fence is needed here with the same reasons stated in the implementation of

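The `Release` decrement paired with an `Acquire` fence on the last handle (the fence the context line above refers to) is the standard reference-counting teardown protocol, the same one `alloc::sync::Arc` uses. A sketch of the protocol in isolation:

use core::sync::atomic::{fence, AtomicU32, Ordering};

fn drop_handle(ref_count: &AtomicU32) -> bool {
    if ref_count.fetch_sub(1, Ordering::Release) == 1 {
        // Synchronizes with every earlier `Release` decrement, so writes made
        // through other handles become visible before the teardown runs.
        fence(Ordering::Acquire);
        return true; // The caller may now destroy the shared state.
    }
    false
}

fn main() {
    let rc = AtomicU32::new(2);
    assert!(!drop_handle(&rc)); // First handle dropped: state must survive.
    assert!(drop_handle(&rc)); // Last handle dropped: safe to tear down.
}
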
@@ -239,13 +245,20 @@ impl DynPage {

     /// Get the metadata of this page.
     pub fn meta(&self) -> &dyn Any {
-        // SAFETY: The pointer is valid and no other writes will be done to it.
-        let vtable_ptr = unsafe { *(*self.ptr).vtable_ptr.get() };
+        let slot = self.slot();
+
+        // SAFETY: The page metadata is valid to be borrowed immutably, since it will never be
+        // borrowed mutably after initialization.
+        let vtable_ptr = unsafe { &*slot.vtable_ptr.get() };
+
+        // SAFETY: The page metadata is initialized and valid.
+        let vtable_ptr = *unsafe { vtable_ptr.assume_init_ref() };

         let meta_ptr: *const dyn PageMeta = core::ptr::from_raw_parts(self.ptr, vtable_ptr);

-        // SAFETY: The pointer is valid and the type is correct for the stored
-        // metadata.
+        // SAFETY: `self.ptr` points to the metadata storage which is valid to be immutably
+        // borrowed under `vtable_ptr` because the vtable is correct, it lives under the given
+        // lifetime, and no one will mutably borrow the page metadata after initialization.
         (unsafe { &*meta_ptr }) as &dyn Any
     }

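Note the asymmetry with the drop path: `meta()` uses `assume_init_ref` to borrow the vtable pointer in place, while `drop_last_in_place` uses `assume_init_read` to move it out. A sketch of both read paths, with `Vec<u8>` as a stand-in payload:

use core::mem::MaybeUninit;

fn main() {
    let mut slot: MaybeUninit<Vec<u8>> = MaybeUninit::uninit();
    slot.write(vec![1, 2, 3]);

    // Borrow in place: fits an accessor that only needs to look at the value.
    // SAFETY: initialized by the `write` above.
    let borrowed: &Vec<u8> = unsafe { slot.assume_init_ref() };
    assert_eq!(borrowed.len(), 3);

    // Move out: fits a drop path that consumes the value.
    // SAFETY: still initialized; it is moved out exactly once, and the slot is
    // not touched afterwards.
    let owned = unsafe { slot.assume_init_read() };
    assert_eq!(owned, vec![1, 2, 3]);
}
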
@@ -264,8 +277,10 @@ impl DynPage {
         PAGE_SIZE
     }

-    fn ref_count(&self) -> &AtomicU32 {
-        unsafe { &(*self.ptr).ref_count }
+    fn slot(&self) -> &MetaSlot {
+        // SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an
+        // immutable reference to it is always safe.
+        unsafe { &*self.ptr }
     }
 }

@@ -306,14 +321,14 @@ impl From<Frame> for DynPage {

 impl Clone for DynPage {
     fn clone(&self) -> Self {
-        self.ref_count().fetch_add(1, Ordering::Relaxed);
+        self.slot().ref_count.fetch_add(1, Ordering::Relaxed);
         Self { ptr: self.ptr }
     }
 }

 impl Drop for DynPage {
     fn drop(&mut self) {
-        let last_ref_cnt = self.ref_count().fetch_sub(1, Ordering::Release);
+        let last_ref_cnt = self.slot().ref_count.fetch_sub(1, Ordering::Release);
         debug_assert!(last_ref_cnt > 0);
         if last_ref_cnt == 1 {
             // A fence is needed here with the same reasons stated in the implementation of

@@ -340,9 +355,10 @@ pub(in crate::mm) unsafe fn inc_page_ref_count(paddr: Paddr) {
     debug_assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr);

     let vaddr: Vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
-    // SAFETY: The virtual address points to an initialized metadata slot.
+    // SAFETY: `vaddr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking
+    // an immutable reference to it is always safe.
     let slot = unsafe { &*(vaddr as *const MetaSlot) };
-    let old = slot.ref_count.fetch_add(1, Ordering::Relaxed);
+
+    let old = slot.ref_count.fetch_add(1, Ordering::Relaxed);
     debug_assert!(old > 0);
 }
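
The increment stays `Relaxed`, which mirrors the argument `Arc::clone` makes: a new handle can only be minted from an existing live one (hence the `old > 0` assertion), so nothing beyond the counter itself needs to be synchronized here. A sketch under that assumption:

use core::sync::atomic::{AtomicU32, Ordering};

fn clone_handle(ref_count: &AtomicU32) {
    // A new handle is only minted from an existing live one, so the count is
    // already at least 1; no other memory is published through this counter.
    let old = ref_count.fetch_add(1, Ordering::Relaxed);
    debug_assert!(old > 0);
}

fn main() {
    let rc = AtomicU32::new(1);
    clone_handle(&rc);
    assert_eq!(rc.load(Ordering::Relaxed), 2);
}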