Fix heap lock issues

This commit is contained in:
Ruihan Li 2026-01-29 21:32:51 +08:00 committed by Tate, Hongliang Tian
parent 6d2ff13a63
commit 894f6ba9b4
7 changed files with 41 additions and 21 deletions

View File

@ -40,9 +40,12 @@ impl FileOps for MapsFileOps {
let fs_ref = current.as_posix_thread().unwrap().read_fs();
let path_resolver = fs_ref.resolver().read();
// To maintain a consistent lock order and avoid race conditions, we must lock the heap
// before querying the VMAR.
let heap_guard = vmar.process_vm().heap().lock();
let guard = vmar.query(VMAR_LOWEST_ADDR..VMAR_CAP_ADDR);
for vm_mapping in guard.iter() {
vm_mapping.print_to_maps(&mut printer, vmar, &path_resolver)?;
vm_mapping.print_to_maps(&mut printer, vmar, &heap_guard, &path_resolver)?;
}
Ok(printer.bytes_written())

View File

@ -38,7 +38,7 @@ pub use process::{
Terminal, broadcast_signal_async, enqueue_signal_async, spawn_init_process,
};
pub use process_filter::ProcessFilter;
pub use process_vm::{INIT_STACK_SIZE, ProcessVm};
pub use process_vm::{INIT_STACK_SIZE, LockedHeap, ProcessVm};
pub use rlimit::ResourceType;
pub use stats::collect_process_creation_count;
pub use term_status::TermStatus;

View File

@ -19,6 +19,11 @@ pub struct Heap {
inner: Mutex<Option<HeapInner>>,
}
/// A guard that holds the lock on a [`Heap`].
///
/// While this guard is alive, the heap state cannot be modified concurrently,
/// so any information read through it (e.g., the heap range) stays consistent.
#[derive(Debug)]
pub struct LockedHeap<'a> {
    // Guard over the lazily-initialized heap state; `None` means the heap
    // has not been initialized yet (accessors `expect` on this).
    inner: MutexGuard<'a, Option<HeapInner>>,
}
#[derive(Clone, Debug)]
struct HeapInner {
/// The size of the data segment, used for rlimit checking.
@ -35,6 +40,15 @@ impl Heap {
}
}
/// Creates a new `Heap` whose contents duplicate those of an already-locked heap.
///
/// # Panics
///
/// Panics if the heap behind `heap_guard` has not been initialized yet.
pub(super) fn fork_from(heap_guard: &LockedHeap) -> Self {
    let cloned_inner = heap_guard
        .inner
        .as_ref()
        .expect("Heap is not initialized")
        .clone();
    Self {
        inner: Mutex::new(Some(cloned_inner)),
    }
}
/// Initializes and maps the heap virtual memory.
pub(super) fn map_and_init_heap(
&self,
@ -75,11 +89,11 @@ impl Heap {
Ok(())
}
/// Returns the current heap range.
pub fn heap_range(&self) -> Range<Vaddr> {
let inner = self.inner.lock();
let inner = inner.as_ref().expect("Heap is not initialized");
inner.heap_range.clone()
/// Acquires the heap lock and returns a guard for accessing the heap information.
pub fn lock(&self) -> LockedHeap<'_> {
    let inner = self.inner.lock();
    LockedHeap { inner }
}
/// Modifies the end address of the heap.
@ -136,11 +150,12 @@ impl Heap {
}
}
impl Clone for Heap {
fn clone(&self) -> Self {
Self {
inner: Mutex::new(self.inner.lock().clone()),
}
impl LockedHeap<'_> {
    /// Returns the virtual address range currently occupied by the heap.
    ///
    /// # Panics
    ///
    /// Panics if the heap has not been initialized yet.
    pub fn heap_range(&self) -> &Range<Vaddr> {
        &self
            .inner
            .as_ref()
            .expect("Heap is not initialized")
            .heap_range
    }
}

View File

@ -18,7 +18,7 @@ use core::sync::atomic::{AtomicUsize, Ordering};
use ostd::{sync::MutexGuard, task::disable_preempt};
pub use self::{
heap::Heap,
heap::{Heap, LockedHeap},
init_stack::{
INIT_STACK_SIZE, InitStack, InitStackReader, MAX_LEN_STRING_ARG, MAX_NR_STRING_ARGS,
aux_vec::{AuxKey, AuxVec},
@ -86,10 +86,10 @@ impl ProcessVm {
}
/// Creates a new `ProcessVm` with identical contents of an existing one.
pub fn fork_from(process_vm: &Self) -> Self {
pub fn fork_from(process_vm: &Self, heap_guard: &LockedHeap) -> Self {
Self {
init_stack: process_vm.init_stack.clone(),
heap: process_vm.heap.clone(),
heap: Heap::fork_from(heap_guard),
executable_file: process_vm.executable_file.clone(),
#[cfg(target_arch = "riscv64")]
vdso_base: AtomicUsize::new(process_vm.vdso_base.load(Ordering::Relaxed)),

View File

@ -18,7 +18,7 @@ pub fn sys_brk(heap_end: u64, ctx: &Context) -> Result<SyscallReturn> {
Some(addr) => user_heap
.modify_heap_end(addr, ctx)
.unwrap_or_else(|cur_heap_end| cur_heap_end),
None => user_heap.heap_range().end,
None => user_heap.lock().heap_range().end,
};
Ok(SyscallReturn::Return(current_heap_end as _))

View File

@ -25,6 +25,7 @@ use crate::{
utils::Inode,
},
prelude::*,
process::LockedHeap,
thread::exception::PageFaultInfo,
vm::{
perms::VmPerms,
@ -219,6 +220,7 @@ impl VmMapping {
&self,
printer: &mut VmPrinter,
parent_vmar: &Vmar,
parent_heap_guard: &LockedHeap,
path_resolver: &PathResolver,
) -> Result<()> {
let start = self.map_to_addr;
@ -263,12 +265,11 @@ impl VmMapping {
let name = || {
let process_vm = parent_vmar.process_vm();
let user_stack_top = process_vm.init_stack().user_stack_top();
if self.map_to_addr <= user_stack_top && self.map_end() > user_stack_top {
return Some(Cow::Borrowed("[stack]"));
}
let heap_range = process_vm.heap().heap_range();
let heap_range = parent_heap_guard.heap_range();
if self.map_to_addr >= heap_range.start && self.map_end() <= heap_range.end {
return Some(Cow::Borrowed("[heap]"));
}

View File

@ -19,13 +19,14 @@ impl Vmar {
/// Creates a new VMAR whose content is inherited from another
/// using copy-on-write (COW) technique.
pub fn fork_from(vmar: &Self) -> Result<Arc<Self>> {
// Obtain the heap lock and hold it for the entire method to avoid race conditions.
let heap_guard = vmar.process_vm.heap().lock();
let new_vmar = Arc::new(Vmar {
inner: RwMutex::new(VmarInner::new()),
vm_space: Arc::new(VmSpace::new()),
rss_counters: array::from_fn(|_| PerCpuCounter::new()),
// FIXME: There are race conditions because `process_vm` is not operating under the
// `vmar.inner` lock.
process_vm: ProcessVm::fork_from(&vmar.process_vm),
process_vm: ProcessVm::fork_from(&vmar.process_vm, &heap_guard),
});
{