// SPDX-License-Identifier: MPL-2.0

//! This module parses ELF file content to obtain the ELF load information.
//!
//! When creating a process from an ELF file, the load information is used to
//! construct the `VmSpace`.

use core::ops::Range;

use align_ext::AlignExt;
use aster_rights::Full;
use ostd::{
    mm::{CachePolicy, PageFlags, PageProperty, VmIo},
    task::disable_preempt,
};
use xmas_elf::program::{self, ProgramHeader64};

use super::elf_file::ElfHeaders;
use crate::{
    fs::{
        fs_resolver::{FsPath, FsResolver, AT_FDCWD},
        path::Path,
    },
    prelude::*,
    process::{
        posix_thread::do_exit_group,
        process_vm::{AuxKey, AuxVec, ProcessVm},
        TermStatus,
    },
    vm::{
        perms::VmPerms,
        util::duplicate_frame,
        vmar::Vmar,
        vmo::{CommitFlags, VmoRightsOp},
    },
};

/// Loads an ELF executable into the process VM.
///
/// This function maps the ELF segments and
/// initializes the process init stack.
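///
/// # Example
///
/// A minimal sketch of an `execve`-style call site (illustrative only; the
/// surrounding setup is elided):
///
/// ```ignore
/// let elf_load_info =
///     load_elf_to_vm(&process_vm, elf_file, &fs_resolver, elf_headers, argv, envp)?;
/// // The new user context starts at `elf_load_info.entry_point`, with
/// // `elf_load_info.user_stack_top` as the initial stack pointer.
/// ```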
pub fn load_elf_to_vm(
    process_vm: &ProcessVm,
    elf_file: Path,
    fs_resolver: &FsResolver,
    elf_headers: ElfHeaders,
    argv: Vec<CString>,
    envp: Vec<CString>,
) -> Result<ElfLoadInfo> {
    let ldso = lookup_and_parse_ldso(&elf_headers, &elf_file, fs_resolver)?;

    match init_and_map_vmos(process_vm, ldso, &elf_headers, &elf_file) {
        #[cfg_attr(
            not(any(target_arch = "x86_64", target_arch = "riscv64")),
            expect(unused_mut)
        )]
        Ok((_range, entry_point, mut aux_vec)) => {
            // Map the vDSO and set the entry.
            // Since the vDSO does not need to be mapped at any specific address,
            // it is mapped after the ELF file, heap, and stack.
            #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
            if let Some(vdso_text_base) = map_vdso_to_vm(process_vm) {
                aux_vec
                    .set(AuxKey::AT_SYSINFO_EHDR, vdso_text_base as u64)
                    .unwrap();
            }

            process_vm.map_and_write_init_stack(argv, envp, aux_vec)?;

            let user_stack_top = process_vm.user_stack_top();
            Ok(ElfLoadInfo {
                entry_point,
                user_stack_top,
                _private: (),
            })
        }
        Err(err) => {
            // Since the process VM is in an invalid state,
            // the process cannot return to user space again,
            // so `Vmar::clear` and `do_exit_group` are called here.
            // FIXME: Sending a fault signal is an alternative approach.
            process_vm.lock_root_vmar().unwrap().clear().unwrap();

            // FIXME: The `current` macro will be used in `do_exit_group`.
            // If the macro is used when creating the init process,
            // the macro will panic. This corner case should be handled later.
            // FIXME: How to set the correct exit status?
            do_exit_group(TermStatus::Exited(1));

            // The process will exit and the error code will be ignored.
            Err(err)
        }
    }
}

/// Looks up the dynamic linker (ld.so) specified by the ELF interpreter path
/// and parses its headers. Returns `Ok(None)` if the executable does not
/// specify an interpreter.
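///
/// For a typical dynamically linked executable, the embedded interpreter path
/// looks like `/lib64/ld-linux-x86-64.so.2` (the exact path is an
/// illustrative assumption); a statically linked executable carries no
/// interpreter path, so this function returns `Ok(None)`.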
fn lookup_and_parse_ldso(
    headers: &ElfHeaders,
    elf_file: &Path,
    fs_resolver: &FsResolver,
) -> Result<Option<(Path, ElfHeaders)>> {
    let ldso_file = {
        let Some(ldso_path) = headers.read_ldso_path(elf_file)? else {
            return Ok(None);
        };
        // Our FS requires the path to be valid UTF-8. This may be too restrictive.
        let ldso_path = ldso_path.into_string().map_err(|_| {
            Error::with_message(
                Errno::ENOEXEC,
                "The interpreter path specified in ELF is not a valid UTF-8 string",
            )
        })?;
        let fs_path = FsPath::new(AT_FDCWD, ldso_path.as_str())?;
        fs_resolver.lookup(&fs_path)?
    };
    let ldso_elf = {
        let mut buf = Box::new([0u8; PAGE_SIZE]);
        let inode = ldso_file.inode();
        inode.read_bytes_at(0, &mut *buf)?;
        ElfHeaders::parse_elf(&*buf)?
    };
    Ok(Some((ldso_file, ldso_elf)))
}

fn load_ldso(
    root_vmar: &Vmar<Full>,
    ldso_file: &Path,
    ldso_elf: &ElfHeaders,
) -> Result<LdsoLoadInfo> {
    let range = map_segment_vmos(ldso_elf, root_vmar, ldso_file)?;
    Ok(LdsoLoadInfo {
        entry_point: range
            .relocated_addr_of(ldso_elf.entry_point())
            .ok_or(Error::with_message(
                Errno::ENOEXEC,
                "The entry point is not in the mapped range",
            ))?,
        range,
        _private: (),
    })
}

/// Initializes the VM space and maps the VMO to the corresponding virtual memory address.
///
/// Returns the mapped range, the entry point and the auxiliary vector.
fn init_and_map_vmos(
    process_vm: &ProcessVm,
    ldso: Option<(Path, ElfHeaders)>,
    parsed_elf: &ElfHeaders,
    elf_file: &Path,
) -> Result<(RelocatedRange, Vaddr, AuxVec)> {
    let process_vmar = process_vm.lock_root_vmar();
    let root_vmar = process_vmar.unwrap();

    // After we clear the process VM, if any error happens, we must call
    // `exit_group` instead of returning to user space.
    let ldso_load_info = if let Some((ldso_file, ldso_elf)) = ldso {
        Some(load_ldso(root_vmar, &ldso_file, &ldso_elf)?)
    } else {
        None
    };

    let elf_map_range = map_segment_vmos(parsed_elf, root_vmar, elf_file)?;

    let aux_vec = {
        let ldso_base = ldso_load_info
            .as_ref()
            .map(|load_info| load_info.range.relocated_start);
        init_aux_vec(parsed_elf, elf_map_range.relocated_start, ldso_base)?
    };

    let entry_point = if let Some(ldso_load_info) = ldso_load_info {
        // Normal shared object
        ldso_load_info.entry_point
    } else {
        elf_map_range
            .relocated_addr_of(parsed_elf.entry_point())
            .ok_or(Error::with_message(
                Errno::ENOEXEC,
                "The entry point is not in the mapped range",
            ))?
    };

    Ok((elf_map_range, entry_point, aux_vec))
}

pub struct LdsoLoadInfo {
    /// Relocated entry point.
    pub entry_point: Vaddr,
    /// The range covering all the mapped segments.
    ///
    /// May not be page-aligned.
    pub range: RelocatedRange,
    _private: (),
}

pub struct ElfLoadInfo {
    /// Relocated entry point.
    pub entry_point: Vaddr,
    /// Address of the user stack top.
    pub user_stack_top: Vaddr,
    _private: (),
}

/// Initializes a [`Vmo`] for each segment and then maps it to the root [`Vmar`].
///
/// This function returns the mapped range that covers all segments. The
/// range is tight, i.e., it does not include any padding bytes, so the
/// boundaries may not be page-aligned.
///
/// [`Vmo`]: crate::vm::vmo::Vmo
pub fn map_segment_vmos(
    elf: &ElfHeaders,
    root_vmar: &Vmar<Full>,
    elf_file: &Path,
) -> Result<RelocatedRange> {
    let elf_va_range = get_range_for_all_segments(elf)?;

    let map_range = if elf.is_shared_object() {
        // Relocatable object.

        // Allocate a continuous range of virtual memory for all segments in advance.
        //
        // All segments in the ELF program must be mapped to a continuous VM range
        // to ensure that the relative offset of each segment does not change.
        let elf_va_range_aligned =
            elf_va_range.start.align_down(PAGE_SIZE)..elf_va_range.end.align_up(PAGE_SIZE);
        let map_size = elf_va_range_aligned.len();

        let vmar_map_options = root_vmar
            .new_map(map_size, VmPerms::empty())?
            .handle_page_faults_around();
        let aligned_range = vmar_map_options.build().map(|addr| addr..addr + map_size)?;

        let start_in_page_offset = elf_va_range.start - elf_va_range_aligned.start;
        let end_in_page_offset = elf_va_range_aligned.end - elf_va_range.end;

        aligned_range.start + start_in_page_offset..aligned_range.end - end_in_page_offset
    } else {
        // Not a relocatable object. Map as-is.
        elf_va_range.clone()
    };

    let relocated_range =
        RelocatedRange::new(elf_va_range, map_range.start).expect("Mapped range overflows");

    for program_header in &elf.program_headers {
        let type_ = program_header.get_type().map_err(|_| {
            Error::with_message(Errno::ENOEXEC, "Failed to parse the program header")
        })?;
        if type_ == program::Type::Load {
            check_segment_align(program_header)?;

            let map_at = relocated_range
                .relocated_addr_of(program_header.virtual_addr as Vaddr)
                .expect("Address not covered by `get_range_for_all_segments`");

            map_segment_vmo(program_header, elf_file, root_vmar, map_at)?;
        }
    }

    Ok(relocated_range)
}

/// A virtual range and its relocated address.
pub struct RelocatedRange {
    original_range: Range<Vaddr>,
    relocated_start: Vaddr,
}

impl RelocatedRange {
    /// Creates a new `RelocatedRange`.
    ///
    /// If the relocated address overflows, it will return `None`.
    pub fn new(original_range: Range<Vaddr>, relocated_start: Vaddr) -> Option<Self> {
        relocated_start.checked_add(original_range.len())?;
        Some(Self {
            original_range,
            relocated_start,
        })
    }

    /// Gets the relocated address of an address in the original range.
    ///
    /// If the provided address is not in the original range, it will return `None`.
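    ///
    /// # Example
    ///
    /// A minimal sketch of the relocation arithmetic (illustrative, not a doctest):
    ///
    /// ```ignore
    /// let range = RelocatedRange::new(0x1000..0x3000, 0x7000_0000).unwrap();
    /// assert_eq!(range.relocated_addr_of(0x1800), Some(0x7000_0800));
    /// assert_eq!(range.relocated_addr_of(0x3000), None); // The end is exclusive.
    /// ```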
    pub fn relocated_addr_of(&self, addr: Vaddr) -> Option<Vaddr> {
        if self.original_range.contains(&addr) {
            Some(addr - self.original_range.start + self.relocated_start)
        } else {
            None
        }
    }
}

/// Returns the range that covers all segments in the ELF file.
///
/// The returned range is tight, i.e., it does not include any padding bytes,
/// so the boundaries may not be page-aligned.
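///
/// For example, given two loadable segments covering `0x1000..0x5000` and
/// `0x6000..0x7800`, the returned range is `0x1000..0x7800` (illustrative
/// addresses).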
fn get_range_for_all_segments(elf: &ElfHeaders) -> Result<Range<Vaddr>> {
    let loadable_ranges_iter = elf.program_headers.iter().filter_map(|ph| {
        if let Ok(program::Type::Load) = ph.get_type() {
            Some((ph.virtual_addr as Vaddr)..((ph.virtual_addr + ph.mem_size) as Vaddr))
        } else {
            None
        }
    });

    let min_addr =
        loadable_ranges_iter
            .clone()
            .map(|r| r.start)
            .min()
            .ok_or(Error::with_message(
                Errno::ENOEXEC,
                "Executable file does not have loadable sections",
            ))?;

    let max_addr = loadable_ranges_iter
        .map(|r| r.end)
        .max()
        .expect("The range set contains a minimum but no maximum");

    Ok(min_addr..max_addr)
}

/// Creates and maps the corresponding segment VMO to `root_vmar`.
/// If needed, creates an additional anonymous mapping to represent the .bss segment.
fn map_segment_vmo(
    program_header: &ProgramHeader64,
    elf_file: &Path,
    root_vmar: &Vmar<Full>,
    map_at: Vaddr,
) -> Result<()> {
    trace!(
        "mem range = 0x{:x} - 0x{:x}, mem_size = 0x{:x}",
        program_header.virtual_addr,
        program_header.virtual_addr + program_header.mem_size,
        program_header.mem_size
    );
    trace!(
        "file range = 0x{:x} - 0x{:x}, file_size = 0x{:x}",
        program_header.offset,
        program_header.offset + program_header.file_size,
        program_header.file_size
    );

    let file_offset = program_header.offset as usize;
    let virtual_addr = program_header.virtual_addr as usize;
    debug_assert!(file_offset % PAGE_SIZE == virtual_addr % PAGE_SIZE);
    let segment_vmo = {
        let inode = elf_file.inode();
        inode
            .page_cache()
            .ok_or(Error::with_message(
                Errno::ENOENT,
                "executable has no page cache",
            ))?
            .to_dyn()
            .dup()?
    };

    let total_map_size = {
        let vmap_start = virtual_addr.align_down(PAGE_SIZE);
        let vmap_end = (virtual_addr + program_header.mem_size as usize).align_up(PAGE_SIZE);
        vmap_end - vmap_start
    };

    let (segment_offset, segment_size) = {
        let start = file_offset.align_down(PAGE_SIZE);
        let end = (file_offset + program_header.file_size as usize).align_up(PAGE_SIZE);
        debug_assert!(total_map_size >= (program_header.file_size as usize).align_up(PAGE_SIZE));
        (start, end - start)
    };

    let perms = parse_segment_perm(program_header.flags);
    let offset = map_at.align_down(PAGE_SIZE);
    if segment_size != 0 {
        let mut vm_map_options = root_vmar
            .new_map(segment_size, perms)?
            .vmo(segment_vmo.dup()?)
            .vmo_offset(segment_offset)
            .can_overwrite(true);
        vm_map_options = vm_map_options.offset(offset).handle_page_faults_around();
        let map_addr = vm_map_options.build()?;

        // Write zeros as padding. There are two kinds of padding:
        // Head padding: if the segment's virtual address is not page-aligned,
        // the bytes in the first page from the page start to the virtual address
        // should be zeroed.
        // Tail padding: if the segment's `mem_size` is larger than its file size,
        // the bytes that are not backed by file content should be zeroed
        // (usually the .data/.bss sections).
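        //
        // A sketch of one LOAD segment inside its page-aligned mapping
        // (proportions are illustrative):
        //
        //   page start    virtual_addr       virtual_addr + file_size
        //       |               |                       |
        //       v               v                       v
        //       +---------------+-----------------------+--------------+
        //       | head padding  |     file content      | tail padding |
        //       |   (zeroed)    | (from the page cache) |   (zeroed)   |
        //       +---------------+-----------------------+--------------+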

        // Head padding.
        let page_offset = file_offset % PAGE_SIZE;
        let head_frame = if page_offset != 0 {
            let head_frame =
                segment_vmo.commit_on(segment_offset / PAGE_SIZE, CommitFlags::empty())?;
            let new_frame = duplicate_frame(&head_frame)?;

            let buffer = vec![0u8; page_offset];
            new_frame.write_bytes(0, &buffer).unwrap();
            Some(new_frame)
        } else {
            None
        };

        // Tail padding.
        let tail_padding_offset = program_header.file_size as usize + page_offset;
        let tail_frame_and_addr = if segment_size > tail_padding_offset {
            let tail_frame = {
                let offset_index = (segment_offset + tail_padding_offset) / PAGE_SIZE;
                segment_vmo.commit_on(offset_index, CommitFlags::empty())?
            };
            let new_frame = duplicate_frame(&tail_frame)?;

            let buffer = vec![0u8; (segment_size - tail_padding_offset) % PAGE_SIZE];
            new_frame
                .write_bytes(tail_padding_offset % PAGE_SIZE, &buffer)
                .unwrap();

            let tail_page_addr = map_addr + tail_padding_offset.align_down(PAGE_SIZE);
            Some((new_frame, tail_page_addr))
        } else {
            None
        };

        let preempt_guard = disable_preempt();
        let mut cursor = root_vmar
            .vm_space()
            .cursor_mut(&preempt_guard, &(map_addr..map_addr + segment_size))?;
        let page_flags = PageFlags::from(perms) | PageFlags::ACCESSED;

        if let Some(head_frame) = head_frame {
            cursor.map(
                head_frame.into(),
                PageProperty::new_user(page_flags, CachePolicy::Writeback),
            );
        }

        if let Some((tail_frame, tail_page_addr)) = tail_frame_and_addr {
            cursor.jump(tail_page_addr)?;
            cursor.map(
                tail_frame.into(),
                PageProperty::new_user(page_flags, CachePolicy::Writeback),
            );
        }
    }

    let anonymous_map_size: usize = total_map_size.saturating_sub(segment_size);
    if anonymous_map_size > 0 {
        let mut anonymous_map_options = root_vmar
            .new_map(anonymous_map_size, perms)?
            .can_overwrite(true);
        anonymous_map_options = anonymous_map_options.offset(offset + segment_size);
        anonymous_map_options.build()?;
    }
    Ok(())
}

/// Converts the ELF segment flags into the corresponding `VmPerms`.
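///
/// For example, ELF flags `R E` (a typical text segment) yield
/// `VmPerms::READ | VmPerms::EXEC`, and flags `RW` (a typical data segment)
/// yield `VmPerms::READ | VmPerms::WRITE`.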
fn parse_segment_perm(flags: xmas_elf::program::Flags) -> VmPerms {
    let mut vm_perm = VmPerms::empty();
    if flags.is_read() {
        vm_perm |= VmPerms::READ;
    }
    if flags.is_write() {
        vm_perm |= VmPerms::WRITE;
    }
    if flags.is_execute() {
        vm_perm |= VmPerms::EXEC;
    }
    vm_perm
}

/// Checks that a loadable segment satisfies its alignment requirement,
/// i.e., that `offset` and `virtual_addr` are congruent modulo `align`.
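///
/// For example, with `align = 0x1000`, `offset = 0x2340` and
/// `virtual_addr = 0x40_1340` pass the check, since both are `0x340` modulo
/// the alignment (illustrative values).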
fn check_segment_align(program_header: &ProgramHeader64) -> Result<()> {
    let align = program_header.align;
    if align == 0 || align == 1 {
        // No alignment requirement.
        return Ok(());
    }
    if !align.is_power_of_two() {
        return_errno_with_message!(Errno::ENOEXEC, "segment align is invalid.");
    }
    if program_header.offset % align != program_header.virtual_addr % align {
        return_errno_with_message!(Errno::ENOEXEC, "segment align is not satisfied.");
    }
    Ok(())
}

/// Initializes the auxiliary vector (auxv) for the new program image.
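///
/// A sketch of the entries this function sets (values are illustrative):
///
/// ```text
/// AT_PAGESZ -> PAGE_SIZE
/// AT_PHDR   -> runtime address of the program header table
/// AT_PHNUM  -> number of program headers
/// AT_PHENT  -> size of one program header entry
/// AT_ENTRY  -> (relocated) entry point of the executable
/// AT_BASE   -> load base of the dynamic linker, if one is present
/// ```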
pub fn init_aux_vec(
    elf: &ElfHeaders,
    elf_map_addr: Vaddr,
    ldso_base: Option<Vaddr>,
) -> Result<AuxVec> {
    let mut aux_vec = AuxVec::new();
    aux_vec.set(AuxKey::AT_PAGESZ, PAGE_SIZE as _)?;
    let ph_addr = if elf.is_shared_object() {
        elf.ph_addr()? + elf_map_addr
    } else {
        elf.ph_addr()?
    };
    aux_vec.set(AuxKey::AT_PHDR, ph_addr as u64)?;
    aux_vec.set(AuxKey::AT_PHNUM, elf.ph_count() as u64)?;
    aux_vec.set(AuxKey::AT_PHENT, elf.ph_ent() as u64)?;
    let elf_entry = if elf.is_shared_object() {
        let base_load_offset = elf.base_load_address_offset();
        elf.entry_point() + elf_map_addr - base_load_offset as usize
    } else {
        elf.entry_point()
    };
    aux_vec.set(AuxKey::AT_ENTRY, elf_entry as u64)?;

    if let Some(ldso_base) = ldso_base {
        aux_vec.set(AuxKey::AT_BASE, ldso_base as u64)?;
    }
    Ok(aux_vec)
}

/// Maps the vDSO VMO to the corresponding virtual memory address.
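///
/// A sketch of the resulting protections, assuming the offsets and sizes
/// provided by `VDSO_VMO_LAYOUT`:
///
/// ```text
/// vdso_vmo_base + data_segment_offset .. + data_segment_size  -> read-only
/// vdso_vmo_base + text_segment_offset .. + text_segment_size  -> read + exec
/// ```
///
/// The returned address is the text segment base, which is later published
/// to user space via `AT_SYSINFO_EHDR`.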
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
fn map_vdso_to_vm(process_vm: &ProcessVm) -> Option<Vaddr> {
    use crate::vdso::{vdso_vmo, VDSO_VMO_LAYOUT};

    let process_vmar = process_vm.lock_root_vmar();
    let root_vmar = process_vmar.unwrap();
    let vdso_vmo = vdso_vmo()?;

    let options = root_vmar
        .new_map(VDSO_VMO_LAYOUT.size, VmPerms::empty())
        .unwrap()
        .vmo(vdso_vmo.dup().unwrap());

    let vdso_vmo_base = options.build().unwrap();
    let vdso_data_base = vdso_vmo_base + VDSO_VMO_LAYOUT.data_segment_offset;
    let vdso_text_base = vdso_vmo_base + VDSO_VMO_LAYOUT.text_segment_offset;

    let data_perms = VmPerms::READ;
    let text_perms = VmPerms::READ | VmPerms::EXEC;
    root_vmar
        .protect(
            data_perms,
            vdso_data_base..(vdso_data_base + VDSO_VMO_LAYOUT.data_segment_size),
        )
        .unwrap();
    root_vmar
        .protect(
            text_perms,
            vdso_text_base..(vdso_text_base + VDSO_VMO_LAYOUT.text_segment_size),
        )
        .unwrap();
    Some(vdso_text_base)
}