2024-01-03 03:22:36 +00:00
|
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
|
|
2025-12-12 15:07:27 +00:00
|
|
|
//! ELF file parser.
|
2025-04-07 16:09:52 +00:00
|
|
|
|
2023-03-26 05:34:42 +00:00
|
|
|
use align_ext::AlignExt;
|
2022-12-07 11:22:37 +00:00
|
|
|
|
2025-12-12 15:07:27 +00:00
|
|
|
use super::{
|
|
|
|
|
elf_file::{ElfHeaders, LoadablePhdr},
|
|
|
|
|
relocate::RelocatedRange,
|
|
|
|
|
};
|
2024-02-25 14:09:24 +00:00
|
|
|
use crate::{
|
2026-01-25 13:12:11 +00:00
|
|
|
fs::path::{FsPath, Path, PathResolver},
|
2024-02-25 14:09:24 +00:00
|
|
|
prelude::*,
|
2025-12-12 15:07:27 +00:00
|
|
|
process::{
|
|
|
|
|
process_vm::{AuxKey, AuxVec},
|
2025-12-13 13:20:56 +00:00
|
|
|
program_loader::check_executable_inode,
|
2025-12-12 15:07:27 +00:00
|
|
|
},
|
2026-01-12 02:14:02 +00:00
|
|
|
util::random::getrandom,
|
2025-12-12 15:07:27 +00:00
|
|
|
vm::{
|
|
|
|
|
perms::VmPerms,
|
2025-12-25 15:33:03 +00:00
|
|
|
vmar::{VMAR_CAP_ADDR, VMAR_LOWEST_ADDR, Vmar},
|
2025-12-12 15:07:27 +00:00
|
|
|
},
|
2024-02-25 14:09:24 +00:00
|
|
|
};
|
2022-12-07 11:22:37 +00:00
|
|
|
|
2026-01-12 02:14:02 +00:00
|
|
|
/// The base address for PIE (ET_DYN with INTERP) loading.
///
/// Linux calls this `ELF_ET_DYN_BASE`. It has some intentions:
/// - The base load address for PIE programs (ET_DYN with INTERP).
/// - The heap start address for static PIE programs (ET_DYN without INTERP).
///
/// References: <https://elixir.bootlin.com/linux/v6.16.9/source/arch/x86/include/asm/elf.h#L235>
/// - x86_64: ELF_ET_DYN_BASE = DEFAULT_MAP_WINDOW / 3 * 2
/// - riscv64: ELF_ET_DYN_BASE = (DEFAULT_MAP_WINDOW / 3) * 2
/// - loongarch64: ELF_ET_DYN_BASE = TASK_SIZE / 3 * 2
// Two thirds of the VMAR capability address, mirroring the per-arch Linux formulas above.
const PIE_BASE_ADDR: Vaddr = VMAR_CAP_ADDR / 3 * 2;
|
|
|
|
|
|
2025-12-12 15:07:27 +00:00
|
|
|
/// The information produced by loading an ELF executable into a process VMAR.
///
/// Returned by [`load_elf_to_vmar`].
pub struct ElfLoadInfo {
    /// The relocated entry point.
    pub entry_point: Vaddr,
    /// The top address of the user stack.
    pub user_stack_top: Vaddr,
}
|
|
|
|
|
|
|
|
|
|
/// Loads an ELF file to the process VMAR.
///
/// This function will map ELF segments and
/// initialize the init stack and heap.
///
/// Returns the relocated entry point and the user stack top on success.
pub fn load_elf_to_vmar(
    vmar: &Vmar,
    elf_file: Path,
    path_resolver: &PathResolver,
    elf_headers: ElfHeaders,
    argv: Vec<CString>,
    envp: Vec<CString>,
) -> Result<ElfLoadInfo> {
    // If the ELF requests an interpreter (PT_INTERP), locate and parse it first.
    let ldso = lookup_and_parse_ldso(&elf_headers, &elf_file, path_resolver)?;

    // `aux_vec` is only mutated below on architectures that map a vDSO,
    // hence the conditional `expect(unused_mut)`.
    #[cfg_attr(
        not(any(target_arch = "x86_64", target_arch = "riscv64")),
        expect(unused_mut)
    )]
    let (elf_mapped_info, entry_point, mut aux_vec) =
        map_vmos_and_build_aux_vec(vmar, ldso, &elf_headers, &elf_file)?;

    // Map the vDSO and set the entry.
    // Since the vDSO does not require being mapped to any specific address,
    // the vDSO is mapped after the ELF file, heap, and stack.
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    if let Some(vdso_text_base) = map_vdso_to_vmar(vmar) {
        #[cfg(target_arch = "riscv64")]
        vmar.process_vm().set_vdso_base(vdso_text_base);
        aux_vec.set(AuxKey::AT_SYSINFO_EHDR, vdso_text_base as u64);
    }

    // Set up the initial user stack (arguments, environment, aux vector)
    // and the user heap.
    vmar.process_vm()
        .map_and_write_init_stack(vmar, argv, envp, aux_vec)?;
    vmar.process_vm().map_and_init_heap(
        vmar,
        elf_mapped_info.data_segment_size,
        elf_mapped_info.heap_base,
    )?;

    let user_stack_top = vmar.process_vm().init_stack().user_stack_top();
    Ok(ElfLoadInfo {
        entry_point,
        user_stack_top,
    })
}
|
|
|
|
|
|
2023-09-01 02:25:37 +00:00
|
|
|
/// Looks up and parses the ELF interpreter (`ld.so`) requested by the executable.
///
/// Returns `Ok(None)` if the executable has no `PT_INTERP` program header.
/// Otherwise, returns the interpreter's path handle and its parsed ELF headers.
fn lookup_and_parse_ldso(
    headers: &ElfHeaders,
    elf_file: &Path,
    path_resolver: &PathResolver,
) -> Result<Option<(Path, ElfHeaders)>> {
    let ldso_file = {
        // Read the interpreter path from the `PT_INTERP` segment, if present.
        let ldso_path = if let Some(interp_phdr) = headers.interp_phdr() {
            interp_phdr.read_ldso_path(elf_file.inode())?
        } else {
            return Ok(None);
        };

        // Our FS requires the path to be valid UTF-8. This may be too restrictive.
        let ldso_path = ldso_path.into_string().map_err(|_| {
            Error::with_message(
                Errno::ENOEXEC,
                "the interpreter path is not a valid UTF-8 string",
            )
        })?;

        let fs_path = FsPath::try_from(ldso_path.as_str())?;
        path_resolver.lookup(&fs_path)?
    };

    let ldso_elf = {
        let inode = ldso_file.inode();
        check_executable_inode(inode.as_ref())?;

        // Read the first page of the interpreter, which must contain the
        // complete ELF headers.
        let mut buf = Box::new([0u8; PAGE_SIZE]);
        let len = inode.read_bytes_at(0, &mut *buf)?;
        if len < ElfHeaders::LEN {
            return_errno_with_message!(Errno::EIO, "the interpreter format is invalid");
        }

        ElfHeaders::parse(&buf[..len])
            .map_err(|_| Error::with_message(Errno::ELIBBAD, "the interpreter format is invalid"))?
    };

    Ok(Some((ldso_file, ldso_elf)))
}
|
|
|
|
|
|
2025-12-12 15:07:27 +00:00
|
|
|
/// Maps the VMOs to the corresponding virtual memory addresses and builds the auxiliary vector.
///
/// Returns the mapped information, the entry point, and the auxiliary vector.
fn map_vmos_and_build_aux_vec(
    vmar: &Vmar,
    ldso: Option<(Path, ElfHeaders)>,
    parsed_elf: &ElfHeaders,
    elf_file: &Path,
) -> Result<(ElfMappedInfo, Vaddr, AuxVec)> {
    // Map the interpreter first (if any), before the executable's own segments.
    let ldso_load_info = if let Some((ldso_file, ldso_elf)) = ldso {
        Some(load_ldso(vmar, &ldso_file, &ldso_elf)?)
    } else {
        None
    };

    let elf_mapped_info = map_segment_vmos(parsed_elf, vmar, elf_file, ldso_load_info.is_some())?;

    let mut aux_vec = {
        // `AT_BASE` needs the interpreter's relocated load base, if any.
        let ldso_base = ldso_load_info
            .as_ref()
            .map(|load_info| load_info.range.relocated_start());
        init_aux_vec(parsed_elf, &elf_mapped_info.full_range, ldso_base)?
    };

    // Set AT_SECURE based on setuid/setgid bits of the executable file.
    let mode = elf_file.inode().mode()?;
    let secure = if mode.has_set_uid() || mode.has_set_gid() {
        1
    } else {
        0
    };
    aux_vec.set(AuxKey::AT_SECURE, secure);

    // With an interpreter, execution starts at the interpreter's entry point;
    // otherwise, at the executable's (relocated) entry point.
    let entry_point = if let Some(ldso_load_info) = ldso_load_info {
        ldso_load_info.entry_point
    } else {
        elf_mapped_info
            .full_range
            .relocated_addr_of(parsed_elf.entry_point())
            .ok_or_else(|| {
                Error::with_message(
                    Errno::ENOEXEC,
                    "the entry point is not located in any segments",
                )
            })?
    };

    Ok((elf_mapped_info, entry_point, aux_vec))
}
|
|
|
|
|
|
2025-12-12 15:07:27 +00:00
|
|
|
/// The load information of the ELF interpreter (`ld.so`).
struct LdsoLoadInfo {
    /// The relocated entry point.
    entry_point: Vaddr,
    /// The range covering all the mapped segments.
    ///
    /// Note that the range may not be page-aligned.
    range: RelocatedRange,
}
|
|
|
|
|
|
2025-12-12 15:07:27 +00:00
|
|
|
fn load_ldso(vmar: &Vmar, ldso_file: &Path, ldso_elf: &ElfHeaders) -> Result<LdsoLoadInfo> {
|
2026-01-25 13:12:11 +00:00
|
|
|
let elf_mapped_info = map_segment_vmos(ldso_elf, vmar, ldso_file, false)?;
|
2026-01-12 03:21:40 +00:00
|
|
|
let range = elf_mapped_info.full_range;
|
2025-12-12 15:07:27 +00:00
|
|
|
let entry_point = range
|
|
|
|
|
.relocated_addr_of(ldso_elf.entry_point())
|
|
|
|
|
.ok_or_else(|| {
|
|
|
|
|
Error::with_message(
|
|
|
|
|
Errno::ENOEXEC,
|
|
|
|
|
"the entry point is not located in any segments",
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
Ok(LdsoLoadInfo { entry_point, range })
|
2022-12-07 11:22:37 +00:00
|
|
|
}
|
|
|
|
|
|
2026-01-12 03:21:40 +00:00
|
|
|
/// The information of mapped ELF segments.
struct ElfMappedInfo {
    /// The range covering all the mapped segments.
    ///
    /// Note that the range may not be page-aligned (see [`map_segment_vmos`]).
    full_range: RelocatedRange,
    /// The size of the data segment.
    // Per Linux behavior, this only covers the last loadable segment.
    data_segment_size: usize,
    /// The base address for the heap start.
    heap_base: Vaddr,
}
|
|
|
|
|
|
2025-10-17 09:55:41 +00:00
|
|
|
/// Initializes a [`Vmo`] for each segment and then map to the [`Vmar`].
///
/// This function will return the mapped information, which contains the
/// mapped range that covers all segments. The range will be tight,
/// i.e., will not include any padding bytes. So the boundaries may not
/// be page-aligned.
///
/// [`Vmo`]: crate::vm::vmo::Vmo
fn map_segment_vmos(
    elf: &ElfHeaders,
    vmar: &Vmar,
    elf_file: &Path,
    has_interpreter: bool,
) -> Result<ElfMappedInfo> {
    let elf_va_range = elf.calc_total_vaddr_bounds();

    // The base address for the heap start. If it's `None`, we will use the end of ELF segments.
    let mut heap_base = None;

    let map_range = if elf.is_shared_object() {
        // Relocatable object.

        let align = elf.max_load_align();

        // Given that `elf_va_range` is guaranteed to be below `VMAR_CAP_ADDR`, as long as
        // `VMAR_CAP_ADDR * 2` does not overflow, the following `align_up(align)` cannot overflow
        // either.
        const { assert!(VMAR_CAP_ADDR.checked_mul(2).is_some()) };

        // Allocate a continuous range of virtual memory for all segments in advance.
        //
        // All segments in the ELF program must be mapped to a continuous VM range to
        // ensure the relative offset of each segment not changed.
        let elf_va_range_aligned =
            elf_va_range.start.align_down(align)..elf_va_range.end.align_up(align);
        let map_size = elf_va_range_aligned.len();

        // There are effectively two types of ET_DYN ELF binaries:
        // - PIE programs (ET_DYN with PT_INTERP) and
        // - static PIE programs (ET_DYN without PT_INTERP, usually the ELF interpreter itself).
        //
        // Reference: <https://elixir.bootlin.com/linux/v6.19-rc2/source/fs/binfmt_elf.c#L1109>
        let vmar_map_options = if has_interpreter {
            // PIE program: map near a dedicated base.

            // Add some random padding.
            let nr_pages_padding = {
                let mut nr_random_padding_pages: u8 = 0;
                // NOTE(review): the result of `getrandom` is ignored here, so on
                // failure the padding silently stays zero — confirm this
                // best-effort randomization is intended.
                getrandom(nr_random_padding_pages.as_mut_bytes());
                nr_random_padding_pages as usize
            };
            // At most 255 pages of padding; realign down to the segment alignment.
            let offset = (PIE_BASE_ADDR + nr_pages_padding * PAGE_SIZE).align_down(align);

            if offset < VMAR_LOWEST_ADDR {
                return_errno_with_message!(Errno::EPERM, "the mapping address is too small");
            }
            if VMAR_CAP_ADDR - offset < map_size {
                return_errno_with_message!(Errno::ENOMEM, "the mapping address is too large");
            }
            vmar.new_map(map_size, VmPerms::empty())?
                .align(align)
                .offset(offset)
        } else {
            // Static PIE program: pick an aligned address from the mmap region.

            // When executing static PIE programs, place the heap area away from the
            // general mmap region and into the unused `PIE_BASE_ADDR` space.
            // This helps avoid early collisions, since the heap grows upward while
            // the stack grows downward, and other mappings (such as the vDSO) may
            // also be placed in the mmap region.
            //
            // Reference: <https://elixir.bootlin.com/linux/v6.16.9/source/fs/binfmt_elf.c#L1293>
            heap_base = Some(PIE_BASE_ADDR);

            vmar.new_map(map_size, VmPerms::empty())?.align(align)
        };
        let aligned_range = vmar_map_options.build().map(|addr| addr..addr + map_size)?;

        // After acquiring a suitable range, we can remove the mapping and then
        // map each segment at the desired address.
        vmar.remove_mapping(aligned_range.clone())?;

        // Shrink the aligned range back to the tight (possibly unaligned) ELF bounds.
        let start_offset = elf_va_range.start - elf_va_range_aligned.start;
        let end_offset = elf_va_range_aligned.end - elf_va_range.end;

        aligned_range.start + start_offset..aligned_range.end - end_offset
    } else {
        // Not relocatable object. Map as-is.

        if elf_va_range.start < VMAR_LOWEST_ADDR {
            return_errno_with_message!(Errno::EPERM, "the mapping address is too small");
        }

        // Allocate a continuous range of virtual memory for all segments in advance.
        //
        // This is to ensure that the range does not conflict with other objects, such
        // as the interpreter.
        let elf_va_range_aligned =
            elf_va_range.start.align_down(PAGE_SIZE)..elf_va_range.end.align_up(PAGE_SIZE);
        let map_size = elf_va_range_aligned.len();

        vmar.new_map(map_size, VmPerms::empty())?
            .offset(elf_va_range_aligned.start)
            .build()?;

        // After acquiring a suitable range, we can remove the mapping and then
        // map each segment at the desired address.
        vmar.remove_mapping(elf_va_range_aligned.clone())?;

        elf_va_range.clone()
    };

    let relocated_range = RelocatedRange::new(elf_va_range, map_range.start)
        .expect("`map_range` should not overflow");

    // Map each loadable segment at its relocated address.
    for loadable_phdr in elf.loadable_phdrs() {
        let map_at = relocated_range
            .relocated_addr_of(loadable_phdr.virt_range().start)
            .expect("`calc_total_vaddr_bounds()` should cover all segments");
        map_segment_vmo(loadable_phdr, elf_file, vmar, map_at)?;
    }

    // Calculate the data segment size.
    // According to Linux behavior, the data segment only includes the last loadable segment.
    // Reference: <https://elixir.bootlin.com/linux/v6.16.9/source/fs/binfmt_elf.c#L1200-L1227>
    let data_segment_size = elf.find_last_vaddr_bound().map_or(0, |range| range.len());

    Ok(ElfMappedInfo {
        full_range: relocated_range,
        data_segment_size,
        heap_base: heap_base.unwrap_or(map_range.end),
    })
}
|
|
|
|
|
|
2025-12-12 15:07:27 +00:00
|
|
|
/// Creates and maps the segment VMO to the VMAR.
///
/// Additional anonymous mappings will be created to represent trailing bytes, if any. For example,
/// this applies to the `.bss` segment.
fn map_segment_vmo(
    loadable_phdr: &LoadablePhdr,
    elf_file: &Path,
    vmar: &Vmar,
    map_at: Vaddr,
) -> Result<()> {
    // The file-backed part of the segment is mapped from the executable's page cache.
    let Some(elf_vmo) = elf_file.inode().page_cache() else {
        return_errno_with_message!(Errno::ENOEXEC, "the executable has no page cache");
    };

    let virt_range = loadable_phdr.virt_range();
    let file_range = loadable_phdr.file_range();
    trace!(
        "ELF segment: virt_range = {:#x?}, file_range = {:#x?}",
        virt_range, file_range,
    );

    // The total mapping size, page-aligned around the segment's virtual range.
    let total_map_size = {
        let vmap_start = virt_range.start.align_down(PAGE_SIZE);
        let vmap_end = virt_range.end.align_up(PAGE_SIZE);
        vmap_end - vmap_start
    };

    // The page-aligned file offset and size of the file-backed part.
    let (segment_offset, segment_size) = {
        let start = file_range.start.align_down(PAGE_SIZE);
        let end = file_range.end.align_up(PAGE_SIZE);
        (start, end - start)
    };

    let perms = loadable_phdr.vm_perms();
    let offset = map_at.align_down(PAGE_SIZE);

    if segment_size != 0 {
        // Map the file-backed part of the segment from the page cache VMO.
        let mut vm_map_options = vmar
            .new_map(segment_size, perms)?
            .vmo(elf_vmo.clone())
            .path(elf_file.clone())
            .vmo_offset(segment_offset)
            .can_overwrite(true);
        vm_map_options = vm_map_options.offset(offset).handle_page_faults_around();
        let map_addr = vm_map_options.build()?;

        // Write zero as paddings if the tail is not page-aligned and map size
        // is larger than file size (e.g., `.bss`). The mapping is by default
        // private so the writes will trigger copy-on-write. Ignore errors if
        // the permissions do not allow writing.
        // Reference: <https://elixir.bootlin.com/linux/v6.17/source/fs/binfmt_elf.c#L410-L422>
        let vaddr_to_zero = map_addr + (file_range.end - segment_offset);
        let size_to_zero = map_addr + segment_size - vaddr_to_zero;
        if size_to_zero != 0 {
            let res = vmar.fill_zeros_remote(vaddr_to_zero, size_to_zero);
            // Zeroing failures only matter when the segment is writable;
            // a read-only segment cannot legitimately be zero-padded anyway.
            if let Err((err, _)) = res
                && perms.contains(VmPerms::WRITE)
            {
                return Err(err);
            }
        }
    }

    // Map the remaining file-less tail (e.g., the bulk of `.bss`) anonymously.
    let anonymous_map_size = total_map_size - segment_size;
    if anonymous_map_size > 0 {
        let mut anonymous_map_options =
            vmar.new_map(anonymous_map_size, perms)?.can_overwrite(true);
        anonymous_map_options = anonymous_map_options.offset(offset + segment_size);
        anonymous_map_options.build()?;
    }

    Ok(())
}
|
2024-03-26 07:54:08 +00:00
|
|
|
|
2025-12-12 03:00:56 +00:00
|
|
|
fn init_aux_vec(
|
2025-07-31 14:50:05 +00:00
|
|
|
elf: &ElfHeaders,
|
2025-12-12 03:00:56 +00:00
|
|
|
elf_map_range: &RelocatedRange,
|
2025-07-31 14:50:05 +00:00
|
|
|
ldso_base: Option<Vaddr>,
|
|
|
|
|
) -> Result<AuxVec> {
|
2024-03-26 07:54:08 +00:00
|
|
|
let mut aux_vec = AuxVec::new();
|
2025-12-12 03:00:56 +00:00
|
|
|
|
2026-01-13 16:39:45 +00:00
|
|
|
aux_vec.set(AuxKey::AT_PAGESZ, PAGE_SIZE as _);
|
2025-12-12 03:00:56 +00:00
|
|
|
|
|
|
|
|
let Some(ph_vaddr) = elf_map_range.relocated_addr_of(elf.find_vaddr_of_phdrs()?) else {
|
|
|
|
|
return_errno_with_message!(
|
|
|
|
|
Errno::ENOEXEC,
|
|
|
|
|
"the ELF program headers are not located in any segments"
|
|
|
|
|
);
|
2024-03-26 07:54:08 +00:00
|
|
|
};
|
2026-01-13 16:39:45 +00:00
|
|
|
aux_vec.set(AuxKey::AT_PHDR, ph_vaddr as u64);
|
|
|
|
|
aux_vec.set(AuxKey::AT_PHNUM, elf.ph_count() as u64);
|
|
|
|
|
aux_vec.set(AuxKey::AT_PHENT, elf.ph_ent() as u64);
|
2025-12-12 03:00:56 +00:00
|
|
|
|
|
|
|
|
let Some(entry_vaddr) = elf_map_range.relocated_addr_of(elf.entry_point()) else {
|
|
|
|
|
return_errno_with_message!(
|
|
|
|
|
Errno::ENOEXEC,
|
|
|
|
|
"the entry point is not located in any segments"
|
|
|
|
|
);
|
2024-03-26 07:54:08 +00:00
|
|
|
};
|
2026-01-13 16:39:45 +00:00
|
|
|
aux_vec.set(AuxKey::AT_ENTRY, entry_vaddr as u64);
|
2024-03-26 07:54:08 +00:00
|
|
|
|
|
|
|
|
if let Some(ldso_base) = ldso_base {
|
2026-01-13 16:39:45 +00:00
|
|
|
aux_vec.set(AuxKey::AT_BASE, ldso_base as u64);
|
2024-03-26 07:54:08 +00:00
|
|
|
}
|
2025-12-12 03:00:56 +00:00
|
|
|
|
2024-03-26 07:54:08 +00:00
|
|
|
Ok(aux_vec)
|
|
|
|
|
}
|
|
|
|
|
|
2025-08-21 13:35:23 +00:00
|
|
|
/// Maps the vDSO VMO to the corresponding virtual memory address.
///
/// Returns the base address of the vDSO text segment, or `None` if the
/// vDSO VMO is not available.
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
fn map_vdso_to_vmar(vmar: &Vmar) -> Option<Vaddr> {
    use crate::vdso::{VDSO_VMO_LAYOUT, vdso_vmo};

    let vdso_vmo = vdso_vmo()?;

    // Map the whole vDSO VMO with empty permissions first; the data and
    // text segments are given their proper permissions below.
    let options = vmar
        .new_map(VDSO_VMO_LAYOUT.size, VmPerms::empty())
        .unwrap()
        .vmo(vdso_vmo);

    let vdso_vmo_base = options.build().unwrap();
    let vdso_data_base = vdso_vmo_base + VDSO_VMO_LAYOUT.data_segment_offset;
    let vdso_text_base = vdso_vmo_base + VDSO_VMO_LAYOUT.text_segment_offset;

    // Data is read-only; text is readable and executable.
    let data_perms = VmPerms::READ;
    let text_perms = VmPerms::READ | VmPerms::EXEC;
    vmar.protect(
        data_perms,
        vdso_data_base..(vdso_data_base + VDSO_VMO_LAYOUT.data_segment_size),
    )
    .unwrap();
    vmar.protect(
        text_perms,
        vdso_text_base..(vdso_text_base + VDSO_VMO_LAYOUT.text_segment_size),
    )
    .unwrap();

    Some(vdso_text_base)
}
|