diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index 8ad803a66..a6d4493c1 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -73,6 +73,8 @@ pub mod syscall; pub mod thread; pub mod time; mod util; +// TODO: Add vDSO support for other architectures. +#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))] pub(crate) mod vdso; pub mod vm; @@ -106,6 +108,7 @@ pub fn init() { fs::rootfs::init(boot_info().initramfs.expect("No initramfs found!")).unwrap(); device::init().unwrap(); syscall::init(); + #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))] vdso::init(); process::init(); } diff --git a/kernel/src/process/program_loader/elf/load_elf.rs b/kernel/src/process/program_loader/elf/load_elf.rs index 420bc0b62..f21e1192b 100644 --- a/kernel/src/process/program_loader/elf/load_elf.rs +++ b/kernel/src/process/program_loader/elf/load_elf.rs @@ -25,7 +25,6 @@ use crate::{ process_vm::{AuxKey, AuxVec, ProcessVm}, TermStatus, }, - vdso::{vdso_vmo, VDSO_VMO_SIZE}, vm::{ perms::VmPerms, util::duplicate_frame, @@ -49,10 +48,15 @@ pub fn load_elf_to_vm( let ldso = lookup_and_parse_ldso(&elf_headers, &elf_file, fs_resolver)?; match init_and_map_vmos(process_vm, ldso, &elf_headers, &elf_file) { + #[cfg_attr( + not(any(target_arch = "x86_64", target_arch = "riscv64")), + expect(unused_mut) + )] Ok((_range, entry_point, mut aux_vec)) => { - // Map and set vdso entry. - // Since vdso does not require being mapped to any specific address, - // vdso is mapped after the elf file, heap and stack are mapped. + // Map the vDSO and set the entry. + // Since the vDSO does not require being mapped to any specific address, + // the vDSO is mapped after the ELF file, heap, and stack. 
+ #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))] if let Some(vdso_text_base) = map_vdso_to_vm(process_vm) { aux_vec .set(AuxKey::AT_SYSINFO_EHDR, vdso_text_base as u64) @@ -507,26 +511,36 @@ pub fn init_aux_vec( } /// Maps the vDSO VMO to the corresponding virtual memory address. +#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))] fn map_vdso_to_vm(process_vm: &ProcessVm) -> Option<Vaddr> { + use crate::vdso::{vdso_vmo, VDSO_VMO_LAYOUT}; + let process_vmar = process_vm.lock_root_vmar(); let root_vmar = process_vmar.unwrap(); let vdso_vmo = vdso_vmo()?; let options = root_vmar - .new_map(VDSO_VMO_SIZE, VmPerms::empty()) + .new_map(VDSO_VMO_LAYOUT.size, VmPerms::empty()) .unwrap() .vmo(vdso_vmo.dup().unwrap()); - let vdso_data_base = options.build().unwrap(); - let vdso_text_base = vdso_data_base + 0x4000; + let vdso_vmo_base = options.build().unwrap(); + let vdso_data_base = vdso_vmo_base + VDSO_VMO_LAYOUT.data_segment_offset; + let vdso_text_base = vdso_vmo_base + VDSO_VMO_LAYOUT.text_segment_offset; let data_perms = VmPerms::READ | VmPerms::WRITE; let text_perms = VmPerms::READ | VmPerms::EXEC; root_vmar - .protect(data_perms, vdso_data_base..vdso_data_base + PAGE_SIZE) + .protect( + data_perms, + vdso_data_base..(vdso_data_base + VDSO_VMO_LAYOUT.data_segment_size), + ) .unwrap(); root_vmar - .protect(text_perms, vdso_text_base..vdso_text_base + PAGE_SIZE) + .protect( + text_perms, + vdso_text_base..(vdso_text_base + VDSO_VMO_LAYOUT.text_segment_size), + ) .unwrap(); Some(vdso_text_base) } diff --git a/kernel/src/vdso.rs b/kernel/src/vdso.rs index c9f3d0fd8..207153200 100644 --- a/kernel/src/vdso.rs +++ b/kernel/src/vdso.rs @@ -19,6 +19,7 @@ use aster_rights::Rights; use aster_time::{read_monotonic_time, Instant}; use aster_util::coeff::Coeff; use ostd::{ + const_assert, mm::{UFrame, VmIo, VmIoOnce, PAGE_SIZE}, sync::SpinLock, Pod, @@ -186,6 +187,12 @@ impl VdsoData { } } +macro_rules! 
vdso_data_field_offset { + ($field:ident) => { + VDSO_VMO_LAYOUT.data_offset + core::mem::offset_of!(VdsoData, $field) + }; +} + /// The vDSO singleton. /// /// See [the module-level documentations](self) for more about the vDSO mechanism. @@ -201,9 +208,6 @@ struct Vdso { data_frame: UFrame, } -/// The size of the vDSO VMO. -pub const VDSO_VMO_SIZE: usize = 5 * PAGE_SIZE; - impl Vdso { /// Constructs a new `Vdso`, including an initialized `VdsoData` and a VMO of the vDSO. fn new() -> Self { @@ -211,10 +215,12 @@ impl Vdso { vdso_data.init(); let (vdso_vmo, data_frame) = { - let vmo_options = VmoOptions::<Rights>::new(VDSO_VMO_SIZE); + let vmo_options = VmoOptions::<Rights>::new(VDSO_VMO_LAYOUT.size); let vdso_vmo = vmo_options.alloc().unwrap(); // Write vDSO data to vDSO VMO. - vdso_vmo.write_bytes(0x80, vdso_data.as_bytes()).unwrap(); + vdso_vmo + .write_bytes(VDSO_VMO_LAYOUT.data_offset, vdso_data.as_bytes()) + .unwrap(); let vdso_lib_vmo = { let vdso_path = FsPath::new(AT_FDCWD, "/lib/x86_64-linux-gnu/vdso64.so").unwrap(); @@ -222,10 +228,12 @@ let vdso_lib = fs_resolver.lookup(&vdso_path).unwrap(); vdso_lib.inode().page_cache().unwrap() }; - let mut vdso_text = Box::new([0u8; PAGE_SIZE]); + let mut vdso_text = Box::new([0u8; VDSO_VMO_LAYOUT.text_segment_size]); vdso_lib_vmo.read_bytes(0, &mut *vdso_text).unwrap(); // Write vDSO library to vDSO VMO. - vdso_vmo.write_bytes(0x4000, &*vdso_text).unwrap(); + vdso_vmo + .write_bytes(VDSO_VMO_LAYOUT.text_segment_offset, &*vdso_text) + .unwrap(); let data_frame = vdso_vmo.try_commit_page(0).unwrap(); (vdso_vmo, data_frame) @@ -244,9 +252,13 @@ impl Vdso { data.update_high_res_instant(instant, instant_cycles); // Update begins. 
- self.data_frame.write_once(0x80, &1).unwrap(); + self.data_frame + .write_once(vdso_data_field_offset!(seq), &1) + .unwrap(); - self.data_frame.write_val(0x88, &instant_cycles).unwrap(); + self.data_frame + .write_val(vdso_data_field_offset!(last_cycles), &instant_cycles) + .unwrap(); for clock_id in HIGH_RES_CLOCK_IDS { self.update_data_frame_instant(clock_id, &mut data); } @@ -254,7 +266,9 @@ impl Vdso { // Update finishes. // FIXME: To synchronize with the vDSO library, this needs to be an atomic write with the // Release memory order. - self.data_frame.write_once(0x80, &0).unwrap(); + self.data_frame + .write_once(vdso_data_field_offset!(seq), &0) + .unwrap(); } fn update_coarse_res_instant(&self, instant: Instant) { @@ -263,7 +277,9 @@ impl Vdso { data.update_coarse_res_instant(instant); // Update begins. - self.data_frame.write_once(0x80, &1).unwrap(); + self.data_frame + .write_once(vdso_data_field_offset!(seq), &1) + .unwrap(); for clock_id in COARSE_RES_CLOCK_IDS { self.update_data_frame_instant(clock_id, &mut data); @@ -272,15 +288,20 @@ impl Vdso { // Update finishes. // FIXME: To synchronize with the vDSO library, this needs to be an atomic write with the // Release memory order. - self.data_frame.write_once(0x80, &0).unwrap(); + self.data_frame + .write_once(vdso_data_field_offset!(seq), &0) + .unwrap(); } /// Updates the requisite fields of the vDSO data in the frame. 
fn update_data_frame_instant(&self, clockid: ClockId, data: &mut VdsoData) { let clock_index = clockid as usize; - let secs_offset = 0xA0 + clock_index * 0x10; - let nanos_info_offset = 0xA8 + clock_index * 0x10; + let secs_offset = + vdso_data_field_offset!(basetime) + clock_index * size_of::<VdsoInstant>(); + let nanos_info_offset = vdso_data_field_offset!(basetime) + + core::mem::offset_of!(VdsoInstant, nanos_info) + + clock_index * size_of::<VdsoInstant>(); self.data_frame .write_val(secs_offset, &data.basetime[clock_index].secs) .unwrap(); @@ -340,3 +361,55 @@ pub(super) fn init() { pub(crate) fn vdso_vmo() -> Option<Arc<Vmo>> { VDSO.get().map(|vdso| vdso.vmo.clone()) } + +#[cfg(target_arch = "x86_64")] +pub const VDSO_VMO_LAYOUT: VdsoVmoLayout = VdsoVmoLayout { + // https://elixir.bootlin.com/linux/v6.2.10/source/arch/x86/entry/vdso/vdso-layout.lds.S#L20 + data_segment_offset: 0, + data_segment_size: PAGE_SIZE, + // https://elixir.bootlin.com/linux/v6.2.10/source/arch/x86/entry/vdso/vdso-layout.lds.S#L19 + text_segment_offset: 4 * PAGE_SIZE, + text_segment_size: PAGE_SIZE, + // https://elixir.bootlin.com/linux/v6.2.10/source/arch/x86/include/asm/vvar.h#L51 + data_offset: 0x80, + + size: 5 * PAGE_SIZE, +}; + +#[cfg(target_arch = "riscv64")] +pub const VDSO_VMO_LAYOUT: VdsoVmoLayout = VdsoVmoLayout { + // https://elixir.bootlin.com/linux/v6.2.10/source/arch/riscv/kernel/vdso.c#L247 + data_segment_offset: 0, + data_segment_size: PAGE_SIZE, + // https://elixir.bootlin.com/linux/v6.2.10/source/arch/riscv/kernel/vdso.c#L256 + text_segment_offset: 2 * PAGE_SIZE, + text_segment_size: PAGE_SIZE, + // https://elixir.bootlin.com/linux/v6.2.10/source/arch/riscv/kernel/vdso.c#L47 + data_offset: 0, + + size: 3 * PAGE_SIZE, +}; + +pub struct VdsoVmoLayout { + pub data_segment_offset: usize, + pub data_segment_size: usize, + pub text_segment_offset: usize, + pub text_segment_size: usize, + pub data_offset: usize, + pub size: usize, +} + +const_assert!(VDSO_VMO_LAYOUT.data_segment_offset % PAGE_SIZE == 0); 
+const_assert!(VDSO_VMO_LAYOUT.data_segment_size % PAGE_SIZE == 0); +const_assert!(VDSO_VMO_LAYOUT.text_segment_offset % PAGE_SIZE == 0); +const_assert!(VDSO_VMO_LAYOUT.text_segment_size % PAGE_SIZE == 0); +const_assert!(VDSO_VMO_LAYOUT.size % PAGE_SIZE == 0); + +// Ensure that the vDSO data at `VDSO_VMO_LAYOUT.data_offset` is in the data segment. +// +// `VDSO_VMO_LAYOUT.data_segment_offset <= VDSO_VMO_LAYOUT.data_offset` should also hold, but we +// skipped that assertion due to the broken `clippy::absurd_extreme_comparisons` lint. +const_assert!( + VDSO_VMO_LAYOUT.data_offset + size_of::<VdsoData>() + <= VDSO_VMO_LAYOUT.data_segment_offset + VDSO_VMO_LAYOUT.data_segment_size +);