diff --git a/kernel/src/syscall/shmat.rs b/kernel/src/syscall/shmat.rs
index 0c787fc92..cf0aaa3eb 100644
--- a/kernel/src/syscall/shmat.rs
+++ b/kernel/src/syscall/shmat.rs
@@ -30,4 +30,125 @@ bitflags! {
 }
 
 pub fn sys_shmat(shmid: i32, addr: u64, flags: i32, ctx: &Context) -> Result<SyscallReturn> {
+    debug!(
+        "[sys_shmat] shmid = {}, addr = {:#x}, flags = {}",
+        shmid, addr, flags
+    );
+    if shmid < 0 {
+        return_errno!(Errno::EINVAL);
+    }
+
+    let manager = SHM_OBJ_MANAGER.get().ok_or(Errno::EINVAL)?;
+    let manager_guard = manager.read();
+    let shm_obj = manager_guard
+        .get_shm_obj(shmid as u64)
+        .ok_or(Errno::EINVAL)?;
+
+    let shm_flags = ShmFlags::from_bits(flags as u32).ok_or(Errno::EINVAL)?;
+    let mut vm_perms = VmPerms::empty();
+    if shm_flags.contains(ShmFlags::RDONLY) {
+        if !shm_obj.mode()?.contains(InodeMode::S_IRUSR) {
+            return_errno!(Errno::EACCES);
+        }
+        vm_perms |= VmPerms::READ;
+    } else {
+        vm_perms |= VmPerms::READ | VmPerms::WRITE;
+    }
+    if shm_flags.contains(ShmFlags::EXEC) {
+        if !shm_obj.mode()?.contains(InodeMode::S_IXUSR) {
+            return_errno!(Errno::EACCES);
+        }
+        vm_perms |= VmPerms::EXEC;
+    }
+
+    let len = shm_obj.size().align_up(PAGE_SIZE);
+    let addr = if addr == 0 {
+        // If `addr` is 0, the system chooses the attach address.
+        0_usize
+    } else if shm_flags.contains(ShmFlags::RND) {
+        // If `SHM_RND` is set, round the address down to a multiple of SHMLBA.
+        addr.align_down(SHMLBA as u64) as usize
+    } else if !addr.is_multiple_of(SHMLBA as u64) {
+        // Otherwise, an address that is not SHMLBA-aligned is an error.
+        return_errno!(Errno::EINVAL);
+    } else {
+        // Use the provided address as-is.
+        addr as usize
+    };
+
+    // Check bounds
+    if len == 0 {
+        return_errno_with_message!(Errno::EINVAL, "shmat len cannot be zero");
+    }
+    if len > isize::MAX as usize {
+        return_errno_with_message!(Errno::ENOMEM, "shmat len too large");
+    }
+    if addr > isize::MAX as usize - len {
+        return_errno_with_message!(Errno::ENOMEM, "shmat (addr + len) too large");
+    }
+
+    // Check fixed address bounds if specified
+    if addr != 0 {
+        let map_end = addr.checked_add(len).ok_or(Errno::EINVAL)?;
+        if !(is_userspace_vaddr(addr) && is_userspace_vaddr(map_end - 1)) {
+            return_errno_with_message!(Errno::EINVAL, "Invalid shmat fixed addr");
+        }
+    }
+
+    // On x86, `PROT_WRITE` implies `PROT_READ`.
+    #[cfg(target_arch = "x86_64")]
+    let vm_perms = if !vm_perms.contains(VmPerms::READ) && vm_perms.contains(VmPerms::WRITE) {
+        vm_perms | VmPerms::READ
+    } else {
+        vm_perms
+    };
+
+    let mut may_perms = VmPerms::empty();
+    if vm_perms.contains(VmPerms::READ) {
+        may_perms |= VmPerms::MAY_READ;
+    }
+    if vm_perms.contains(VmPerms::WRITE) {
+        may_perms |= VmPerms::MAY_WRITE;
+    }
+    if vm_perms.contains(VmPerms::EXEC) {
+        may_perms |= VmPerms::MAY_EXEC;
+    }
+
+    let user_space = ctx.user_space();
+    let root_vmar = user_space.vmar();
+    if addr != 0 && !shm_flags.contains(ShmFlags::REMAP) {
+        let map_end = addr.checked_add(len).ok_or(Errno::EINVAL)?;
+        if root_vmar.query(addr..map_end).iter().next().is_some() {
+            return_errno!(Errno::EINVAL);
+        }
+    }
+
+    // Mark the shared memory object as attached before mapping it.
+    let attached_shm = shm_obj.set_attached(ctx.process.pid());
+
+    // Hold the manager guard up to this point as a global coordinator, so
+    // that the shared memory object cannot be deleted while it is being
+    // mapped.
+    drop(manager_guard);
+
+    let vm_map_options = {
+        let mut options = root_vmar.new_map(len, vm_perms)?.may_perms(may_perms);
+        if addr != 0 {
+            options = options
+                .offset(addr)
+                .can_overwrite(shm_flags.contains(ShmFlags::REMAP));
+        }
+        options = options.is_shared(true);
+        let vmo = shm_obj.vmo();
+        options = options
+            .vmo(vmo)
+            .attached_shm(attached_shm)
+            .vmo_offset(0)
+            .handle_page_faults_around();
+        options
+    };
+    // FIXME: Need to check whether the current process has permission to
+    // access the shared memory object.
+    let map_addr = vm_map_options.build()?;
+
+    Ok(SyscallReturn::Return(map_addr as _))
 }
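Note on the hunk above: the address-selection chain follows the shmat(2) contract, where a null address lets the VMAR pick a free range, SHM_RND rounds the given address down to an SHMLBA boundary, and any other unaligned address fails with EINVAL. A minimal standalone sketch of just that decision, assuming SHMLBA equals a 4 KiB page and using plain bit math in place of the kernel's align helpers (resolve_attach_addr is a hypothetical name, not part of this patch):

    const SHMLBA: u64 = 4096; // assumed equal to PAGE_SIZE for this sketch

    /// Mirrors the `addr` resolution in `sys_shmat` above: 0 means "kernel
    /// chooses", SHM_RND rounds down to SHMLBA, anything else must already
    /// be SHMLBA-aligned.
    fn resolve_attach_addr(addr: u64, shm_rnd: bool) -> Result<u64, &'static str> {
        if addr == 0 {
            Ok(0) // the VMAR will pick a free range
        } else if shm_rnd {
            Ok(addr & !(SHMLBA - 1)) // round down to an SHMLBA boundary
        } else if addr % SHMLBA != 0 {
            Err("EINVAL: unaligned address without SHM_RND")
        } else {
            Ok(addr)
        }
    }

    fn main() {
        assert_eq!(resolve_attach_addr(0, false), Ok(0));
        assert_eq!(resolve_attach_addr(0x1234, true), Ok(0x1000));
        assert_eq!(resolve_attach_addr(0x2000, false), Ok(0x2000));
        assert!(resolve_attach_addr(0x1234, false).is_err());
    }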
diff --git a/kernel/src/vm/vmar/vm_mapping.rs b/kernel/src/vm/vmar/vm_mapping.rs
index 59d4f7676..e386b12b5 100644
--- a/kernel/src/vm/vmar/vm_mapping.rs
+++ b/kernel/src/vm/vmar/vm_mapping.rs
@@ -21,9 +21,11 @@ use super::{RssType, interval_set::Interval, util::is_intersected, vmar_impls::R
 use crate::{
     fs::utils::Inode,
     prelude::*,
+    process::Process,
     thread::exception::PageFaultInfo,
     vm::{
         perms::VmPerms,
+        shared_mem::{AttachedShm, SHM_OBJ_MANAGER},
         vmo::{CommitFlags, Vmo, VmoCommitError},
     },
 };
@@ -114,16 +116,26 @@ impl VmMapping {
     pub(super) fn new_fork(&self) -> VmMapping {
         VmMapping {
+            map_size: self.map_size,
+            map_to_addr: self.map_to_addr,
             mapped_mem: self.mapped_mem.dup(),
             inode: self.inode.clone(),
-            ..*self
+            is_shared: self.is_shared,
+            handle_page_faults_around: self.handle_page_faults_around,
+            perms: self.perms,
         }
     }
 
     pub(super) fn clone_for_remap_at(&self, va: Vaddr) -> VmMapping {
-        let mut vm_mapping = self.new_fork();
-        vm_mapping.map_to_addr = va;
-        vm_mapping
+        VmMapping {
+            map_size: self.map_size,
+            map_to_addr: va,
+            mapped_mem: self.mapped_mem.dup(),
+            inode: self.inode.clone(),
+            is_shared: self.is_shared,
+            handle_page_faults_around: self.handle_page_faults_around,
+            perms: self.perms,
+        }
     }
 
     /// Returns the mapping's start address.
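Note on the new_fork/clone_for_remap_at hunk above: dropping `..*self` in favor of an explicit field list is presumably deliberate. Functional-update syntax through `&self` copies the remaining fields out of the reference, so it only compiles while every remaining field is Copy, and it silently picks up any Copy field added to VmMapping later. Naming each field turns a future addition, such as another attachment handle, into a compile error at every duplication site. A self-contained sketch of the pattern, using a toy Mapping type rather than the kernel's:

    struct Mapping {
        size: usize,   // Copy
        addr: usize,   // Copy
        label: String, // not Copy: `Mapping { addr, ..*self }` would not compile here
    }

    impl Mapping {
        /// Explicitly naming every field keeps the duplication semantics
        /// obvious and turns a newly added field into a compile error at
        /// this site, instead of letting `..*self` copy it implicitly.
        fn clone_at(&self, addr: usize) -> Mapping {
            Mapping {
                size: self.size,
                addr,
                label: self.label.clone(),
            }
        }
    }

    fn main() {
        let m = Mapping { size: 4096, addr: 0x1000, label: "shm".into() };
        let n = m.clone_at(0x2000);
        assert_eq!(n.size, 4096);
        assert_eq!(n.addr, 0x2000);
        assert_eq!(n.label, "shm");
    }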
@@ -146,11 +158,37 @@ impl VmMapping {
         self.perms
     }
 
+    /// Returns the shared memory attachment identifier if the mapping comes
+    /// from `shmat`.
+    pub fn attached_shm(&self) -> Option<AttachedShm> {
+        match &self.mapped_mem {
+            MappedMemory::Vmo(vmo) => vmo.attached_shm(),
+            _ => None,
+        }
+    }
+
     /// Returns the inode of the file that backs the mapping.
     pub fn inode(&self) -> Option<&Arc<dyn Inode>> {
         self.inode.as_ref()
     }
 
+    /// Returns the mapped VMO offset (in bytes) if this mapping is VMO-backed.
+    pub fn vmo_offset(&self) -> Option<usize> {
+        match &self.mapped_mem {
+            MappedMemory::Vmo(vmo) => Some(vmo.offset()),
+            _ => None,
+        }
+    }
+
+    /// Returns the underlying VMO size (in bytes) if this mapping is
+    /// VMO-backed.
+    pub fn vmo_size(&self) -> Option<usize> {
+        match &self.mapped_mem {
+            MappedMemory::Vmo(vmo) => Some(vmo.vmo().size()),
+            _ => None,
+        }
+    }
+
     /// Returns a reference to the VMO if this mapping is VMO-backed.
     pub(super) fn vmo(&self) -> Option<&MappedVmo> {
         match &self.mapped_mem {
@@ -716,9 +754,9 @@ pub(super) enum MappedMemory {
     Anonymous,
     /// Memory in a [`Vmo`].
     ///
-    /// These pages are associated with regular files that are backed by the page cache. On-demand
-    /// population is possible by enabling page fault handlers to allocate pages and read the page
-    /// content from the disk.
+    /// These pages are associated with regular files or shared memory objects that are backed by
+    /// kernel-managed memory such as the page cache. On-demand population is possible by enabling
+    /// page fault handlers to allocate pages and read their content from the disk or shared memory.
     Vmo(MappedVmo),
     /// Device memory.
     ///
@@ -748,11 +786,18 @@ pub(super) struct MappedVmo {
     /// Whether the VMO's writable mappings need to be tracked, and the
     /// mapping is writable to the VMO.
     is_writable_tracked: bool,
+    /// Shared memory attachment identifier if this mapping comes from `shmat`.
+    attached_shm: Option<AttachedShm>,
 }
 
 impl MappedVmo {
     /// Creates a `MappedVmo` used for the mapping.
-    pub(super) fn new(vmo: Arc<Vmo>, offset: usize, is_writable_tracked: bool) -> Result<Self> {
+    pub(super) fn new(
+        vmo: Arc<Vmo>,
+        offset: usize,
+        is_writable_tracked: bool,
+        attached_shm: Option<AttachedShm>,
+    ) -> Result<Self> {
         if is_writable_tracked {
             vmo.writable_mapping_status().map()?;
         }
@@ -761,6 +806,7 @@ impl MappedVmo {
             vmo,
             offset,
             is_writable_tracked,
+            attached_shm,
         })
     }
@@ -835,12 +881,32 @@
             self.vmo.writable_mapping_status().increment();
         }
 
+        if let Some(attached_shm) = self.attached_shm
+            && let Some(manager) = SHM_OBJ_MANAGER.get()
+        {
+            let pid = Process::current().map(|p| p.pid()).unwrap_or(0);
+            let shm_obj = {
+                let guard = manager.read();
+                guard.get_shm_obj(attached_shm.shmid)
+            };
+            if let Some(shm_obj) = shm_obj {
+                shm_obj.inc_nattch_for_mapping(pid);
+            }
+        }
+
         Self {
             vmo: self.vmo.clone(),
             offset,
             is_writable_tracked: self.is_writable_tracked,
+            attached_shm: self.attached_shm,
         }
     }
+
+    /// Returns the shared memory attachment identifier if this mapping comes
+    /// from `shmat`.
+    pub fn attached_shm(&self) -> Option<AttachedShm> {
+        self.attached_shm
+    }
 }
 
 impl Drop for MappedVmo {
@@ -848,6 +914,24 @@ fn drop(&mut self) {
         if self.is_writable_tracked {
             self.vmo.writable_mapping_status().decrement();
         }
+
+        if let Some(attached_shm) = self.attached_shm
+            && let Some(manager) = SHM_OBJ_MANAGER.get()
+        {
+            let pid = Process::current().map(|p| p.pid()).unwrap_or(0);
+            let shm_obj = {
+                let guard = manager.read();
+                guard.get_shm_obj(attached_shm.shmid)
+            };
+            let Some(shm_obj) = shm_obj else {
+                return;
+            };
+
+            shm_obj.set_detached(pid);
+            if shm_obj.should_be_deleted() {
+                let _ = manager.write().try_delete_shm_obj(attached_shm.shmid);
+            }
+        }
     }
 }
@@ -880,6 +964,9 @@ fn try_merge(left: &VmMapping, right: &VmMapping) -> Option<VmMapping> {
     let r_vmo = r_vmo_obj.vmo();
 
     if Arc::ptr_eq(l_vmo, r_vmo) {
+        if l_vmo_obj.attached_shm() != r_vmo_obj.attached_shm() {
+            return None;
+        }
         let is_offset_contiguous =
             l_vmo_obj.offset() + left.map_size() == r_vmo_obj.offset();
         if !is_offset_contiguous {
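Note on the Drop and try_merge hunks above: dropping the last MappedVmo for a segment is what finally reaps a segment removed with shmctl(IPC_RMID), and try_merge now refuses to merge mappings whose attachment identifiers differ, which keeps the per-attachment accounting exact. Assuming should_be_deleted encodes the usual IPC_RMID rule (marked for removal and no attachments left), the lifecycle reduces to the following self-contained sketch; ShmTable and Attachment are toy stand-ins, not this patch's types:

    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};

    /// Toy stand-in for the shm object table: id -> (nattch, marked_for_rmid).
    struct ShmTable {
        segments: Mutex<HashMap<u64, (usize, bool)>>,
    }

    /// Toy stand-in for a `MappedVmo` that holds an `AttachedShm`.
    struct Attachment {
        table: Arc<ShmTable>,
        id: u64,
    }

    impl Drop for Attachment {
        /// Mirrors `MappedVmo::drop` above: detach first, then delete the
        /// segment only if it is both marked for removal and fully detached.
        fn drop(&mut self) {
            let mut segs = self.table.segments.lock().unwrap();
            let reap = match segs.get_mut(&self.id) {
                Some((nattch, marked)) => {
                    *nattch -= 1;
                    *nattch == 0 && *marked
                }
                None => false,
            };
            if reap {
                segs.remove(&self.id);
            }
        }
    }

    fn main() {
        let table = Arc::new(ShmTable {
            segments: Mutex::new(HashMap::from([(42, (2, true))])),
        });
        let a = Attachment { table: table.clone(), id: 42 };
        let b = Attachment { table: table.clone(), id: 42 };
        drop(a); // nattch: 2 -> 1, the segment survives
        drop(b); // nattch: 1 -> 0 while marked, the segment is reaped
        assert!(table.segments.lock().unwrap().get(&42).is_none());
    }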
diff --git a/kernel/src/vm/vmar/vmar_impls/map.rs b/kernel/src/vm/vmar/vmar_impls/map.rs
index 982aac930..90db92090 100644
--- a/kernel/src/vm/vmar/vmar_impls/map.rs
+++ b/kernel/src/vm/vmar/vmar_impls/map.rs
@@ -6,7 +6,7 @@ use super::{MappedMemory, MappedVmo, RssDelta, VmMapping, Vmar};
 use crate::{
     fs::{file_handle::Mappable, ramfs::memfd::MemfdInode},
     prelude::*,
-    vm::{perms::VmPerms, vmo::Vmo},
+    vm::{perms::VmPerms, shared_mem::AttachedShm, vmo::Vmo},
 };
 
 impl Vmar {
@@ -59,6 +59,8 @@ pub struct VmarMapOptions<'a> {
     can_overwrite: bool,
     // Whether the mapping is mapped with `MAP_SHARED`
     is_shared: bool,
+    // Optional shared memory attachment identifier for `shmat` segments.
+    attached_shm: Option<AttachedShm>,
     // Whether the mapping needs to handle surrounding pages when handling page fault.
     handle_page_faults_around: bool,
 }
@@ -79,6 +81,7 @@ impl<'a> VmarMapOptions<'a> {
             align: PAGE_SIZE,
             can_overwrite: false,
             is_shared: false,
+            attached_shm: None,
             handle_page_faults_around: false,
         }
     }
@@ -181,6 +184,12 @@ impl<'a> VmarMapOptions<'a> {
         self
     }
 
+    /// Sets the shared memory attachment identifier for `shmat` mappings.
+    pub fn attached_shm(mut self, attached_shm: AttachedShm) -> Self {
+        self.attached_shm = Some(attached_shm);
+        self
+    }
+
     /// Sets the mapping to handle surrounding pages when handling page fault.
     pub fn handle_page_faults_around(mut self) -> Self {
         self.handle_page_faults_around = true;
@@ -232,6 +241,7 @@ impl<'a> VmarMapOptions<'a> {
             align,
             can_overwrite,
             is_shared,
+            attached_shm,
             handle_page_faults_around,
         } = self;
@@ -300,6 +310,7 @@ impl<'a> VmarMapOptions<'a> {
                     vmo.unwrap(),
                     vmo_offset,
                     is_writable_tracked,
+                    attached_shm,
                 )?);
                 (mapped_mem, Some(inode), None)
             }
@@ -307,7 +318,7 @@ impl<'a> VmarMapOptions<'a> {
             }
         } else if let Some(vmo) = vmo {
             (
-                MappedMemory::Vmo(MappedVmo::new(vmo, vmo_offset, false)?),
+                MappedMemory::Vmo(MappedVmo::new(vmo, vmo_offset, false, attached_shm)?),
                 None,
                 None,
             )
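Note on the builder changes above: the new option is threaded through like the existing ones, defaulting to None in new(), set by a consuming setter, and destructured in build() so a forgotten field cannot compile. A self-contained sketch of that options-builder shape, with a toy MapOptions in place of the real VmarMapOptions:

    /// Toy attachment identifier, standing in for the patch's `AttachedShm`.
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct AttachedShm {
        shmid: u64,
    }

    /// Toy options builder mirroring the `VmarMapOptions` shape: optional
    /// fields default in `new()` and are filled in by consuming setters.
    struct MapOptions {
        size: usize,
        is_shared: bool,
        attached_shm: Option<AttachedShm>,
    }

    impl MapOptions {
        fn new(size: usize) -> Self {
            MapOptions { size, is_shared: false, attached_shm: None }
        }

        fn is_shared(mut self, is_shared: bool) -> Self {
            self.is_shared = is_shared;
            self
        }

        fn attached_shm(mut self, attached_shm: AttachedShm) -> Self {
            self.attached_shm = Some(attached_shm);
            self
        }

        /// `build()` destructures the options exactly once, so a field added
        /// to the struct but forgotten here becomes a compile error.
        fn build(self) -> Result<(usize, Option<AttachedShm>), &'static str> {
            let MapOptions { size, is_shared, attached_shm } = self;
            if size == 0 {
                return Err("EINVAL: empty mapping");
            }
            // A made-up base address; a real VMAR would allocate a free range.
            let addr = if is_shared { 0x7000_0000 } else { 0x6000_0000 };
            Ok((addr, attached_shm))
        }
    }

    fn main() {
        let (addr, shm) = MapOptions::new(4096)
            .is_shared(true)
            .attached_shm(AttachedShm { shmid: 42 })
            .build()
            .unwrap();
        assert_eq!(addr, 0x7000_0000);
        assert_eq!(shm, Some(AttachedShm { shmid: 42 }));
    }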