// asterinas/ostd/src/mm/dma/dma_coherent.rs

// SPDX-License-Identifier: MPL-2.0
use alloc::sync::Arc;
use core::ops::Deref;
use cfg_if::cfg_if;
use super::{check_and_insert_dma_mapping, remove_dma_mapping, DmaError, HasDaddr};
use crate::{
arch::iommu,
mm::{
dma::{dma_type, Daddr, DmaType},
io::VmIoOnce,
kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE},
page_prop::CachePolicy,
DynUSegment, HasPaddr, Infallible, Paddr, PodOnce, UntypedMem, VmIo, VmReader, VmWriter,
PAGE_SIZE,
},
prelude::*,
};
cfg_if! {
if #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))] {
use ::tdx_guest::tdx_is_enabled;
use crate::arch::tdx_guest;
}
}
/// A coherent (or consistent) DMA mapping,
/// which guarantees that the device and the CPU can
/// access the data concurrently and always observe a consistent view of it,
/// without explicit cache synchronization.
///
/// The mapping will be destroyed automatically when
/// the object is dropped.
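///
/// Below is a minimal usage sketch. It is marked `ignore` because it is not
/// compiled as a doc-test here, and it assumes the public `ostd::mm` paths
/// shown; frame allocation mirrors the tests at the bottom of this file.
///
/// ```ignore
/// use ostd::mm::{dma::DmaCoherent, FrameAllocOptions};
///
/// // Allocate a one-frame segment and map it for coherent DMA.
/// let segment = FrameAllocOptions::new().alloc_segment_with(1, |_| ()).unwrap();
/// let dma = DmaCoherent::map(segment.into(), /* is_cache_coherent = */ false).unwrap();
///
/// // The CPU accesses the memory through the `VmIo` methods, while a device
/// // would be programmed with the device address returned by `daddr()`.
/// dma.write_bytes(0, &[0u8; 16]).unwrap();
/// let device_visible_addr = dma.daddr();
/// ```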
#[derive(Debug, Clone)]
pub struct DmaCoherent {
inner: Arc<DmaCoherentInner>,
}
#[derive(Debug)]
struct DmaCoherentInner {
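/// The untyped memory segment that backs the DMA mapping.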
segment: DynUSegment,
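/// The device address (daddr) at which the mapping starts.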
start_daddr: Daddr,
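/// Whether the target device can access the main memory in a CPU-cache-coherent way.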
is_cache_coherent: bool,
}
impl DmaCoherent {
/// Creates a coherent DMA mapping backed by `segment`.
///
/// The `is_cache_coherent` argument specifies whether
/// the target device of the DMA mapping can access
/// the main memory in a CPU-cache-coherent way.
///
/// The method fails if any part of the given `segment`
/// already belongs to a DMA mapping.
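///
/// For instance, mapping a slice of a segment that is already mapped fails
/// with `DmaError::AlreadyMapped` (a sketch mirroring the `duplicate_map`
/// test below; not compiled as a doc-test):
///
/// ```ignore
/// let segment = FrameAllocOptions::new().alloc_segment_with(2, |_| ()).unwrap();
/// let child = segment.slice(&(0..PAGE_SIZE));
/// let _parent = DmaCoherent::map(segment.into(), false).unwrap();
/// // `child` overlaps the mapping above, so this fails.
/// assert!(DmaCoherent::map(child.into(), false).is_err());
/// ```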
pub fn map(
segment: DynUSegment,
is_cache_coherent: bool,
) -> core::result::Result<Self, DmaError> {
let frame_count = segment.size() / PAGE_SIZE;
let start_paddr = segment.start_paddr();
if !check_and_insert_dma_mapping(start_paddr, frame_count) {
return Err(DmaError::AlreadyMapped);
}
// Ensure that the addresses used later will not overflow
start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
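// For devices that are not cache coherent, remap the backing memory as
// uncacheable so that CPU accesses do not go through the CPU cache.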
if !is_cache_coherent {
let page_table = KERNEL_PAGE_TABLE.get().unwrap();
let vaddr = paddr_to_vaddr(start_paddr);
let va_range = vaddr..vaddr + (frame_count * PAGE_SIZE);
// SAFETY: the physical mapping is only used for DMA, so changing its cache policy is safe.
unsafe {
page_table
.protect_flush_tlb(&va_range, |p| p.cache = CachePolicy::Uncacheable)
.unwrap();
}
}
let start_daddr = match dma_type() {
DmaType::Direct => {
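// With direct DMA, the device address is simply the physical address.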
#[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))]
// SAFETY:
// The physical address range specified by `start_paddr` and `frame_count` is valid:
// `check_and_insert_dma_mapping` has just verified that it is not already mapped for
// DMA, and only the pages within this range are affected. Therefore, the requirements
// of `unprotect_gpa_range` are met and no undefined behavior is caused.
if tdx_is_enabled() {
unsafe {
tdx_guest::unprotect_gpa_range(start_paddr, frame_count).unwrap();
}
}
start_paddr as Daddr
}
DmaType::Iommu => {
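// Establish an identity mapping in the IOMMU: the device address of each
// page equals its physical address.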
for i in 0..frame_count {
let paddr = start_paddr + (i * PAGE_SIZE);
// SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `segment`.
unsafe {
iommu::map(paddr as Daddr, paddr).unwrap();
}
}
start_paddr as Daddr
}
};
Ok(Self {
inner: Arc::new(DmaCoherentInner {
segment,
start_daddr,
is_cache_coherent,
}),
})
}
/// Returns the number of bytes in the DMA mapping.
pub fn nbytes(&self) -> usize {
self.inner.segment.size()
}
}
impl HasDaddr for DmaCoherent {
fn daddr(&self) -> Daddr {
self.inner.start_daddr
}
}
impl Deref for DmaCoherent {
type Target = DynUSegment;
fn deref(&self) -> &Self::Target {
&self.inner.segment
}
}
impl Drop for DmaCoherentInner {
fn drop(&mut self) {
let frame_count = self.segment.size() / PAGE_SIZE;
let start_paddr = self.segment.start_paddr();
// Ensure that the addresses used later will not overflow
start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
match dma_type() {
DmaType::Direct => {
#[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))]
// SAFETY:
// The physical address range specified by `start_paddr` and `frame_count` is valid:
// `start_paddr()` guarantees that it is page-aligned, and only the pages within this
// range are affected. Therefore, the requirements of `protect_gpa_range` are met and
// no undefined behavior is caused.
if tdx_is_enabled() {
unsafe {
tdx_guest::protect_gpa_range(start_paddr, frame_count).unwrap();
}
}
}
DmaType::Iommu => {
for i in 0..frame_count {
let paddr = start_paddr + (i * PAGE_SIZE);
iommu::unmap(paddr).unwrap();
}
}
}
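// Restore the write-back cache policy for memory that was remapped as
// uncacheable in `map`.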
if !self.is_cache_coherent {
let page_table = KERNEL_PAGE_TABLE.get().unwrap();
let vaddr = paddr_to_vaddr(start_paddr);
let va_range = vaddr..vaddr + (frame_count * PAGE_SIZE);
// SAFETY: the physical mapping is only used for DMA, so restoring its cache policy is safe.
unsafe {
page_table
.protect_flush_tlb(&va_range, |p| p.cache = CachePolicy::Writeback)
.unwrap();
}
}
remove_dma_mapping(start_paddr, frame_count);
}
}
impl VmIo for DmaCoherent {
fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
self.inner.segment.read(offset, writer)
}
fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
self.inner.segment.write(offset, reader)
}
}
impl VmIoOnce for DmaCoherent {
fn read_once<T: PodOnce>(&self, offset: usize) -> Result<T> {
self.inner.segment.reader().skip(offset).read_once()
}
fn write_once<T: PodOnce>(&self, offset: usize, new_val: &T) -> Result<()> {
self.inner.segment.writer().skip(offset).write_once(new_val)
}
}
impl<'a> DmaCoherent {
/// Returns a reader to read data from the DMA mapping.
pub fn reader(&'a self) -> VmReader<'a, Infallible> {
self.inner.segment.reader()
}
/// Returns a writer to write data into the DMA mapping.
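///
/// A short round-trip sketch for an existing mapping `dma_coherent`
/// (not compiled as a doc-test; it follows the `reader_and_writer` test below):
///
/// ```ignore
/// let data = [1u8; 16];
/// let mut writer = dma_coherent.writer();
/// writer.write(&mut data.as_slice().into());
///
/// let mut buf = [0u8; 16];
/// let mut reader = dma_coherent.reader();
/// reader.read(&mut buf.as_mut_slice().into());
/// assert_eq!(buf, data);
/// ```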
pub fn writer(&'a self) -> VmWriter<'a, Infallible> {
self.inner.segment.writer()
}
}
impl HasPaddr for DmaCoherent {
fn paddr(&self) -> Paddr {
self.inner.segment.start_paddr()
}
}
#[cfg(ktest)]
mod test {
use alloc::vec;
use super::*;
use crate::mm::FrameAllocOptions;
#[ktest]
fn map_with_coherent_device() {
let segment = FrameAllocOptions::new()
.alloc_segment_with(1, |_| ())
.unwrap();
let dma_coherent = DmaCoherent::map(segment.clone().into(), true).unwrap();
assert!(dma_coherent.paddr() == segment.start_paddr());
}
#[ktest]
fn map_with_incoherent_device() {
let segment = FrameAllocOptions::new()
.alloc_segment_with(1, |_| ())
.unwrap();
let dma_coherent = DmaCoherent::map(segment.clone().into(), false).unwrap();
assert!(dma_coherent.paddr() == segment.start_paddr());
let page_table = KERNEL_PAGE_TABLE.get().unwrap();
let vaddr = paddr_to_vaddr(segment.start_paddr());
assert!(page_table.query(vaddr).unwrap().1.cache == CachePolicy::Uncacheable);
}
#[ktest]
fn duplicate_map() {
let segment = FrameAllocOptions::new()
.alloc_segment_with(2, |_| ())
.unwrap();
let segment_child = segment.slice(&(0..PAGE_SIZE));
let _dma_coherent_parent = DmaCoherent::map(segment.into(), false);
let dma_coherent_child = DmaCoherent::map(segment_child.into(), false);
assert!(dma_coherent_child.is_err());
}
#[ktest]
fn read_and_write() {
let segment = FrameAllocOptions::new()
.alloc_segment_with(2, |_| ())
.unwrap();
let dma_coherent = DmaCoherent::map(segment.into(), false).unwrap();
let buf_write = vec![1u8; 2 * PAGE_SIZE];
dma_coherent.write_bytes(0, &buf_write).unwrap();
let mut buf_read = vec![0u8; 2 * PAGE_SIZE];
dma_coherent.read_bytes(0, &mut buf_read).unwrap();
assert_eq!(buf_write, buf_read);
}
#[ktest]
fn reader_and_writer() {
let segment = FrameAllocOptions::new()
.alloc_segment_with(2, |_| ())
.unwrap();
let dma_coherent = DmaCoherent::map(segment.into(), false).unwrap();
let buf_write = vec![1u8; PAGE_SIZE];
let mut writer = dma_coherent.writer();
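// Write the one-page buffer twice; the writer's cursor advances after each
// write, so the two writes together fill the two-page mapping.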
writer.write(&mut buf_write.as_slice().into());
writer.write(&mut buf_write.as_slice().into());
let mut buf_read = vec![0u8; 2 * PAGE_SIZE];
let buf_write = vec![1u8; 2 * PAGE_SIZE];
let mut reader = dma_coherent.reader();
reader.read(&mut buf_read.as_mut_slice().into());
assert_eq!(buf_read, buf_write);
}
}