// SPDX-License-Identifier: MPL-2.0

use alloc::sync::Arc;
use core::ops::Deref;

use cfg_if::cfg_if;

use super::{check_and_insert_dma_mapping, remove_dma_mapping, DmaError, HasDaddr};
use crate::{
    arch::iommu,
    mm::{
        dma::{dma_type, Daddr, DmaType},
        io::VmIoOnce,
        kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE},
        page_prop::CachePolicy,
        DynUSegment, HasPaddr, Infallible, Paddr, PodOnce, UntypedMem, VmIo, VmReader, VmWriter,
        PAGE_SIZE,
    },
    prelude::*,
};

cfg_if! {
    if #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))] {
        use ::tdx_guest::tdx_is_enabled;
        use crate::arch::tdx_guest;
    }
}

/// A coherent (or consistent) DMA mapping,
/// which guarantees that the device and the CPU can
/// access the data in parallel.
///
/// The mapping will be destroyed automatically when
/// the object is dropped.
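///
/// # Examples
///
/// A minimal usage sketch (not compiled as a doctest, imports omitted) that
/// maps a freshly allocated segment and accesses it through `VmIo`,
/// mirroring the `ktest` cases at the bottom of this file:
///
/// ```ignore
/// // Allocate one untyped frame and map it for a cache-coherent device.
/// let segment = FrameAllocOptions::new()
///     .alloc_segment_with(1, |_| ())
///     .unwrap();
/// let dma = DmaCoherent::map(segment.into(), true).unwrap();
///
/// // The CPU side accesses the mapped memory through the `VmIo` methods,
/// // while the device uses `dma.daddr()` as the start of its address range.
/// dma.write_bytes(0, &[0xffu8; 4]).unwrap();
/// let mut buf = [0u8; 4];
/// dma.read_bytes(0, &mut buf).unwrap();
/// assert_eq!(buf, [0xffu8; 4]);
/// ```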
#[derive(Debug, Clone)]
pub struct DmaCoherent {
    inner: Arc<DmaCoherentInner>,
}

#[derive(Debug)]
struct DmaCoherentInner {
    segment: DynUSegment,
    start_daddr: Daddr,
    is_cache_coherent: bool,
}

impl DmaCoherent {
    /// Creates a coherent DMA mapping backed by `segment`.
    ///
    /// The `is_cache_coherent` argument specifies whether
    /// the target device that the DMA mapping is prepared for
    /// can access the main memory in a CPU cache coherent way
    /// or not.
    ///
    /// The method fails if any part of the given `segment`
    /// already belongs to a DMA mapping.
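    ///
    /// # Examples
    ///
    /// A minimal sketch (not compiled as a doctest, imports omitted) of the
    /// failure case, following the `duplicate_map` test below: mapping a
    /// sub-slice of an already-mapped segment fails with
    /// [`DmaError::AlreadyMapped`].
    ///
    /// ```ignore
    /// let segment = FrameAllocOptions::new()
    ///     .alloc_segment_with(2, |_| ())
    ///     .unwrap();
    /// let child = segment.slice(&(0..PAGE_SIZE));
    /// let _parent = DmaCoherent::map(segment.into(), false).unwrap();
    /// // The child overlaps the parent's physical range, so mapping it fails.
    /// assert!(DmaCoherent::map(child.into(), false).is_err());
    /// ```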
    pub fn map(
        segment: DynUSegment,
        is_cache_coherent: bool,
    ) -> core::result::Result<Self, DmaError> {
        let frame_count = segment.size() / PAGE_SIZE;
        let start_paddr = segment.start_paddr();
        if !check_and_insert_dma_mapping(start_paddr, frame_count) {
            return Err(DmaError::AlreadyMapped);
        }
        // Ensure that the addresses used later will not overflow.
        start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
        if !is_cache_coherent {
            let page_table = KERNEL_PAGE_TABLE.get().unwrap();
            let vaddr = paddr_to_vaddr(start_paddr);
            let va_range = vaddr..vaddr + (frame_count * PAGE_SIZE);
            // SAFETY: the physical mapping is only used by DMA, so protecting it is safe.
            unsafe {
                page_table
                    .protect_flush_tlb(&va_range, |p| p.cache = CachePolicy::Uncacheable)
                    .unwrap();
            }
        }
        let start_daddr = match dma_type() {
            DmaType::Direct => {
                #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))]
                // SAFETY:
                // This is safe because the physical address range specified by `start_paddr`
                // and `frame_count` has been validated before these operations: the
                // `check_and_insert_dma_mapping` function checks whether the range is already
                // mapped, and only the page table entries corresponding to this range are
                // modified. Therefore, none of the requirements of `unprotect_gpa_range` are
                // violated and no undefined behavior is caused.
                if tdx_is_enabled() {
                    unsafe {
                        tdx_guest::unprotect_gpa_range(start_paddr, frame_count).unwrap();
                    }
                }
                start_paddr as Daddr
            }
            DmaType::Iommu => {
                for i in 0..frame_count {
                    let paddr = start_paddr + (i * PAGE_SIZE);
                    // SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `segment`.
                    unsafe {
                        iommu::map(paddr as Daddr, paddr).unwrap();
                    }
                }
                start_paddr as Daddr
            }
        };
        Ok(Self {
            inner: Arc::new(DmaCoherentInner {
                segment,
                start_daddr,
                is_cache_coherent,
            }),
        })
    }

    /// Returns the number of bytes in the DMA mapping.
    pub fn nbytes(&self) -> usize {
        self.inner.segment.size()
    }
}

impl HasDaddr for DmaCoherent {
    fn daddr(&self) -> Daddr {
        self.inner.start_daddr
    }
}

impl Deref for DmaCoherent {
    type Target = DynUSegment;

    fn deref(&self) -> &Self::Target {
        &self.inner.segment
    }
}

impl Drop for DmaCoherentInner {
    fn drop(&mut self) {
        let frame_count = self.segment.size() / PAGE_SIZE;
        let start_paddr = self.segment.start_paddr();
        // Ensure that the addresses used later will not overflow.
        start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
        match dma_type() {
            DmaType::Direct => {
                #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))]
                // SAFETY:
                // This is safe because the physical address range specified by `start_paddr`
                // and `frame_count` has been validated before these operations: `start_paddr()`
                // ensures that `start_paddr` is page-aligned, and only the page table entries
                // corresponding to this range are modified. Therefore, none of the requirements
                // of the `protect_gpa_range` function are violated and no undefined behavior is
                // caused.
                if tdx_is_enabled() {
                    unsafe {
                        tdx_guest::protect_gpa_range(start_paddr, frame_count).unwrap();
                    }
                }
            }
            DmaType::Iommu => {
                for i in 0..frame_count {
                    let paddr = start_paddr + (i * PAGE_SIZE);
                    iommu::unmap(paddr).unwrap();
                }
            }
        }
        if !self.is_cache_coherent {
            let page_table = KERNEL_PAGE_TABLE.get().unwrap();
            let vaddr = paddr_to_vaddr(start_paddr);
            let va_range = vaddr..vaddr + (frame_count * PAGE_SIZE);
            // SAFETY: the physical mapping is only used by DMA, so protecting it is safe.
            unsafe {
                page_table
                    .protect_flush_tlb(&va_range, |p| p.cache = CachePolicy::Writeback)
                    .unwrap();
            }
        }
        remove_dma_mapping(start_paddr, frame_count);
    }
}

impl VmIo for DmaCoherent {
    fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
        self.inner.segment.read(offset, writer)
    }

    fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
        self.inner.segment.write(offset, reader)
    }
}

impl VmIoOnce for DmaCoherent {
    fn read_once<T: PodOnce>(&self, offset: usize) -> Result<T> {
        self.inner.segment.reader().skip(offset).read_once()
    }

    fn write_once<T: PodOnce>(&self, offset: usize, new_val: &T) -> Result<()> {
        self.inner.segment.writer().skip(offset).write_once(new_val)
    }
}

impl<'a> DmaCoherent {
    /// Returns a reader to read data from it.
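    ///
    /// A minimal sketch (not compiled as a doctest) of draining the mapped
    /// bytes through the returned [`VmReader`], following the
    /// `reader_and_writer` test below:
    ///
    /// ```ignore
    /// let mut buf = vec![0u8; dma_coherent.nbytes()];
    /// let mut reader = dma_coherent.reader();
    /// // `read` copies as many bytes as both the reader and the writer can hold.
    /// reader.read(&mut buf.as_mut_slice().into());
    /// ```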
    pub fn reader(&'a self) -> VmReader<'a, Infallible> {
        self.inner.segment.reader()
    }

    /// Returns a writer to write data into it.
    pub fn writer(&'a self) -> VmWriter<'a, Infallible> {
        self.inner.segment.writer()
    }
}

impl HasPaddr for DmaCoherent {
    fn paddr(&self) -> Paddr {
        self.inner.segment.start_paddr()
    }
}

#[cfg(ktest)]
mod test {
    use alloc::vec;

    use super::*;
    use crate::mm::FrameAllocOptions;

    #[ktest]
    fn map_with_coherent_device() {
        let segment = FrameAllocOptions::new()
            .alloc_segment_with(1, |_| ())
            .unwrap();
        let dma_coherent = DmaCoherent::map(segment.clone().into(), true).unwrap();
        assert!(dma_coherent.paddr() == segment.start_paddr());
    }

    #[ktest]
    fn map_with_incoherent_device() {
        let segment = FrameAllocOptions::new()
            .alloc_segment_with(1, |_| ())
            .unwrap();
        let dma_coherent = DmaCoherent::map(segment.clone().into(), false).unwrap();
        assert!(dma_coherent.paddr() == segment.start_paddr());
        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
        let vaddr = paddr_to_vaddr(segment.start_paddr());
        assert!(page_table.query(vaddr).unwrap().1.cache == CachePolicy::Uncacheable);
    }

    #[ktest]
    fn duplicate_map() {
        let segment = FrameAllocOptions::new()
            .alloc_segment_with(2, |_| ())
            .unwrap();
        let segment_child = segment.slice(&(0..PAGE_SIZE));
        let _dma_coherent_parent = DmaCoherent::map(segment.into(), false);
        let dma_coherent_child = DmaCoherent::map(segment_child.into(), false);
        assert!(dma_coherent_child.is_err());
    }

    #[ktest]
    fn read_and_write() {
        let segment = FrameAllocOptions::new()
            .alloc_segment_with(2, |_| ())
            .unwrap();
        let dma_coherent = DmaCoherent::map(segment.into(), false).unwrap();
        let buf_write = vec![1u8; 2 * PAGE_SIZE];
        dma_coherent.write_bytes(0, &buf_write).unwrap();
        let mut buf_read = vec![0u8; 2 * PAGE_SIZE];
        dma_coherent.read_bytes(0, &mut buf_read).unwrap();
        assert_eq!(buf_write, buf_read);
    }

    #[ktest]
    fn reader_and_writer() {
        let segment = FrameAllocOptions::new()
            .alloc_segment_with(2, |_| ())
            .unwrap();
        let dma_coherent = DmaCoherent::map(segment.into(), false).unwrap();
        let buf_write = vec![1u8; PAGE_SIZE];
        let mut writer = dma_coherent.writer();
        writer.write(&mut buf_write.as_slice().into());
        writer.write(&mut buf_write.as_slice().into());
        let mut buf_read = vec![0u8; 2 * PAGE_SIZE];
        let buf_write = vec![1u8; 2 * PAGE_SIZE];
        let mut reader = dma_coherent.reader();
        reader.read(&mut buf_read.as_mut_slice().into());
        assert_eq!(buf_read, buf_write);
    }
}