Refactor implicit `Arc` APIs for DMA

commit 4e2bdc65de
parent a6520880ab
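
Previously, `DmaStream` and `DmaCoherent` hid an `Arc` behind wrapper structs and a `Clone` impl, so sharing a mapping bumped a reference count invisibly. This commit removes the hidden wrappers: a mapping is now a plain owned value, and call sites that need shared ownership must write `Arc<DmaStream>` / `Arc<DmaCoherent>` explicitly. A minimal sketch of the call-site change (the `DmaStream`, `DmaDirection`, and `FrameAllocOptions` APIs are the ones appearing in this diff; the surrounding function is hypothetical):

    use alloc::sync::Arc;

    use ostd::mm::{DmaDirection, DmaStream, FrameAllocOptions};

    fn map_shared_stream() -> Arc<DmaStream> {
        let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
        // Before: `DmaStream::map(..)` returned a `Clone`-able handle whose
        // clones silently shared one mapping. After: sharing is explicit.
        Arc::new(DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap())
    }
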
@@ -379,7 +379,7 @@ pub struct BioSegment {
 #[derive(Debug)]
 struct BioSegmentInner {
     /// Internal DMA slice.
-    dma_slice: Slice<DmaStream>,
+    dma_slice: Slice<Arc<DmaStream>>,
     /// Whether the segment is allocated from the pool.
     from_pool: bool,
 }

@@ -435,7 +435,7 @@ impl BioSegment {
             .unwrap();
         let dma_stream = DmaStream::map(segment.into(), direction.into(), false).unwrap();
         BioSegmentInner {
-            dma_slice: Slice::new(dma_stream, offset..offset + len),
+            dma_slice: Slice::new(Arc::new(dma_stream), offset..offset + len),
             from_pool: false,
         }
     });

@@ -451,7 +451,7 @@ impl BioSegment {
         let dma_stream = DmaStream::map(segment, direction.into(), false).unwrap();
         Self {
             inner: Arc::new(BioSegmentInner {
-                dma_slice: Slice::new(dma_stream, 0..len),
+                dma_slice: Slice::new(Arc::new(dma_stream), 0..len),
                 from_pool: false,
             }),
         }

@@ -478,7 +478,7 @@ impl BioSegment {
     }

     /// Returns the inner DMA slice.
-    pub fn inner_dma_slice(&self) -> &Slice<DmaStream> {
+    pub fn inner_dma_slice(&self) -> &Slice<Arc<DmaStream>> {
         &self.inner.dma_slice
     }


@@ -531,7 +531,7 @@ impl BioSegmentInner {
 /// the `DmaStream`.
 // TODO: Use a more advanced allocation algorithm to replace the naive one to improve efficiency.
 struct BioSegmentPool {
-    pool: DmaStream,
+    pool: Arc<DmaStream>,
     total_blocks: usize,
     direction: BioDirection,
     manager: SpinLock<PoolSlotManager>,

@@ -567,7 +567,7 @@ impl BioSegmentPool {
         });

         Self {
-            pool,
+            pool: Arc::new(pool),
             total_blocks,
             direction,
             manager,

@@ -16,16 +16,16 @@ use spin::Once;
 use crate::dma_pool::{DmaPool, DmaSegment};

 pub struct TxBuffer {
-    dma_stream: DmaStream,
+    dma_stream: Arc<DmaStream>,
     nbytes: usize,
-    pool: &'static SpinLock<LinkedList<DmaStream>, BottomHalfDisabled>,
+    pool: &'static SpinLock<LinkedList<Arc<DmaStream>>, BottomHalfDisabled>,
 }

 impl TxBuffer {
     pub fn new<H: Pod>(
         header: &H,
         packet: &[u8],
-        pool: &'static SpinLock<LinkedList<DmaStream>, BottomHalfDisabled>,
+        pool: &'static SpinLock<LinkedList<Arc<DmaStream>>, BottomHalfDisabled>,
     ) -> Self {
         let header = header.as_bytes();
         let nbytes = header.len() + packet.len();

@@ -38,7 +38,7 @@ impl TxBuffer {
             let segment = FrameAllocOptions::new()
                 .alloc_segment(TX_BUFFER_LEN / PAGE_SIZE)
                 .unwrap();
-            DmaStream::map(segment.into(), DmaDirection::ToDevice, false).unwrap()
+            Arc::new(DmaStream::map(segment.into(), DmaDirection::ToDevice, false).unwrap())
         };

         let tx_buffer = {

@@ -137,7 +137,7 @@ impl DmaPool {

 #[derive(Debug)]
 struct DmaPage {
-    storage: DmaStream,
+    storage: Arc<DmaStream>,
     segment_size: usize,
     // `BitArray` is 64 bits, since each `DmaSegment` is bigger than 64 bytes,
     // there's no more than `PAGE_SIZE` / 64 = 64 `DmaSegment`s in a `DmaPage`.

@@ -160,7 +160,7 @@ impl DmaPage {
         };

         Ok(Self {
-            storage: dma_stream,
+            storage: Arc::new(dma_stream),
             segment_size,
             allocated_segments: SpinLock::new(BitArray::ZERO),
             pool,

@@ -218,7 +218,7 @@ impl HasDaddr for DmaPage {
 /// Each `DmaSegment`'s daddr must be aligned with its size.
 #[derive(Debug)]
 pub struct DmaSegment {
-    dma_stream: DmaStream,
+    dma_stream: Arc<DmaStream>,
     start_addr: Daddr,
     size: usize,
     page: Weak<DmaPage>,

@@ -109,8 +109,8 @@ struct DeviceInner {
     features: VirtioBlockFeature,
     queue: SpinLock<VirtQueue>,
     transport: SpinLock<Box<dyn VirtioTransport>>,
-    block_requests: DmaStream,
-    block_responses: DmaStream,
+    block_requests: Arc<DmaStream>,
+    block_responses: Arc<DmaStream>,
     id_allocator: SpinLock<IdAlloc>,
     submitted_requests: SpinLock<BTreeMap<u16, SubmittedRequest>>,
 }

@@ -143,12 +143,12 @@ impl DeviceInner {
             .expect("create virtqueue failed");
         let block_requests = {
             let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
-            DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap()
+            Arc::new(DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap())
         };
         assert!(Self::QUEUE_SIZE as usize * REQ_SIZE <= block_requests.size());
         let block_responses = {
             let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
-            DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap()
+            Arc::new(DmaStream::map(segment.into(), DmaDirection::Bidirectional, false).unwrap())
         };
         assert!(Self::QUEUE_SIZE as usize * RESP_SIZE <= block_responses.size());


@@ -268,7 +268,7 @@ impl DeviceInner {
                 .zeroed(false)
                 .alloc_segment(1)
                 .unwrap();
-            DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap()
+            Arc::new(DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap())
         };
         let device_id_slice = Slice::new(&device_id_stream, 0..MAX_ID_LENGTH);
         let outputs = vec![&device_id_slice, &resp_slice];

@@ -24,8 +24,8 @@ pub struct ConsoleDevice {
     transport: SpinLock<Box<dyn VirtioTransport>>,
     receive_queue: SpinLock<VirtQueue>,
     transmit_queue: SpinLock<VirtQueue>,
-    send_buffer: DmaStream,
-    receive_buffer: DmaStream,
+    send_buffer: Arc<DmaStream>,
+    receive_buffer: Arc<DmaStream>,
     #[expect(clippy::box_collection)]
     callbacks: Rcu<Box<Vec<&'static ConsoleCallback>>>,
 }

@@ -99,12 +99,12 @@ impl ConsoleDevice {

         let send_buffer = {
             let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
-            DmaStream::map(segment.into(), DmaDirection::ToDevice, false).unwrap()
+            Arc::new(DmaStream::map(segment.into(), DmaDirection::ToDevice, false).unwrap())
         };

         let receive_buffer = {
             let segment = FrameAllocOptions::new().alloc_segment(1).unwrap();
-            DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap()
+            Arc::new(DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap())
         };

         let device = Arc::new(Self {

@@ -253,7 +253,7 @@ impl InputDevice {
 /// each of which is large enough to contain a `VirtioInputEvent`.
 #[derive(Debug)]
 struct EventTable {
-    stream: DmaStream,
+    stream: Arc<DmaStream>,
     num_events: usize,
 }


@@ -270,7 +270,8 @@ impl EventTable {
             .iter()
             .all(|b| *b == 0));

-        let stream = DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap();
+        let stream =
+            Arc::new(DmaStream::map(segment.into(), DmaDirection::FromDevice, false).unwrap());
         Self { stream, num_events }
     }


@@ -360,7 +360,7 @@ impl Debug for NetworkDevice {
     }
 }

-static TX_BUFFER_POOL: SpinLock<LinkedList<DmaStream>, BottomHalfDisabled> =
+static TX_BUFFER_POOL: SpinLock<LinkedList<Arc<DmaStream>>, BottomHalfDisabled> =
     SpinLock::new(LinkedList::new());

 const QUEUE_RECV: u16 = 0;

@@ -12,7 +12,8 @@ use spin::Once;

 const RX_BUFFER_LEN: usize = 4096;
 pub static RX_BUFFER_POOL: Once<Arc<DmaPool>> = Once::new();
-pub static TX_BUFFER_POOL: Once<SpinLock<LinkedList<DmaStream>, BottomHalfDisabled>> = Once::new();
+pub static TX_BUFFER_POOL: Once<SpinLock<LinkedList<Arc<DmaStream>>, BottomHalfDisabled>> =
+    Once::new();

 pub fn init() {
     const POOL_INIT_SIZE: usize = 32;

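
The TX buffer pools above now hold `Arc<DmaStream>` handles. A hedged sketch of the recycle-or-map pattern these hunks imply (the `pool` lock, `TX_BUFFER_LEN`, and mapping calls are those shown in the diff; the exact control flow around them is illustrative):

    let dma_stream = if let Some(stream) = pool.lock().pop_front() {
        // Reuse a buffer that a completed transmission returned to the pool.
        stream
    } else {
        let segment = FrameAllocOptions::new()
            .alloc_segment(TX_BUFFER_LEN / PAGE_SIZE)
            .unwrap();
        // A fresh mapping is wrapped in `Arc` once, then recycled as a whole.
        Arc::new(DmaStream::map(segment.into(), DmaDirection::ToDevice, false).unwrap())
    };
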
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: MPL-2.0

+use alloc::sync::Arc;
+
 use aster_network::{DmaSegment, RxBuffer, TxBuffer};
 use aster_util::mem_obj_slice::Slice;
 use ostd::mm::{DmaCoherent, DmaStream, HasDaddr, HasSize};

@@ -13,29 +15,34 @@ pub trait DmaBuf: HasDaddr {
     fn len(&self) -> usize;
 }

-impl DmaBuf for DmaStream {
-    fn len(&self) -> usize {
-        self.size()
-    }
-}
-
-impl DmaBuf for Slice<DmaStream> {
-    fn len(&self) -> usize {
-        self.size()
-    }
-}
-
-impl DmaBuf for Slice<&DmaStream> {
-    fn len(&self) -> usize {
-        self.size()
-    }
-}
-
-impl DmaBuf for DmaCoherent {
-    fn len(&self) -> usize {
-        self.size()
-    }
-}
+macro_rules! impl_dma_buf_for {
+    ($($t:ty),*) => {
+        $(
+            impl DmaBuf for $t {
+                fn len(&self) -> usize {
+                    self.size()
+                }
+            }
+
+            impl DmaBuf for Slice<$t> {
+                fn len(&self) -> usize {
+                    self.size()
+                }
+            }
+        )*
+    };
+}
+
+impl_dma_buf_for!(
+    DmaStream,
+    &DmaStream,
+    Arc<DmaStream>,
+    &Arc<DmaStream>,
+    DmaCoherent,
+    &DmaCoherent,
+    Arc<DmaCoherent>,
+    &Arc<DmaCoherent>
+);

 impl DmaBuf for DmaSegment {
     fn len(&self) -> usize {

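
The `impl_dma_buf_for!` macro above replaces the per-type `DmaBuf` impls with one declarative rule that also covers `Slice` of each listed type. Hand-expanding one invocation step shows roughly what the compiler generates (a sketch; `DmaBuf`, `Slice`, and the `size()` method come from this diff):

    // For `Arc<DmaStream>`, the macro emits approximately:
    impl DmaBuf for Arc<DmaStream> {
        fn len(&self) -> usize {
            self.size() // `Arc<DmaStream>` auto-derefs to `DmaStream: HasSize`
        }
    }

    impl DmaBuf for Slice<Arc<DmaStream>> {
        fn len(&self) -> usize {
            self.size() // the slice's own length, not the whole stream's
        }
    }
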
@@ -2,7 +2,7 @@

 //! Virtqueue

-use alloc::vec::Vec;
+use alloc::{sync::Arc, vec::Vec};
 use core::{
     mem::{offset_of, size_of},
     sync::atomic::{fence, Ordering},

@@ -37,11 +37,11 @@ pub enum QueueError {
 #[derive(Debug)]
 pub struct VirtQueue {
     /// Descriptor table
-    descs: Vec<SafePtr<Descriptor, DmaCoherent>>,
+    descs: Vec<SafePtr<Descriptor, Arc<DmaCoherent>>>,
     /// Available ring
-    avail: SafePtr<AvailRing, DmaCoherent>,
+    avail: SafePtr<AvailRing, Arc<DmaCoherent>>,
     /// Used ring
-    used: SafePtr<UsedRing, DmaCoherent>,
+    used: SafePtr<UsedRing, Arc<DmaCoherent>>,
     /// Notify configuration manager
     notify_config: ConfigManager<u32>,


@@ -98,13 +98,13 @@ impl VirtQueue {

             continue_segment.split(seg1_frames * align_size)
         };
-        let desc_frame_ptr: SafePtr<Descriptor, DmaCoherent> =
-            SafePtr::new(DmaCoherent::map(seg1.into(), true).unwrap(), 0);
-        let mut avail_frame_ptr: SafePtr<AvailRing, DmaCoherent> =
+        let desc_frame_ptr: SafePtr<Descriptor, Arc<DmaCoherent>> =
+            SafePtr::new(Arc::new(DmaCoherent::map(seg1.into(), true).unwrap()), 0);
+        let mut avail_frame_ptr: SafePtr<AvailRing, Arc<DmaCoherent>> =
             desc_frame_ptr.clone().cast();
         avail_frame_ptr.byte_add(desc_size);
-        let used_frame_ptr: SafePtr<UsedRing, DmaCoherent> =
-            SafePtr::new(DmaCoherent::map(seg2.into(), true).unwrap(), 0);
+        let used_frame_ptr: SafePtr<UsedRing, Arc<DmaCoherent>> =
+            SafePtr::new(Arc::new(DmaCoherent::map(seg2.into(), true).unwrap()), 0);
         (desc_frame_ptr, avail_frame_ptr, used_frame_ptr)
     } else {
         if size > 256 {

@@ -112,27 +112,33 @@ impl VirtQueue {
         }
         (
             SafePtr::new(
-                DmaCoherent::map(
-                    FrameAllocOptions::new().alloc_segment(1).unwrap().into(),
-                    true,
-                )
-                .unwrap(),
+                Arc::new(
+                    DmaCoherent::map(
+                        FrameAllocOptions::new().alloc_segment(1).unwrap().into(),
+                        true,
+                    )
+                    .unwrap(),
+                ),
                 0,
             ),
             SafePtr::new(
-                DmaCoherent::map(
-                    FrameAllocOptions::new().alloc_segment(1).unwrap().into(),
-                    true,
-                )
-                .unwrap(),
+                Arc::new(
+                    DmaCoherent::map(
+                        FrameAllocOptions::new().alloc_segment(1).unwrap().into(),
+                        true,
+                    )
+                    .unwrap(),
+                ),
                 0,
             ),
             SafePtr::new(
-                DmaCoherent::map(
-                    FrameAllocOptions::new().alloc_segment(1).unwrap().into(),
-                    true,
-                )
-                .unwrap(),
+                Arc::new(
+                    DmaCoherent::map(
+                        FrameAllocOptions::new().alloc_segment(1).unwrap().into(),
+                        true,
+                    )
+                    .unwrap(),
+                ),
                 0,
             ),
         )

@@ -234,7 +240,7 @@ impl VirtQueue {
         let avail_slot = self.avail_idx & (self.queue_size - 1);

         {
-            let ring_ptr: SafePtr<[u16; 64], &DmaCoherent> =
+            let ring_ptr: SafePtr<[u16; 64], &Arc<DmaCoherent>> =
                 field_ptr!(&self.avail, AvailRing, ring);
             let mut ring_slot_ptr = ring_ptr.cast::<u16>();
             ring_slot_ptr.add(avail_slot as usize);

@@ -419,7 +425,7 @@ pub struct Descriptor {
     next: u16,
 }

-type DescriptorPtr<'a> = SafePtr<Descriptor, &'a DmaCoherent, TRightSet<TRights![Dup, Write]>>;
+type DescriptorPtr<'a> = SafePtr<Descriptor, &'a Arc<DmaCoherent>, TRightSet<TRights![Dup, Write]>>;

 fn set_dma_buf<T: DmaBuf>(desc_ptr: &DescriptorPtr, buf: &T) {
     // TODO: skip the empty dma buffer or just return error?

@@ -98,9 +98,9 @@ impl VirtioTransport for VirtioMmioTransport {
         &mut self,
         idx: u16,
         queue_size: u16,
-        descriptor_ptr: &SafePtr<Descriptor, DmaCoherent>,
-        driver_ptr: &SafePtr<AvailRing, DmaCoherent>,
-        device_ptr: &SafePtr<UsedRing, DmaCoherent>,
+        descriptor_ptr: &SafePtr<Descriptor, Arc<DmaCoherent>>,
+        driver_ptr: &SafePtr<AvailRing, Arc<DmaCoherent>>,
+        device_ptr: &SafePtr<UsedRing, Arc<DmaCoherent>>,
     ) -> Result<(), VirtioTransportError> {
         field_ptr!(&self.layout, VirtioMmioLayout, queue_sel)
             .write_once(&(idx as u32))

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0

-use alloc::boxed::Box;
+use alloc::{boxed::Box, sync::Arc};
 use core::fmt::Debug;

 use aster_util::safe_ptr::SafePtr;

@@ -73,9 +73,9 @@ pub trait VirtioTransport: Sync + Send + Debug {
         &mut self,
         idx: u16,
         queue_size: u16,
-        descriptor_ptr: &SafePtr<Descriptor, DmaCoherent>,
-        avail_ring_ptr: &SafePtr<AvailRing, DmaCoherent>,
-        used_ring_ptr: &SafePtr<UsedRing, DmaCoherent>,
+        descriptor_ptr: &SafePtr<Descriptor, Arc<DmaCoherent>>,
+        avail_ring_ptr: &SafePtr<AvailRing, Arc<DmaCoherent>>,
+        used_ring_ptr: &SafePtr<UsedRing, Arc<DmaCoherent>>,
     ) -> Result<(), VirtioTransportError>;

     /// The max queue size of one virtqueue.

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0

-use alloc::boxed::Box;
+use alloc::{boxed::Box, sync::Arc};
 use core::fmt::Debug;

 use aster_util::{field_ptr, safe_ptr::SafePtr};

@@ -77,9 +77,9 @@ impl VirtioTransport for VirtioPciModernTransport {
         &mut self,
         idx: u16,
         queue_size: u16,
-        descriptor_ptr: &SafePtr<Descriptor, DmaCoherent>,
-        avail_ring_ptr: &SafePtr<AvailRing, DmaCoherent>,
-        used_ring_ptr: &SafePtr<UsedRing, DmaCoherent>,
+        descriptor_ptr: &SafePtr<Descriptor, Arc<DmaCoherent>>,
+        avail_ring_ptr: &SafePtr<AvailRing, Arc<DmaCoherent>>,
+        used_ring_ptr: &SafePtr<UsedRing, Arc<DmaCoherent>>,
     ) -> Result<(), VirtioTransportError> {
         if idx >= self.num_queues() {
             return Err(VirtioTransportError::InvalidArgs);

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0

-use alloc::boxed::Box;
+use alloc::{boxed::Box, sync::Arc};
 use core::fmt::Debug;

 use aster_util::safe_ptr::SafePtr;

@@ -163,9 +163,9 @@ impl VirtioTransport for VirtioPciLegacyTransport {
         &mut self,
         idx: u16,
         _queue_size: u16,
-        descriptor_ptr: &SafePtr<Descriptor, DmaCoherent>,
-        _avail_ring_ptr: &SafePtr<AvailRing, DmaCoherent>,
-        _used_ring_ptr: &SafePtr<UsedRing, DmaCoherent>,
+        descriptor_ptr: &SafePtr<Descriptor, Arc<DmaCoherent>>,
+        _avail_ring_ptr: &SafePtr<AvailRing, Arc<DmaCoherent>>,
+        _used_ring_ptr: &SafePtr<UsedRing, Arc<DmaCoherent>>,
     ) -> Result<(), VirtioTransportError> {
         // When using the legacy interface, there was no mechanism to negotiate
         // the queue size! The transitional driver MUST retrieve the `Queue Size`

@@ -10,6 +10,7 @@
 //! [`IoMem`]: ostd::io::IoMem
 //! [`DmaStream`]: ostd::mm::dma::DmaStream

+use alloc::sync::Arc;
 use core::{borrow::Borrow, fmt::Debug, ops::Range};

 use ostd::mm::{

@@ -115,7 +116,8 @@ impl<MemObj: HasSize + HasVmReaderWriter<Types = VmReaderWriterResult>> HasVmRea
 }

 // A handy implementation for streaming DMA slice.
-impl<MemObj: HasSize + Borrow<DmaStream>> Slice<MemObj> {
+// TODO: Implement the `sync()` method also for `Slice<DmaStream>`/`Slice<&DmaStream>`.
+impl<MemObj: HasSize + Borrow<Arc<DmaStream>>> Slice<MemObj> {
     /// Synchronizes the slice of streaming DMA mapping with the device.
     ///
     /// The method will call [`DmaStream::sync`] with the offset range of this slice.

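
Restricting this inherent impl to `MemObj: Borrow<Arc<DmaStream>>` matches the new ownership convention: the slices built throughout this diff wrap `Arc<DmaStream>`. A short usage sketch (types as in this diff; `map_shared_stream` is the hypothetical helper above, and the exact `sync` signature is per the doc comment, which says it forwards the slice's offset range to `DmaStream::sync`):

    let stream: Arc<DmaStream> = map_shared_stream();
    let slice = Slice::new(stream.clone(), 0..512);
    // Synchronizes only this slice's byte range of the underlying mapping.
    slice.sync().unwrap();
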
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: MPL-2.0

-use alloc::sync::Arc;
 use core::ops::Deref;

 use cfg_if::cfg_if;

@@ -29,13 +28,8 @@ cfg_if! {
 ///
 /// The mapping will be destroyed automatically when
 /// the object is dropped.
-#[derive(Debug, Clone)]
-pub struct DmaCoherent {
-    inner: Arc<DmaCoherentInner>,
-}
-
 #[derive(Debug)]
-struct DmaCoherentInner {
+pub struct DmaCoherent {
     segment: USegment,
     start_daddr: Daddr,
     is_cache_coherent: bool,

@@ -101,11 +95,9 @@ impl DmaCoherent {
         };

         Ok(Self {
-            inner: Arc::new(DmaCoherentInner {
-                segment,
-                start_daddr,
-                is_cache_coherent,
-            }),
+            segment,
+            start_daddr,
+            is_cache_coherent,
         })
     }
 }

@@ -113,11 +105,11 @@ impl DmaCoherent {
 impl Deref for DmaCoherent {
     type Target = USegment;
     fn deref(&self) -> &Self::Target {
-        &self.inner.segment
+        &self.segment
     }
 }

-impl Drop for DmaCoherentInner {
+impl Drop for DmaCoherent {
     fn drop(&mut self) {
         let paddr = self.segment.paddr();
         let frame_count = self.segment.size() / PAGE_SIZE;

@@ -165,19 +157,19 @@ impl Drop for DmaCoherentInner {

 impl HasPaddr for DmaCoherent {
     fn paddr(&self) -> Paddr {
-        self.inner.segment.paddr()
+        self.segment.paddr()
     }
 }

 impl HasSize for DmaCoherent {
     fn size(&self) -> usize {
-        self.inner.segment.size()
+        self.segment.size()
     }
 }

 impl HasDaddr for DmaCoherent {
     fn daddr(&self) -> Daddr {
-        self.inner.start_daddr
+        self.start_daddr
     }
 }


@@ -185,10 +177,10 @@ impl HasVmReaderWriter for DmaCoherent {
     type Types = VmReaderWriterIdentity;

     fn reader(&self) -> VmReader<'_, Infallible> {
-        self.inner.segment.reader()
+        self.segment.reader()
     }

     fn writer(&self) -> VmWriter<'_, Infallible> {
-        self.inner.segment.writer()
+        self.segment.writer()
     }
 }

@@ -5,7 +5,6 @@
     allow(unfulfilled_lint_expectations)
 )]

-use alloc::sync::Arc;
 use core::ops::Range;

 use super::{check_and_insert_dma_mapping, remove_dma_mapping, DmaError};

@@ -19,18 +18,12 @@ use crate::{
     },
 };

-/// A streaming DMA mapping. Users must synchronize data
-/// before reading or after writing to ensure consistency.
+/// A streaming DMA mapping.
 ///
 /// The mapping is automatically destroyed when this object
 /// is dropped.
-#[derive(Debug, Clone)]
-pub struct DmaStream {
-    inner: Arc<DmaStreamInner>,
-}
-
 /// Users must synchronize data before reading or after writing to ensure
 /// consistency.
 #[derive(Debug)]
-struct DmaStreamInner {
+pub struct DmaStream {
     segment: USegment,
     start_daddr: Daddr,
     /// TODO: remove this field when on x86.

@@ -97,12 +90,10 @@ impl DmaStream {
         };

         Ok(Self {
-            inner: Arc::new(DmaStreamInner {
-                segment,
-                start_daddr,
-                is_cache_coherent,
-                direction,
-            }),
+            segment,
+            start_daddr,
+            is_cache_coherent,
+            direction,
         })
     }


@@ -113,12 +104,12 @@ impl DmaStream {
     /// there is a chance that the device is updating
     /// the memory. Do this at your own risk.
     pub fn segment(&self) -> &USegment {
-        &self.inner.segment
+        &self.segment
     }

     /// Returns the DMA direction.
     pub fn direction(&self) -> DmaDirection {
-        self.inner.direction
+        self.direction
     }

     /// Synchronizes the streaming DMA mapping with the device.

@@ -142,10 +133,10 @@ impl DmaStream {
         if _byte_range.end > self.size() {
             return Err(Error::InvalidArgs);
         }
-        if self.inner.is_cache_coherent {
+        if self.is_cache_coherent {
             return Ok(());
         }
-        let _start_va = crate::mm::paddr_to_vaddr(self.inner.segment.paddr()) as *const u8;
+        let _start_va = crate::mm::paddr_to_vaddr(self.segment.paddr()) as *const u8;
         // TODO: Query the CPU for the cache line size via CPUID, we use 64 bytes as the cache line size here.
         for _i in _byte_range.step_by(64) {
             // TODO: Call the cache line flush command in the corresponding architecture.

@@ -159,11 +150,11 @@ impl DmaStream {

 impl HasDaddr for DmaStream {
     fn daddr(&self) -> Daddr {
-        self.inner.start_daddr
+        self.start_daddr
     }
 }

-impl Drop for DmaStreamInner {
+impl Drop for DmaStream {
     fn drop(&mut self) {
         let paddr = self.segment.paddr();
         let frame_count = self.segment.size() / PAGE_SIZE;

@@ -201,28 +192,28 @@ impl HasVmReaderWriter for DmaStream {
     type Types = VmReaderWriterResult;

     fn reader(&self) -> Result<VmReader<'_, Infallible>, Error> {
-        if self.inner.direction == DmaDirection::ToDevice {
+        if self.direction == DmaDirection::ToDevice {
             return Err(Error::AccessDenied);
         }
-        Ok(self.inner.segment.reader())
+        Ok(self.segment.reader())
     }

     fn writer(&self) -> Result<VmWriter<'_, Infallible>, Error> {
-        if self.inner.direction == DmaDirection::FromDevice {
+        if self.direction == DmaDirection::FromDevice {
             return Err(Error::AccessDenied);
         }
-        Ok(self.inner.segment.writer())
+        Ok(self.segment.writer())
     }
 }

 impl HasPaddr for DmaStream {
     fn paddr(&self) -> Paddr {
-        self.inner.segment.paddr()
+        self.segment.paddr()
     }
 }

 impl HasSize for DmaStream {
     fn size(&self) -> usize {
-        self.inner.segment.size()
+        self.segment.size()
     }
 }
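
With `Drop` now implemented directly on `DmaStream` and `DmaCoherent` (rather than on the removed `*Inner` types), a mapping lives exactly as long as its owning value, or as long as the last explicit `Arc` when callers opt into sharing. A sketch of the resulting lifetime rule (types from this diff; `map_shared_stream` is the hypothetical helper above):

    let stream = map_shared_stream();
    let alias = Arc::clone(&stream);
    drop(stream); // mapping still valid: `alias` holds the last strong count
    drop(alias);  // `DmaStream::drop` runs here and unmaps the DMA region
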