asterinas/kernel/src/net/iface/common.rs
// SPDX-License-Identifier: MPL-2.0
use alloc::collections::btree_map::Entry;
use core::sync::atomic::{AtomicU64, Ordering};
use keyable_arc::KeyableArc;
use ostd::sync::{LocalIrqDisabled, WaitQueue};
use smoltcp::{
iface::{SocketHandle, SocketSet},
phy::Device,
wire::IpCidr,
};
use super::{
any_socket::{AnyBoundSocketInner, AnyRawSocket, AnyUnboundSocket, SocketFamily},
time::get_network_timestamp,
util::BindPortConfig,
AnyBoundSocket, Iface,
};
use crate::{net::socket::ip::Ipv4Address, prelude::*};
/// State common to all network interface (`Iface`) implementations.
pub struct IfaceCommon {
    // The underlying `smoltcp` interface. See the lock-ordering note on
    // `Self::interface()`: `sockets` must be locked first, `interface` second.
    interface: SpinLock<smoltcp::iface::Interface>,
    // The set of sockets registered with `smoltcp`.
    sockets: SpinLock<SocketSet<'static>>,
    // Per-port bind counts: maps a local port number to the number of sockets
    // currently bound to it (see `bind_port`/`release_port`).
    used_ports: RwLock<BTreeMap<u16, usize>>,
    /// The time of the next scheduled poll, stored as total milliseconds since
    /// system boot. A value of 0 means no poll is currently scheduled.
    next_poll_at_ms: AtomicU64,
    // All sockets currently bound to this interface.
    bound_sockets: RwLock<BTreeSet<KeyableArc<AnyBoundSocketInner>>>,
    // Sockets that have been unbound but are still shutting down; they are
    // dropped once `is_closed()` reports true (see `poll`).
    closing_sockets: SpinLock<BTreeSet<KeyableArc<AnyBoundSocketInner>>>,
    /// The wait queue that the background polling thread sleeps on.
    polling_wait_queue: WaitQueue,
}
impl IfaceCommon {
pub(super) fn new(interface: smoltcp::iface::Interface) -> Self {
let socket_set = SocketSet::new(Vec::new());
let used_ports = BTreeMap::new();
Self {
2023-05-30 08:34:28 +00:00
interface: SpinLock::new(interface),
sockets: SpinLock::new(socket_set),
2023-05-31 02:47:52 +00:00
used_ports: RwLock::new(used_ports),
next_poll_at_ms: AtomicU64::new(0),
bound_sockets: RwLock::new(BTreeSet::new()),
2024-07-23 15:03:10 +00:00
closing_sockets: SpinLock::new(BTreeSet::new()),
2024-02-26 03:32:53 +00:00
polling_wait_queue: WaitQueue::new(),
2023-05-31 02:47:52 +00:00
}
}
2024-07-29 02:40:00 +00:00
/// Acquires the lock to the interface.
///
/// *Lock ordering:* [`Self::sockets`] first, [`Self::interface`] second.
2024-08-09 03:32:27 +00:00
pub(super) fn interface(&self) -> SpinLockGuard<smoltcp::iface::Interface, LocalIrqDisabled> {
self.interface.disable_irq().lock()
2023-05-31 02:47:52 +00:00
}
2024-07-29 02:40:00 +00:00
/// Acuqires the lock to the sockets.
///
/// *Lock ordering:* [`Self::sockets`] first, [`Self::interface`] second.
2024-08-09 03:32:27 +00:00
pub(super) fn sockets(
&self,
) -> SpinLockGuard<smoltcp::iface::SocketSet<'static>, LocalIrqDisabled> {
self.sockets.disable_irq().lock()
2023-05-31 02:47:52 +00:00
}
pub(super) fn ipv4_addr(&self) -> Option<Ipv4Address> {
2024-08-09 03:32:27 +00:00
self.interface.disable_irq().lock().ipv4_addr()
2023-05-31 02:47:52 +00:00
}
pub(super) fn netmask(&self) -> Option<Ipv4Address> {
2024-08-09 03:32:27 +00:00
let interface = self.interface.disable_irq().lock();
2023-05-31 02:47:52 +00:00
let ip_addrs = interface.ip_addrs();
ip_addrs.first().map(|cidr| match cidr {
IpCidr::Ipv4(ipv4_cidr) => ipv4_cidr.netmask(),
})
}
2024-02-26 03:32:53 +00:00
pub(super) fn polling_wait_queue(&self) -> &WaitQueue {
&self.polling_wait_queue
}
2023-05-31 02:47:52 +00:00
/// Alloc an unused port range from 49152 ~ 65535 (According to smoltcp docs)
fn alloc_ephemeral_port(&self) -> Result<u16> {
let mut used_ports = self.used_ports.write();
for port in IP_LOCAL_PORT_START..=IP_LOCAL_PORT_END {
2023-09-04 03:04:42 +00:00
if let Entry::Vacant(e) = used_ports.entry(port) {
e.insert(0);
2023-05-31 02:47:52 +00:00
return Ok(port);
}
}
return_errno_with_message!(Errno::EAGAIN, "no ephemeral port is available");
2023-05-31 02:47:52 +00:00
}
fn bind_port(&self, port: u16, can_reuse: bool) -> Result<()> {
let mut used_ports = self.used_ports.write();
if let Some(used_times) = used_ports.get_mut(&port) {
if *used_times == 0 || can_reuse {
2023-09-04 03:04:42 +00:00
*used_times += 1;
2023-05-31 02:47:52 +00:00
} else {
return_errno_with_message!(Errno::EADDRINUSE, "the address is already in use");
2023-05-31 02:47:52 +00:00
}
} else {
used_ports.insert(port, 1);
}
Ok(())
}
/// Release port number so the port can be used again. For reused port, the port may still be in use.
pub(super) fn release_port(&self, port: u16) {
let mut used_ports = self.used_ports.write();
if let Some(used_times) = used_ports.remove(&port) {
if used_times != 1 {
used_ports.insert(port, used_times - 1);
}
}
}
pub(super) fn bind_socket(
&self,
iface: Arc<dyn Iface>,
socket: Box<AnyUnboundSocket>,
2023-05-31 02:47:52 +00:00
config: BindPortConfig,
2024-07-23 15:03:10 +00:00
) -> core::result::Result<AnyBoundSocket, (Error, Box<AnyUnboundSocket>)> {
2023-05-31 02:47:52 +00:00
let port = if let Some(port) = config.port() {
port
} else {
match self.alloc_ephemeral_port() {
Ok(port) => port,
2024-01-07 15:55:23 +00:00
Err(err) => return Err((err, socket)),
2023-05-31 02:47:52 +00:00
}
};
2024-01-07 15:55:23 +00:00
if let Some(err) = self.bind_port(port, config.can_reuse()).err() {
return Err((err, socket));
2023-05-31 02:47:52 +00:00
}
2024-01-07 15:55:23 +00:00
let (handle, socket_family, observer) = match socket.into_raw() {
(AnyRawSocket::Tcp(tcp_socket), observer) => (
2024-08-09 03:32:27 +00:00
self.sockets.disable_irq().lock().add(tcp_socket),
2024-01-07 15:55:23 +00:00
SocketFamily::Tcp,
observer,
),
(AnyRawSocket::Udp(udp_socket), observer) => (
2024-08-09 03:32:27 +00:00
self.sockets.disable_irq().lock().add(udp_socket),
2024-01-07 15:55:23 +00:00
SocketFamily::Udp,
observer,
),
2023-05-31 02:47:52 +00:00
};
2024-01-07 15:55:23 +00:00
let bound_socket = AnyBoundSocket::new(iface, handle, port, socket_family, observer);
2024-07-23 15:03:10 +00:00
self.insert_bound_socket(bound_socket.inner());
2024-01-07 15:55:23 +00:00
2023-05-31 02:47:52 +00:00
Ok(bound_socket)
}
/// Remove a socket from the interface
pub(super) fn remove_socket(&self, handle: SocketHandle) {
2024-08-09 03:32:27 +00:00
self.sockets.disable_irq().lock().remove(handle);
2023-05-31 02:47:52 +00:00
}
pub(super) fn poll<D: Device + ?Sized>(&self, device: &mut D) {
2024-08-09 03:32:27 +00:00
let mut sockets = self.sockets.disable_irq().lock();
let mut interface = self.interface.disable_irq().lock();
2024-06-18 07:41:52 +00:00
2023-05-31 02:47:52 +00:00
let timestamp = get_network_timestamp();
2024-06-18 07:41:52 +00:00
let (has_events, poll_at) = {
let mut has_events = false;
let mut poll_at;
2024-08-23 08:30:03 +00:00
2024-06-18 07:41:52 +00:00
loop {
2024-08-23 08:30:03 +00:00
// `poll` transmits and receives a bounded number of packets. This loop ensures
// that all packets are transmitted and received. For details, see
// <https://github.com/smoltcp-rs/smoltcp/blob/8e3ea5c7f09a76f0a4988fda20cadc74eacdc0d8/src/iface/interface/mod.rs#L400-L405>.
2024-08-22 06:34:41 +00:00
while interface.poll(timestamp, device, &mut sockets) {
has_events = true;
}
2024-08-23 08:30:03 +00:00
// `poll_at` can return `Some(Instant::from_millis(0))`, which means `PollAt::Now`.
// For details, see
// <https://github.com/smoltcp-rs/smoltcp/blob/8e3ea5c7f09a76f0a4988fda20cadc74eacdc0d8/src/iface/interface/mod.rs#L478>.
2024-06-18 07:41:52 +00:00
poll_at = interface.poll_at(timestamp, &sockets);
let Some(instant) = poll_at else {
break;
};
if instant > timestamp {
break;
2023-08-28 09:37:59 +00:00
}
2024-06-18 07:41:52 +00:00
}
2024-08-23 08:30:03 +00:00
2024-06-18 07:41:52 +00:00
(has_events, poll_at)
};
2023-05-31 02:47:52 +00:00
2024-06-18 07:41:52 +00:00
// drop sockets here to avoid deadlock
drop(interface);
2024-07-29 02:40:00 +00:00
drop(sockets);
2024-06-18 07:41:52 +00:00
if let Some(instant) = poll_at {
let old_instant = self.next_poll_at_ms.load(Ordering::Relaxed);
2024-02-26 03:32:53 +00:00
let new_instant = instant.total_millis() as u64;
self.next_poll_at_ms.store(new_instant, Ordering::Relaxed);
2024-02-26 03:32:53 +00:00
if old_instant == 0 || new_instant < old_instant {
2024-02-26 03:32:53 +00:00
self.polling_wait_queue.wake_all();
}
2023-05-31 02:47:52 +00:00
} else {
2024-02-26 03:32:53 +00:00
self.next_poll_at_ms.store(0, Ordering::Relaxed);
2023-05-31 02:47:52 +00:00
}
2024-06-18 07:41:52 +00:00
if has_events {
2024-07-29 02:49:57 +00:00
// We never try to hold the write lock in the IRQ context, and we disable IRQ when
// holding the write lock. So we don't need to disable IRQ when holding the read lock.
2024-06-18 07:41:52 +00:00
self.bound_sockets.read().iter().for_each(|bound_socket| {
2024-07-23 15:03:10 +00:00
bound_socket.on_iface_events();
2024-06-18 07:41:52 +00:00
});
2024-07-23 15:03:10 +00:00
let closed_sockets = self
.closing_sockets
2024-08-09 03:32:27 +00:00
.disable_irq()
.lock()
2024-07-23 15:03:10 +00:00
.extract_if(|closing_socket| closing_socket.is_closed())
.collect::<Vec<_>>();
drop(closed_sockets);
2024-06-18 07:41:52 +00:00
}
2023-05-31 02:47:52 +00:00
}
pub(super) fn next_poll_at_ms(&self) -> Option<u64> {
let millis = self.next_poll_at_ms.load(Ordering::Relaxed);
2023-05-31 02:47:52 +00:00
if millis == 0 {
None
} else {
Some(millis)
}
}
2024-07-23 15:03:10 +00:00
fn insert_bound_socket(&self, socket: &Arc<AnyBoundSocketInner>) {
let keyable_socket = KeyableArc::from(socket.clone());
2024-07-29 02:49:57 +00:00
let inserted = self
.bound_sockets
.write_irq_disabled()
.insert(keyable_socket);
2024-07-23 15:03:10 +00:00
assert!(inserted);
2023-05-31 02:47:52 +00:00
}
2024-07-23 15:03:10 +00:00
pub(super) fn remove_bound_socket_now(&self, socket: &Arc<AnyBoundSocketInner>) {
let keyable_socket = KeyableArc::from(socket.clone());
2024-07-29 02:49:57 +00:00
let removed = self
.bound_sockets
.write_irq_disabled()
.remove(&keyable_socket);
2024-07-23 15:03:10 +00:00
assert!(removed);
}
pub(super) fn remove_bound_socket_when_closed(&self, socket: &Arc<AnyBoundSocketInner>) {
let keyable_socket = KeyableArc::from(socket.clone());
2024-07-29 02:49:57 +00:00
let removed = self
.bound_sockets
.write_irq_disabled()
.remove(&keyable_socket);
2024-07-23 15:03:10 +00:00
assert!(removed);
2024-08-09 03:32:27 +00:00
let mut closing_sockets = self.closing_sockets.disable_irq().lock();
2024-07-23 15:03:10 +00:00
// Check `is_closed` after holding the lock to avoid race conditions.
if keyable_socket.is_closed() {
return;
}
let inserted = closing_sockets.insert(keyable_socket);
assert!(inserted);
2023-05-31 02:47:52 +00:00
}
}
// The ephemeral (dynamic) local port range, 49152..=65535, used by
// `alloc_ephemeral_port` (range per the smoltcp documentation).
const IP_LOCAL_PORT_START: u16 = 49152;
const IP_LOCAL_PORT_END: u16 = 65535;