Fix `if let` chains that can be collapsed

This commit is contained in:
Zhang Junyang 2025-12-08 23:28:59 +08:00 committed by Tate, Hongliang Tian
parent 127290f70d
commit 6d2679852f
19 changed files with 143 additions and 140 deletions

View File

@@ -62,14 +62,13 @@ impl BioRequestSingleQueue {
}
let mut queue = self.queue.lock();
if let Some(request) = queue.front_mut() {
if request.can_merge(&bio)
if let Some(request) = queue.front_mut()
&& request.can_merge(&bio)
&& request.num_segments() + bio.segments().len() <= self.max_nr_segments_per_bio
{
request.merge_bio(bio);
return Ok(());
}
}
let new_request = BioRequest::from(bio);
queue.push_front(new_request);

View File

@@ -225,13 +225,13 @@ impl FbConsoleHandler {
return;
}
if let Some(bytes) = self.keycode_to_ascii(keycode) {
if let Some(console) = FRAMEBUFFER_CONSOLE.get() {
if let Some(bytes) = self.keycode_to_ascii(keycode)
&& let Some(console) = FRAMEBUFFER_CONSOLE.get()
{
console.trigger_input_callbacks(bytes);
}
}
}
}
impl InputHandler for FbConsoleHandler {
fn handle_events(&self, events: &[InputEvent]) {

View File

@@ -225,12 +225,12 @@ fn handle_mouse_input(_trap_frame: &TrapFrame) {
};
let mut packet_state = PACKET_STATE.lock();
if let Some(events) = packet_state.process_byte(data) {
if let Some(registered_device) = REGISTERED_DEVICE.get() {
if let Some(events) = packet_state.process_byte(data)
&& let Some(registered_device) = REGISTERED_DEVICE.get()
{
registered_device.submit_events(&events);
}
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum MouseType {

View File

@@ -485,24 +485,24 @@ impl InputDevice {
// Set key capabilities.
if let Some(key_bits) = &ev_key {
for bit in 0..key_bits.len() * 8 {
if key_bits[bit / 8] & (1 << (bit % 8)) != 0 {
if let Some(key_code) = map_to_key_code(bit as u16) {
if key_bits[bit / 8] & (1 << (bit % 8)) != 0
&& let Some(key_code) = map_to_key_code(bit as u16)
{
capability.set_supported_key(key_code);
}
}
}
}
// Set relative axis capabilities.
if let Some(rel_bits) = &ev_rel {
for bit in 0..rel_bits.len() * 8 {
if rel_bits[bit / 8] & (1 << (bit % 8)) != 0 {
if let Some(rel_code) = map_to_rel_code(bit as u16) {
if rel_bits[bit / 8] & (1 << (bit % 8)) != 0
&& let Some(rel_code) = map_to_rel_code(bit as u16)
{
capability.set_supported_relative_axis(rel_code);
}
}
}
}
info!(
"VirtIO input device capabilities set: KEY={}, REL={}",

View File

@@ -538,15 +538,15 @@ impl<E: Ext> PollContext<'_, E> {
}));
});
if let Some((ip_repr, ip_payload)) = deferred {
if let Some(reply) = self.parse_and_process_udp(
if let Some((ip_repr, ip_payload)) = deferred
&& let Some(reply) = self.parse_and_process_udp(
&ip_repr,
&ip_payload,
&ChecksumCapabilities::ignored(),
) {
)
{
dispatch_phy(&reply, self.iface.context_mut(), tx_token.take().unwrap());
}
}
if tx_token.is_none() {
break;

View File

@@ -249,15 +249,15 @@ impl InotifyFile {
{
let mut event_queue = self.event_queue.lock();
if let Some(last_event) = event_queue.back() {
if can_merge_events(last_event, &new_event) {
if let Some(last_event) = event_queue.back()
&& can_merge_events(last_event, &new_event)
{
event_queue.pop_back();
event_queue.push_back(new_event);
// New or merged event makes the file readable
self.pollee.notify(IoEvents::IN);
return;
}
}
// If the queue is full, drop the event.
// We do not return an error to the caller.

View File

@@ -519,11 +519,12 @@ impl DentryChildren {
/// Checks whether the dentry is a mount point. Returns an error if it is.
fn check_mountpoint(&self, name: &str) -> Result<()> {
if let Some(Some(dentry)) = self.dentries.get(name) {
if dentry.is_mountpoint() {
if let Some(Some(dentry)) = self.dentries.get(name)
&& dentry.is_mountpoint()
{
return_errno_with_message!(Errno::EBUSY, "dentry is mountpint");
}
}
Ok(())
}

View File

@@ -118,8 +118,9 @@ impl PipeWriter {
};
let res = self.state.write_with(write);
if res.is_err_and(|e| e.error() == Errno::EPIPE) {
if let Some(posix_thread) = current_thread!().as_posix_thread() {
if res.is_err_and(|e| e.error() == Errno::EPIPE)
&& let Some(posix_thread) = current_thread!().as_posix_thread()
{
posix_thread.enqueue_signal(Box::new(UserSignal::new(
SIGPIPE,
UserSignalKind::Kill,
@@ -127,7 +128,6 @@ impl PipeWriter {
posix_thread.credentials().ruid(),
)));
}
}
res
}

View File

@@ -206,11 +206,12 @@ impl OptionalBuilder {
// The volatile property is inherited from parent.
let is_volatile = {
let mut is_volatile = self.is_volatile;
if let Some(parent) = self.parent.as_ref() {
if !parent.upgrade().unwrap().is_dentry_cacheable() {
if let Some(parent) = self.parent.as_ref()
&& !parent.upgrade().unwrap().is_dentry_cacheable()
{
is_volatile = true;
}
}
is_volatile
};

View File

@@ -359,13 +359,14 @@ impl PageCacheManager {
let backend = self.backend();
let backend_npages = backend.npages();
for idx in page_idx_range.start..page_idx_range.end {
if let Some(page) = pages.peek(&idx) {
if page.load_state() == PageState::Dirty && idx < backend_npages {
if let Some(page) = pages.peek(&idx)
&& page.load_state() == PageState::Dirty
&& idx < backend_npages
{
let waiter = backend.write_page_async(idx, page)?;
bio_waiter.concat(waiter);
}
}
}
if !matches!(bio_waiter.wait(), Some(BioStatus::Complete)) {
// Do not allow partial failure
@@ -455,8 +456,9 @@ impl Pager for PageCacheManager {
fn decommit_page(&self, idx: usize) -> Result<()> {
let page_result = self.pages.lock().pop(&idx);
if let Some(page) = page_result {
if let PageState::Dirty = page.load_state() {
if let Some(page) = page_result
&& let PageState::Dirty = page.load_state()
{
let Some(backend) = self.backend.upgrade() else {
return Ok(());
};
@@ -464,7 +466,6 @@ impl Pager for PageCacheManager {
backend.write_page(idx, &page)?;
}
}
}
Ok(())
}

View File

@@ -108,11 +108,12 @@ impl VsockStreamSocket {
let peer_addr = self.peer_addr()?;
// If buffer is now empty and the peer requested shutdown, finish shutting down the
// connection.
if connected.should_close() {
if let Err(e) = self.shutdown(SockShutdownCmd::SHUT_RDWR) {
if connected.should_close()
&& let Err(e) = self.shutdown(SockShutdownCmd::SHUT_RDWR)
{
debug!("The error is {:?}", e);
}
}
Ok((read_size, peer_addr))
}
}
@@ -186,14 +187,13 @@ impl Socket for VsockStreamSocket {
if !connecting
.poll(IoEvents::IN, Some(poller.as_handle_mut()))
.contains(IoEvents::IN)
&& let Err(e) = poller.wait()
{
if let Err(e) = poller.wait() {
vsockspace
.remove_connecting_socket(&connecting.local_addr())
.unwrap();
return Err(e);
}
}
vsockspace
.remove_connecting_socket(&connecting.local_addr())

View File

@@ -147,17 +147,17 @@ impl StopStatus {
pub(super) fn wait(&self, options: WaitOptions) -> Option<StopWaitStatus> {
let mut wait_status = self.wait_status.lock();
if options.contains(WaitOptions::WSTOPPED) {
if let Some(StopWaitStatus::Stopped(_)) = wait_status.as_ref() {
if options.contains(WaitOptions::WSTOPPED)
&& let Some(StopWaitStatus::Stopped(_)) = wait_status.as_ref()
{
return wait_status.take();
}
}
if options.contains(WaitOptions::WCONTINUED) {
if let Some(StopWaitStatus::Continue) = wait_status.as_ref() {
if options.contains(WaitOptions::WCONTINUED)
&& let Some(StopWaitStatus::Continue) = wait_status.as_ref()
{
return wait_status.take();
}
}
None
}

View File

@@ -124,11 +124,12 @@ where
if v.range().end > *point {
return Some(cursor.remove_prev().unwrap().1);
}
} else if let Some((_, v)) = cursor.peek_next() {
if v.range().start <= *point {
} else if let Some((_, v)) = cursor.peek_next()
&& v.range().start <= *point
{
return Some(cursor.remove_next().unwrap().1);
}
}
None
}
@@ -177,12 +178,12 @@ where
// There's one previous element that may intersect with the range.
if !self.peeked_prev {
self.peeked_prev = true;
if let Some((_, v)) = self.cursor.peek_prev() {
if v.range().end > self.range.start {
if let Some((_, v)) = self.cursor.peek_prev()
&& v.range().end > self.range.start
{
return Some(v);
}
}
}
// Find all intersected elements following it.
if let Some((_, v)) = self.cursor.next() {
@@ -219,12 +220,12 @@ where
// There's one previous element that may intersect with the range.
if !self.drained_prev {
self.drained_prev = true;
if let Some((_, v)) = self.cursor.peek_prev() {
if v.range().end > self.range.start {
if let Some((_, v)) = self.cursor.peek_prev()
&& v.range().end > self.range.start
{
return Some(self.cursor.remove_prev().unwrap().1);
}
}
}
// Find all intersected elements following it.
if let Some((_, v)) = self.cursor.peek_next() {

View File

@@ -816,11 +816,11 @@ impl VmarInner {
.map_or(VMAR_LOWEST_ADDR, |vm_mapping| vm_mapping.range().end);
// FIXME: The up-align may overflow.
let last_occupied_aligned = highest_occupied.align_up(align);
if let Some(last) = last_occupied_aligned.checked_add(size) {
if last <= VMAR_CAP_ADDR {
if let Some(last) = last_occupied_aligned.checked_add(size)
&& last <= VMAR_CAP_ADDR
{
return Ok(last_occupied_aligned..last);
}
}
// Slow path that we need to search for a free region.
// Here, we use a simple brute-force FIRST-FIT algorithm.

View File

@@ -77,15 +77,15 @@ pub(super) fn alloc(guard: &DisabledLocalIrqGuard, layout: Layout) -> Option<Pad
// If the alignment order is larger than the size order, we need to split
// the chunk and return the rest part back to the free lists.
let allocated_size = size_of_order(order);
if allocated_size > layout.size() {
if let Some(chunk_addr) = chunk_addr {
if allocated_size > layout.size()
&& let Some(chunk_addr) = chunk_addr
{
do_dealloc(
&mut local_pool,
&mut global_pool,
[(chunk_addr + layout.size(), allocated_size - layout.size())].into_iter(),
);
}
}
balancing::balance(local_pool.deref_mut(), &mut global_pool);

View File

@@ -105,12 +105,12 @@ where
let crate_set =
crate_whitelist.map(|crates| crates.iter().copied().collect::<BTreeSet<&str>>());
for crate_ in tree.iter() {
if let Some(crate_set) = &crate_set {
if !crate_set.contains(crate_.name()) {
if let Some(crate_set) = &crate_set
&& !crate_set.contains(crate_.name())
{
early_print!("\n[ktest runner] skipping crate \"{}\".\n", crate_.name());
continue;
}
}
match run_crate_ktests(crate_, &whitelist_trie) {
KtestResult::Ok => {}
KtestResult::Failed => return KtestResult::Failed,
@@ -149,7 +149,7 @@ fn run_crate_ktests(crate_: &KtestCrate, whitelist: &Option<SuffixTrie>) -> Ktes
debug_assert_eq!(test.info().package, crate_name);
match test.run(
&(ostd::panic::catch_unwind::<(), fn()>
as fn(fn()) -> Result<(), Box<(dyn Any + Send + 'static)>>),
as fn(fn()) -> Result<(), Box<dyn Any + Send + 'static>>),
) {
Ok(()) => {
early_print!(" {}\n", "ok".green());

View File

@@ -73,12 +73,13 @@ fn for_each_hart_id(mut f: impl FnMut(u32)) {
// FIXME: We should find a robust method to identify the management
// harts. Here we simply skip those harts without MMU, which is
// supposed to work in most cases.
if device_type.as_str() == Some("cpu") && cpu_node.property("mmu-type").is_some() {
if let Some(reg) = cpu_node.property("reg") {
if device_type.as_str() == Some("cpu")
&& cpu_node.property("mmu-type").is_some()
&& let Some(reg) = cpu_node.property("reg")
{
f(reg.as_usize().unwrap() as u32);
}
}
}
})
}

View File

@@ -96,12 +96,12 @@ fn parse_isa_extensions_list(isa_extensions: &fdt::node::NodeProperty) -> IsaExt
if str.is_empty() {
continue;
}
if let Ok(ext_name) = core::str::from_utf8(str) {
if let Some(ext_data) = EXTENSION_TABLE.iter().find(|e| e.name == ext_name) {
if let Ok(ext_name) = core::str::from_utf8(str)
&& let Some(ext_data) = EXTENSION_TABLE.iter().find(|e| e.name == ext_name)
{
extensions |= ext_data.flag;
}
}
}
extensions
}

View File

@@ -86,15 +86,15 @@ impl RangeAllocator {
}
}
if let Some(key) = to_remove {
if let Some(freenode) = freelist.get_mut(&key) {
if let Some(key) = to_remove
&& let Some(freenode) = freelist.get_mut(&key)
{
if freenode.block.end - size == freenode.block.start {
freelist.remove(&key);
} else {
freenode.block.end -= size;
}
}
}
if let Some(range) = allocate_range {
Ok(range)
@@ -117,28 +117,27 @@ impl RangeAllocator {
if let Some((prev_va, prev_node)) = freelist
.upper_bound_mut(core::ops::Bound::Excluded(&free_range.start))
.peek_prev()
&& prev_node.block.end == free_range.start
{
if prev_node.block.end == free_range.start {
let prev_va = *prev_va;
free_range.start = prev_node.block.start;
freelist.remove(&prev_va);
}
}
freelist.insert(free_range.start, FreeRange::new(free_range.clone()));
// 2. check if we can merge the current block with the next block, if we can, do so.
if let Some((next_va, next_node)) = freelist
.lower_bound_mut(core::ops::Bound::Excluded(&free_range.start))
.peek_next()
&& free_range.end == next_node.block.start
{
if free_range.end == next_node.block.start {
let next_va = *next_va;
free_range.end = next_node.block.end;
freelist.remove(&next_va);
freelist.get_mut(&free_range.start).unwrap().block.end = free_range.end;
}
}
}
fn get_freelist_guard(
&self,