Fix `if let` chains that can be collapsed
commit 6d2679852f
parent 127290f70d
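Every hunk below applies the same mechanical transformation: a nested `if let` (or an `if` wrapping an `if let`) is collapsed into a single `if` whose conditions form a let chain, with pattern bindings and boolean guards joined by `&&`. Let chains are stable as of Rust 1.88 on the 2024 edition, and recent Clippy `collapsible_if` warnings likely motivated this cleanup. As a minimal sketch of the pattern (the `first_even` helpers are hypothetical, not part of this commit):

// Before: a nested `if let` whose body is guarded by a second condition.
fn first_even_before(values: &[i32]) -> Option<i32> {
    if let Some(first) = values.first() {
        if first % 2 == 0 {
            return Some(*first);
        }
    }
    None
}

// After: the binding and the guard merge into one let chain joined by `&&`,
// removing one level of nesting without changing behavior.
fn first_even_after(values: &[i32]) -> Option<i32> {
    if let Some(first) = values.first()
        && first % 2 == 0
    {
        return Some(*first);
    }
    None
}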
@@ -62,13 +62,12 @@ impl BioRequestSingleQueue {
         }
 
         let mut queue = self.queue.lock();
-        if let Some(request) = queue.front_mut() {
-            if request.can_merge(&bio)
-                && request.num_segments() + bio.segments().len() <= self.max_nr_segments_per_bio
-            {
-                request.merge_bio(bio);
-                return Ok(());
-            }
-        }
+        if let Some(request) = queue.front_mut()
+            && request.can_merge(&bio)
+            && request.num_segments() + bio.segments().len() <= self.max_nr_segments_per_bio
+        {
+            request.merge_bio(bio);
+            return Ok(());
+        }
 
         let new_request = BioRequest::from(bio);
@@ -225,10 +225,10 @@ impl FbConsoleHandler {
             return;
         }
 
-        if let Some(bytes) = self.keycode_to_ascii(keycode) {
-            if let Some(console) = FRAMEBUFFER_CONSOLE.get() {
-                console.trigger_input_callbacks(bytes);
-            }
-        }
+        if let Some(bytes) = self.keycode_to_ascii(keycode)
+            && let Some(console) = FRAMEBUFFER_CONSOLE.get()
+        {
+            console.trigger_input_callbacks(bytes);
+        }
     }
 }
@@ -225,10 +225,10 @@ fn handle_mouse_input(_trap_frame: &TrapFrame) {
     };
 
     let mut packet_state = PACKET_STATE.lock();
-    if let Some(events) = packet_state.process_byte(data) {
-        if let Some(registered_device) = REGISTERED_DEVICE.get() {
-            registered_device.submit_events(&events);
-        }
-    }
+    if let Some(events) = packet_state.process_byte(data)
+        && let Some(registered_device) = REGISTERED_DEVICE.get()
+    {
+        registered_device.submit_events(&events);
+    }
 }
@@ -485,10 +485,10 @@ impl InputDevice {
         // Set key capabilities.
         if let Some(key_bits) = &ev_key {
             for bit in 0..key_bits.len() * 8 {
-                if key_bits[bit / 8] & (1 << (bit % 8)) != 0 {
-                    if let Some(key_code) = map_to_key_code(bit as u16) {
-                        capability.set_supported_key(key_code);
-                    }
-                }
+                if key_bits[bit / 8] & (1 << (bit % 8)) != 0
+                    && let Some(key_code) = map_to_key_code(bit as u16)
+                {
+                    capability.set_supported_key(key_code);
+                }
             }
         }
@@ -496,10 +496,10 @@ impl InputDevice {
         // Set relative axis capabilities.
         if let Some(rel_bits) = &ev_rel {
             for bit in 0..rel_bits.len() * 8 {
-                if rel_bits[bit / 8] & (1 << (bit % 8)) != 0 {
-                    if let Some(rel_code) = map_to_rel_code(bit as u16) {
-                        capability.set_supported_relative_axis(rel_code);
-                    }
-                }
+                if rel_bits[bit / 8] & (1 << (bit % 8)) != 0
+                    && let Some(rel_code) = map_to_rel_code(bit as u16)
+                {
+                    capability.set_supported_relative_axis(rel_code);
+                }
             }
         }
@@ -538,14 +538,14 @@ impl<E: Ext> PollContext<'_, E> {
             }));
         });
 
-        if let Some((ip_repr, ip_payload)) = deferred {
-            if let Some(reply) = self.parse_and_process_udp(
-                &ip_repr,
-                &ip_payload,
-                &ChecksumCapabilities::ignored(),
-            ) {
-                dispatch_phy(&reply, self.iface.context_mut(), tx_token.take().unwrap());
-            }
-        }
+        if let Some((ip_repr, ip_payload)) = deferred
+            && let Some(reply) = self.parse_and_process_udp(
+                &ip_repr,
+                &ip_payload,
+                &ChecksumCapabilities::ignored(),
+            )
+        {
+            dispatch_phy(&reply, self.iface.context_mut(), tx_token.take().unwrap());
+        }
 
         if tx_token.is_none() {
@@ -249,14 +249,14 @@ impl InotifyFile {
 
         {
             let mut event_queue = self.event_queue.lock();
-            if let Some(last_event) = event_queue.back() {
-                if can_merge_events(last_event, &new_event) {
-                    event_queue.pop_back();
-                    event_queue.push_back(new_event);
-                    // New or merged event makes the file readable
-                    self.pollee.notify(IoEvents::IN);
-                    return;
-                }
-            }
+            if let Some(last_event) = event_queue.back()
+                && can_merge_events(last_event, &new_event)
+            {
+                event_queue.pop_back();
+                event_queue.push_back(new_event);
+                // New or merged event makes the file readable
+                self.pollee.notify(IoEvents::IN);
+                return;
+            }
 
             // If the queue is full, drop the event.
@@ -519,11 +519,12 @@ impl DentryChildren {
 
     /// Checks whether the dentry is a mount point. Returns an error if it is.
     fn check_mountpoint(&self, name: &str) -> Result<()> {
-        if let Some(Some(dentry)) = self.dentries.get(name) {
-            if dentry.is_mountpoint() {
-                return_errno_with_message!(Errno::EBUSY, "dentry is mountpint");
-            }
-        }
+        if let Some(Some(dentry)) = self.dentries.get(name)
+            && dentry.is_mountpoint()
+        {
+            return_errno_with_message!(Errno::EBUSY, "dentry is mountpint");
+        }
+
         Ok(())
     }
@@ -118,15 +118,15 @@ impl PipeWriter {
         };
 
         let res = self.state.write_with(write);
-        if res.is_err_and(|e| e.error() == Errno::EPIPE) {
-            if let Some(posix_thread) = current_thread!().as_posix_thread() {
-                posix_thread.enqueue_signal(Box::new(UserSignal::new(
-                    SIGPIPE,
-                    UserSignalKind::Kill,
-                    posix_thread.process().pid(),
-                    posix_thread.credentials().ruid(),
-                )));
-            }
-        }
+        if res.is_err_and(|e| e.error() == Errno::EPIPE)
+            && let Some(posix_thread) = current_thread!().as_posix_thread()
+        {
+            posix_thread.enqueue_signal(Box::new(UserSignal::new(
+                SIGPIPE,
+                UserSignalKind::Kill,
+                posix_thread.process().pid(),
+                posix_thread.credentials().ruid(),
+            )));
+        }
 
         res
@@ -206,11 +206,12 @@ impl OptionalBuilder {
        // The volatile property is inherited from parent.
        let is_volatile = {
            let mut is_volatile = self.is_volatile;
-            if let Some(parent) = self.parent.as_ref() {
-                if !parent.upgrade().unwrap().is_dentry_cacheable() {
-                    is_volatile = true;
-                }
-            }
+            if let Some(parent) = self.parent.as_ref()
+                && !parent.upgrade().unwrap().is_dentry_cacheable()
+            {
+                is_volatile = true;
+            }
+
            is_volatile
        };
@@ -359,11 +359,12 @@ impl PageCacheManager {
         let backend = self.backend();
         let backend_npages = backend.npages();
         for idx in page_idx_range.start..page_idx_range.end {
-            if let Some(page) = pages.peek(&idx) {
-                if page.load_state() == PageState::Dirty && idx < backend_npages {
-                    let waiter = backend.write_page_async(idx, page)?;
-                    bio_waiter.concat(waiter);
-                }
-            }
+            if let Some(page) = pages.peek(&idx)
+                && page.load_state() == PageState::Dirty
+                && idx < backend_npages
+            {
+                let waiter = backend.write_page_async(idx, page)?;
+                bio_waiter.concat(waiter);
+            }
         }
@@ -455,14 +456,14 @@ impl Pager for PageCacheManager {
 
     fn decommit_page(&self, idx: usize) -> Result<()> {
         let page_result = self.pages.lock().pop(&idx);
-        if let Some(page) = page_result {
-            if let PageState::Dirty = page.load_state() {
-                let Some(backend) = self.backend.upgrade() else {
-                    return Ok(());
-                };
-                if idx < backend.npages() {
-                    backend.write_page(idx, &page)?;
-                }
-            }
-        }
+        if let Some(page) = page_result
+            && let PageState::Dirty = page.load_state()
+        {
+            let Some(backend) = self.backend.upgrade() else {
+                return Ok(());
+            };
+            if idx < backend.npages() {
+                backend.write_page(idx, &page)?;
+            }
+        }
@@ -108,11 +108,12 @@ impl VsockStreamSocket {
        let peer_addr = self.peer_addr()?;
        // If buffer is now empty and the peer requested shutdown, finish shutting down the
        // connection.
-        if connected.should_close() {
-            if let Err(e) = self.shutdown(SockShutdownCmd::SHUT_RDWR) {
-                debug!("The error is {:?}", e);
-            }
-        }
+        if connected.should_close()
+            && let Err(e) = self.shutdown(SockShutdownCmd::SHUT_RDWR)
+        {
+            debug!("The error is {:?}", e);
+        }
+
        Ok((read_size, peer_addr))
    }
@@ -186,13 +187,12 @@ impl Socket for VsockStreamSocket {
        if !connecting
            .poll(IoEvents::IN, Some(poller.as_handle_mut()))
            .contains(IoEvents::IN)
-        {
-            if let Err(e) = poller.wait() {
-                vsockspace
-                    .remove_connecting_socket(&connecting.local_addr())
-                    .unwrap();
-                return Err(e);
-            }
-        }
+            && let Err(e) = poller.wait()
+        {
+            vsockspace
+                .remove_connecting_socket(&connecting.local_addr())
+                .unwrap();
+            return Err(e);
+        }
 
        vsockspace
@@ -147,16 +147,16 @@ impl StopStatus {
     pub(super) fn wait(&self, options: WaitOptions) -> Option<StopWaitStatus> {
         let mut wait_status = self.wait_status.lock();
 
-        if options.contains(WaitOptions::WSTOPPED) {
-            if let Some(StopWaitStatus::Stopped(_)) = wait_status.as_ref() {
-                return wait_status.take();
-            }
-        }
+        if options.contains(WaitOptions::WSTOPPED)
+            && let Some(StopWaitStatus::Stopped(_)) = wait_status.as_ref()
+        {
+            return wait_status.take();
+        }
 
-        if options.contains(WaitOptions::WCONTINUED) {
-            if let Some(StopWaitStatus::Continue) = wait_status.as_ref() {
-                return wait_status.take();
-            }
-        }
+        if options.contains(WaitOptions::WCONTINUED)
+            && let Some(StopWaitStatus::Continue) = wait_status.as_ref()
+        {
+            return wait_status.take();
+        }
 
         None
@@ -124,11 +124,12 @@ where
            if v.range().end > *point {
                return Some(cursor.remove_prev().unwrap().1);
            }
-        } else if let Some((_, v)) = cursor.peek_next() {
-            if v.range().start <= *point {
-                return Some(cursor.remove_next().unwrap().1);
-            }
-        }
+        } else if let Some((_, v)) = cursor.peek_next()
+            && v.range().start <= *point
+        {
+            return Some(cursor.remove_next().unwrap().1);
+        }
 
        None
    }
@@ -177,10 +178,10 @@ where
        // There's one previous element that may intersect with the range.
        if !self.peeked_prev {
            self.peeked_prev = true;
-            if let Some((_, v)) = self.cursor.peek_prev() {
-                if v.range().end > self.range.start {
-                    return Some(v);
-                }
-            }
+            if let Some((_, v)) = self.cursor.peek_prev()
+                && v.range().end > self.range.start
+            {
+                return Some(v);
+            }
        }
@@ -219,10 +220,10 @@ where
        // There's one previous element that may intersect with the range.
        if !self.drained_prev {
            self.drained_prev = true;
-            if let Some((_, v)) = self.cursor.peek_prev() {
-                if v.range().end > self.range.start {
-                    return Some(self.cursor.remove_prev().unwrap().1);
-                }
-            }
+            if let Some((_, v)) = self.cursor.peek_prev()
+                && v.range().end > self.range.start
+            {
+                return Some(self.cursor.remove_prev().unwrap().1);
+            }
        }
@@ -816,10 +816,10 @@ impl VmarInner {
            .map_or(VMAR_LOWEST_ADDR, |vm_mapping| vm_mapping.range().end);
        // FIXME: The up-align may overflow.
        let last_occupied_aligned = highest_occupied.align_up(align);
-        if let Some(last) = last_occupied_aligned.checked_add(size) {
-            if last <= VMAR_CAP_ADDR {
-                return Ok(last_occupied_aligned..last);
-            }
-        }
+        if let Some(last) = last_occupied_aligned.checked_add(size)
+            && last <= VMAR_CAP_ADDR
+        {
+            return Ok(last_occupied_aligned..last);
+        }
 
        // Slow path that we need to search for a free region.
@@ -77,14 +77,14 @@ pub(super) fn alloc(guard: &DisabledLocalIrqGuard, layout: Layout) -> Option<Pad
    // If the alignment order is larger than the size order, we need to split
    // the chunk and return the rest part back to the free lists.
    let allocated_size = size_of_order(order);
-    if allocated_size > layout.size() {
-        if let Some(chunk_addr) = chunk_addr {
-            do_dealloc(
-                &mut local_pool,
-                &mut global_pool,
-                [(chunk_addr + layout.size(), allocated_size - layout.size())].into_iter(),
-            );
-        }
-    }
+    if allocated_size > layout.size()
+        && let Some(chunk_addr) = chunk_addr
+    {
+        do_dealloc(
+            &mut local_pool,
+            &mut global_pool,
+            [(chunk_addr + layout.size(), allocated_size - layout.size())].into_iter(),
+        );
+    }
 
    balancing::balance(local_pool.deref_mut(), &mut global_pool);
@@ -105,11 +105,11 @@ where
    let crate_set =
        crate_whitelist.map(|crates| crates.iter().copied().collect::<BTreeSet<&str>>());
    for crate_ in tree.iter() {
-        if let Some(crate_set) = &crate_set {
-            if !crate_set.contains(crate_.name()) {
-                early_print!("\n[ktest runner] skipping crate \"{}\".\n", crate_.name());
-                continue;
-            }
-        }
+        if let Some(crate_set) = &crate_set
+            && !crate_set.contains(crate_.name())
+        {
+            early_print!("\n[ktest runner] skipping crate \"{}\".\n", crate_.name());
+            continue;
+        }
        match run_crate_ktests(crate_, &whitelist_trie) {
            KtestResult::Ok => {}
@@ -149,7 +149,7 @@ fn run_crate_ktests(crate_: &KtestCrate, whitelist: &Option<SuffixTrie>) -> Ktes
        debug_assert_eq!(test.info().package, crate_name);
        match test.run(
            &(ostd::panic::catch_unwind::<(), fn()>
-                as fn(fn()) -> Result<(), Box<(dyn Any + Send + 'static)>>),
+                as fn(fn()) -> Result<(), Box<dyn Any + Send + 'static>>),
        ) {
            Ok(()) => {
                early_print!(" {}\n", "ok".green());
@@ -73,10 +73,11 @@ fn for_each_hart_id(mut f: impl FnMut(u32)) {
            // FIXME: We should find a robust method to identify the management
            // harts. Here we simply skip those harts without MMU, which is
            // supposed to work in most cases.
-            if device_type.as_str() == Some("cpu") && cpu_node.property("mmu-type").is_some() {
-                if let Some(reg) = cpu_node.property("reg") {
-                    f(reg.as_usize().unwrap() as u32);
-                }
-            }
+            if device_type.as_str() == Some("cpu")
+                && cpu_node.property("mmu-type").is_some()
+                && let Some(reg) = cpu_node.property("reg")
+            {
+                f(reg.as_usize().unwrap() as u32);
+            }
        }
    })
@@ -96,10 +96,10 @@ fn parse_isa_extensions_list(isa_extensions: &fdt::node::NodeProperty) -> IsaExt
        if str.is_empty() {
            continue;
        }
-        if let Ok(ext_name) = core::str::from_utf8(str) {
-            if let Some(ext_data) = EXTENSION_TABLE.iter().find(|e| e.name == ext_name) {
-                extensions |= ext_data.flag;
-            }
-        }
+        if let Ok(ext_name) = core::str::from_utf8(str)
+            && let Some(ext_data) = EXTENSION_TABLE.iter().find(|e| e.name == ext_name)
+        {
+            extensions |= ext_data.flag;
+        }
    }
@@ -86,13 +86,13 @@ impl RangeAllocator {
            }
        }
 
-        if let Some(key) = to_remove {
-            if let Some(freenode) = freelist.get_mut(&key) {
-                if freenode.block.end - size == freenode.block.start {
-                    freelist.remove(&key);
-                } else {
-                    freenode.block.end -= size;
-                }
-            }
-        }
+        if let Some(key) = to_remove
+            && let Some(freenode) = freelist.get_mut(&key)
+        {
+            if freenode.block.end - size == freenode.block.start {
+                freelist.remove(&key);
+            } else {
+                freenode.block.end -= size;
+            }
+        }
@@ -117,26 +117,25 @@ impl RangeAllocator {
        if let Some((prev_va, prev_node)) = freelist
            .upper_bound_mut(core::ops::Bound::Excluded(&free_range.start))
            .peek_prev()
+            && prev_node.block.end == free_range.start
        {
-            if prev_node.block.end == free_range.start {
-                let prev_va = *prev_va;
-                free_range.start = prev_node.block.start;
-                freelist.remove(&prev_va);
-            }
+            let prev_va = *prev_va;
+            free_range.start = prev_node.block.start;
+            freelist.remove(&prev_va);
        }
 
        freelist.insert(free_range.start, FreeRange::new(free_range.clone()));
 
        // 2. check if we can merge the current block with the next block, if we can, do so.
        if let Some((next_va, next_node)) = freelist
            .lower_bound_mut(core::ops::Bound::Excluded(&free_range.start))
            .peek_next()
+            && free_range.end == next_node.block.start
        {
-            if free_range.end == next_node.block.start {
-                let next_va = *next_va;
-                free_range.end = next_node.block.end;
-                freelist.remove(&next_va);
-                freelist.get_mut(&free_range.start).unwrap().block.end = free_range.end;
-            }
+            let next_va = *next_va;
+            free_range.end = next_node.block.end;
+            freelist.remove(&next_va);
+            freelist.get_mut(&free_range.start).unwrap().block.end = free_range.end;
        }
    }