rust: enable `clippy::as_underscore` lint
In Rust 1.63.0, Clippy introduced the `as_underscore` lint [1]:

> The conversion might include lossy conversion or a dangerous cast that
> might go undetected due to the type being inferred.
>
> The lint is allowed by default as using `_` is less wordy than always
> specifying the type.

Always specifying the type is especially helpful in function call contexts
where the inferred type may change at a distance. Specifying the type also
allows Clippy to spot more cases of `useless_conversion`.

The primary downside is the need to specify the type in trivial getters.
There are 4 such functions: 3 have become slightly less ergonomic, 1 was
revealed to be a `useless_conversion`.

While this doesn't eliminate unchecked `as` conversions, it makes such
conversions easier to scrutinize. It also has the slight benefit of removing
a degree of freedom on which to bikeshed. Thus apply the changes and enable
the lint -- no functional change intended.

Link: https://rust-lang.github.io/rust-clippy/master/index.html#as_underscore [1]
Reviewed-by: Benno Lossin <benno.lossin@proton.me>
Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Tamir Duberstein <tamird@gmail.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Acked-by: Danilo Krummrich <dakr@kernel.org>
Link: https://lore.kernel.org/r/20250615-ptr-as-ptr-v12-4-f43b024581e8@gmail.com
[ Changed `isize` to `c_long`. - Miguel ]
Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
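For illustration, a minimal sketch of the pattern the lint targets (not taken
from the kernel tree; the helper name and types here are made up): with
`as _` the destination type is inferred at the call site, so a lossy
narrowing can slip through unnoticed, whereas spelling the type out keeps the
truncation visible.

    fn write_len_byte(_len: u8) {}

    fn demo(len: usize) {
        // Warned about under `-Wclippy::as_underscore`: the `u8` target is
        // inferred from the parameter type, hiding a potentially lossy
        // `usize` -> `u8` truncation.
        // write_len_byte(len as _);

        // Accepted: the truncation is spelled out where it happens; a
        // checked alternative would be `u8::try_from(len)`.
        write_len_byte(len as u8);
    }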
parent 23773bd8da
commit 5e30550558

Makefile
@@ -480,6 +480,7 @@ export rust_common_flags := --edition=2021 \
                    -Wunreachable_pub \
                    -Wclippy::all \
                    -Wclippy::as_ptr_cast_mut \
+                   -Wclippy::as_underscore \
                    -Wclippy::ignored_unit_patterns \
                    -Wclippy::mut_mut \
                    -Wclippy::needless_bitwise_bool \
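With the flag enabled tree-wide, a particular call site could still opt out
locally if the explicit type ever got in the way; a hedged sketch with a
hypothetical function (not part of this change):

    fn raw_status(ok: u32) -> u16 {
        // Allowing the lint for a single statement keeps the rest of the
        // crate warning on `as _`.
        #[allow(clippy::as_underscore)]
        let status = ok as _; // inferred as `u16` from the return type
        status
    }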
@@ -19,7 +19,7 @@ kernel::pci_device_table!(
     MODULE_PCI_TABLE,
     <NovaCore as pci::Driver>::IdInfo,
     [(
-        pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_NVIDIA, bindings::PCI_ANY_ID as _),
+        pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_NVIDIA, bindings::PCI_ANY_ID as u32),
         ()
     )]
 );
@@ -101,7 +101,7 @@ impl<T: Operations> OperationsVTable<T> {
             if let Err(e) = ret {
                 e.to_blk_status()
             } else {
-                bindings::BLK_STS_OK as _
+                bindings::BLK_STS_OK as bindings::blk_status_t
             }
         }

@@ -125,7 +125,12 @@ impl<T: Operations> Request<T> {
         // success of the call to `try_set_end` guarantees that there are no
         // `ARef`s pointing to this request. Therefore it is safe to hand it
         // back to the block layer.
-        unsafe { bindings::blk_mq_end_request(request_ptr, bindings::BLK_STS_OK as _) };
+        unsafe {
+            bindings::blk_mq_end_request(
+                request_ptr,
+                bindings::BLK_STS_OK as bindings::blk_status_t,
+            )
+        };

         Ok(())
     }
@@ -82,7 +82,7 @@ impl<T: RawDeviceId, U, const N: usize> IdArray<T, U, N> {
             unsafe {
                 raw_ids[i]
                     .as_mut_ptr()
-                    .byte_offset(T::DRIVER_DATA_OFFSET as _)
+                    .byte_add(T::DRIVER_DATA_OFFSET)
                     .cast::<usize>()
                     .write(i);
             }
@@ -61,19 +61,19 @@ struct DevresInner<T> {
 ///     unsafe fn new(paddr: usize) -> Result<Self>{
 ///         // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
 ///         // valid for `ioremap`.
-///         let addr = unsafe { bindings::ioremap(paddr as _, SIZE as _) };
+///         let addr = unsafe { bindings::ioremap(paddr as bindings::phys_addr_t, SIZE) };
 ///         if addr.is_null() {
 ///             return Err(ENOMEM);
 ///         }
 ///
-///         Ok(IoMem(IoRaw::new(addr as _, SIZE)?))
+///         Ok(IoMem(IoRaw::new(addr as usize, SIZE)?))
 ///     }
 /// }
 ///
 /// impl<const SIZE: usize> Drop for IoMem<SIZE> {
 ///     fn drop(&mut self) {
 ///         // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
-///         unsafe { bindings::iounmap(self.0.addr() as _); };
+///         unsafe { bindings::iounmap(self.0.addr() as *mut c_void); };
 ///     }
 /// }
 ///
@@ -115,8 +115,9 @@ impl<T> DevresInner<T> {

         // SAFETY: `devm_add_action` guarantees to call `Self::devres_callback` once `dev` is
         // detached.
-        let ret =
-            unsafe { bindings::devm_add_action(dev.as_raw(), Some(inner.callback), data as _) };
+        let ret = unsafe {
+            bindings::devm_add_action(dev.as_raw(), Some(inner.callback), data.cast_mut().cast())
+        };

         if ret != 0 {
             // SAFETY: We just created another reference to `inner` in order to pass it to
@@ -130,7 +131,7 @@ impl<T> DevresInner<T> {
     }

     fn as_ptr(&self) -> *const Self {
-        self as _
+        self
     }

     fn remove_action(this: &Arc<Self>) -> bool {
@@ -38,7 +38,7 @@ pub struct Attrs(u32);
 impl Attrs {
     /// Get the raw representation of this attribute.
     pub(crate) fn as_raw(self) -> crate::ffi::c_ulong {
-        self.0 as _
+        self.0 as crate::ffi::c_ulong
     }

     /// Check whether `flags` is contained in `self`.
@@ -89,7 +89,7 @@ impl<T: drm::Driver> Device<T> {
         driver_features: drm::driver::FEAT_GEM,
         ioctls: T::IOCTLS.as_ptr(),
         num_ioctls: T::IOCTLS.len() as i32,
-        fops: &Self::GEM_FOPS as _,
+        fops: &Self::GEM_FOPS,
     };

     const GEM_FOPS: bindings::file_operations = drm::gem::create_fops();
@@ -153,7 +153,7 @@ impl Error {
     /// Returns the error encoded as a pointer.
     pub fn to_ptr<T>(self) -> *mut T {
         // SAFETY: `self.0` is a valid error due to its invariant.
-        unsafe { bindings::ERR_PTR(self.0.get() as _).cast() }
+        unsafe { bindings::ERR_PTR(self.0.get() as crate::ffi::c_long).cast() }
     }

     /// Returns a string representing the error, if one exists.
@@ -5,7 +5,7 @@
 //! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)

 use crate::error::{code::EINVAL, Result};
-use crate::{bindings, build_assert};
+use crate::{bindings, build_assert, ffi::c_void};

 /// Raw representation of an MMIO region.
 ///
@@ -56,7 +56,7 @@ impl<const SIZE: usize> IoRaw<SIZE> {
 /// # Examples
 ///
 /// ```no_run
-/// # use kernel::{bindings, io::{Io, IoRaw}};
+/// # use kernel::{bindings, ffi::c_void, io::{Io, IoRaw}};
 /// # use core::ops::Deref;
 ///
 /// // See also [`pci::Bar`] for a real example.
@@ -70,19 +70,19 @@ impl<const SIZE: usize> IoRaw<SIZE> {
 ///     unsafe fn new(paddr: usize) -> Result<Self>{
 ///         // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
 ///         // valid for `ioremap`.
-///         let addr = unsafe { bindings::ioremap(paddr as _, SIZE as _) };
+///         let addr = unsafe { bindings::ioremap(paddr as bindings::phys_addr_t, SIZE) };
 ///         if addr.is_null() {
 ///             return Err(ENOMEM);
 ///         }
 ///
-///         Ok(IoMem(IoRaw::new(addr as _, SIZE)?))
+///         Ok(IoMem(IoRaw::new(addr as usize, SIZE)?))
 ///     }
 /// }
 ///
 /// impl<const SIZE: usize> Drop for IoMem<SIZE> {
 ///     fn drop(&mut self) {
 ///         // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
-///         unsafe { bindings::iounmap(self.0.addr() as _); };
+///         unsafe { bindings::iounmap(self.0.addr() as *mut c_void); };
 ///     }
 /// }
 ///
@@ -119,7 +119,7 @@ macro_rules! define_read {
             let addr = self.io_addr_assert::<$type_name>(offset);

             // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
-            unsafe { bindings::$c_fn(addr as _) }
+            unsafe { bindings::$c_fn(addr as *const c_void) }
         }

         /// Read IO data from a given offset.
@@ -131,7 +131,7 @@ macro_rules! define_read {
             let addr = self.io_addr::<$type_name>(offset)?;

             // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
-            Ok(unsafe { bindings::$c_fn(addr as _) })
+            Ok(unsafe { bindings::$c_fn(addr as *const c_void) })
         }
     };
 }
@@ -148,7 +148,7 @@ macro_rules! define_write {
             let addr = self.io_addr_assert::<$type_name>(offset);

             // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
-            unsafe { bindings::$c_fn(value, addr as _, ) }
+            unsafe { bindings::$c_fn(value, addr as *mut c_void) }
         }

         /// Write IO data from a given offset.
@@ -160,7 +160,7 @@ macro_rules! define_write {
             let addr = self.io_addr::<$type_name>(offset)?;

             // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
-            unsafe { bindings::$c_fn(value, addr as _) }
+            unsafe { bindings::$c_fn(value, addr as *mut c_void) }
             Ok(())
         }
     };
@@ -34,7 +34,7 @@ impl MiscDeviceOptions {
     pub const fn into_raw<T: MiscDevice>(self) -> bindings::miscdevice {
         // SAFETY: All zeros is valid for this C type.
         let mut result: bindings::miscdevice = unsafe { MaybeUninit::zeroed().assume_init() };
-        result.minor = bindings::MISC_DYNAMIC_MINOR as _;
+        result.minor = bindings::MISC_DYNAMIC_MINOR as ffi::c_int;
         result.name = self.name.as_char_ptr();
         result.fops = MiscdeviceVTable::<T>::build();
         result
@@ -392,80 +392,80 @@ pub mod flags {
     use crate::bindings;

     /// No flags are set.
-    pub const NONE: vm_flags_t = bindings::VM_NONE as _;
+    pub const NONE: vm_flags_t = bindings::VM_NONE as vm_flags_t;

     /// Mapping allows reads.
-    pub const READ: vm_flags_t = bindings::VM_READ as _;
+    pub const READ: vm_flags_t = bindings::VM_READ as vm_flags_t;

     /// Mapping allows writes.
-    pub const WRITE: vm_flags_t = bindings::VM_WRITE as _;
+    pub const WRITE: vm_flags_t = bindings::VM_WRITE as vm_flags_t;

     /// Mapping allows execution.
-    pub const EXEC: vm_flags_t = bindings::VM_EXEC as _;
+    pub const EXEC: vm_flags_t = bindings::VM_EXEC as vm_flags_t;

     /// Mapping is shared.
-    pub const SHARED: vm_flags_t = bindings::VM_SHARED as _;
+    pub const SHARED: vm_flags_t = bindings::VM_SHARED as vm_flags_t;

     /// Mapping may be updated to allow reads.
-    pub const MAYREAD: vm_flags_t = bindings::VM_MAYREAD as _;
+    pub const MAYREAD: vm_flags_t = bindings::VM_MAYREAD as vm_flags_t;

     /// Mapping may be updated to allow writes.
-    pub const MAYWRITE: vm_flags_t = bindings::VM_MAYWRITE as _;
+    pub const MAYWRITE: vm_flags_t = bindings::VM_MAYWRITE as vm_flags_t;

     /// Mapping may be updated to allow execution.
-    pub const MAYEXEC: vm_flags_t = bindings::VM_MAYEXEC as _;
+    pub const MAYEXEC: vm_flags_t = bindings::VM_MAYEXEC as vm_flags_t;

     /// Mapping may be updated to be shared.
-    pub const MAYSHARE: vm_flags_t = bindings::VM_MAYSHARE as _;
+    pub const MAYSHARE: vm_flags_t = bindings::VM_MAYSHARE as vm_flags_t;

     /// Page-ranges managed without `struct page`, just pure PFN.
-    pub const PFNMAP: vm_flags_t = bindings::VM_PFNMAP as _;
+    pub const PFNMAP: vm_flags_t = bindings::VM_PFNMAP as vm_flags_t;

     /// Memory mapped I/O or similar.
-    pub const IO: vm_flags_t = bindings::VM_IO as _;
+    pub const IO: vm_flags_t = bindings::VM_IO as vm_flags_t;

     /// Do not copy this vma on fork.
-    pub const DONTCOPY: vm_flags_t = bindings::VM_DONTCOPY as _;
+    pub const DONTCOPY: vm_flags_t = bindings::VM_DONTCOPY as vm_flags_t;

     /// Cannot expand with mremap().
-    pub const DONTEXPAND: vm_flags_t = bindings::VM_DONTEXPAND as _;
+    pub const DONTEXPAND: vm_flags_t = bindings::VM_DONTEXPAND as vm_flags_t;

     /// Lock the pages covered when they are faulted in.
-    pub const LOCKONFAULT: vm_flags_t = bindings::VM_LOCKONFAULT as _;
+    pub const LOCKONFAULT: vm_flags_t = bindings::VM_LOCKONFAULT as vm_flags_t;

     /// Is a VM accounted object.
-    pub const ACCOUNT: vm_flags_t = bindings::VM_ACCOUNT as _;
+    pub const ACCOUNT: vm_flags_t = bindings::VM_ACCOUNT as vm_flags_t;

     /// Should the VM suppress accounting.
-    pub const NORESERVE: vm_flags_t = bindings::VM_NORESERVE as _;
+    pub const NORESERVE: vm_flags_t = bindings::VM_NORESERVE as vm_flags_t;

     /// Huge TLB Page VM.
-    pub const HUGETLB: vm_flags_t = bindings::VM_HUGETLB as _;
+    pub const HUGETLB: vm_flags_t = bindings::VM_HUGETLB as vm_flags_t;

     /// Synchronous page faults. (DAX-specific)
-    pub const SYNC: vm_flags_t = bindings::VM_SYNC as _;
+    pub const SYNC: vm_flags_t = bindings::VM_SYNC as vm_flags_t;

     /// Architecture-specific flag.
-    pub const ARCH_1: vm_flags_t = bindings::VM_ARCH_1 as _;
+    pub const ARCH_1: vm_flags_t = bindings::VM_ARCH_1 as vm_flags_t;

     /// Wipe VMA contents in child on fork.
-    pub const WIPEONFORK: vm_flags_t = bindings::VM_WIPEONFORK as _;
+    pub const WIPEONFORK: vm_flags_t = bindings::VM_WIPEONFORK as vm_flags_t;

     /// Do not include in the core dump.
-    pub const DONTDUMP: vm_flags_t = bindings::VM_DONTDUMP as _;
+    pub const DONTDUMP: vm_flags_t = bindings::VM_DONTDUMP as vm_flags_t;

     /// Not soft dirty clean area.
-    pub const SOFTDIRTY: vm_flags_t = bindings::VM_SOFTDIRTY as _;
+    pub const SOFTDIRTY: vm_flags_t = bindings::VM_SOFTDIRTY as vm_flags_t;

     /// Can contain `struct page` and pure PFN pages.
-    pub const MIXEDMAP: vm_flags_t = bindings::VM_MIXEDMAP as _;
+    pub const MIXEDMAP: vm_flags_t = bindings::VM_MIXEDMAP as vm_flags_t;

     /// MADV_HUGEPAGE marked this vma.
-    pub const HUGEPAGE: vm_flags_t = bindings::VM_HUGEPAGE as _;
+    pub const HUGEPAGE: vm_flags_t = bindings::VM_HUGEPAGE as vm_flags_t;

     /// MADV_NOHUGEPAGE marked this vma.
-    pub const NOHUGEPAGE: vm_flags_t = bindings::VM_NOHUGEPAGE as _;
+    pub const NOHUGEPAGE: vm_flags_t = bindings::VM_NOHUGEPAGE as vm_flags_t;

     /// KSM may merge identical pages.
-    pub const MERGEABLE: vm_flags_t = bindings::VM_MERGEABLE as _;
+    pub const MERGEABLE: vm_flags_t = bindings::VM_MERGEABLE as vm_flags_t;
 }
@@ -22,7 +22,7 @@ unsafe impl RawDeviceId for DeviceId {
     const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::of_device_id, data);

     fn index(&self) -> usize {
-        self.0.data as _
+        self.0.data as usize
     }
 }

@@ -34,10 +34,10 @@ impl DeviceId {
         // SAFETY: FFI type is valid to be zero-initialized.
        let mut of: bindings::of_device_id = unsafe { core::mem::zeroed() };

-        // TODO: Use `clone_from_slice` once the corresponding types do match.
+        // TODO: Use `copy_from_slice` once stabilized for `const`.
         let mut i = 0;
         while i < src.len() {
-            of.compatible[i] = src[i] as _;
+            of.compatible[i] = src[i];
             i += 1;
         }

@@ -171,7 +171,7 @@ unsafe impl RawDeviceId for DeviceId {
     const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::pci_device_id, driver_data);

     fn index(&self) -> usize {
-        self.0.driver_data as _
+        self.0.driver_data
     }
 }

@@ -206,7 +206,10 @@ macro_rules! pci_device_table {
 ///     MODULE_PCI_TABLE,
 ///     <MyDriver as pci::Driver>::IdInfo,
 ///     [
-///         (pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, bindings::PCI_ANY_ID as _), ())
+///         (
+///             pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, bindings::PCI_ANY_ID as u32),
+///             (),
+///         )
 ///     ]
 /// );
 ///
@@ -330,7 +333,7 @@ impl<const SIZE: usize> Bar<SIZE> {
         // `ioptr` is valid by the safety requirements.
         // `num` is valid by the safety requirements.
         unsafe {
-            bindings::pci_iounmap(pdev.as_raw(), ioptr as _);
+            bindings::pci_iounmap(pdev.as_raw(), ioptr as *mut kernel::ffi::c_void);
             bindings::pci_release_region(pdev.as_raw(), num);
         }
     }
@@ -728,9 +728,9 @@ impl RawFormatter {
     pub(crate) unsafe fn from_ptrs(pos: *mut u8, end: *mut u8) -> Self {
         // INVARIANT: The safety requirements guarantee the type invariants.
         Self {
-            beg: pos as _,
-            pos: pos as _,
-            end: end as _,
+            beg: pos as usize,
+            pos: pos as usize,
+            end: end as usize,
         }
     }

@@ -755,7 +755,7 @@ impl RawFormatter {
     ///
     /// N.B. It may point to invalid memory.
     pub(crate) fn pos(&self) -> *mut u8 {
-        self.pos as _
+        self.pos as *mut u8
     }

     /// Returns the number of bytes written to the formatter.
@@ -198,7 +198,7 @@ impl Queue {
         unsafe {
             w.__enqueue(move |work_ptr| {
                 bindings::queue_work_on(
-                    bindings::wq_misc_consts_WORK_CPU_UNBOUND as _,
+                    bindings::wq_misc_consts_WORK_CPU_UNBOUND as ffi::c_int,
                     queue_ptr,
                     work_ptr,
                 )