Diffstat (limited to 'rust/kernel')
43 files changed, 2682 insertions, 761 deletions
diff --git a/rust/kernel/alloc.rs b/rust/kernel/alloc.rs index 1966bd407017..f2f7f3a53d29 100644 --- a/rust/kernel/alloc.rs +++ b/rust/kernel/alloc.rs @@ -1,23 +1,41 @@  // SPDX-License-Identifier: GPL-2.0 -//! Extensions to the [`alloc`] crate. +//! Implementation of the kernel's memory allocation infrastructure. -#[cfg(not(test))] -#[cfg(not(testlib))] -mod allocator; -pub mod box_ext; -pub mod vec_ext; +#[cfg(not(any(test, testlib)))] +pub mod allocator; +pub mod kbox; +pub mod kvec; +pub mod layout; + +#[cfg(any(test, testlib))] +pub mod allocator_test; + +#[cfg(any(test, testlib))] +pub use self::allocator_test as allocator; + +pub use self::kbox::Box; +pub use self::kbox::KBox; +pub use self::kbox::KVBox; +pub use self::kbox::VBox; + +pub use self::kvec::IntoIter; +pub use self::kvec::KVVec; +pub use self::kvec::KVec; +pub use self::kvec::VVec; +pub use self::kvec::Vec;  /// Indicates an allocation error.  #[derive(Copy, Clone, PartialEq, Eq, Debug)]  pub struct AllocError; +use core::{alloc::Layout, ptr::NonNull};  /// Flags to be used when allocating memory.  ///  /// They can be combined with the operators `|`, `&`, and `!`.  ///  /// Values can be used from the [`flags`] module. -#[derive(Clone, Copy)] +#[derive(Clone, Copy, PartialEq)]  pub struct Flags(u32);  impl Flags { @@ -25,6 +43,11 @@ impl Flags {      pub(crate) fn as_raw(self) -> u32 {          self.0      } + +    /// Check whether `flags` is contained in `self`. +    pub fn contains(self, flags: Flags) -> bool { +        (self & flags) == flags +    }  }  impl core::ops::BitOr for Flags { @@ -85,4 +108,117 @@ pub mod flags {      /// use any filesystem callback.  It is very likely to fail to allocate memory, even for very      /// small allocations.      pub const GFP_NOWAIT: Flags = Flags(bindings::GFP_NOWAIT); + +    /// Suppresses allocation failure reports. +    /// +    /// This is normally or'd with other flags. +    pub const __GFP_NOWARN: Flags = Flags(bindings::__GFP_NOWARN); +} + +/// The kernel's [`Allocator`] trait. +/// +/// An implementation of [`Allocator`] can allocate, re-allocate and free memory buffers described +/// via [`Layout`]. +/// +/// [`Allocator`] is designed to be implemented as a ZST; [`Allocator`] functions do not operate on +/// an object instance. +/// +/// In order to be able to support `#[derive(SmartPointer)]` later on, we need to avoid a design +/// that requires an `Allocator` to be instantiated, hence its functions must not contain any kind +/// of `self` parameter. +/// +/// # Safety +/// +/// - A memory allocation returned from an allocator must remain valid until it is explicitly freed. +/// +/// - Any pointer to a valid memory allocation must be valid to be passed to any other [`Allocator`] +///   function of the same type. +/// +/// - Implementers must ensure that all trait functions abide by the guarantees documented in the +///   `# Guarantees` sections. +pub unsafe trait Allocator { +    /// Allocate memory based on `layout` and `flags`. +    /// +    /// On success, returns a buffer represented as `NonNull<[u8]>` that satisfies the layout +    /// constraints (i.e. minimum size and alignment as specified by `layout`). +    /// +    /// This function is equivalent to `realloc` when called with `None`. 
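+    /// +    /// For example, a minimal usage sketch (any concrete allocator `A` implementing this trait works, e.g. `Kmalloc` from the next file; flag constants come from the [`flags`] module above): +    /// +    /// ```ignore +    /// let layout = Layout::new::<u64>(); +    /// // Suppress failure warnings via the new `__GFP_NOWARN` flag. +    /// let flags = flags::GFP_KERNEL | flags::__GFP_NOWARN; +    /// assert!(flags.contains(flags::GFP_KERNEL)); +    /// let ptr = A::alloc(layout, flags)?; +    /// // SAFETY: `ptr` was just allocated with `layout` by `A`. +    /// unsafe { A::free(ptr.cast(), layout) }; +    /// ```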
+    /// +    /// # Guarantees +    /// +    /// When the return value is `Ok(ptr)`, then `ptr` is +    /// - valid for reads and writes for `layout.size()` bytes, until it is passed to +    ///   [`Allocator::free`] or [`Allocator::realloc`], +    /// - aligned to `layout.align()`, +    /// +    /// Additionally, `Flags` are honored as documented in +    /// <https://docs.kernel.org/core-api/mm-api.html#mm-api-gfp-flags>. +    fn alloc(layout: Layout, flags: Flags) -> Result<NonNull<[u8]>, AllocError> { +        // SAFETY: Passing `None` to `realloc` is valid by its safety requirements and asks for a +        // new memory allocation. +        unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags) } +    } + +    /// Re-allocate an existing memory allocation to satisfy the requested `layout`. +    /// +    /// If the requested size is zero, `realloc` behaves equivalently to `free`. +    /// +    /// If the requested size is larger than the size of the existing allocation, a successful call +    /// to `realloc` guarantees that the new or grown buffer has at least `Layout::size` bytes, but +    /// may also be larger. +    /// +    /// If the requested size is smaller than the size of the existing allocation, `realloc` may or +    /// may not shrink the buffer; this is implementation specific to the allocator. +    /// +    /// On allocation failure, the existing buffer, if any, remains valid. +    /// +    /// The buffer is represented as `NonNull<[u8]>`. +    /// +    /// # Safety +    /// +    /// - If `ptr == Some(p)`, then `p` must point to an existing and valid memory allocation +    ///   created by this [`Allocator`]; if `old_layout` is zero-sized `p` does not need to be a +    ///   pointer returned by this [`Allocator`]. +    /// - `ptr` is allowed to be `None`; in this case a new memory allocation is created and +    ///   `old_layout` is ignored. +    /// - `old_layout` must match the `Layout` the allocation has been created with. +    /// +    /// # Guarantees +    /// +    /// This function has the same guarantees as [`Allocator::alloc`]. When `ptr == Some(p)`, then +    /// it additionally guarantees that: +    /// - the contents of the memory pointed to by `p` are preserved up to the lesser of the new +    ///   and old size, i.e. `ret_ptr[0..min(layout.size(), old_layout.size())] == +    ///   p[0..min(layout.size(), old_layout.size())]`. +    /// - when the return value is `Err(AllocError)`, then `ptr` is still valid. +    unsafe fn realloc( +        ptr: Option<NonNull<u8>>, +        layout: Layout, +        old_layout: Layout, +        flags: Flags, +    ) -> Result<NonNull<[u8]>, AllocError>; + +    /// Free an existing memory allocation. +    /// +    /// # Safety +    /// +    /// - `ptr` must point to an existing and valid memory allocation created by this [`Allocator`]; +    ///   if `layout` is zero-sized `ptr` does not need to be a pointer returned by this +    ///   [`Allocator`]. +    /// - `layout` must match the `Layout` the allocation has been created with. +    /// - The memory allocation at `ptr` must never again be read from or written to. +    unsafe fn free(ptr: NonNull<u8>, layout: Layout) { +        // SAFETY: The caller guarantees that `ptr` points at a valid allocation created by this +        // allocator. We are passing a `Layout` with the smallest possible alignment, so it is +        // smaller than or equal to the alignment previously used with this allocation. 
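+        // Note: `Flags(0)` in the call below should be sufficient: a `realloc` to a zero-sized +        // layout behaves like `free` (see `realloc`'s documentation above), so no new memory is +        // allocated for which GFP flags would be relevant.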
+    let _ = unsafe { Self::realloc(Some(ptr), Layout::new::<()>(), layout, Flags(0)) }; +    } +} + +/// Returns a properly aligned dangling pointer from the given `layout`. +pub(crate) fn dangling_from_layout(layout: Layout) -> NonNull<u8> { +    let ptr = layout.align() as *mut u8; + +    // SAFETY: `layout.align()` (and hence `ptr`) is guaranteed to be non-zero. +    unsafe { NonNull::new_unchecked(ptr) }  } diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs index e6ea601f38c6..439985e29fbc 100644 --- a/rust/kernel/alloc/allocator.rs +++ b/rust/kernel/alloc/allocator.rs @@ -1,74 +1,188 @@  // SPDX-License-Identifier: GPL-2.0  //! Allocator support. +//! +//! Documentation for the kernel's memory allocators can be found in the "Memory Allocation Guide" +//! linked below. For instance, this includes the concept of "get free page" (GFP) flags and the +//! typical application of the different kernel allocators. +//! +//! Reference: <https://docs.kernel.org/core-api/memory-allocation.html> -use super::{flags::*, Flags}; -use core::alloc::{GlobalAlloc, Layout}; +use super::Flags; +use core::alloc::Layout;  use core::ptr; +use core::ptr::NonNull; -struct KernelAllocator; +use crate::alloc::{AllocError, Allocator}; +use crate::bindings; +use crate::pr_warn; -/// Calls `krealloc` with a proper size to alloc a new object aligned to `new_layout`'s alignment. +/// The contiguous kernel allocator.  /// -/// # Safety +/// `Kmalloc` is typically used for physically contiguous allocations up to page size, but also +/// supports larger allocations up to `bindings::KMALLOC_MAX_SIZE`, which is hardware specific.  /// -/// - `ptr` can be either null or a pointer which has been allocated by this allocator. -/// - `new_layout` must have a non-zero size. -pub(crate) unsafe fn krealloc_aligned(ptr: *mut u8, new_layout: Layout, flags: Flags) -> *mut u8 { +/// For more details see [self]. +pub struct Kmalloc; + +/// The virtually contiguous kernel allocator. +/// +/// `Vmalloc` allocates pages from the page level allocator and maps them into the contiguous kernel +/// virtual space. It is typically used for large allocations. The memory allocated with this +/// allocator is not physically contiguous. +/// +/// For more details see [self]. +pub struct Vmalloc; + +/// The kvmalloc kernel allocator. +/// +/// `KVmalloc` attempts to allocate memory with `Kmalloc` first, but falls back to `Vmalloc` upon +/// failure. This allocator is typically used when the size for the requested allocation is not +/// known and may exceed the capabilities of `Kmalloc`. +/// +/// For more details see [self]. +pub struct KVmalloc; + +/// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment. +fn aligned_size(new_layout: Layout) -> usize {      // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.      let layout = new_layout.pad_to_align();      // Note that `layout.size()` (after padding) is guaranteed to be a multiple of `layout.align()`      // which together with the slab guarantees means the `krealloc` will return a properly aligned      // object (see comments in `kmalloc()` for more information). -    let size = layout.size(); - -    // SAFETY: -    // - `ptr` is either null or a pointer returned from a previous `k{re}alloc()` by the -    //   function safety requirement. 
-    // - `size` is greater than 0 since it's from `layout.size()` (which cannot be zero according -    //   to the function safety requirement) -    unsafe { bindings::krealloc(ptr as *const core::ffi::c_void, size, flags.0) as *mut u8 } +    layout.size()  } -unsafe impl GlobalAlloc for KernelAllocator { -    unsafe fn alloc(&self, layout: Layout) -> *mut u8 { -        // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety -        // requirement. -        unsafe { krealloc_aligned(ptr::null_mut(), layout, GFP_KERNEL) } -    } +/// # Invariants +/// +/// One of the following: `krealloc`, `vrealloc`, `kvrealloc`. +struct ReallocFunc( +    unsafe extern "C" fn(*const crate::ffi::c_void, usize, u32) -> *mut crate::ffi::c_void, +); -    unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { -        unsafe { -            bindings::kfree(ptr as *const core::ffi::c_void); -        } -    } +impl ReallocFunc { +    // INVARIANT: `krealloc` satisfies the type invariants. +    const KREALLOC: Self = Self(bindings::krealloc); -    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { -        // SAFETY: -        // - `new_size`, when rounded up to the nearest multiple of `layout.align()`, will not -        //   overflow `isize` by the function safety requirement. -        // - `layout.align()` is a proper alignment (i.e. not zero and must be a power of two). -        let layout = unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) }; +    // INVARIANT: `vrealloc` satisfies the type invariants. +    const VREALLOC: Self = Self(bindings::vrealloc); + +    // INVARIANT: `kvrealloc` satisfies the type invariants. +    const KVREALLOC: Self = Self(bindings::kvrealloc); + +    /// # Safety +    /// +    /// This method has the same safety requirements as [`Allocator::realloc`]. +    /// +    /// # Guarantees +    /// +    /// This method has the same guarantees as `Allocator::realloc`. Additionally +    /// - it accepts any pointer to a valid memory allocation allocated by this function. +    /// - memory allocated by this function remains valid until it is passed to this function. +    unsafe fn call( +        &self, +        ptr: Option<NonNull<u8>>, +        layout: Layout, +        old_layout: Layout, +        flags: Flags, +    ) -> Result<NonNull<[u8]>, AllocError> { +        let size = aligned_size(layout); +        let ptr = match ptr { +            Some(ptr) => { +                if old_layout.size() == 0 { +                    ptr::null() +                } else { +                    ptr.as_ptr() +                } +            } +            None => ptr::null(), +        };          // SAFETY: -        // - `ptr` is either null or a pointer allocated by this allocator by the function safety -        //   requirement. -        // - the size of `layout` is not zero because `new_size` is not zero by the function safety -        //   requirement. -        unsafe { krealloc_aligned(ptr, layout, GFP_KERNEL) } +        // - `self.0` is one of `krealloc`, `vrealloc`, `kvrealloc` and thus only requires that +        //   `ptr` is NULL or valid. +        // - `ptr` is either NULL or valid by the safety requirements of this function. +        // +        // GUARANTEE: +        // - `self.0` is one of `krealloc`, `vrealloc`, `kvrealloc`. +        // - Those functions provide the guarantees of this function. 
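+        // At this point `ptr` is NULL exactly when a new allocation is requested: either the +        // caller passed `None`, or `old_layout` is zero-sized. The single C call below therefore +        // covers allocating, growing, shrinking and (for `size == 0`) freeing.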
+        let raw_ptr = unsafe { +            // If `size == 0` and `ptr != NULL` the memory behind the pointer is freed. +            self.0(ptr.cast(), size, flags.0).cast() +        }; + +        let ptr = if size == 0 { +            crate::alloc::dangling_from_layout(layout) +        } else { +            NonNull::new(raw_ptr).ok_or(AllocError)? +        }; + +        Ok(NonNull::slice_from_raw_parts(ptr, size)) +    } +} + +// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that +// - memory remains valid until it is explicitly freed, +// - passing a pointer to a valid memory allocation is OK, +// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same. +unsafe impl Allocator for Kmalloc { +    #[inline] +    unsafe fn realloc( +        ptr: Option<NonNull<u8>>, +        layout: Layout, +        old_layout: Layout, +        flags: Flags, +    ) -> Result<NonNull<[u8]>, AllocError> { +        // SAFETY: `ReallocFunc::call` has the same safety requirements as `Allocator::realloc`. +        unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags) }      } +} + +// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that +// - memory remains valid until it is explicitly freed, +// - passing a pointer to a valid memory allocation is OK, +// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same. +unsafe impl Allocator for Vmalloc { +    #[inline] +    unsafe fn realloc( +        ptr: Option<NonNull<u8>>, +        layout: Layout, +        old_layout: Layout, +        flags: Flags, +    ) -> Result<NonNull<[u8]>, AllocError> { +        // TODO: Support alignments larger than PAGE_SIZE. +        if layout.align() > bindings::PAGE_SIZE { +            pr_warn!("Vmalloc does not support alignments larger than PAGE_SIZE yet.\n"); +            return Err(AllocError); +        } -    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { -        // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety -        // requirement. -        unsafe { krealloc_aligned(ptr::null_mut(), layout, GFP_KERNEL | __GFP_ZERO) } +        // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously +        // allocated with this `Allocator`. +        unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags) }      }  } -#[global_allocator] -static ALLOCATOR: KernelAllocator = KernelAllocator; +// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that +// - memory remains valid until it is explicitly freed, +// - passing a pointer to a valid memory allocation is OK, +// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same. +unsafe impl Allocator for KVmalloc { +    #[inline] +    unsafe fn realloc( +        ptr: Option<NonNull<u8>>, +        layout: Layout, +        old_layout: Layout, +        flags: Flags, +    ) -> Result<NonNull<[u8]>, AllocError> { +        // TODO: Support alignments larger than PAGE_SIZE. +        if layout.align() > bindings::PAGE_SIZE { +            pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n"); +            return Err(AllocError); +        } -// See <https://github.com/rust-lang/rust/pull/86844>. -#[no_mangle] -static __rust_no_alloc_shim_is_unstable: u8 = 0; +        // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously +        // allocated with this `Allocator`. 
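+        // As documented above, `kvrealloc` tries the `Kmalloc` path first and falls back to +        // `Vmalloc` on failure, trading physical contiguity for reliability at large sizes.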
+        unsafe { ReallocFunc::KVREALLOC.call(ptr, layout, old_layout, flags) } +    } +} diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs new file mode 100644 index 000000000000..e3240d16040b --- /dev/null +++ b/rust/kernel/alloc/allocator_test.rs @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! So far the kernel's `Box` and `Vec` types can't be used by userspace test cases, since all users +//! of those types (e.g. `CString`) use kernel allocators for instantiation. +//! +//! In order to allow userspace test cases to make use of such types as well, implement the +//! `Cmalloc` allocator within the allocator_test module and type alias all kernel allocators to +//! `Cmalloc`. The `Cmalloc` allocator uses libc's `aligned_alloc()` and `free()` functions as its +//! allocator backend. + +#![allow(missing_docs)] + +use super::{flags::*, AllocError, Allocator, Flags}; +use core::alloc::Layout; +use core::cmp; +use core::ptr; +use core::ptr::NonNull; + +/// The userspace allocator based on libc. +pub struct Cmalloc; + +pub type Kmalloc = Cmalloc; +pub type Vmalloc = Kmalloc; +pub type KVmalloc = Kmalloc; + +extern "C" { +    #[link_name = "aligned_alloc"] +    fn libc_aligned_alloc(align: usize, size: usize) -> *mut crate::ffi::c_void; + +    #[link_name = "free"] +    fn libc_free(ptr: *mut crate::ffi::c_void); +} + +// SAFETY: +// - memory remains valid until it is explicitly freed, +// - passing a pointer to a valid memory allocation created by this `Allocator` is always OK, +// - `realloc` provides the guarantees as provided in the `# Guarantees` section. +unsafe impl Allocator for Cmalloc { +    unsafe fn realloc( +        ptr: Option<NonNull<u8>>, +        layout: Layout, +        old_layout: Layout, +        flags: Flags, +    ) -> Result<NonNull<[u8]>, AllocError> { +        let src = match ptr { +            Some(src) => { +                if old_layout.size() == 0 { +                    ptr::null_mut() +                } else { +                    src.as_ptr() +                } +            } +            None => ptr::null_mut(), +        }; + +        if layout.size() == 0 { +            // SAFETY: `src` is either NULL or was previously allocated with this `Allocator`. +            unsafe { libc_free(src.cast()) }; + +            return Ok(NonNull::slice_from_raw_parts( +                crate::alloc::dangling_from_layout(layout), +                0, +            )); +        } + +        // SAFETY: Returns either NULL or a pointer to a memory allocation that satisfies or +        // exceeds the given size and alignment requirements. +        let dst = unsafe { libc_aligned_alloc(layout.align(), layout.size()) } as *mut u8; +        let dst = NonNull::new(dst).ok_or(AllocError)?; + +        if flags.contains(__GFP_ZERO) { +            // SAFETY: The preceding calls to `libc_aligned_alloc` and `NonNull::new` +            // guarantee that `dst` points to memory of at least `layout.size()` bytes. +            unsafe { dst.as_ptr().write_bytes(0, layout.size()) }; +        } + +        if !src.is_null() { +            // SAFETY: +            // - `src` has previously been allocated with this `Allocator`; `dst` has just been +            //   newly allocated, hence the memory regions do not overlap. 
+            //   - both `src` and `dst` are properly aligned and valid for reads and writes. +            unsafe { +                ptr::copy_nonoverlapping( +                    src, +                    dst.as_ptr(), +                    cmp::min(layout.size(), old_layout.size()), +                ) +            }; +        } + +        // SAFETY: `src` is either NULL or was previously allocated with this `Allocator`. +        unsafe { libc_free(src.cast()) }; + +        Ok(NonNull::slice_from_raw_parts(dst, layout.size())) +    } +} diff --git a/rust/kernel/alloc/box_ext.rs b/rust/kernel/alloc/box_ext.rs deleted file mode 100644 index 7009ad78d4e0..000000000000 --- a/rust/kernel/alloc/box_ext.rs +++ /dev/null @@ -1,89 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -//! Extensions to [`Box`] for fallible allocations. - -use super::{AllocError, Flags}; -use alloc::boxed::Box; -use core::{mem::MaybeUninit, ptr, result::Result}; - -/// Extensions to [`Box`]. -pub trait BoxExt<T>: Sized { -    /// Allocates a new box. -    /// -    /// The allocation may fail, in which case an error is returned. -    fn new(x: T, flags: Flags) -> Result<Self, AllocError>; - -    /// Allocates a new uninitialised box. -    /// -    /// The allocation may fail, in which case an error is returned. -    fn new_uninit(flags: Flags) -> Result<Box<MaybeUninit<T>>, AllocError>; - -    /// Drops the contents, but keeps the allocation. -    /// -    /// # Examples -    /// -    /// ``` -    /// use kernel::alloc::{flags, box_ext::BoxExt}; -    /// let value = Box::new([0; 32], flags::GFP_KERNEL)?; -    /// assert_eq!(*value, [0; 32]); -    /// let mut value = Box::drop_contents(value); -    /// // Now we can re-use `value`: -    /// value.write([1; 32]); -    /// // SAFETY: We just wrote to it. -    /// let value = unsafe { value.assume_init() }; -    /// assert_eq!(*value, [1; 32]); -    /// # Ok::<(), Error>(()) -    /// ``` -    fn drop_contents(this: Self) -> Box<MaybeUninit<T>>; -} - -impl<T> BoxExt<T> for Box<T> { -    fn new(x: T, flags: Flags) -> Result<Self, AllocError> { -        let mut b = <Self as BoxExt<_>>::new_uninit(flags)?; -        b.write(x); -        // SAFETY: We just wrote to it. -        Ok(unsafe { b.assume_init() }) -    } - -    #[cfg(any(test, testlib))] -    fn new_uninit(_flags: Flags) -> Result<Box<MaybeUninit<T>>, AllocError> { -        Ok(Box::new_uninit()) -    } - -    #[cfg(not(any(test, testlib)))] -    fn new_uninit(flags: Flags) -> Result<Box<MaybeUninit<T>>, AllocError> { -        let ptr = if core::mem::size_of::<MaybeUninit<T>>() == 0 { -            core::ptr::NonNull::<_>::dangling().as_ptr() -        } else { -            let layout = core::alloc::Layout::new::<MaybeUninit<T>>(); - -            // SAFETY: Memory is being allocated (first arg is null). The only other source of -            // safety issues is sleeping on atomic context, which is addressed by klint. Lastly, -            // the type is not a SZT (checked above). -            let ptr = -                unsafe { super::allocator::krealloc_aligned(core::ptr::null_mut(), layout, flags) }; -            if ptr.is_null() { -                return Err(AllocError); -            } - -            ptr.cast::<MaybeUninit<T>>() -        }; - -        // SAFETY: For non-zero-sized types, we allocate above using the global allocator. For -        // zero-sized types, we use `NonNull::dangling`. 
-    Ok(unsafe { Box::from_raw(ptr) }) -    } - -    fn drop_contents(this: Self) -> Box<MaybeUninit<T>> { -        let ptr = Box::into_raw(this); -        // SAFETY: `ptr` is valid, because it came from `Box::into_raw`. -        unsafe { ptr::drop_in_place(ptr) }; - -        // CAST: `MaybeUninit<T>` is a transparent wrapper of `T`. -        let ptr = ptr.cast::<MaybeUninit<T>>(); - -        // SAFETY: `ptr` is valid for writes, because it came from `Box::into_raw` and it is valid for -        // reads, since the pointer came from `Box::into_raw` and the type is `MaybeUninit<T>`. -        unsafe { Box::from_raw(ptr) } -    } -} diff --git a/rust/kernel/alloc/kbox.rs b/rust/kernel/alloc/kbox.rs new file mode 100644 index 000000000000..9ce414361c2c --- /dev/null +++ b/rust/kernel/alloc/kbox.rs @@ -0,0 +1,456 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Implementation of [`Box`]. + +#[allow(unused_imports)] // Used in doc comments. +use super::allocator::{KVmalloc, Kmalloc, Vmalloc}; +use super::{AllocError, Allocator, Flags}; +use core::alloc::Layout; +use core::fmt; +use core::marker::PhantomData; +use core::mem::ManuallyDrop; +use core::mem::MaybeUninit; +use core::ops::{Deref, DerefMut}; +use core::pin::Pin; +use core::ptr::NonNull; +use core::result::Result; + +use crate::init::{InPlaceInit, InPlaceWrite, Init, PinInit}; +use crate::types::ForeignOwnable; + +/// The kernel's [`Box`] type -- a heap allocation for a single value of type `T`. +/// +/// This is the kernel's version of the Rust stdlib's `Box`. There are several differences, +/// for example no `noalias` attribute is emitted and partially moving out of a `Box` is not +/// supported. There are also several API differences, e.g. `Box` always requires an [`Allocator`] +/// implementation to be passed as a generic argument, page [`Flags`] must be passed when +/// allocating memory, and all functions that may allocate memory are fallible. +/// +/// `Box` works with any of the kernel's allocators, e.g. [`Kmalloc`], [`Vmalloc`] or [`KVmalloc`]. +/// There are aliases for `Box` with these allocators ([`KBox`], [`VBox`], [`KVBox`]). +/// +/// When dropping a [`Box`], the value is also dropped and the heap memory is automatically freed. +/// +/// # Examples +/// +/// ``` +/// let b = KBox::<u64>::new(24_u64, GFP_KERNEL)?; +/// +/// assert_eq!(*b, 24_u64); +/// # Ok::<(), Error>(()) +/// ``` +/// +/// ``` +/// # use kernel::bindings; +/// const SIZE: usize = bindings::KMALLOC_MAX_SIZE as usize + 1; +/// struct Huge([u8; SIZE]); +/// +/// assert!(KBox::<Huge>::new_uninit(GFP_KERNEL | __GFP_NOWARN).is_err()); +/// ``` +/// +/// ``` +/// # use kernel::bindings; +/// const SIZE: usize = bindings::KMALLOC_MAX_SIZE as usize + 1; +/// struct Huge([u8; SIZE]); +/// +/// assert!(KVBox::<Huge>::new_uninit(GFP_KERNEL).is_ok()); +/// ``` +/// +/// # Invariants +/// +/// `self.0` is always properly aligned and either points to memory allocated with `A` or, for +/// zero-sized types, is a dangling, well aligned pointer. +#[repr(transparent)] +pub struct Box<T: ?Sized, A: Allocator>(NonNull<T>, PhantomData<A>); + +/// Type alias for [`Box`] with a [`Kmalloc`] allocator. +/// +/// # Examples +/// +/// ``` +/// let b = KBox::new(24_u64, GFP_KERNEL)?; +/// +/// assert_eq!(*b, 24_u64); +/// # Ok::<(), Error>(()) +/// ``` +pub type KBox<T> = Box<T, super::allocator::Kmalloc>; + +/// Type alias for [`Box`] with a [`Vmalloc`] allocator. 
+/// +/// # Examples +/// +/// ``` +/// let b = VBox::new(24_u64, GFP_KERNEL)?; +/// +/// assert_eq!(*b, 24_u64); +/// # Ok::<(), Error>(()) +/// ``` +pub type VBox<T> = Box<T, super::allocator::Vmalloc>; + +/// Type alias for [`Box`] with a [`KVmalloc`] allocator. +/// +/// # Examples +/// +/// ``` +/// let b = KVBox::new(24_u64, GFP_KERNEL)?; +/// +/// assert_eq!(*b, 24_u64); +/// # Ok::<(), Error>(()) +/// ``` +pub type KVBox<T> = Box<T, super::allocator::KVmalloc>; + +// SAFETY: `Box` is `Send` if `T` is `Send` because the `Box` owns a `T`. +unsafe impl<T, A> Send for Box<T, A> +where +    T: Send + ?Sized, +    A: Allocator, +{ +} + +// SAFETY: `Box` is `Sync` if `T` is `Sync` because the `Box` owns a `T`. +unsafe impl<T, A> Sync for Box<T, A> +where +    T: Sync + ?Sized, +    A: Allocator, +{ +} + +impl<T, A> Box<T, A> +where +    T: ?Sized, +    A: Allocator, +{ +    /// Creates a new `Box<T, A>` from a raw pointer. +    /// +    /// # Safety +    /// +    /// For non-ZSTs, `raw` must point at an allocation allocated with `A` that is sufficiently +    /// aligned for and holds a valid `T`. The caller passes ownership of the allocation to the +    /// `Box`. +    /// +    /// For ZSTs, `raw` must be a dangling, well aligned pointer. +    #[inline] +    pub const unsafe fn from_raw(raw: *mut T) -> Self { +        // INVARIANT: Validity of `raw` is guaranteed by the safety preconditions of this function. +        // SAFETY: By the safety preconditions of this function, `raw` is not a NULL pointer. +        Self(unsafe { NonNull::new_unchecked(raw) }, PhantomData) +    } + +    /// Consumes the `Box<T, A>` and returns a raw pointer. +    /// +    /// This will not run the destructor of `T` and for non-ZSTs the allocation will stay alive +    /// indefinitely. Use [`Box::from_raw`] to recover the [`Box`], drop the value and free the +    /// allocation, if any. +    /// +    /// # Examples +    /// +    /// ``` +    /// let x = KBox::new(24, GFP_KERNEL)?; +    /// let ptr = KBox::into_raw(x); +    /// // SAFETY: `ptr` comes from a previous call to `KBox::into_raw`. +    /// let x = unsafe { KBox::from_raw(ptr) }; +    /// +    /// assert_eq!(*x, 24); +    /// # Ok::<(), Error>(()) +    /// ``` +    #[inline] +    pub fn into_raw(b: Self) -> *mut T { +        ManuallyDrop::new(b).0.as_ptr() +    } + +    /// Consumes and leaks the `Box<T, A>` and returns a mutable reference. +    /// +    /// See [`Box::into_raw`] for more details. +    #[inline] +    pub fn leak<'a>(b: Self) -> &'a mut T { +        // SAFETY: `Box::into_raw` always returns a properly aligned and dereferenceable pointer +        // which points to an initialized instance of `T`. +        unsafe { &mut *Box::into_raw(b) } +    } +} + +impl<T, A> Box<MaybeUninit<T>, A> +where +    A: Allocator, +{ +    /// Converts a `Box<MaybeUninit<T>, A>` to a `Box<T, A>`. +    /// +    /// It is undefined behavior to call this function while the value inside of `b` is not yet +    /// fully initialized. +    /// +    /// # Safety +    /// +    /// Callers must ensure that the value inside of `b` is in an initialized state. +    pub unsafe fn assume_init(self) -> Box<T, A> { +        let raw = Self::into_raw(self); + +        // SAFETY: `raw` comes from a previous call to `Box::into_raw`. By the safety requirements +        // of this function, the value inside the `Box` is in an initialized state. Hence, it is +        // safe to reconstruct the `Box` as `Box<T, A>`. 
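+        // Note: `MaybeUninit<T>` is `#[repr(transparent)]` over `T`, so the `cast()` below +        // preserves both the address and the allocation's layout.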
+        unsafe { Box::from_raw(raw.cast()) } +    } + +    /// Writes the value and converts to `Box<T, A>`. +    pub fn write(mut self, value: T) -> Box<T, A> { +        (*self).write(value); + +        // SAFETY: We've just initialized `b`'s value. +        unsafe { self.assume_init() } +    } +} + +impl<T, A> Box<T, A> +where +    A: Allocator, +{ +    /// Creates a new `Box<T, A>` and initializes its contents with `x`. +    /// +    /// New memory is allocated with `A`. The allocation may fail, in which case an error is +    /// returned. For ZSTs no memory is allocated. +    pub fn new(x: T, flags: Flags) -> Result<Self, AllocError> { +        let b = Self::new_uninit(flags)?; +        Ok(Box::write(b, x)) +    } + +    /// Creates a new `Box<T, A>` with uninitialized contents. +    /// +    /// New memory is allocated with `A`. The allocation may fail, in which case an error is +    /// returned. For ZSTs no memory is allocated. +    /// +    /// # Examples +    /// +    /// ``` +    /// let b = KBox::<u64>::new_uninit(GFP_KERNEL)?; +    /// let b = KBox::write(b, 24); +    /// +    /// assert_eq!(*b, 24_u64); +    /// # Ok::<(), Error>(()) +    /// ``` +    pub fn new_uninit(flags: Flags) -> Result<Box<MaybeUninit<T>, A>, AllocError> { +        let layout = Layout::new::<MaybeUninit<T>>(); +        let ptr = A::alloc(layout, flags)?; + +        // INVARIANT: `ptr` is either a dangling pointer or points to memory allocated with `A`, +        // which is sufficient in size and alignment for storing a `T`. +        Ok(Box(ptr.cast(), PhantomData)) +    } + +    /// Constructs a new `Pin<Box<T, A>>`. If `T` does not implement [`Unpin`], then `x` will be +    /// pinned in memory and can't be moved. +    #[inline] +    pub fn pin(x: T, flags: Flags) -> Result<Pin<Box<T, A>>, AllocError> +    where +        A: 'static, +    { +        Ok(Self::new(x, flags)?.into()) +    } + +    /// Forgets the contents (does not run the destructor), but keeps the allocation. +    fn forget_contents(this: Self) -> Box<MaybeUninit<T>, A> { +        let ptr = Self::into_raw(this); + +        // SAFETY: `ptr` is valid, because it came from `Box::into_raw`. +        unsafe { Box::from_raw(ptr.cast()) } +    } + +    /// Drops the contents, but keeps the allocation. +    /// +    /// # Examples +    /// +    /// ``` +    /// let value = KBox::new([0; 32], GFP_KERNEL)?; +    /// assert_eq!(*value, [0; 32]); +    /// let value = KBox::drop_contents(value); +    /// // Now we can re-use `value`: +    /// let value = KBox::write(value, [1; 32]); +    /// assert_eq!(*value, [1; 32]); +    /// # Ok::<(), Error>(()) +    /// ``` +    pub fn drop_contents(this: Self) -> Box<MaybeUninit<T>, A> { +        let ptr = this.0.as_ptr(); + +        // SAFETY: `ptr` is valid, because it came from `this`. After this call we never access the +        // value stored in `this` again. +        unsafe { core::ptr::drop_in_place(ptr) }; + +        Self::forget_contents(this) +    } + +    /// Moves the `Box`'s value out of the `Box` and consumes the `Box`. +    pub fn into_inner(b: Self) -> T { +        // SAFETY: By the type invariant `&*b` is valid for `read`. +        let value = unsafe { core::ptr::read(&*b) }; +        let _ = Self::forget_contents(b); +        value +    } +} + +impl<T, A> From<Box<T, A>> for Pin<Box<T, A>> +where +    T: ?Sized, +    A: Allocator, +{ +    /// Converts a `Box<T, A>` into a `Pin<Box<T, A>>`. If `T` does not implement [`Unpin`], then +    /// `*b` will be pinned in memory and can't be moved. 
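+    /// +    /// A short sketch of the conversion: +    /// +    /// ``` +    /// let b = KBox::new(42_u32, GFP_KERNEL)?; +    /// // Nothing is copied; only the owning handle is wrapped. +    /// let pinned: core::pin::Pin<KBox<u32>> = b.into(); +    /// # Ok::<(), Error>(()) +    /// ```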
+    /// +    /// This moves `b` into `Pin` without moving `*b` or allocating and copying any memory. +    fn from(b: Box<T, A>) -> Self { +        // SAFETY: The value wrapped inside a `Pin<Box<T, A>>` cannot be moved or replaced as long +        // as `T` does not implement `Unpin`. +        unsafe { Pin::new_unchecked(b) } +    } +} + +impl<T, A> InPlaceWrite<T> for Box<MaybeUninit<T>, A> +where +    A: Allocator + 'static, +{ +    type Initialized = Box<T, A>; + +    fn write_init<E>(mut self, init: impl Init<T, E>) -> Result<Self::Initialized, E> { +        let slot = self.as_mut_ptr(); +        // SAFETY: When init errors/panics, slot will get deallocated but not dropped, +        // slot is valid. +        unsafe { init.__init(slot)? }; +        // SAFETY: All fields have been initialized. +        Ok(unsafe { Box::assume_init(self) }) +    } + +    fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E> { +        let slot = self.as_mut_ptr(); +        // SAFETY: When init errors/panics, slot will get deallocated but not dropped, +        // slot is valid and will not be moved, because we pin it later. +        unsafe { init.__pinned_init(slot)? }; +        // SAFETY: All fields have been initialized. +        Ok(unsafe { Box::assume_init(self) }.into()) +    } +} + +impl<T, A> InPlaceInit<T> for Box<T, A> +where +    A: Allocator + 'static, +{ +    type PinnedSelf = Pin<Self>; + +    #[inline] +    fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E> +    where +        E: From<AllocError>, +    { +        Box::<_, A>::new_uninit(flags)?.write_pin_init(init) +    } + +    #[inline] +    fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E> +    where +        E: From<AllocError>, +    { +        Box::<_, A>::new_uninit(flags)?.write_init(init) +    } +} + +impl<T: 'static, A> ForeignOwnable for Box<T, A> +where +    A: Allocator, +{ +    type Borrowed<'a> = &'a T; + +    fn into_foreign(self) -> *const crate::ffi::c_void { +        Box::into_raw(self) as _ +    } + +    unsafe fn from_foreign(ptr: *const crate::ffi::c_void) -> Self { +        // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous +        // call to `Self::into_foreign`. +        unsafe { Box::from_raw(ptr as _) } +    } + +    unsafe fn borrow<'a>(ptr: *const crate::ffi::c_void) -> &'a T { +        // SAFETY: The safety requirements of this method ensure that the object remains alive and +        // immutable for the duration of 'a. +        unsafe { &*ptr.cast() } +    } +} + +impl<T: 'static, A> ForeignOwnable for Pin<Box<T, A>> +where +    A: Allocator, +{ +    type Borrowed<'a> = Pin<&'a T>; + +    fn into_foreign(self) -> *const crate::ffi::c_void { +        // SAFETY: We are still treating the box as pinned. +        Box::into_raw(unsafe { Pin::into_inner_unchecked(self) }) as _ +    } + +    unsafe fn from_foreign(ptr: *const crate::ffi::c_void) -> Self { +        // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous +        // call to `Self::into_foreign`. +        unsafe { Pin::new_unchecked(Box::from_raw(ptr as _)) } +    } + +    unsafe fn borrow<'a>(ptr: *const crate::ffi::c_void) -> Pin<&'a T> { +        // SAFETY: The safety requirements for this function ensure that the object is still alive, +        // so it is safe to dereference the raw pointer. 
+        // The safety requirements of `from_foreign` also ensure that the object remains alive for +        // the lifetime of the returned value. +        let r = unsafe { &*ptr.cast() }; + +        // SAFETY: This pointer originates from a `Pin<Box<T>>`. +        unsafe { Pin::new_unchecked(r) } +    } +} + +impl<T, A> Deref for Box<T, A> +where +    T: ?Sized, +    A: Allocator, +{ +    type Target = T; + +    fn deref(&self) -> &T { +        // SAFETY: `self.0` is always properly aligned, dereferenceable and points to an initialized +        // instance of `T`. +        unsafe { self.0.as_ref() } +    } +} + +impl<T, A> DerefMut for Box<T, A> +where +    T: ?Sized, +    A: Allocator, +{ +    fn deref_mut(&mut self) -> &mut T { +        // SAFETY: `self.0` is always properly aligned, dereferenceable and points to an initialized +        // instance of `T`. +        unsafe { self.0.as_mut() } +    } +} + +impl<T, A> fmt::Debug for Box<T, A> +where +    T: ?Sized + fmt::Debug, +    A: Allocator, +{ +    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { +        fmt::Debug::fmt(&**self, f) +    } +} + +impl<T, A> Drop for Box<T, A> +where +    T: ?Sized, +    A: Allocator, +{ +    fn drop(&mut self) { +        let layout = Layout::for_value::<T>(self); + +        // SAFETY: The pointer in `self.0` is guaranteed to be valid by the type invariant. +        unsafe { core::ptr::drop_in_place::<T>(self.deref_mut()) }; + +        // SAFETY: +        // - `self.0` was previously allocated with `A`. +        // - `layout` is equal to the `Layout´ `self.0` was allocated with. +        unsafe { A::free(self.0.cast(), layout) }; +    } +} diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs new file mode 100644 index 000000000000..ae9d072741ce --- /dev/null +++ b/rust/kernel/alloc/kvec.rs @@ -0,0 +1,913 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Implementation of [`Vec`]. + +use super::{ +    allocator::{KVmalloc, Kmalloc, Vmalloc}, +    layout::ArrayLayout, +    AllocError, Allocator, Box, Flags, +}; +use core::{ +    fmt, +    marker::PhantomData, +    mem::{ManuallyDrop, MaybeUninit}, +    ops::Deref, +    ops::DerefMut, +    ops::Index, +    ops::IndexMut, +    ptr, +    ptr::NonNull, +    slice, +    slice::SliceIndex, +}; + +/// Create a [`KVec`] containing the arguments. +/// +/// New memory is allocated with `GFP_KERNEL`. +/// +/// # Examples +/// +/// ``` +/// let mut v = kernel::kvec![]; +/// v.push(1, GFP_KERNEL)?; +/// assert_eq!(v, [1]); +/// +/// let mut v = kernel::kvec![1; 3]?; +/// v.push(4, GFP_KERNEL)?; +/// assert_eq!(v, [1, 1, 1, 4]); +/// +/// let mut v = kernel::kvec![1, 2, 3]?; +/// v.push(4, GFP_KERNEL)?; +/// assert_eq!(v, [1, 2, 3, 4]); +/// +/// # Ok::<(), Error>(()) +/// ``` +#[macro_export] +macro_rules! kvec { +    () => ( +        $crate::alloc::KVec::new() +    ); +    ($elem:expr; $n:expr) => ( +        $crate::alloc::KVec::from_elem($elem, $n, GFP_KERNEL) +    ); +    ($($x:expr),+ $(,)?) => ( +        match $crate::alloc::KBox::new_uninit(GFP_KERNEL) { +            Ok(b) => Ok($crate::alloc::KVec::from($crate::alloc::KBox::write(b, [$($x),+]))), +            Err(e) => Err(e), +        } +    ); +} + +/// The kernel's [`Vec`] type. +/// +/// A contiguous growable array type with contents allocated with the kernel's allocators (e.g. +/// [`Kmalloc`], [`Vmalloc`] or [`KVmalloc`]), written `Vec<T, A>`. +/// +/// For non-zero-sized values, a [`Vec`] will use the given allocator `A` for its allocation. 
For +/// the most common allocators the type aliases [`KVec`], [`VVec`] and [`KVVec`] exist. +/// +/// For zero-sized types the [`Vec`]'s pointer must be `dangling_mut::<T>`; no memory is allocated. +/// +/// Generally, [`Vec`] consists of a pointer that represents the vector's backing buffer, the +/// capacity of the vector (the number of elements that currently fit into the vector), its length +/// (the number of elements that are currently stored in the vector) and the `Allocator` type used +/// to allocate (and free) the backing buffer. +/// +/// A [`Vec`] can be deconstructed into and (re-)constructed from its previously named raw parts +/// and manually modified. +/// +/// [`Vec`]'s backing buffer gets, if required, automatically increased (re-allocated) when elements +/// are added to the vector. +/// +/// # Invariants +/// +/// - `self.ptr` is always properly aligned and either points to memory allocated with `A` or, for +///   zero-sized types, is a dangling, well aligned pointer. +/// +/// - `self.len` always represents the exact number of elements stored in the vector. +/// +/// - `self.layout` represents the absolute number of elements that can be stored within the vector +///   without re-allocation. For ZSTs `self.layout`'s capacity is zero. However, it is legal for the +///   backing buffer to be larger than `layout`. +/// +/// - The `Allocator` type `A` of the vector is the exact same `Allocator` type the backing buffer +///   was allocated with (and must be freed with). +pub struct Vec<T, A: Allocator> { +    ptr: NonNull<T>, +    /// Represents the actual buffer size as `cap` times `size_of::<T>` bytes. +    /// +    /// Note: This isn't quite the same as `Self::capacity`, which in contrast returns the number of +    /// elements we can still store without reallocating. +    layout: ArrayLayout<T>, +    len: usize, +    _p: PhantomData<A>, +} + +/// Type alias for [`Vec`] with a [`Kmalloc`] allocator. +/// +/// # Examples +/// +/// ``` +/// let mut v = KVec::new(); +/// v.push(1, GFP_KERNEL)?; +/// assert_eq!(&v, &[1]); +/// +/// # Ok::<(), Error>(()) +/// ``` +pub type KVec<T> = Vec<T, Kmalloc>; + +/// Type alias for [`Vec`] with a [`Vmalloc`] allocator. +/// +/// # Examples +/// +/// ``` +/// let mut v = VVec::new(); +/// v.push(1, GFP_KERNEL)?; +/// assert_eq!(&v, &[1]); +/// +/// # Ok::<(), Error>(()) +/// ``` +pub type VVec<T> = Vec<T, Vmalloc>; + +/// Type alias for [`Vec`] with a [`KVmalloc`] allocator. +/// +/// # Examples +/// +/// ``` +/// let mut v = KVVec::new(); +/// v.push(1, GFP_KERNEL)?; +/// assert_eq!(&v, &[1]); +/// +/// # Ok::<(), Error>(()) +/// ``` +pub type KVVec<T> = Vec<T, KVmalloc>; + +// SAFETY: `Vec` is `Send` if `T` is `Send` because `Vec` owns its elements. +unsafe impl<T, A> Send for Vec<T, A> +where +    T: Send, +    A: Allocator, +{ +} + +// SAFETY: `Vec` is `Sync` if `T` is `Sync` because `Vec` owns its elements. +unsafe impl<T, A> Sync for Vec<T, A> +where +    T: Sync, +    A: Allocator, +{ +} + +impl<T, A> Vec<T, A> +where +    A: Allocator, +{ +    #[inline] +    const fn is_zst() -> bool { +        core::mem::size_of::<T>() == 0 +    } + +    /// Returns the number of elements that can be stored within the vector without allocating +    /// additional memory. +    pub fn capacity(&self) -> usize { +        if const { Self::is_zst() } { +            usize::MAX +        } else { +            self.layout.len() +        } +    } + +    /// Returns the number of elements stored within the vector. 
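+    /// +    /// # Examples +    /// +    /// ``` +    /// let mut v = kernel::kvec![1, 2, 3]?; +    /// assert_eq!(v.len(), 3); +    /// +    /// v.push(4, GFP_KERNEL)?; +    /// assert_eq!(v.len(), 4); +    /// # Ok::<(), Error>(()) +    /// ```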
+    #[inline] +    pub fn len(&self) -> usize { +        self.len +    } + +    /// Forcefully sets `self.len` to `new_len`. +    /// +    /// # Safety +    /// +    /// - `new_len` must be less than or equal to [`Self::capacity`]. +    /// - If `new_len` is greater than `self.len`, all elements within the interval +    ///   [`self.len`,`new_len`) must be initialized. +    #[inline] +    pub unsafe fn set_len(&mut self, new_len: usize) { +        debug_assert!(new_len <= self.capacity()); +        self.len = new_len; +    } + +    /// Returns a slice of the entire vector. +    #[inline] +    pub fn as_slice(&self) -> &[T] { +        self +    } + +    /// Returns a mutable slice of the entire vector. +    #[inline] +    pub fn as_mut_slice(&mut self) -> &mut [T] { +        self +    } + +    /// Returns a mutable raw pointer to the vector's backing buffer, or, if `T` is a ZST, a +    /// dangling raw pointer. +    #[inline] +    pub fn as_mut_ptr(&mut self) -> *mut T { +        self.ptr.as_ptr() +    } + +    /// Returns a raw pointer to the vector's backing buffer, or, if `T` is a ZST, a dangling raw +    /// pointer. +    #[inline] +    pub fn as_ptr(&self) -> *const T { +        self.ptr.as_ptr() +    } + +    /// Returns `true` if the vector contains no elements, `false` otherwise. +    /// +    /// # Examples +    /// +    /// ``` +    /// let mut v = KVec::new(); +    /// assert!(v.is_empty()); +    /// +    /// v.push(1, GFP_KERNEL); +    /// assert!(!v.is_empty()); +    /// ``` +    #[inline] +    pub fn is_empty(&self) -> bool { +        self.len() == 0 +    } + +    /// Creates a new, empty `Vec<T, A>`. +    /// +    /// This method does not allocate by itself. +    #[inline] +    pub const fn new() -> Self { +        // INVARIANT: Since this is a new, empty `Vec` with no backing memory yet, +        // - `ptr` is a properly aligned dangling pointer for type `T`, +        // - `layout` is an empty `ArrayLayout` (zero capacity) +        // - `len` is zero, since no elements can be or have been stored, +        // - `A` is always valid. +        Self { +            ptr: NonNull::dangling(), +            layout: ArrayLayout::empty(), +            len: 0, +            _p: PhantomData::<A>, +        } +    } + +    /// Returns a slice of `MaybeUninit<T>` for the remaining spare capacity of the vector. +    pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<T>] { +        // SAFETY: +        // - `self.len` is smaller than `self.capacity` and hence, the resulting pointer is +        //   guaranteed to be part of the same allocated object. +        // - `self.len` can not overflow `isize`. +        let ptr = unsafe { self.as_mut_ptr().add(self.len) } as *mut MaybeUninit<T>; + +        // SAFETY: The memory between `self.len` and `self.capacity` is guaranteed to be allocated +        // and valid, but uninitialized. +        unsafe { slice::from_raw_parts_mut(ptr, self.capacity() - self.len) } +    } + +    /// Appends an element to the back of the [`Vec`] instance. 
+    /// +    /// # Examples +    /// +    /// ``` +    /// let mut v = KVec::new(); +    /// v.push(1, GFP_KERNEL)?; +    /// assert_eq!(&v, &[1]); +    /// +    /// v.push(2, GFP_KERNEL)?; +    /// assert_eq!(&v, &[1, 2]); +    /// # Ok::<(), Error>(()) +    /// ``` +    pub fn push(&mut self, v: T, flags: Flags) -> Result<(), AllocError> { +        self.reserve(1, flags)?; + +        // SAFETY: +        // - `self.len` is smaller than `self.capacity` and hence, the resulting pointer is +        //   guaranteed to be part of the same allocated object. +        // - `self.len` can not overflow `isize`. +        let ptr = unsafe { self.as_mut_ptr().add(self.len) }; + +        // SAFETY: +        // - `ptr` is properly aligned and valid for writes. +        unsafe { core::ptr::write(ptr, v) }; + +        // SAFETY: We just initialised the first spare entry, so it is safe to increase the length +        // by 1. We also know that the new length is <= capacity because of the previous call to +        // `reserve` above. +        unsafe { self.set_len(self.len() + 1) }; +        Ok(()) +    } + +    /// Creates a new [`Vec`] instance with at least the given capacity. +    /// +    /// # Examples +    /// +    /// ``` +    /// let v = KVec::<u32>::with_capacity(20, GFP_KERNEL)?; +    /// +    /// assert!(v.capacity() >= 20); +    /// # Ok::<(), Error>(()) +    /// ``` +    pub fn with_capacity(capacity: usize, flags: Flags) -> Result<Self, AllocError> { +        let mut v = Vec::new(); + +        v.reserve(capacity, flags)?; + +        Ok(v) +    } + +    /// Creates a `Vec<T, A>` from a pointer, a length and a capacity using the allocator `A`. +    /// +    /// # Examples +    /// +    /// ``` +    /// let mut v = kernel::kvec![1, 2, 3]?; +    /// v.reserve(1, GFP_KERNEL)?; +    /// +    /// let (mut ptr, mut len, cap) = v.into_raw_parts(); +    /// +    /// // SAFETY: We've just reserved memory for another element. +    /// unsafe { ptr.add(len).write(4) }; +    /// len += 1; +    /// +    /// // SAFETY: We only wrote an additional element at the end of the `KVec`'s buffer and +    /// // correspondingly increased the length of the `KVec` by one. Otherwise, we construct it +    /// // from the exact same raw parts. +    /// let v = unsafe { KVec::from_raw_parts(ptr, len, cap) }; +    /// +    /// assert_eq!(v, [1, 2, 3, 4]); +    /// +    /// # Ok::<(), Error>(()) +    /// ``` +    /// +    /// # Safety +    /// +    /// If `T` is a ZST: +    /// +    /// - `ptr` must be a dangling, well aligned pointer. +    /// +    /// Otherwise: +    /// +    /// - `ptr` must have been allocated with the allocator `A`. +    /// - `ptr` must satisfy or exceed the alignment requirements of `T`. +    /// - `ptr` must point to memory with a size of at least `size_of::<T>() * capacity` bytes. +    /// - The allocated size in bytes must not be larger than `isize::MAX`. +    /// - `length` must be less than or equal to `capacity`. +    /// - The first `length` elements must be initialized values of type `T`. +    /// +    /// It is also valid to create an empty `Vec` passing a dangling pointer for `ptr` and zero for +    /// `cap` and `len`. +    pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self { +        let layout = if Self::is_zst() { +            ArrayLayout::empty() +        } else { +            // SAFETY: By the safety requirements of this function, `capacity * size_of::<T>()` is +            // smaller than `isize::MAX`. 
+            unsafe { ArrayLayout::new_unchecked(capacity) } +        }; + +        // INVARIANT: For ZSTs, we store an empty `ArrayLayout`, all other type invariants are +        // covered by the safety requirements of this function. +        Self { +            // SAFETY: By the safety requirements, `ptr` is either dangling or pointing to a valid +            // memory allocation, allocated with `A`. +            ptr: unsafe { NonNull::new_unchecked(ptr) }, +            layout, +            len: length, +            _p: PhantomData::<A>, +        } +    } + +    /// Consumes the `Vec<T, A>` and returns its raw components `pointer`, `length` and `capacity`. +    /// +    /// This will not run the destructor of the contained elements and for non-ZSTs the allocation +    /// will stay alive indefinitely. Use [`Vec::from_raw_parts`] to recover the [`Vec`], drop the +    /// elements and free the allocation, if any. +    pub fn into_raw_parts(self) -> (*mut T, usize, usize) { +        let mut me = ManuallyDrop::new(self); +        let len = me.len(); +        let capacity = me.capacity(); +        let ptr = me.as_mut_ptr(); +        (ptr, len, capacity) +    } + +    /// Ensures that the capacity exceeds the length by at least `additional` elements. +    /// +    /// # Examples +    /// +    /// ``` +    /// let mut v = KVec::new(); +    /// v.push(1, GFP_KERNEL)?; +    /// +    /// v.reserve(10, GFP_KERNEL)?; +    /// let cap = v.capacity(); +    /// assert!(cap >= 10); +    /// +    /// v.reserve(10, GFP_KERNEL)?; +    /// let new_cap = v.capacity(); +    /// assert_eq!(new_cap, cap); +    /// +    /// # Ok::<(), Error>(()) +    /// ``` +    pub fn reserve(&mut self, additional: usize, flags: Flags) -> Result<(), AllocError> { +        let len = self.len(); +        let cap = self.capacity(); + +        if cap - len >= additional { +            return Ok(()); +        } + +        if Self::is_zst() { +            // The capacity is already `usize::MAX` for ZSTs, we can't go higher. +            return Err(AllocError); +        } + +        // We know that `cap <= isize::MAX` because of the type invariants of `Self`. So the +        // multiplication by two won't overflow. +        let new_cap = core::cmp::max(cap * 2, len.checked_add(additional).ok_or(AllocError)?); +        let layout = ArrayLayout::new(new_cap).map_err(|_| AllocError)?; + +        // SAFETY: +        // - `ptr` is valid because it's either `None` or comes from a previous call to +        //   `A::realloc`. +        // - `self.layout` matches the `ArrayLayout` of the preceding allocation. +        let ptr = unsafe { +            A::realloc( +                Some(self.ptr.cast()), +                layout.into(), +                self.layout.into(), +                flags, +            )? +        }; + +        // INVARIANT: +        // - `layout` is some `ArrayLayout::<T>`, +        // - `ptr` has been created by `A::realloc` from `layout`. +        self.ptr = ptr.cast(); +        self.layout = layout; + +        Ok(()) +    } +} + +impl<T: Clone, A: Allocator> Vec<T, A> { +    /// Extend the vector by `n` clones of `value`. 
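+    /// +    /// Only `n - 1` clones of `value` are made; the final slot receives `value` itself. +    /// +    /// # Examples +    /// +    /// ``` +    /// let mut v = kernel::kvec![1]?; +    /// v.extend_with(2, 10, GFP_KERNEL)?; +    /// assert_eq!(&v, &[1, 10, 10]); +    /// # Ok::<(), Error>(()) +    /// ```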
+    pub fn extend_with(&mut self, n: usize, value: T, flags: Flags) -> Result<(), AllocError> { +        if n == 0 { +            return Ok(()); +        } + +        self.reserve(n, flags)?; + +        let spare = self.spare_capacity_mut(); + +        for item in spare.iter_mut().take(n - 1) { +            item.write(value.clone()); +        } + +        // We can write the last element directly without cloning needlessly. +        spare[n - 1].write(value); + +        // SAFETY: +        // - `self.len() + n <= self.capacity()` due to the call to reserve above, +        // - the loop and the line above initialized the next `n` elements. +        unsafe { self.set_len(self.len() + n) }; + +        Ok(()) +    } + +    /// Pushes clones of the elements of slice into the [`Vec`] instance. +    /// +    /// # Examples +    /// +    /// ``` +    /// let mut v = KVec::new(); +    /// v.push(1, GFP_KERNEL)?; +    /// +    /// v.extend_from_slice(&[20, 30, 40], GFP_KERNEL)?; +    /// assert_eq!(&v, &[1, 20, 30, 40]); +    /// +    /// v.extend_from_slice(&[50, 60], GFP_KERNEL)?; +    /// assert_eq!(&v, &[1, 20, 30, 40, 50, 60]); +    /// # Ok::<(), Error>(()) +    /// ``` +    pub fn extend_from_slice(&mut self, other: &[T], flags: Flags) -> Result<(), AllocError> { +        self.reserve(other.len(), flags)?; +        for (slot, item) in core::iter::zip(self.spare_capacity_mut(), other) { +            slot.write(item.clone()); +        } + +        // SAFETY: +        // - `other.len()` spare entries have just been initialized, so it is safe to increase +        //   the length by the same number. +        // - `self.len() + other.len() <= self.capacity()` is guaranteed by the preceding `reserve` +        //   call. +        unsafe { self.set_len(self.len() + other.len()) }; +        Ok(()) +    } + +    /// Create a new `Vec<T, A>` and extend it by `n` clones of `value`. +    pub fn from_elem(value: T, n: usize, flags: Flags) -> Result<Self, AllocError> { +        let mut v = Self::with_capacity(n, flags)?; + +        v.extend_with(n, value, flags)?; + +        Ok(v) +    } +} + +impl<T, A> Drop for Vec<T, A> +where +    A: Allocator, +{ +    fn drop(&mut self) { +        // SAFETY: `self.as_mut_ptr` is guaranteed to be valid by the type invariant. +        unsafe { +            ptr::drop_in_place(core::ptr::slice_from_raw_parts_mut( +                self.as_mut_ptr(), +                self.len, +            )) +        }; + +        // SAFETY: +        // - `self.ptr` was previously allocated with `A`. +        // - `self.layout` matches the `ArrayLayout` of the preceding allocation. +        unsafe { A::free(self.ptr.cast(), self.layout.into()) }; +    } +} + +impl<T, A, const N: usize> From<Box<[T; N], A>> for Vec<T, A> +where +    A: Allocator, +{ +    fn from(b: Box<[T; N], A>) -> Vec<T, A> { +        let len = b.len(); +        let ptr = Box::into_raw(b); + +        // SAFETY: +        // - `b` has been allocated with `A`, +        // - `ptr` fulfills the alignment requirements for `T`, +        // - `ptr` points to memory with at least a size of `size_of::<T>() * len`, +        // - all elements within `b` are initialized values of `T`, +        // - `len` does not exceed `isize::MAX`. 
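+        // Note the `as _` cast below: `Box::into_raw` returns a `*mut [T; N]`, which is cast to +        // a `*mut T` pointing at the first element of the array.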
+        unsafe { Vec::from_raw_parts(ptr as _, len, len) }
+    }
+}
+
+impl<T> Default for KVec<T> {
+    #[inline]
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Vec<T, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<T, A> Deref for Vec<T, A>
+where
+    A: Allocator,
+{
+    type Target = [T];
+
+    #[inline]
+    fn deref(&self) -> &[T] {
+        // SAFETY: The memory behind `self.as_ptr()` is guaranteed to contain `self.len`
+        // initialized elements of type `T`.
+        unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
+    }
+}
+
+impl<T, A> DerefMut for Vec<T, A>
+where
+    A: Allocator,
+{
+    #[inline]
+    fn deref_mut(&mut self) -> &mut [T] {
+        // SAFETY: The memory behind `self.as_ptr()` is guaranteed to contain `self.len`
+        // initialized elements of type `T`.
+        unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
+    }
+}
+
+impl<T: Eq, A> Eq for Vec<T, A> where A: Allocator {}
+
+impl<T, I: SliceIndex<[T]>, A> Index<I> for Vec<T, A>
+where
+    A: Allocator,
+{
+    type Output = I::Output;
+
+    #[inline]
+    fn index(&self, index: I) -> &Self::Output {
+        Index::index(&**self, index)
+    }
+}
+
+impl<T, I: SliceIndex<[T]>, A> IndexMut<I> for Vec<T, A>
+where
+    A: Allocator,
+{
+    #[inline]
+    fn index_mut(&mut self, index: I) -> &mut Self::Output {
+        IndexMut::index_mut(&mut **self, index)
+    }
+}
+
+macro_rules! impl_slice_eq {
+    ($([$($vars:tt)*] $lhs:ty, $rhs:ty,)*) => {
+        $(
+            impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs
+            where
+                T: PartialEq<U>,
+            {
+                #[inline]
+                fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
+            }
+        )*
+    }
+}
+
+impl_slice_eq! {
+    [A1: Allocator, A2: Allocator] Vec<T, A1>, Vec<U, A2>,
+    [A: Allocator] Vec<T, A>, &[U],
+    [A: Allocator] Vec<T, A>, &mut [U],
+    [A: Allocator] &[T], Vec<U, A>,
+    [A: Allocator] &mut [T], Vec<U, A>,
+    [A: Allocator] Vec<T, A>, [U],
+    [A: Allocator] [T], Vec<U, A>,
+    [A: Allocator, const N: usize] Vec<T, A>, [U; N],
+    [A: Allocator, const N: usize] Vec<T, A>, &[U; N],
+}
+
+impl<'a, T, A> IntoIterator for &'a Vec<T, A>
+where
+    A: Allocator,
+{
+    type Item = &'a T;
+    type IntoIter = slice::Iter<'a, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+impl<'a, T, A> IntoIterator for &'a mut Vec<T, A>
+where
+    A: Allocator,
+{
+    type Item = &'a mut T;
+    type IntoIter = slice::IterMut<'a, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter_mut()
+    }
+}
+
+/// An [`Iterator`] implementation for [`Vec`] that moves elements out of a vector.
+///
+/// This structure is created by the [`Vec::into_iter`] method on [`Vec`] (provided by the
+/// [`IntoIterator`] trait).
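+///
+/// Dropping an [`IntoIter`] drops the elements that have not been yielded yet and then frees the
+/// backing buffer (see the [`Drop`] implementation below).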
+///
+/// # Examples
+///
+/// ```
+/// let v = kernel::kvec![0, 1, 2]?;
+/// let iter = v.into_iter();
+///
+/// # Ok::<(), Error>(())
+/// ```
+pub struct IntoIter<T, A: Allocator> {
+    ptr: *mut T,
+    buf: NonNull<T>,
+    len: usize,
+    layout: ArrayLayout<T>,
+    _p: PhantomData<A>,
+}
+
+impl<T, A> IntoIter<T, A>
+where
+    A: Allocator,
+{
+    fn into_raw_parts(self) -> (*mut T, NonNull<T>, usize, usize) {
+        let me = ManuallyDrop::new(self);
+        let ptr = me.ptr;
+        let buf = me.buf;
+        let len = me.len;
+        let cap = me.layout.len();
+        (ptr, buf, len, cap)
+    }
+
+    /// Same as `Iterator::collect` but specialized for `Vec`'s `IntoIter`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let v = kernel::kvec![1, 2, 3]?;
+    /// let mut it = v.into_iter();
+    ///
+    /// assert_eq!(it.next(), Some(1));
+    ///
+    /// let v = it.collect(GFP_KERNEL);
+    /// assert_eq!(v, [2, 3]);
+    ///
+    /// # Ok::<(), Error>(())
+    /// ```
+    ///
+    /// # Implementation details
+    ///
+    /// Currently, we can't implement `FromIterator`. There are a couple of issues with this trait
+    /// in the kernel, namely:
+    ///
+    /// - Rust's specialization feature is unstable. This prevents us from optimizing for the
+    ///   special case where `I::IntoIter` equals `Vec`'s `IntoIter` type.
+    /// - We can't use `I::IntoIter`'s type ID to work around this either, since `FromIterator`
+    ///   doesn't require this type to be `'static`.
+    /// - `FromIterator::from_iter` returns `Self` instead of `Result<Self, AllocError>`, hence
+    ///   we can't properly handle allocation failures.
+    /// - Neither `Iterator::collect` nor `FromIterator::from_iter` can handle additional allocation
+    ///   flags.
+    ///
+    /// Instead, provide `IntoIter::collect`, such that we can at least convert an `IntoIter` into
+    /// a `Vec` again.
+    ///
+    /// Note that `IntoIter::collect` re-uses the existing backing buffer rather than allocating a
+    /// new one; `flags` is only used for the attempt to shrink that buffer to the actual number of
+    /// elements.
+    pub fn collect(self, flags: Flags) -> Vec<T, A> {
+        let old_layout = self.layout;
+        let (mut ptr, buf, len, mut cap) = self.into_raw_parts();
+        let has_advanced = ptr != buf.as_ptr();
+
+        if has_advanced {
+            // Copy the remaining elements to the beginning of the buffer.
+            //
+            // SAFETY:
+            // - `ptr` is valid for reads of `len * size_of::<T>()` bytes,
+            // - `buf.as_ptr()` is valid for writes of `len * size_of::<T>()` bytes,
+            // - `ptr` and `buf.as_ptr()` are not subject to aliasing restrictions relative to
+            //   each other,
+            // - both `ptr` and `buf.as_ptr()` are properly aligned.
+            unsafe { ptr::copy(ptr, buf.as_ptr(), len) };
+            ptr = buf.as_ptr();
+
+            // SAFETY: `len` is guaranteed to be smaller than `self.layout.len()`.
+            let layout = unsafe { ArrayLayout::<T>::new_unchecked(len) };
+
+            // SAFETY: `buf` points to the start of the backing buffer and `len` is guaranteed to
+            // be smaller than `cap`. Depending on the allocator, this operation may shrink the
+            // buffer or leave it as it is.
+            ptr = match unsafe {
+                A::realloc(Some(buf.cast()), layout.into(), old_layout.into(), flags)
+            } {
+                // If we fail to shrink, which likely can't even happen, continue with the existing
+                // buffer.
+                Err(_) => ptr,
+                Ok(ptr) => {
+                    cap = len;
+                    ptr.as_ptr().cast()
+                }
+            };
+        }
+
+        // SAFETY: If the iterator has been advanced, the advanced elements have been copied to
+        // the beginning of the buffer and `len` has been adjusted accordingly.
+        //
+        // - `ptr` is guaranteed to point to the start of the backing buffer.
+        // - `cap` is either the original capacity or, after shrinking the buffer, equal to `len`.
+        // - the allocator `A` is guaranteed to be unchanged since `into_iter` has been called on
+        //   the original `Vec`.
+        unsafe { Vec::from_raw_parts(ptr, len, cap) }
+    }
+}
+
+impl<T, A> Iterator for IntoIter<T, A>
+where
+    A: Allocator,
+{
+    type Item = T;
+
+    /// # Examples
+    ///
+    /// ```
+    /// let v = kernel::kvec![1, 2, 3]?;
+    /// let mut it = v.into_iter();
+    ///
+    /// assert_eq!(it.next(), Some(1));
+    /// assert_eq!(it.next(), Some(2));
+    /// assert_eq!(it.next(), Some(3));
+    /// assert_eq!(it.next(), None);
+    ///
+    /// # Ok::<(), Error>(())
+    /// ```
+    fn next(&mut self) -> Option<T> {
+        if self.len == 0 {
+            return None;
+        }
+
+        let current = self.ptr;
+
+        // SAFETY: `self.ptr` can't leave the bounds of the allocation; decreasing `self.len` by
+        // one every time we advance `self.ptr` by one guarantees that.
+        unsafe { self.ptr = self.ptr.add(1) };
+
+        self.len -= 1;
+
+        // SAFETY: `current` is guaranteed to point at a valid element within the buffer.
+        Some(unsafe { current.read() })
+    }
+
+    /// # Examples
+    ///
+    /// ```
+    /// let v: KVec<u32> = kernel::kvec![1, 2, 3]?;
+    /// let mut iter = v.into_iter();
+    /// let size = iter.size_hint().0;
+    ///
+    /// iter.next();
+    /// assert_eq!(iter.size_hint().0, size - 1);
+    ///
+    /// iter.next();
+    /// assert_eq!(iter.size_hint().0, size - 2);
+    ///
+    /// iter.next();
+    /// assert_eq!(iter.size_hint().0, size - 3);
+    ///
+    /// # Ok::<(), Error>(())
+    /// ```
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (self.len, Some(self.len))
+    }
+}
+
+impl<T, A> Drop for IntoIter<T, A>
+where
+    A: Allocator,
+{
+    fn drop(&mut self) {
+        // SAFETY: `self.ptr` is guaranteed to be valid by the type invariant.
+        unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(self.ptr, self.len)) };
+
+        // SAFETY:
+        // - `self.buf` was previously allocated with `A`.
+        // - `self.layout` matches the `ArrayLayout` of the preceding allocation.
+        unsafe { A::free(self.buf.cast(), self.layout.into()) };
+    }
+}
+
+impl<T, A> IntoIterator for Vec<T, A>
+where
+    A: Allocator,
+{
+    type Item = T;
+    type IntoIter = IntoIter<T, A>;
+
+    /// Consumes the `Vec<T, A>` and creates an `Iterator`, which moves each value out of the
+    /// vector (from start to end).
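+    ///
+    /// The backing buffer is handed over to the returned [`IntoIter`] unchanged; no additional
+    /// allocation happens at this point.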
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let v = kernel::kvec![1, 2]?;
+    /// let mut v_iter = v.into_iter();
+    ///
+    /// let first_element: Option<u32> = v_iter.next();
+    ///
+    /// assert_eq!(first_element, Some(1));
+    /// assert_eq!(v_iter.next(), Some(2));
+    /// assert_eq!(v_iter.next(), None);
+    ///
+    /// # Ok::<(), Error>(())
+    /// ```
+    ///
+    /// ```
+    /// let v = kernel::kvec![];
+    /// let mut v_iter = v.into_iter();
+    ///
+    /// let first_element: Option<u32> = v_iter.next();
+    ///
+    /// assert_eq!(first_element, None);
+    ///
+    /// # Ok::<(), Error>(())
+    /// ```
+    #[inline]
+    fn into_iter(self) -> Self::IntoIter {
+        let buf = self.ptr;
+        let layout = self.layout;
+        let (ptr, len, _) = self.into_raw_parts();
+
+        IntoIter {
+            ptr,
+            buf,
+            len,
+            layout,
+            _p: PhantomData::<A>,
+        }
+    }
+}
diff --git a/rust/kernel/alloc/layout.rs b/rust/kernel/alloc/layout.rs
new file mode 100644
index 000000000000..4b3cd7fdc816
--- /dev/null
+++ b/rust/kernel/alloc/layout.rs
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Memory layout.
+//!
+//! Custom layout types extending or improving [`Layout`].
+
+use core::{alloc::Layout, marker::PhantomData};
+
+/// Error when constructing an [`ArrayLayout`].
+pub struct LayoutError;
+
+/// A layout for an array `[T; n]`.
+///
+/// # Invariants
+///
+/// - `len * size_of::<T>() <= isize::MAX`.
+pub struct ArrayLayout<T> {
+    len: usize,
+    _phantom: PhantomData<fn() -> T>,
+}
+
+impl<T> Clone for ArrayLayout<T> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+impl<T> Copy for ArrayLayout<T> {}
+
+const ISIZE_MAX: usize = isize::MAX as usize;
+
+impl<T> ArrayLayout<T> {
+    /// Creates a new layout for `[T; 0]`.
+    pub const fn empty() -> Self {
+        // INVARIANT: `0 * size_of::<T>() <= isize::MAX`.
+        Self {
+            len: 0,
+            _phantom: PhantomData,
+        }
+    }
+
+    /// Creates a new layout for `[T; len]`.
+    ///
+    /// # Errors
+    ///
+    /// When `len * size_of::<T>()` overflows or when `len * size_of::<T>() > isize::MAX`.
+    pub const fn new(len: usize) -> Result<Self, LayoutError> {
+        match len.checked_mul(core::mem::size_of::<T>()) {
+            Some(size) if size <= ISIZE_MAX => {
+                // INVARIANT: We checked above that `len * size_of::<T>() <= isize::MAX`.
+                Ok(Self {
+                    len,
+                    _phantom: PhantomData,
+                })
+            }
+            _ => Err(LayoutError),
+        }
+    }
+
+    /// Creates a new layout for `[T; len]`.
+    ///
+    /// # Safety
+    ///
+    /// `len` must be a value for which `len * size_of::<T>() <= isize::MAX` holds.
+    pub unsafe fn new_unchecked(len: usize) -> Self {
+        // INVARIANT: By the safety requirements of this function,
+        // `len * size_of::<T>() <= isize::MAX`.
+        Self {
+            len,
+            _phantom: PhantomData,
+        }
+    }
+
+    /// Returns the number of array elements represented by this layout.
+    pub const fn len(&self) -> usize {
+        self.len
+    }
+
+    /// Returns `true` when no array elements are represented by this layout.
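+    ///
+    /// # Examples
+    ///
+    /// A small sketch; mapping the crate's `LayoutError` to `AllocError` is only done here to
+    /// keep the doctest's error type simple:
+    ///
+    /// ```
+    /// use kernel::alloc::{layout::ArrayLayout, AllocError};
+    ///
+    /// let layout = ArrayLayout::<u64>::new(3).map_err(|_| AllocError)?;
+    /// assert_eq!(layout.len(), 3);
+    /// assert!(!layout.is_empty());
+    /// assert!(ArrayLayout::<u64>::empty().is_empty());
+    /// # Ok::<(), AllocError>(())
+    /// ```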
+    pub const fn is_empty(&self) -> bool { +        self.len == 0 +    } +} + +impl<T> From<ArrayLayout<T>> for Layout { +    fn from(value: ArrayLayout<T>) -> Self { +        let res = Layout::array::<T>(value.len); +        // SAFETY: By the type invariant of `ArrayLayout` we have +        // `len * size_of::<T>() <= isize::MAX` and thus the result must be `Ok`. +        unsafe { res.unwrap_unchecked() } +    } +} diff --git a/rust/kernel/alloc/vec_ext.rs b/rust/kernel/alloc/vec_ext.rs deleted file mode 100644 index 1297a4be32e8..000000000000 --- a/rust/kernel/alloc/vec_ext.rs +++ /dev/null @@ -1,185 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -//! Extensions to [`Vec`] for fallible allocations. - -use super::{AllocError, Flags}; -use alloc::vec::Vec; - -/// Extensions to [`Vec`]. -pub trait VecExt<T>: Sized { -    /// Creates a new [`Vec`] instance with at least the given capacity. -    /// -    /// # Examples -    /// -    /// ``` -    /// let v = Vec::<u32>::with_capacity(20, GFP_KERNEL)?; -    /// -    /// assert!(v.capacity() >= 20); -    /// # Ok::<(), Error>(()) -    /// ``` -    fn with_capacity(capacity: usize, flags: Flags) -> Result<Self, AllocError>; - -    /// Appends an element to the back of the [`Vec`] instance. -    /// -    /// # Examples -    /// -    /// ``` -    /// let mut v = Vec::new(); -    /// v.push(1, GFP_KERNEL)?; -    /// assert_eq!(&v, &[1]); -    /// -    /// v.push(2, GFP_KERNEL)?; -    /// assert_eq!(&v, &[1, 2]); -    /// # Ok::<(), Error>(()) -    /// ``` -    fn push(&mut self, v: T, flags: Flags) -> Result<(), AllocError>; - -    /// Pushes clones of the elements of slice into the [`Vec`] instance. -    /// -    /// # Examples -    /// -    /// ``` -    /// let mut v = Vec::new(); -    /// v.push(1, GFP_KERNEL)?; -    /// -    /// v.extend_from_slice(&[20, 30, 40], GFP_KERNEL)?; -    /// assert_eq!(&v, &[1, 20, 30, 40]); -    /// -    /// v.extend_from_slice(&[50, 60], GFP_KERNEL)?; -    /// assert_eq!(&v, &[1, 20, 30, 40, 50, 60]); -    /// # Ok::<(), Error>(()) -    /// ``` -    fn extend_from_slice(&mut self, other: &[T], flags: Flags) -> Result<(), AllocError> -    where -        T: Clone; - -    /// Ensures that the capacity exceeds the length by at least `additional` elements. -    /// -    /// # Examples -    /// -    /// ``` -    /// let mut v = Vec::new(); -    /// v.push(1, GFP_KERNEL)?; -    /// -    /// v.reserve(10, GFP_KERNEL)?; -    /// let cap = v.capacity(); -    /// assert!(cap >= 10); -    /// -    /// v.reserve(10, GFP_KERNEL)?; -    /// let new_cap = v.capacity(); -    /// assert_eq!(new_cap, cap); -    /// -    /// # Ok::<(), Error>(()) -    /// ``` -    fn reserve(&mut self, additional: usize, flags: Flags) -> Result<(), AllocError>; -} - -impl<T> VecExt<T> for Vec<T> { -    fn with_capacity(capacity: usize, flags: Flags) -> Result<Self, AllocError> { -        let mut v = Vec::new(); -        <Self as VecExt<_>>::reserve(&mut v, capacity, flags)?; -        Ok(v) -    } - -    fn push(&mut self, v: T, flags: Flags) -> Result<(), AllocError> { -        <Self as VecExt<_>>::reserve(self, 1, flags)?; -        let s = self.spare_capacity_mut(); -        s[0].write(v); - -        // SAFETY: We just initialised the first spare entry, so it is safe to increase the length -        // by 1. We also know that the new length is <= capacity because of the previous call to -        // `reserve` above. 
-        unsafe { self.set_len(self.len() + 1) }; -        Ok(()) -    } - -    fn extend_from_slice(&mut self, other: &[T], flags: Flags) -> Result<(), AllocError> -    where -        T: Clone, -    { -        <Self as VecExt<_>>::reserve(self, other.len(), flags)?; -        for (slot, item) in core::iter::zip(self.spare_capacity_mut(), other) { -            slot.write(item.clone()); -        } - -        // SAFETY: We just initialised the `other.len()` spare entries, so it is safe to increase -        // the length by the same amount. We also know that the new length is <= capacity because -        // of the previous call to `reserve` above. -        unsafe { self.set_len(self.len() + other.len()) }; -        Ok(()) -    } - -    #[cfg(any(test, testlib))] -    fn reserve(&mut self, additional: usize, _flags: Flags) -> Result<(), AllocError> { -        Vec::reserve(self, additional); -        Ok(()) -    } - -    #[cfg(not(any(test, testlib)))] -    fn reserve(&mut self, additional: usize, flags: Flags) -> Result<(), AllocError> { -        let len = self.len(); -        let cap = self.capacity(); - -        if cap - len >= additional { -            return Ok(()); -        } - -        if core::mem::size_of::<T>() == 0 { -            // The capacity is already `usize::MAX` for SZTs, we can't go higher. -            return Err(AllocError); -        } - -        // We know cap is <= `isize::MAX` because `Layout::array` fails if the resulting byte size -        // is greater than `isize::MAX`. So the multiplication by two won't overflow. -        let new_cap = core::cmp::max(cap * 2, len.checked_add(additional).ok_or(AllocError)?); -        let layout = core::alloc::Layout::array::<T>(new_cap).map_err(|_| AllocError)?; - -        let (old_ptr, len, cap) = destructure(self); - -        // We need to make sure that `ptr` is either NULL or comes from a previous call to -        // `krealloc_aligned`. A `Vec<T>`'s `ptr` value is not guaranteed to be NULL and might be -        // dangling after being created with `Vec::new`. Instead, we can rely on `Vec<T>`'s capacity -        // to be zero if no memory has been allocated yet. -        let ptr = if cap == 0 { -            core::ptr::null_mut() -        } else { -            old_ptr -        }; - -        // SAFETY: `ptr` is valid because it's either NULL or comes from a previous call to -        // `krealloc_aligned`. We also verified that the type is not a ZST. -        let new_ptr = unsafe { super::allocator::krealloc_aligned(ptr.cast(), layout, flags) }; -        if new_ptr.is_null() { -            // SAFETY: We are just rebuilding the existing `Vec` with no changes. -            unsafe { rebuild(self, old_ptr, len, cap) }; -            Err(AllocError) -        } else { -            // SAFETY: `ptr` has been reallocated with the layout for `new_cap` elements. New cap -            // is greater than `cap`, so it continues to be >= `len`. -            unsafe { rebuild(self, new_ptr.cast::<T>(), len, new_cap) }; -            Ok(()) -        } -    } -} - -#[cfg(not(any(test, testlib)))] -fn destructure<T>(v: &mut Vec<T>) -> (*mut T, usize, usize) { -    let mut tmp = Vec::new(); -    core::mem::swap(&mut tmp, v); -    let mut tmp = core::mem::ManuallyDrop::new(tmp); -    let len = tmp.len(); -    let cap = tmp.capacity(); -    (tmp.as_mut_ptr(), len, cap) -} - -/// Rebuilds a `Vec` from a pointer, length, and capacity. -/// -/// # Safety -/// -/// The same as [`Vec::from_raw_parts`]. 
-#[cfg(not(any(test, testlib)))] -unsafe fn rebuild<T>(v: &mut Vec<T>, ptr: *mut T, len: usize, cap: usize) { -    // SAFETY: The safety requirements from this function satisfy those of `from_raw_parts`. -    let mut tmp = unsafe { Vec::from_raw_parts(ptr, len, cap) }; -    core::mem::swap(&mut tmp, v); -} diff --git a/rust/kernel/block/mq/operations.rs b/rust/kernel/block/mq/operations.rs index 9ba7fdfeb4b2..c8646d0d9866 100644 --- a/rust/kernel/block/mq/operations.rs +++ b/rust/kernel/block/mq/operations.rs @@ -131,7 +131,7 @@ impl<T: Operations> OperationsVTable<T> {      unsafe extern "C" fn poll_callback(          _hctx: *mut bindings::blk_mq_hw_ctx,          _iob: *mut bindings::io_comp_batch, -    ) -> core::ffi::c_int { +    ) -> crate::ffi::c_int {          T::poll().into()      } @@ -145,9 +145,9 @@ impl<T: Operations> OperationsVTable<T> {      /// for the same context.      unsafe extern "C" fn init_hctx_callback(          _hctx: *mut bindings::blk_mq_hw_ctx, -        _tagset_data: *mut core::ffi::c_void, -        _hctx_idx: core::ffi::c_uint, -    ) -> core::ffi::c_int { +        _tagset_data: *mut crate::ffi::c_void, +        _hctx_idx: crate::ffi::c_uint, +    ) -> crate::ffi::c_int {          from_result(|| Ok(0))      } @@ -159,7 +159,7 @@ impl<T: Operations> OperationsVTable<T> {      /// This function may only be called by blk-mq C infrastructure.      unsafe extern "C" fn exit_hctx_callback(          _hctx: *mut bindings::blk_mq_hw_ctx, -        _hctx_idx: core::ffi::c_uint, +        _hctx_idx: crate::ffi::c_uint,      ) {      } @@ -176,9 +176,9 @@ impl<T: Operations> OperationsVTable<T> {      unsafe extern "C" fn init_request_callback(          _set: *mut bindings::blk_mq_tag_set,          rq: *mut bindings::request, -        _hctx_idx: core::ffi::c_uint, -        _numa_node: core::ffi::c_uint, -    ) -> core::ffi::c_int { +        _hctx_idx: crate::ffi::c_uint, +        _numa_node: crate::ffi::c_uint, +    ) -> crate::ffi::c_int {          from_result(|| {              // SAFETY: By the safety requirements of this function, `rq` points              // to a valid allocation. @@ -203,7 +203,7 @@ impl<T: Operations> OperationsVTable<T> {      unsafe extern "C" fn exit_request_callback(          _set: *mut bindings::blk_mq_tag_set,          rq: *mut bindings::request, -        _hctx_idx: core::ffi::c_uint, +        _hctx_idx: crate::ffi::c_uint,      ) {          // SAFETY: The tagset invariants guarantee that all requests are allocated with extra memory          // for the request data. diff --git a/rust/kernel/block/mq/raw_writer.rs b/rust/kernel/block/mq/raw_writer.rs index 9222465d670b..7e2159e4f6a6 100644 --- a/rust/kernel/block/mq/raw_writer.rs +++ b/rust/kernel/block/mq/raw_writer.rs @@ -25,7 +25,7 @@ impl<'a> RawWriter<'a> {      }      pub(crate) fn from_array<const N: usize>( -        a: &'a mut [core::ffi::c_char; N], +        a: &'a mut [crate::ffi::c_char; N],      ) -> Result<RawWriter<'a>> {          Self::new(              // SAFETY: the buffer of `a` is valid for read and write as `u8` for diff --git a/rust/kernel/block/mq/request.rs b/rust/kernel/block/mq/request.rs index a0e22827f3f4..7943f43b9575 100644 --- a/rust/kernel/block/mq/request.rs +++ b/rust/kernel/block/mq/request.rs @@ -16,50 +16,55 @@ use core::{      sync::atomic::{AtomicU64, Ordering},  }; -/// A wrapper around a blk-mq `struct request`. This represents an IO request. +/// A wrapper around a blk-mq [`struct request`]. This represents an IO request.  
///  /// # Implementation details  ///  /// There are four states for a request that the Rust bindings care about:  /// -/// A) Request is owned by block layer (refcount 0) -/// B) Request is owned by driver but with zero `ARef`s in existence -///    (refcount 1) -/// C) Request is owned by driver with exactly one `ARef` in existence -///    (refcount 2) -/// D) Request is owned by driver with more than one `ARef` in existence -///    (refcount > 2) +/// 1. Request is owned by block layer (refcount 0). +/// 2. Request is owned by driver but with zero [`ARef`]s in existence +///    (refcount 1). +/// 3. Request is owned by driver with exactly one [`ARef`] in existence +///    (refcount 2). +/// 4. Request is owned by driver with more than one [`ARef`] in existence +///    (refcount > 2).  ///  /// -/// We need to track A and B to ensure we fail tag to request conversions for +/// We need to track 1 and 2 to ensure we fail tag to request conversions for  /// requests that are not owned by the driver.  /// -/// We need to track C and D to ensure that it is safe to end the request and hand +/// We need to track 3 and 4 to ensure that it is safe to end the request and hand  /// back ownership to the block layer.  ///  /// The states are tracked through the private `refcount` field of  /// `RequestDataWrapper`. This structure lives in the private data area of the C -/// `struct request`. +/// [`struct request`].  ///  /// # Invariants  /// -/// * `self.0` is a valid `struct request` created by the C portion of the kernel. +/// * `self.0` is a valid [`struct request`] created by the C portion of the +///   kernel.  /// * The private data area associated with this request must be an initialized  ///   and valid `RequestDataWrapper<T>`.  /// * `self` is reference counted by atomic modification of -///   self.wrapper_ref().refcount(). +///   `self.wrapper_ref().refcount()`. +/// +/// [`struct request`]: srctree/include/linux/blk-mq.h  ///  #[repr(transparent)]  pub struct Request<T: Operations>(Opaque<bindings::request>, PhantomData<T>);  impl<T: Operations> Request<T> { -    /// Create an `ARef<Request>` from a `struct request` pointer. +    /// Create an [`ARef<Request>`] from a [`struct request`] pointer.      ///      /// # Safety      ///      /// * The caller must own a refcount on `ptr` that is transferred to the -    ///   returned `ARef`. -    /// * The type invariants for `Request` must hold for the pointee of `ptr`. +    ///   returned [`ARef`]. +    /// * The type invariants for [`Request`] must hold for the pointee of `ptr`. +    /// +    /// [`struct request`]: srctree/include/linux/blk-mq.h      pub(crate) unsafe fn aref_from_raw(ptr: *mut bindings::request) -> ARef<Self> {          // INVARIANT: By the safety requirements of this function, invariants are upheld.          // SAFETY: By the safety requirement of this function, we own a @@ -84,12 +89,14 @@ impl<T: Operations> Request<T> {      }      /// Try to take exclusive ownership of `this` by dropping the refcount to 0. -    /// This fails if `this` is not the only `ARef` pointing to the underlying -    /// `Request`. +    /// This fails if `this` is not the only [`ARef`] pointing to the underlying +    /// [`Request`].      /// -    /// If the operation is successful, `Ok` is returned with a pointer to the -    /// C `struct request`. If the operation fails, `this` is returned in the -    /// `Err` variant. +    /// If the operation is successful, [`Ok`] is returned with a pointer to the +    /// C [`struct request`]. 
If the operation fails, `this` is returned in the +    /// [`Err`] variant. +    /// +    /// [`struct request`]: srctree/include/linux/blk-mq.h      fn try_set_end(this: ARef<Self>) -> Result<*mut bindings::request, ARef<Self>> {          // We can race with `TagSet::tag_to_rq`          if let Err(_old) = this.wrapper_ref().refcount().compare_exchange( @@ -109,7 +116,7 @@ impl<T: Operations> Request<T> {      /// Notify the block layer that the request has been completed without errors.      /// -    /// This function will return `Err` if `this` is not the only `ARef` +    /// This function will return [`Err`] if `this` is not the only [`ARef`]      /// referencing the request.      pub fn end_ok(this: ARef<Self>) -> Result<(), ARef<Self>> {          let request_ptr = Self::try_set_end(this)?; @@ -123,13 +130,13 @@ impl<T: Operations> Request<T> {          Ok(())      } -    /// Return a pointer to the `RequestDataWrapper` stored in the private area +    /// Return a pointer to the [`RequestDataWrapper`] stored in the private area      /// of the request structure.      ///      /// # Safety      ///      /// - `this` must point to a valid allocation of size at least size of -    ///   `Self` plus size of `RequestDataWrapper`. +    ///   [`Self`] plus size of [`RequestDataWrapper`].      pub(crate) unsafe fn wrapper_ptr(this: *mut Self) -> NonNull<RequestDataWrapper> {          let request_ptr = this.cast::<bindings::request>();          // SAFETY: By safety requirements for this function, `this` is a @@ -141,7 +148,7 @@ impl<T: Operations> Request<T> {          unsafe { NonNull::new_unchecked(wrapper_ptr) }      } -    /// Return a reference to the `RequestDataWrapper` stored in the private +    /// Return a reference to the [`RequestDataWrapper`] stored in the private      /// area of the request structure.      pub(crate) fn wrapper_ref(&self) -> &RequestDataWrapper {          // SAFETY: By type invariant, `self.0` is a valid allocation. Further, @@ -152,13 +159,15 @@ impl<T: Operations> Request<T> {      }  } -/// A wrapper around data stored in the private area of the C `struct request`. +/// A wrapper around data stored in the private area of the C [`struct request`]. +/// +/// [`struct request`]: srctree/include/linux/blk-mq.h  pub(crate) struct RequestDataWrapper {      /// The Rust request refcount has the following states:      ///      /// - 0: The request is owned by C block layer. -    /// - 1: The request is owned by Rust abstractions but there are no ARef references to it. -    /// - 2+: There are `ARef` references to the request. +    /// - 1: The request is owned by Rust abstractions but there are no [`ARef`] references to it. +    /// - 2+: There are [`ARef`] references to the request.      refcount: AtomicU64,  } @@ -204,7 +213,7 @@ fn atomic_relaxed_op_return(target: &AtomicU64, op: impl Fn(u64) -> u64) -> u64  }  /// Store the result of `op(target.load)` in `target` if `target.load() != -/// pred`, returning true if the target was updated. +/// pred`, returning [`true`] if the target was updated.  
fn atomic_relaxed_op_unless(target: &AtomicU64, op: impl Fn(u64) -> u64, pred: u64) -> bool {      target          .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| { diff --git a/rust/kernel/block/mq/tag_set.rs b/rust/kernel/block/mq/tag_set.rs index f9a1ca655a35..d7f175a05d99 100644 --- a/rust/kernel/block/mq/tag_set.rs +++ b/rust/kernel/block/mq/tag_set.rs @@ -53,7 +53,7 @@ impl<T: Operations> TagSet<T> {                      queue_depth: num_tags,                      cmd_size,                      flags: bindings::BLK_MQ_F_SHOULD_MERGE, -                    driver_data: core::ptr::null_mut::<core::ffi::c_void>(), +                    driver_data: core::ptr::null_mut::<crate::ffi::c_void>(),                      nr_maps: num_maps,                      ..tag_set                  } diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs index 6f1587a2524e..52c502432447 100644 --- a/rust/kernel/error.rs +++ b/rust/kernel/error.rs @@ -6,9 +6,10 @@  use crate::{alloc::AllocError, str::CStr}; -use alloc::alloc::LayoutError; +use core::alloc::LayoutError;  use core::fmt; +use core::num::NonZeroI32;  use core::num::TryFromIntError;  use core::str::Utf8Error; @@ -20,7 +21,11 @@ pub mod code {              $(              #[doc = $doc]              )* -            pub const $err: super::Error = super::Error(-(crate::bindings::$err as i32)); +            pub const $err: super::Error = +                match super::Error::try_from_errno(-(crate::bindings::$err as i32)) { +                    Some(err) => err, +                    None => panic!("Invalid errno in `declare_err!`"), +                };          };      } @@ -88,14 +93,14 @@ pub mod code {  ///  /// The value is a valid `errno` (i.e. `>= -MAX_ERRNO && < 0`).  #[derive(Clone, Copy, PartialEq, Eq)] -pub struct Error(core::ffi::c_int); +pub struct Error(NonZeroI32);  impl Error {      /// Creates an [`Error`] from a kernel error code.      ///      /// It is a bug to pass an out-of-range `errno`. `EINVAL` would      /// be returned in such a case. -    pub(crate) fn from_errno(errno: core::ffi::c_int) -> Error { +    pub fn from_errno(errno: crate::ffi::c_int) -> Error {          if errno < -(bindings::MAX_ERRNO as i32) || errno >= 0 {              // TODO: Make it a `WARN_ONCE` once available.              crate::pr_warn!( @@ -107,7 +112,20 @@ impl Error {          // INVARIANT: The check above ensures the type invariant          // will hold. -        Error(errno) +        // SAFETY: `errno` is checked above to be in a valid range. +        unsafe { Error::from_errno_unchecked(errno) } +    } + +    /// Creates an [`Error`] from a kernel error code. +    /// +    /// Returns [`None`] if `errno` is out-of-range. +    const fn try_from_errno(errno: crate::ffi::c_int) -> Option<Error> { +        if errno < -(bindings::MAX_ERRNO as i32) || errno >= 0 { +            return None; +        } + +        // SAFETY: `errno` is checked above to be in a valid range. +        Some(unsafe { Error::from_errno_unchecked(errno) })      }      /// Creates an [`Error`] from a kernel error code. @@ -115,38 +133,38 @@ impl Error {      /// # Safety      ///      /// `errno` must be within error code range (i.e. `>= -MAX_ERRNO && < 0`). -    unsafe fn from_errno_unchecked(errno: core::ffi::c_int) -> Error { +    const unsafe fn from_errno_unchecked(errno: crate::ffi::c_int) -> Error {          // INVARIANT: The contract ensures the type invariant          // will hold. 
-        Error(errno) +        // SAFETY: The caller guarantees `errno` is non-zero. +        Error(unsafe { NonZeroI32::new_unchecked(errno) })      }      /// Returns the kernel error code. -    pub fn to_errno(self) -> core::ffi::c_int { -        self.0 +    pub fn to_errno(self) -> crate::ffi::c_int { +        self.0.get()      }      #[cfg(CONFIG_BLOCK)]      pub(crate) fn to_blk_status(self) -> bindings::blk_status_t {          // SAFETY: `self.0` is a valid error due to its invariant. -        unsafe { bindings::errno_to_blk_status(self.0) } +        unsafe { bindings::errno_to_blk_status(self.0.get()) }      }      /// Returns the error encoded as a pointer. -    #[allow(dead_code)] -    pub(crate) fn to_ptr<T>(self) -> *mut T { +    pub fn to_ptr<T>(self) -> *mut T {          #[cfg_attr(target_pointer_width = "32", allow(clippy::useless_conversion))]          // SAFETY: `self.0` is a valid error due to its invariant.          unsafe { -            bindings::ERR_PTR(self.0.into()) as *mut _ +            bindings::ERR_PTR(self.0.get().into()) as *mut _          }      }      /// Returns a string representing the error, if one exists. -    #[cfg(not(testlib))] +    #[cfg(not(any(test, testlib)))]      pub fn name(&self) -> Option<&'static CStr> {          // SAFETY: Just an FFI call, there are no extra safety requirements. -        let ptr = unsafe { bindings::errname(-self.0) }; +        let ptr = unsafe { bindings::errname(-self.0.get()) };          if ptr.is_null() {              None          } else { @@ -160,7 +178,7 @@ impl Error {      /// When `testlib` is configured, this always returns `None` to avoid the dependency on a      /// kernel function so that tests that use this (e.g., by calling [`Result::unwrap`]) can still      /// run in userspace. -    #[cfg(testlib)] +    #[cfg(any(test, testlib))]      pub fn name(&self) -> Option<&'static CStr> {          None      } @@ -171,9 +189,11 @@ impl fmt::Debug for Error {          match self.name() {              // Print out number if no name can be found.              None => f.debug_tuple("Error").field(&-self.0).finish(), -            // SAFETY: These strings are ASCII-only.              Some(name) => f -                .debug_tuple(unsafe { core::str::from_utf8_unchecked(name) }) +                .debug_tuple( +                    // SAFETY: These strings are ASCII-only. +                    unsafe { core::str::from_utf8_unchecked(name) }, +                )                  .finish(),          }      } @@ -239,7 +259,7 @@ pub type Result<T = (), E = Error> = core::result::Result<T, E>;  /// Converts an integer as returned by a C kernel function to an error if it's negative, and  /// `Ok(())` otherwise. -pub fn to_result(err: core::ffi::c_int) -> Result { +pub fn to_result(err: crate::ffi::c_int) -> Result {      if err < 0 {          Err(Error::from_errno(err))      } else { @@ -262,21 +282,21 @@ pub fn to_result(err: core::ffi::c_int) -> Result {  /// fn devm_platform_ioremap_resource(  ///     pdev: &mut PlatformDevice,  ///     index: u32, -/// ) -> Result<*mut core::ffi::c_void> { +/// ) -> Result<*mut kernel::ffi::c_void> {  ///     // SAFETY: `pdev` points to a valid platform device. There are no safety requirements  ///     // on `index`.  ///     from_err_ptr(unsafe { bindings::devm_platform_ioremap_resource(pdev.to_ptr(), index) })  /// }  /// ``` -// TODO: Remove `dead_code` marker once an in-kernel client is available. 
-#[allow(dead_code)] -pub(crate) fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> { -    // CAST: Casting a pointer to `*const core::ffi::c_void` is always valid. -    let const_ptr: *const core::ffi::c_void = ptr.cast(); +pub fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> { +    // CAST: Casting a pointer to `*const crate::ffi::c_void` is always valid. +    let const_ptr: *const crate::ffi::c_void = ptr.cast();      // SAFETY: The FFI function does not deref the pointer.      if unsafe { bindings::IS_ERR(const_ptr) } {          // SAFETY: The FFI function does not deref the pointer.          let err = unsafe { bindings::PTR_ERR(const_ptr) }; + +        #[allow(clippy::unnecessary_cast)]          // CAST: If `IS_ERR()` returns `true`,          // then `PTR_ERR()` is guaranteed to return a          // negative value greater-or-equal to `-bindings::MAX_ERRNO`, @@ -286,8 +306,7 @@ pub(crate) fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {          //          // SAFETY: `IS_ERR()` ensures `err` is a          // negative value greater-or-equal to `-bindings::MAX_ERRNO`. -        #[allow(clippy::unnecessary_cast)] -        return Err(unsafe { Error::from_errno_unchecked(err as core::ffi::c_int) }); +        return Err(unsafe { Error::from_errno_unchecked(err as crate::ffi::c_int) });      }      Ok(ptr)  } @@ -307,7 +326,7 @@ pub(crate) fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {  /// # use kernel::bindings;  /// unsafe extern "C" fn probe_callback(  ///     pdev: *mut bindings::platform_device, -/// ) -> core::ffi::c_int { +/// ) -> kernel::ffi::c_int {  ///     from_result(|| {  ///         let ptr = devm_alloc(pdev)?;  ///         bindings::platform_set_drvdata(pdev, ptr); @@ -315,9 +334,7 @@ pub(crate) fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {  ///     })  /// }  /// ``` -// TODO: Remove `dead_code` marker once an in-kernel client is available. -#[allow(dead_code)] -pub(crate) fn from_result<T, F>(f: F) -> T +pub fn from_result<T, F>(f: F) -> T  where      T: From<i16>,      F: FnOnce() -> Result<T>, diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs index a17ac8762d8f..347049df556b 100644 --- a/rust/kernel/init.rs +++ b/rust/kernel/init.rs @@ -13,7 +13,7 @@  //! To initialize a `struct` with an in-place constructor you will need two things:  //! - an in-place constructor,  //! - a memory location that can hold your `struct` (this can be the [stack], an [`Arc<T>`], -//!   [`UniqueArc<T>`], [`Box<T>`] or any other smart pointer that implements [`InPlaceInit`]). +//!   [`UniqueArc<T>`], [`KBox<T>`] or any other smart pointer that implements [`InPlaceInit`]).  //!  //! To get an in-place constructor there are generally three options:  //! - directly creating an in-place constructor using the [`pin_init!`] macro, @@ -35,7 +35,7 @@  //! that you need to write `<-` instead of `:` for fields that you want to initialize in-place.  //!  //! ```rust -//! # #![allow(clippy::disallowed_names)] +//! # #![expect(clippy::disallowed_names)]  //! use kernel::sync::{new_mutex, Mutex};  //! # use core::pin::Pin;  //! #[pin_data] @@ -55,7 +55,7 @@  //! (or just the stack) to actually initialize a `Foo`:  //!  //! ```rust -//! # #![allow(clippy::disallowed_names)] +//! # #![expect(clippy::disallowed_names)]  //! # use kernel::sync::{new_mutex, Mutex};  //! # use core::pin::Pin;  //! # #[pin_data] @@ -68,7 +68,7 @@  //! #     a <- new_mutex!(42, "Foo::a"),  //! #     b: 24,  //! # }); -//! let foo: Result<Pin<Box<Foo>>> = Box::pin_init(foo, GFP_KERNEL); +//! 
let foo: Result<Pin<KBox<Foo>>> = KBox::pin_init(foo, GFP_KERNEL);  //! ```  //!  //! For more information see the [`pin_init!`] macro. @@ -87,20 +87,19 @@  //! To declare an init macro/function you just return an [`impl PinInit<T, E>`]:  //!  //! ```rust -//! # #![allow(clippy::disallowed_names)]  //! # use kernel::{sync::Mutex, new_mutex, init::PinInit, try_pin_init};  //! #[pin_data]  //! struct DriverData {  //!     #[pin]  //!     status: Mutex<i32>, -//!     buffer: Box<[u8; 1_000_000]>, +//!     buffer: KBox<[u8; 1_000_000]>,  //! }  //!  //! impl DriverData {  //!     fn new() -> impl PinInit<Self, Error> {  //!         try_pin_init!(Self {  //!             status <- new_mutex!(0, "DriverData::status"), -//!             buffer: Box::init(kernel::init::zeroed(), GFP_KERNEL)?, +//!             buffer: KBox::init(kernel::init::zeroed(), GFP_KERNEL)?,  //!         })  //!     }  //! } @@ -121,11 +120,12 @@  //!   `slot` gets called.  //!  //! ```rust -//! # #![allow(unreachable_pub, clippy::disallowed_names)] +//! # #![expect(unreachable_pub, clippy::disallowed_names)]  //! use kernel::{init, types::Opaque};  //! use core::{ptr::addr_of_mut, marker::PhantomPinned, pin::Pin};  //! # mod bindings { -//! #     #![allow(non_camel_case_types)] +//! #     #![expect(non_camel_case_types)] +//! #     #![expect(clippy::missing_safety_doc)]  //! #     pub struct foo;  //! #     pub unsafe fn init_foo(_ptr: *mut foo) {}  //! #     pub unsafe fn destroy_foo(_ptr: *mut foo) {} @@ -133,7 +133,7 @@  //! # }  //! # // `Error::from_errno` is `pub(crate)` in the `kernel` crate, thus provide a workaround.  //! # trait FromErrno { -//! #     fn from_errno(errno: core::ffi::c_int) -> Error { +//! #     fn from_errno(errno: kernel::ffi::c_int) -> Error {  //! #         // Dummy error that can be constructed outside the `kernel` crate.  //! #         Error::from(core::fmt::Error)  //! #     } @@ -211,13 +211,12 @@  //! [`pin_init!`]: crate::pin_init!  use crate::{ -    alloc::{box_ext::BoxExt, AllocError, Flags}, +    alloc::{AllocError, Flags, KBox},      error::{self, Error},      sync::Arc,      sync::UniqueArc,      types::{Opaque, ScopeGuard},  }; -use alloc::boxed::Box;  use core::{      cell::UnsafeCell,      convert::Infallible, @@ -238,7 +237,7 @@ pub mod macros;  /// # Examples  ///  /// ```rust -/// # #![allow(clippy::disallowed_names)] +/// # #![expect(clippy::disallowed_names)]  /// # use kernel::{init, macros::pin_data, pin_init, stack_pin_init, init::*, sync::Mutex, new_mutex};  /// # use core::pin::Pin;  /// #[pin_data] @@ -290,7 +289,7 @@ macro_rules! stack_pin_init {  /// # Examples  ///  /// ```rust,ignore -/// # #![allow(clippy::disallowed_names)] +/// # #![expect(clippy::disallowed_names)]  /// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex};  /// # use macros::pin_data;  /// # use core::{alloc::AllocError, pin::Pin}; @@ -298,7 +297,7 @@ macro_rules! stack_pin_init {  /// struct Foo {  ///     #[pin]  ///     a: Mutex<usize>, -///     b: Box<Bar>, +///     b: KBox<Bar>,  /// }  ///  /// struct Bar { @@ -307,7 +306,7 @@ macro_rules! stack_pin_init {  ///  /// stack_try_pin_init!(let foo: Result<Pin<&mut Foo>, AllocError> = pin_init!(Foo {  ///     a <- new_mutex!(42), -///     b: Box::new(Bar { +///     b: KBox::new(Bar {  ///         x: 64,  ///     }, GFP_KERNEL)?,  /// })); @@ -316,7 +315,7 @@ macro_rules! 
stack_pin_init {  /// ```  ///  /// ```rust,ignore -/// # #![allow(clippy::disallowed_names)] +/// # #![expect(clippy::disallowed_names)]  /// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex};  /// # use macros::pin_data;  /// # use core::{alloc::AllocError, pin::Pin}; @@ -324,7 +323,7 @@ macro_rules! stack_pin_init {  /// struct Foo {  ///     #[pin]  ///     a: Mutex<usize>, -///     b: Box<Bar>, +///     b: KBox<Bar>,  /// }  ///  /// struct Bar { @@ -333,7 +332,7 @@ macro_rules! stack_pin_init {  ///  /// stack_try_pin_init!(let foo: Pin<&mut Foo> =? pin_init!(Foo {  ///     a <- new_mutex!(42), -///     b: Box::new(Bar { +///     b: KBox::new(Bar {  ///         x: 64,  ///     }, GFP_KERNEL)?,  /// })); @@ -368,7 +367,6 @@ macro_rules! stack_try_pin_init {  /// The syntax is almost identical to that of a normal `struct` initializer:  ///  /// ```rust -/// # #![allow(clippy::disallowed_names)]  /// # use kernel::{init, pin_init, macros::pin_data, init::*};  /// # use core::pin::Pin;  /// #[pin_data] @@ -392,7 +390,7 @@ macro_rules! stack_try_pin_init {  ///     },  /// });  /// # initializer } -/// # Box::pin_init(demo(), GFP_KERNEL).unwrap(); +/// # KBox::pin_init(demo(), GFP_KERNEL).unwrap();  /// ```  ///  /// Arbitrary Rust expressions can be used to set the value of a variable. @@ -413,7 +411,6 @@ macro_rules! stack_try_pin_init {  /// To create an initializer function, simply declare it like this:  ///  /// ```rust -/// # #![allow(clippy::disallowed_names)]  /// # use kernel::{init, pin_init, init::*};  /// # use core::pin::Pin;  /// # #[pin_data] @@ -440,7 +437,7 @@ macro_rules! stack_try_pin_init {  /// Users of `Foo` can now create it like this:  ///  /// ```rust -/// # #![allow(clippy::disallowed_names)] +/// # #![expect(clippy::disallowed_names)]  /// # use kernel::{init, pin_init, macros::pin_data, init::*};  /// # use core::pin::Pin;  /// # #[pin_data] @@ -462,13 +459,12 @@ macro_rules! stack_try_pin_init {  /// #         })  /// #     }  /// # } -/// let foo = Box::pin_init(Foo::new(), GFP_KERNEL); +/// let foo = KBox::pin_init(Foo::new(), GFP_KERNEL);  /// ```  ///  /// They can also easily embed it into their own `struct`s:  ///  /// ```rust -/// # #![allow(clippy::disallowed_names)]  /// # use kernel::{init, pin_init, macros::pin_data, init::*};  /// # use core::pin::Pin;  /// # #[pin_data] @@ -541,6 +537,7 @@ macro_rules! stack_try_pin_init {  /// }  /// pin_init!(&this in Buf {  ///     buf: [0; 64], +///     // SAFETY: TODO.  ///     ptr: unsafe { addr_of_mut!((*this.as_ptr()).buf).cast() },  ///     pin: PhantomPinned,  /// }); @@ -590,11 +587,10 @@ macro_rules! pin_init {  /// # Examples  ///  /// ```rust -/// # #![feature(new_uninit)]  /// use kernel::{init::{self, PinInit}, error::Error};  /// #[pin_data]  /// struct BigBuf { -///     big: Box<[u8; 1024 * 1024 * 1024]>, +///     big: KBox<[u8; 1024 * 1024 * 1024]>,  ///     small: [u8; 1024 * 1024],  ///     ptr: *mut u8,  /// } @@ -602,7 +598,7 @@ macro_rules! pin_init {  /// impl BigBuf {  ///     fn new() -> impl PinInit<Self, Error> {  ///         try_pin_init!(Self { -///             big: Box::init(init::zeroed(), GFP_KERNEL)?, +///             big: KBox::init(init::zeroed(), GFP_KERNEL)?,  ///             small: [0; 1024 * 1024],  ///             ptr: core::ptr::null_mut(),  ///         }? Error) @@ -694,16 +690,16 @@ macro_rules! 
init {  /// # Examples  ///  /// ```rust -/// use kernel::{init::{PinInit, zeroed}, error::Error}; +/// use kernel::{alloc::KBox, init::{PinInit, zeroed}, error::Error};  /// struct BigBuf { -///     big: Box<[u8; 1024 * 1024 * 1024]>, +///     big: KBox<[u8; 1024 * 1024 * 1024]>,  ///     small: [u8; 1024 * 1024],  /// }  ///  /// impl BigBuf {  ///     fn new() -> impl Init<Self, Error> {  ///         try_init!(Self { -///             big: Box::init(zeroed(), GFP_KERNEL)?, +///             big: KBox::init(zeroed(), GFP_KERNEL)?,  ///             small: [0; 1024 * 1024],  ///         }? Error)  ///     } @@ -814,8 +810,8 @@ macro_rules! assert_pinned {  /// A pin-initializer for the type `T`.  ///  /// To use this initializer, you will need a suitable memory location that can hold a `T`. This can -/// be [`Box<T>`], [`Arc<T>`], [`UniqueArc<T>`] or even the stack (see [`stack_pin_init!`]). Use the -/// [`InPlaceInit::pin_init`] function of a smart pointer like [`Arc<T>`] on this. +/// be [`KBox<T>`], [`Arc<T>`], [`UniqueArc<T>`] or even the stack (see [`stack_pin_init!`]). Use +/// the [`InPlaceInit::pin_init`] function of a smart pointer like [`Arc<T>`] on this.  ///  /// Also see the [module description](self).  /// @@ -854,7 +850,7 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {      /// # Examples      ///      /// ```rust -    /// # #![allow(clippy::disallowed_names)] +    /// # #![expect(clippy::disallowed_names)]      /// use kernel::{types::Opaque, init::pin_init_from_closure};      /// #[repr(C)]      /// struct RawFoo([u8; 16]); @@ -875,6 +871,7 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {      /// }      ///      /// let foo = pin_init!(Foo { +    ///     // SAFETY: TODO.      ///     raw <- unsafe {      ///         Opaque::ffi_init(|s| {      ///             init_foo(s); @@ -894,7 +891,7 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {  }  /// An initializer returned by [`PinInit::pin_chain`]. -pub struct ChainPinInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, Box<T>)>); +pub struct ChainPinInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, KBox<T>)>);  // SAFETY: The `__pinned_init` function is implemented such that it  // - returns `Ok(())` on successful initialization, @@ -920,8 +917,8 @@ where  /// An initializer for `T`.  ///  /// To use this initializer, you will need a suitable memory location that can hold a `T`. This can -/// be [`Box<T>`], [`Arc<T>`], [`UniqueArc<T>`] or even the stack (see [`stack_pin_init!`]). Use the -/// [`InPlaceInit::init`] function of a smart pointer like [`Arc<T>`] on this. Because +/// be [`KBox<T>`], [`Arc<T>`], [`UniqueArc<T>`] or even the stack (see [`stack_pin_init!`]). Use +/// the [`InPlaceInit::init`] function of a smart pointer like [`Arc<T>`] on this. Because  /// [`PinInit<T, E>`] is a super trait, you can use every function that takes it as well.  ///  /// Also see the [module description](self). @@ -965,7 +962,7 @@ pub unsafe trait Init<T: ?Sized, E = Infallible>: PinInit<T, E> {      /// # Examples      ///      /// ```rust -    /// # #![allow(clippy::disallowed_names)] +    /// # #![expect(clippy::disallowed_names)]      /// use kernel::{types::Opaque, init::{self, init_from_closure}};      /// struct Foo {      ///     buf: [u8; 1_000_000], @@ -993,7 +990,7 @@ pub unsafe trait Init<T: ?Sized, E = Infallible>: PinInit<T, E> {  }  /// An initializer returned by [`Init::chain`]. 
-pub struct ChainInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, Box<T>)>); +pub struct ChainInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, KBox<T>)>);  // SAFETY: The `__init` function is implemented such that it  // - returns `Ok(())` on successful initialization, @@ -1077,8 +1074,9 @@ pub fn uninit<T, E>() -> impl Init<MaybeUninit<T>, E> {  /// # Examples  ///  /// ```rust -/// use kernel::{error::Error, init::init_array_from_fn}; -/// let array: Box<[usize; 1_000]> = Box::init::<Error>(init_array_from_fn(|i| i), GFP_KERNEL).unwrap(); +/// use kernel::{alloc::KBox, error::Error, init::init_array_from_fn}; +/// let array: KBox<[usize; 1_000]> = +///     KBox::init::<Error>(init_array_from_fn(|i| i), GFP_KERNEL).unwrap();  /// assert_eq!(array.len(), 1_000);  /// ```  pub fn init_array_from_fn<I, const N: usize, T, E>( @@ -1162,6 +1160,7 @@ where  // SAFETY: Every type can be initialized by-value.  unsafe impl<T, E> Init<T, E> for T {      unsafe fn __init(self, slot: *mut T) -> Result<(), E> { +        // SAFETY: TODO.          unsafe { slot.write(self) };          Ok(())      } @@ -1170,6 +1169,7 @@ unsafe impl<T, E> Init<T, E> for T {  // SAFETY: Every type can be initialized by-value. `__pinned_init` calls `__init`.  unsafe impl<T, E> PinInit<T, E> for T {      unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> { +        // SAFETY: TODO.          unsafe { self.__init(slot) }      }  } @@ -1243,26 +1243,6 @@ impl<T> InPlaceInit<T> for Arc<T> {      }  } -impl<T> InPlaceInit<T> for Box<T> { -    type PinnedSelf = Pin<Self>; - -    #[inline] -    fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E> -    where -        E: From<AllocError>, -    { -        <Box<_> as BoxExt<_>>::new_uninit(flags)?.write_pin_init(init) -    } - -    #[inline] -    fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E> -    where -        E: From<AllocError>, -    { -        <Box<_> as BoxExt<_>>::new_uninit(flags)?.write_init(init) -    } -} -  impl<T> InPlaceInit<T> for UniqueArc<T> {      type PinnedSelf = Pin<Self>; @@ -1299,28 +1279,6 @@ pub trait InPlaceWrite<T> {      fn write_pin_init<E>(self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E>;  } -impl<T> InPlaceWrite<T> for Box<MaybeUninit<T>> { -    type Initialized = Box<T>; - -    fn write_init<E>(mut self, init: impl Init<T, E>) -> Result<Self::Initialized, E> { -        let slot = self.as_mut_ptr(); -        // SAFETY: When init errors/panics, slot will get deallocated but not dropped, -        // slot is valid. -        unsafe { init.__init(slot)? }; -        // SAFETY: All fields have been initialized. -        Ok(unsafe { self.assume_init() }) -    } - -    fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E> { -        let slot = self.as_mut_ptr(); -        // SAFETY: When init errors/panics, slot will get deallocated but not dropped, -        // slot is valid and will not be moved, because we pin it later. -        unsafe { init.__pinned_init(slot)? }; -        // SAFETY: All fields have been initialized. -        Ok(unsafe { self.assume_init() }.into()) -    } -} -  impl<T> InPlaceWrite<T> for UniqueArc<MaybeUninit<T>> {      type Initialized = UniqueArc<T>; @@ -1411,6 +1369,7 @@ pub fn zeroed<T: Zeroable>() -> impl Init<T> {  macro_rules! impl_zeroable {      ($($({$($generics:tt)*})? $t:ty, )*) => { +        // SAFETY: Safety comments written in the macro invocation.          
$(unsafe impl$($($generics)*)? Zeroable for $t {})*      };  } @@ -1451,7 +1410,7 @@ impl_zeroable! {      //      // In this case we are allowed to use `T: ?Sized`, since all zeros is the `None` variant.      {<T: ?Sized>} Option<NonNull<T>>, -    {<T: ?Sized>} Option<Box<T>>, +    {<T: ?Sized>} Option<KBox<T>>,      // SAFETY: `null` pointer is valid.      // diff --git a/rust/kernel/init/__internal.rs b/rust/kernel/init/__internal.rs index 13cefd37512f..74329cc3262c 100644 --- a/rust/kernel/init/__internal.rs +++ b/rust/kernel/init/__internal.rs @@ -15,9 +15,10 @@ use super::*;  /// [this table]: https://doc.rust-lang.org/nomicon/phantom-data.html#table-of-phantomdata-patterns  pub(super) type Invariant<T> = PhantomData<fn(*mut T) -> *mut T>; -/// This is the module-internal type implementing `PinInit` and `Init`. It is unsafe to create this -/// type, since the closure needs to fulfill the same safety requirement as the -/// `__pinned_init`/`__init` functions. +/// Module-internal type implementing `PinInit` and `Init`. +/// +/// It is unsafe to create this type, since the closure needs to fulfill the same safety +/// requirement as the `__pinned_init`/`__init` functions.  pub(crate) struct InitClosure<F, T: ?Sized, E>(pub(crate) F, pub(crate) Invariant<(E, T)>);  // SAFETY: While constructing the `InitClosure`, the user promised that it upholds the @@ -53,6 +54,7 @@ where  pub unsafe trait HasPinData {      type PinData: PinData; +    #[expect(clippy::missing_safety_doc)]      unsafe fn __pin_data() -> Self::PinData;  } @@ -82,6 +84,7 @@ pub unsafe trait PinData: Copy {  pub unsafe trait HasInitData {      type InitData: InitData; +    #[expect(clippy::missing_safety_doc)]      unsafe fn __init_data() -> Self::InitData;  } @@ -102,7 +105,7 @@ pub unsafe trait InitData: Copy {      }  } -pub struct AllData<T: ?Sized>(PhantomData<fn(Box<T>) -> Box<T>>); +pub struct AllData<T: ?Sized>(PhantomData<fn(KBox<T>) -> KBox<T>>);  impl<T: ?Sized> Clone for AllData<T> {      fn clone(&self) -> Self { @@ -112,10 +115,12 @@ impl<T: ?Sized> Clone for AllData<T> {  impl<T: ?Sized> Copy for AllData<T> {} +// SAFETY: TODO.  unsafe impl<T: ?Sized> InitData for AllData<T> {      type Datee = T;  } +// SAFETY: TODO.  unsafe impl<T: ?Sized> HasInitData for T {      type InitData = AllData<T>; diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs index 9a0c4650ef67..1fd146a83241 100644 --- a/rust/kernel/init/macros.rs +++ b/rust/kernel/init/macros.rs @@ -182,13 +182,13 @@  //!     // Normally `Drop` bounds do not have the correct semantics, but for this purpose they do  //!     // (normally people want to know if a type has any kind of drop glue at all, here we want  //!     // to know if it has any kind of custom drop glue, which is exactly what this bound does). -//!     #[allow(drop_bounds)] +//!     #[expect(drop_bounds)]  //!     impl<T: ::core::ops::Drop> MustNotImplDrop for T {}  //!     impl<T> MustNotImplDrop for Bar<T> {}  //!     // Here comes a convenience check, if one implemented `PinnedDrop`, but forgot to add it to  //!     // `#[pin_data]`, then this will error with the same mechanic as above, this is not needed  //!     // for safety, but a good sanity check, since no normal code calls `PinnedDrop::drop`. -//!     #[allow(non_camel_case_types)] +//!     #[expect(non_camel_case_types)]  //!     trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {}  //!     impl<  //!         T: ::kernel::init::PinnedDrop, @@ -513,6 +513,7 @@ macro_rules! 
__pinned_drop {              }          ),      ) => { +        // SAFETY: TODO.          unsafe $($impl_sig)* {              // Inherit all attributes and the type/ident tokens for the signature.              $(#[$($attr)*])* @@ -872,6 +873,7 @@ macro_rules! __pin_data {                  }              } +            // SAFETY: TODO.              unsafe impl<$($impl_generics)*>                  $crate::init::__internal::PinData for __ThePinData<$($ty_generics)*>              where $($whr)* @@ -923,14 +925,14 @@ macro_rules! __pin_data {          // `Drop`. Additionally we will implement this trait for the struct leading to a conflict,          // if it also implements `Drop`          trait MustNotImplDrop {} -        #[allow(drop_bounds)] +        #[expect(drop_bounds)]          impl<T: ::core::ops::Drop> MustNotImplDrop for T {}          impl<$($impl_generics)*> MustNotImplDrop for $name<$($ty_generics)*>          where $($whr)* {}          // We also take care to prevent users from writing a useless `PinnedDrop` implementation.          // They might implement `PinnedDrop` correctly for the struct, but forget to give          // `PinnedDrop` as the parameter to `#[pin_data]`. -        #[allow(non_camel_case_types)] +        #[expect(non_camel_case_types)]          trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {}          impl<T: $crate::init::PinnedDrop>              UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for T {} @@ -987,6 +989,7 @@ macro_rules! __pin_data {          //          // The functions are `unsafe` to prevent accidentally calling them.          #[allow(dead_code)] +        #[expect(clippy::missing_safety_doc)]          impl<$($impl_generics)*> $pin_data<$($ty_generics)*>          where $($whr)*          { @@ -997,6 +1000,7 @@ macro_rules! __pin_data {                      slot: *mut $p_type,                      init: impl $crate::init::PinInit<$p_type, E>,                  ) -> ::core::result::Result<(), E> { +                    // SAFETY: TODO.                      unsafe { $crate::init::PinInit::__pinned_init(init, slot) }                  }              )* @@ -1007,6 +1011,7 @@ macro_rules! __pin_data {                      slot: *mut $type,                      init: impl $crate::init::Init<$type, E>,                  ) -> ::core::result::Result<(), E> { +                    // SAFETY: TODO.                      unsafe { $crate::init::Init::__init(init, slot) }                  }              )* @@ -1121,6 +1126,8 @@ macro_rules! __init_internal {          // no possibility of returning without `unsafe`.          struct __InitOk;          // Get the data about fields from the supplied type. +        // +        // SAFETY: TODO.          let data = unsafe {              use $crate::init::__internal::$has_data;              // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal @@ -1176,6 +1183,7 @@ macro_rules! __init_internal {          let init = move |slot| -> ::core::result::Result<(), $err> {              init(slot).map(|__InitOk| ())          }; +        // SAFETY: TODO.          let init = unsafe { $crate::init::$construct_closure::<_, $err>(init) };          init      }}; @@ -1324,6 +1332,8 @@ macro_rules! __init_internal {          // Endpoint, nothing more to munch, create the initializer.          // Since we are in the closure that is never called, this will never get executed.          // We abuse `slot` to get the correct type inference here: +        // +        // SAFETY: TODO.          
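+        // A plausible justification, following the comments above: this closure is never
+        // called, so the `unsafe` block below never executes; it exists only to drive type
+        // inference for `slot`.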
unsafe {              // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal              // information that is associated to already parsed fragments, so a path fragment diff --git a/rust/kernel/ioctl.rs b/rust/kernel/ioctl.rs index cfa7d080b531..2fc7662339e5 100644 --- a/rust/kernel/ioctl.rs +++ b/rust/kernel/ioctl.rs @@ -4,7 +4,7 @@  //!  //! C header: [`include/asm-generic/ioctl.h`](srctree/include/asm-generic/ioctl.h) -#![allow(non_snake_case)] +#![expect(non_snake_case)]  use crate::build_assert; diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index 661939357e80..04dbee70d3e6 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -12,10 +12,11 @@  //! do so first instead of bypassing this crate.  #![no_std] +#![feature(arbitrary_self_types)]  #![feature(coerce_unsized)]  #![feature(dispatch_from_dyn)] -#![feature(new_uninit)] -#![feature(receiver_trait)] +#![feature(inline_const)] +#![feature(lint_reasons)]  #![feature(unsize)]  // Ensure conditional compilation based on the kernel configuration works; @@ -26,6 +27,8 @@ compile_error!("Missing kernel configuration for conditional compilation");  // Allow proc-macros to refer to `::kernel` inside the `kernel` crate (this crate).  extern crate self as kernel; +pub use ffi; +  pub mod alloc;  #[cfg(CONFIG_BLOCK)]  pub mod block; @@ -60,6 +63,7 @@ pub mod sync;  pub mod task;  pub mod time;  pub mod tracepoint; +pub mod transmute;  pub mod types;  pub mod uaccess;  pub mod workqueue; @@ -90,7 +94,7 @@ pub trait Module: Sized + Sync + Send {  /// Equivalent to `THIS_MODULE` in the C API.  /// -/// C header: [`include/linux/export.h`](srctree/include/linux/export.h) +/// C header: [`include/linux/init.h`](srctree/include/linux/init.h)  pub struct ThisModule(*mut bindings::module);  // SAFETY: `THIS_MODULE` may be used from all threads within a module. diff --git a/rust/kernel/list.rs b/rust/kernel/list.rs index 5b4aec29eb67..fb93330f4af4 100644 --- a/rust/kernel/list.rs +++ b/rust/kernel/list.rs @@ -354,6 +354,7 @@ impl<T: ?Sized + ListItem<ID>, const ID: u64> List<T, ID> {      ///      /// `item` must not be in a different linked list (with the same id).      pub unsafe fn remove(&mut self, item: &T) -> Option<ListArc<T, ID>> { +        // SAFETY: TODO.          let mut item = unsafe { ListLinks::fields(T::view_links(item)) };          // SAFETY: The user provided a reference, and reference are never dangling.          // diff --git a/rust/kernel/list/arc.rs b/rust/kernel/list/arc.rs index d801b9dc6291..3483d8c232c4 100644 --- a/rust/kernel/list/arc.rs +++ b/rust/kernel/list/arc.rs @@ -441,9 +441,6 @@ where      }  } -// This is to allow [`ListArc`] (and variants) to be used as the type of `self`. -impl<T, const ID: u64> core::ops::Receiver for ListArc<T, ID> where T: ListArcSafe<ID> + ?Sized {} -  // This is to allow coercion from `ListArc<T>` to `ListArc<U>` if `T` can be converted to the  // dynamically-sized type (DST) `U`.  impl<T, U, const ID: u64> core::ops::CoerceUnsized<ListArc<U, ID>> for ListArc<T, ID> diff --git a/rust/kernel/list/arc_field.rs b/rust/kernel/list/arc_field.rs index 2330f673427a..c4b9dd503982 100644 --- a/rust/kernel/list/arc_field.rs +++ b/rust/kernel/list/arc_field.rs @@ -56,7 +56,7 @@ impl<T, const ID: u64> ListArcField<T, ID> {      ///      /// The caller must have mutable access to the `ListArc<ID>` containing the struct with this      /// field for the duration of the returned reference. 
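    ///
    /// An illustrative sketch (hypothetical `MyData` type with a `field: ListArcField<u32>`,
    /// reached through a mutable `ListArc`):
    ///
    /// ```ignore
    /// fn bump(data: &mut ListArc<MyData>) {
    ///     // SAFETY: Holding `&mut ListArc<MyData>` gives exclusive access to `field` for the
    ///     // lifetime of the returned reference.
    ///     let value = unsafe { data.field.assert_mut() };
    ///     *value += 1;
    /// }
    /// ```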
-    #[allow(clippy::mut_from_ref)] +    #[expect(clippy::mut_from_ref)]      pub unsafe fn assert_mut(&self) -> &mut T {          // SAFETY: The caller has exclusive access to the `ListArc`, so they also have exclusive          // access to this field. diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs index 801907fba199..b89c681d97c0 100644 --- a/rust/kernel/net/phy.rs +++ b/rust/kernel/net/phy.rs @@ -314,7 +314,7 @@ impl<T: Driver> Adapter<T> {      /// `phydev` must be passed by the corresponding callback in `phy_driver`.      unsafe extern "C" fn soft_reset_callback(          phydev: *mut bindings::phy_device, -    ) -> core::ffi::c_int { +    ) -> crate::ffi::c_int {          from_result(|| {              // SAFETY: This callback is called only in contexts              // where we hold `phy_device->lock`, so the accessors on @@ -328,7 +328,7 @@ impl<T: Driver> Adapter<T> {      /// # Safety      ///      /// `phydev` must be passed by the corresponding callback in `phy_driver`. -    unsafe extern "C" fn probe_callback(phydev: *mut bindings::phy_device) -> core::ffi::c_int { +    unsafe extern "C" fn probe_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int {          from_result(|| {              // SAFETY: This callback is called only in contexts              // where we can exclusively access `phy_device` because @@ -345,7 +345,7 @@ impl<T: Driver> Adapter<T> {      /// `phydev` must be passed by the corresponding callback in `phy_driver`.      unsafe extern "C" fn get_features_callback(          phydev: *mut bindings::phy_device, -    ) -> core::ffi::c_int { +    ) -> crate::ffi::c_int {          from_result(|| {              // SAFETY: This callback is called only in contexts              // where we hold `phy_device->lock`, so the accessors on @@ -359,7 +359,7 @@ impl<T: Driver> Adapter<T> {      /// # Safety      ///      /// `phydev` must be passed by the corresponding callback in `phy_driver`. -    unsafe extern "C" fn suspend_callback(phydev: *mut bindings::phy_device) -> core::ffi::c_int { +    unsafe extern "C" fn suspend_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int {          from_result(|| {              // SAFETY: The C core code ensures that the accessors on              // `Device` are okay to call even though `phy_device->lock` @@ -373,7 +373,7 @@ impl<T: Driver> Adapter<T> {      /// # Safety      ///      /// `phydev` must be passed by the corresponding callback in `phy_driver`. -    unsafe extern "C" fn resume_callback(phydev: *mut bindings::phy_device) -> core::ffi::c_int { +    unsafe extern "C" fn resume_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int {          from_result(|| {              // SAFETY: The C core code ensures that the accessors on              // `Device` are okay to call even though `phy_device->lock` @@ -389,7 +389,7 @@ impl<T: Driver> Adapter<T> {      /// `phydev` must be passed by the corresponding callback in `phy_driver`.      unsafe extern "C" fn config_aneg_callback(          phydev: *mut bindings::phy_device, -    ) -> core::ffi::c_int { +    ) -> crate::ffi::c_int {          from_result(|| {              // SAFETY: This callback is called only in contexts              // where we hold `phy_device->lock`, so the accessors on @@ -405,7 +405,7 @@ impl<T: Driver> Adapter<T> {      /// `phydev` must be passed by the corresponding callback in `phy_driver`.      
unsafe extern "C" fn read_status_callback(          phydev: *mut bindings::phy_device, -    ) -> core::ffi::c_int { +    ) -> crate::ffi::c_int {          from_result(|| {              // SAFETY: This callback is called only in contexts              // where we hold `phy_device->lock`, so the accessors on @@ -421,7 +421,7 @@ impl<T: Driver> Adapter<T> {      /// `phydev` must be passed by the corresponding callback in `phy_driver`.      unsafe extern "C" fn match_phy_device_callback(          phydev: *mut bindings::phy_device, -    ) -> core::ffi::c_int { +    ) -> crate::ffi::c_int {          // SAFETY: This callback is called only in contexts          // where we hold `phy_device->lock`, so the accessors on          // `Device` are okay to call. diff --git a/rust/kernel/page.rs b/rust/kernel/page.rs index 208a006d587c..fdac6c375fe4 100644 --- a/rust/kernel/page.rs +++ b/rust/kernel/page.rs @@ -20,6 +20,16 @@ pub const PAGE_SIZE: usize = bindings::PAGE_SIZE;  /// A bitmask that gives the page containing a given address.  pub const PAGE_MASK: usize = !(PAGE_SIZE - 1); +/// Round up the given number to the next multiple of [`PAGE_SIZE`]. +/// +/// It is incorrect to pass an address where the next multiple of [`PAGE_SIZE`] doesn't fit in a +/// [`usize`]. +pub const fn page_align(addr: usize) -> usize { +    // Parentheses around `PAGE_SIZE - 1` to avoid triggering overflow sanitizers in the wrong +    // cases. +    (addr + (PAGE_SIZE - 1)) & PAGE_MASK +} +  /// A pointer to a page that owns the page allocation.  ///  /// # Invariants diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs index 4571daec0961..8bdab9aa0d16 100644 --- a/rust/kernel/prelude.rs +++ b/rust/kernel/prelude.rs @@ -14,10 +14,7 @@  #[doc(no_inline)]  pub use core::pin::Pin; -pub use crate::alloc::{box_ext::BoxExt, flags::*, vec_ext::VecExt}; - -#[doc(no_inline)] -pub use alloc::{boxed::Box, vec::Vec}; +pub use crate::alloc::{flags::*, Box, KBox, KVBox, KVVec, KVec, VBox, VVec, Vec};  #[doc(no_inline)]  pub use macros::{module, pin_data, pinned_drop, vtable, Zeroable}; diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs index 508b0221256c..a28077a7cb30 100644 --- a/rust/kernel/print.rs +++ b/rust/kernel/print.rs @@ -14,6 +14,7 @@ use core::{  use crate::str::RawFormatter;  // Called from `vsprintf` with format specifier `%pA`. +#[expect(clippy::missing_safety_doc)]  #[no_mangle]  unsafe extern "C" fn rust_fmt_argument(      buf: *mut c_char, @@ -23,6 +24,7 @@ unsafe extern "C" fn rust_fmt_argument(      use fmt::Write;      // SAFETY: The C contract guarantees that `buf` is valid if it's less than `end`.      let mut w = unsafe { RawFormatter::from_ptrs(buf.cast(), end.cast()) }; +    // SAFETY: TODO.      let _ = w.write_fmt(unsafe { *(ptr as *const fmt::Arguments<'_>) });      w.pos().cast()  } @@ -102,6 +104,7 @@ pub unsafe fn call_printk(  ) {      // `_printk` does not seem to fail in any path.      #[cfg(CONFIG_PRINTK)] +    // SAFETY: TODO.      unsafe {          bindings::_printk(              format_string.as_ptr() as _, @@ -137,7 +140,7 @@ pub fn call_printk_cont(args: fmt::Arguments<'_>) {  #[doc(hidden)]  #[cfg(not(testlib))]  #[macro_export] -#[allow(clippy::crate_in_macro_def)] +#[expect(clippy::crate_in_macro_def)]  macro_rules! print_macro (      // The non-continuation cases (most of them, e.g. `INFO`).      
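    // Note: each non-continuation rule forwards to `call_printk` (defined above) together with
    // the module's log prefix, while the continuation case forwards to `call_printk_cont`.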
($format_string:path, false, $($arg:tt)+) => ( diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs index 25eb36fd1cdc..cb4415a12258 100644 --- a/rust/kernel/rbtree.rs +++ b/rust/kernel/rbtree.rs @@ -7,7 +7,6 @@  //! Reference: <https://docs.kernel.org/core-api/rbtree.html>  use crate::{alloc::Flags, bindings, container_of, error::Result, prelude::*}; -use alloc::boxed::Box;  use core::{      cmp::{Ord, Ordering},      marker::PhantomData, @@ -497,7 +496,7 @@ impl<K, V> Drop for RBTree<K, V> {              // but it is not observable. The loop invariant is still maintained.              // SAFETY: `this` is valid per the loop invariant. -            unsafe { drop(Box::from_raw(this.cast_mut())) }; +            unsafe { drop(KBox::from_raw(this.cast_mut())) };          }      }  } @@ -764,7 +763,7 @@ impl<'a, K, V> Cursor<'a, K, V> {          // point to the links field of `Node<K, V>` objects.          let this = unsafe { container_of!(self.current.as_ptr(), Node<K, V>, links) }.cast_mut();          // SAFETY: `this` is valid by the type invariants as described above. -        let node = unsafe { Box::from_raw(this) }; +        let node = unsafe { KBox::from_raw(this) };          let node = RBTreeNode { node };          // SAFETY: The reference to the tree used to create the cursor outlives the cursor, so          // the tree cannot change. By the tree invariant, all nodes are valid. @@ -809,7 +808,7 @@ impl<'a, K, V> Cursor<'a, K, V> {              // point to the links field of `Node<K, V>` objects.              let this = unsafe { container_of!(neighbor, Node<K, V>, links) }.cast_mut();              // SAFETY: `this` is valid by the type invariants as described above. -            let node = unsafe { Box::from_raw(this) }; +            let node = unsafe { KBox::from_raw(this) };              return Some(RBTreeNode { node });          }          None @@ -884,7 +883,8 @@ impl<'a, K, V> Cursor<'a, K, V> {          NonNull::new(neighbor)      } -    /// SAFETY: +    /// # Safety +    ///      /// - `node` must be a valid pointer to a node in an [`RBTree`].      /// - The caller has immutable access to `node` for the duration of 'b.      unsafe fn to_key_value<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b V) { @@ -894,7 +894,8 @@ impl<'a, K, V> Cursor<'a, K, V> {          (k, unsafe { &*v })      } -    /// SAFETY: +    /// # Safety +    ///      /// - `node` must be a valid pointer to a node in an [`RBTree`].      /// - The caller has mutable access to `node` for the duration of 'b.      unsafe fn to_key_value_mut<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b mut V) { @@ -904,7 +905,8 @@ impl<'a, K, V> Cursor<'a, K, V> {          (k, unsafe { &mut *v })      } -    /// SAFETY: +    /// # Safety +    ///      /// - `node` must be a valid pointer to a node in an [`RBTree`].      /// - The caller has immutable access to the key for the duration of 'b.      unsafe fn to_key_value_raw<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, *mut V) { @@ -1035,7 +1037,7 @@ impl<K, V> Iterator for IterRaw<K, V> {  /// It contains the memory needed to hold a node that can be inserted into a red-black tree. One  /// can be obtained by directly allocating it ([`RBTreeNodeReservation::new`]).  pub struct RBTreeNodeReservation<K, V> { -    node: Box<MaybeUninit<Node<K, V>>>, +    node: KBox<MaybeUninit<Node<K, V>>>,  }  impl<K, V> RBTreeNodeReservation<K, V> { @@ -1043,7 +1045,7 @@ impl<K, V> RBTreeNodeReservation<K, V> {      /// call to [`RBTree::insert`].      
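    ///
    /// A minimal usage sketch (illustrative; assumes the kernel prelude is in scope):
    ///
    /// ```ignore
    /// use kernel::rbtree::{RBTreeNode, RBTreeNodeReservation};
    ///
    /// let reservation = RBTreeNodeReservation::<u32, u32>::new(GFP_KERNEL)?;
    /// let node: RBTreeNode<u32, u32> = reservation.into_node(10, 100);
    /// # Ok::<(), Error>(())
    /// ```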
pub fn new(flags: Flags) -> Result<RBTreeNodeReservation<K, V>> {          Ok(RBTreeNodeReservation { -            node: <Box<_> as BoxExt<_>>::new_uninit(flags)?, +            node: KBox::new_uninit(flags)?,          })      }  } @@ -1059,14 +1061,15 @@ impl<K, V> RBTreeNodeReservation<K, V> {      /// Initialises a node reservation.      ///      /// It then becomes an [`RBTreeNode`] that can be inserted into a tree. -    pub fn into_node(mut self, key: K, value: V) -> RBTreeNode<K, V> { -        self.node.write(Node { -            key, -            value, -            links: bindings::rb_node::default(), -        }); -        // SAFETY: We just wrote to it. -        let node = unsafe { self.node.assume_init() }; +    pub fn into_node(self, key: K, value: V) -> RBTreeNode<K, V> { +        let node = KBox::write( +            self.node, +            Node { +                key, +                value, +                links: bindings::rb_node::default(), +            }, +        );          RBTreeNode { node }      }  } @@ -1076,7 +1079,7 @@ impl<K, V> RBTreeNodeReservation<K, V> {  /// The node is fully initialised (with key and value) and can be inserted into a tree without any  /// extra allocations or failure paths.  pub struct RBTreeNode<K, V> { -    node: Box<Node<K, V>>, +    node: KBox<Node<K, V>>,  }  impl<K, V> RBTreeNode<K, V> { @@ -1088,7 +1091,9 @@ impl<K, V> RBTreeNode<K, V> {      /// Get the key and value from inside the node.      pub fn to_key_value(self) -> (K, V) { -        (self.node.key, self.node.value) +        let node = KBox::into_inner(self.node); + +        (node.key, node.value)      }  } @@ -1110,7 +1115,7 @@ impl<K, V> RBTreeNode<K, V> {      /// may be freed (but only for the key/value; memory for the node itself is kept for reuse).      pub fn into_reservation(self) -> RBTreeNodeReservation<K, V> {          RBTreeNodeReservation { -            node: Box::drop_contents(self.node), +            node: KBox::drop_contents(self.node),          }      }  } @@ -1161,7 +1166,7 @@ impl<'a, K, V> RawVacantEntry<'a, K, V> {      /// The `node` must have a key such that inserting it here does not break the ordering of this      /// [`RBTree`].      fn insert(self, node: RBTreeNode<K, V>) -> &'a mut V { -        let node = Box::into_raw(node.node); +        let node = KBox::into_raw(node.node);          // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when          // the node is removed or replaced. @@ -1235,21 +1240,24 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> {              // SAFETY: The node was a node in the tree, but we removed it, so we can convert it              // back into a box.              node: unsafe { -                Box::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut()) +                KBox::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut())              },          }      }      /// Takes the value of the entry out of the map, and returns it.      pub fn remove(self) -> V { -        self.remove_node().node.value +        let rb_node = self.remove_node(); +        let node = KBox::into_inner(rb_node.node); + +        node.value      }      /// Swap the current node for the provided node.      ///      /// The key of both nodes must be equal.      
fn replace(self, node: RBTreeNode<K, V>) -> RBTreeNode<K, V> { -        let node = Box::into_raw(node.node); +        let node = KBox::into_raw(node.node);          // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when          // the node is removed or replaced. @@ -1265,7 +1273,7 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> {          // - `self.node_ptr` produces a valid pointer to a node in the tree.          // - Now that we removed this entry from the tree, we can convert the node to a box.          let old_node = -            unsafe { Box::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut()) }; +            unsafe { KBox::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut()) };          RBTreeNode { node: old_node }      } diff --git a/rust/kernel/std_vendor.rs b/rust/kernel/std_vendor.rs index 67bf9d37ddb5..279bd353687a 100644 --- a/rust/kernel/std_vendor.rs +++ b/rust/kernel/std_vendor.rs @@ -1,5 +1,7 @@  // SPDX-License-Identifier: Apache-2.0 OR MIT +//! Rust standard library vendored code. +//!  //! The contents of this file come from the Rust standard library, hosted in  //! the <https://github.com/rust-lang/rust> repository, licensed under  //! "Apache-2.0 OR MIT" and adapted for kernel use. For copyright details, @@ -14,9 +16,9 @@  ///  /// ```rust  /// let a = 2; -/// # #[allow(clippy::dbg_macro)] +/// # #[expect(clippy::disallowed_macros)]  /// let b = dbg!(a * 2) + 1; -/// //      ^-- prints: [src/main.rs:2] a * 2 = 4 +/// //      ^-- prints: [src/main.rs:3:9] a * 2 = 4  /// assert_eq!(b, 5);  /// ```  /// @@ -52,7 +54,7 @@  /// With a method call:  ///  /// ```rust -/// # #[allow(clippy::dbg_macro)] +/// # #[expect(clippy::disallowed_macros)]  /// fn foo(n: usize) {  ///     if dbg!(n.checked_sub(4)).is_some() {  ///         // ... @@ -65,14 +67,13 @@  /// This prints to the kernel log:  ///  /// ```text,ignore -/// [src/main.rs:4] n.checked_sub(4) = None +/// [src/main.rs:3:8] n.checked_sub(4) = None  /// ```  ///  /// Naive factorial implementation:  ///  /// ```rust -/// # #[allow(clippy::dbg_macro)] -/// # { +/// # #![expect(clippy::disallowed_macros)]  /// fn factorial(n: u32) -> u32 {  ///     if dbg!(n <= 1) {  ///         dbg!(1) @@ -82,21 +83,20 @@  /// }  ///  /// dbg!(factorial(4)); -/// # }  /// ```  ///  /// This prints to the kernel log:  ///  /// ```text,ignore -/// [src/main.rs:3] n <= 1 = false -/// [src/main.rs:3] n <= 1 = false -/// [src/main.rs:3] n <= 1 = false -/// [src/main.rs:3] n <= 1 = true -/// [src/main.rs:4] 1 = 1 -/// [src/main.rs:5] n * factorial(n - 1) = 2 -/// [src/main.rs:5] n * factorial(n - 1) = 6 -/// [src/main.rs:5] n * factorial(n - 1) = 24 -/// [src/main.rs:11] factorial(4) = 24 +/// [src/main.rs:3:8] n <= 1 = false +/// [src/main.rs:3:8] n <= 1 = false +/// [src/main.rs:3:8] n <= 1 = false +/// [src/main.rs:3:8] n <= 1 = true +/// [src/main.rs:4:9] 1 = 1 +/// [src/main.rs:5:9] n * factorial(n - 1) = 2 +/// [src/main.rs:5:9] n * factorial(n - 1) = 6 +/// [src/main.rs:5:9] n * factorial(n - 1) = 24 +/// [src/main.rs:11:1] factorial(4) = 24  /// ```  ///  /// The `dbg!(..)` macro moves the input: @@ -118,7 +118,7 @@  /// a tuple (and return it, too):  ///  /// ``` -/// # #[allow(clippy::dbg_macro)] +/// # #![expect(clippy::disallowed_macros)]  /// assert_eq!(dbg!(1usize, 2u32), (1, 2));  /// ```  /// @@ -127,11 +127,9 @@  /// invocations. 
You can use a 1-tuple directly if you need one:  ///  /// ``` -/// # #[allow(clippy::dbg_macro)] -/// # { +/// # #![expect(clippy::disallowed_macros)]  /// assert_eq!(1, dbg!(1u32,)); // trailing comma ignored  /// assert_eq!((1,), dbg!((1u32,))); // 1-tuple -/// # }  /// ```  ///  /// [`std::dbg`]: https://doc.rust-lang.org/std/macro.dbg.html diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs index bb8d4f41475b..d04c12a1426d 100644 --- a/rust/kernel/str.rs +++ b/rust/kernel/str.rs @@ -2,8 +2,7 @@  //! String representations. -use crate::alloc::{flags::*, vec_ext::VecExt, AllocError}; -use alloc::vec::Vec; +use crate::alloc::{flags::*, AllocError, KVec};  use core::fmt::{self, Write};  use core::ops::{self, Deref, DerefMut, Index}; @@ -162,10 +161,10 @@ impl CStr {      /// Returns the length of this string with `NUL`.      #[inline]      pub const fn len_with_nul(&self) -> usize { -        // SAFETY: This is one of the invariant of `CStr`. -        // We add a `unreachable_unchecked` here to hint the optimizer that -        // the value returned from this function is non-zero.          if self.0.is_empty() { +            // SAFETY: This is one of the invariant of `CStr`. +            // We add a `unreachable_unchecked` here to hint the optimizer that +            // the value returned from this function is non-zero.              unsafe { core::hint::unreachable_unchecked() };          }          self.0.len() @@ -185,7 +184,7 @@ impl CStr {      /// last at least `'a`. When `CStr` is alive, the memory pointed by `ptr`      /// must not be mutated.      #[inline] -    pub unsafe fn from_char_ptr<'a>(ptr: *const core::ffi::c_char) -> &'a Self { +    pub unsafe fn from_char_ptr<'a>(ptr: *const crate::ffi::c_char) -> &'a Self {          // SAFETY: The safety precondition guarantees `ptr` is a valid pointer          // to a `NUL`-terminated C string.          let len = unsafe { bindings::strlen(ptr) } + 1; @@ -248,7 +247,7 @@ impl CStr {      /// Returns a C pointer to the string.      #[inline] -    pub const fn as_char_ptr(&self) -> *const core::ffi::c_char { +    pub const fn as_char_ptr(&self) -> *const crate::ffi::c_char {          self.0.as_ptr() as _      } @@ -301,6 +300,7 @@ impl CStr {      /// ```      #[inline]      pub unsafe fn as_str_unchecked(&self) -> &str { +        // SAFETY: TODO.          unsafe { core::str::from_utf8_unchecked(self.as_bytes()) }      } @@ -524,7 +524,28 @@ macro_rules! c_str {  #[cfg(test)]  mod tests {      use super::*; -    use alloc::format; + +    struct String(CString); + +    impl String { +        fn from_fmt(args: fmt::Arguments<'_>) -> Self { +            String(CString::try_from_fmt(args).unwrap()) +        } +    } + +    impl Deref for String { +        type Target = str; + +        fn deref(&self) -> &str { +            self.0.to_str().unwrap() +        } +    } + +    macro_rules! format { +        ($($f:tt)*) => ({ +            &*String::from_fmt(kernel::fmt!($($f)*)) +        }) +    }      const ALL_ASCII_CHARS: &'static str =          "\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0a\\x0b\\x0c\\x0d\\x0e\\x0f\ @@ -790,7 +811,7 @@ impl fmt::Write for Formatter {  /// assert_eq!(s.is_ok(), false);  /// ```  pub struct CString { -    buf: Vec<u8>, +    buf: KVec<u8>,  }  impl CString { @@ -803,7 +824,7 @@ impl CString {          let size = f.bytes_written();          // Allocate a vector with the required number of bytes, and write to it. 
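        // Note: `size` was computed by a first formatting pass over the same `args` (via
        // `f.bytes_written()` above), so this second pass writes into a buffer sized to fit.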
-        let mut buf = <Vec<_> as VecExt<_>>::with_capacity(size, GFP_KERNEL)?; +        let mut buf = KVec::with_capacity(size, GFP_KERNEL)?;          // SAFETY: The buffer stored in `buf` is at least of size `size` and is valid for writes.          let mut f = unsafe { Formatter::from_buffer(buf.as_mut_ptr(), size) };          f.write_fmt(args)?; @@ -850,10 +871,9 @@ impl<'a> TryFrom<&'a CStr> for CString {      type Error = AllocError;      fn try_from(cstr: &'a CStr) -> Result<CString, AllocError> { -        let mut buf = Vec::new(); +        let mut buf = KVec::new(); -        <Vec<_> as VecExt<_>>::extend_from_slice(&mut buf, cstr.as_bytes_with_nul(), GFP_KERNEL) -            .map_err(|_| AllocError)?; +        buf.extend_from_slice(cstr.as_bytes_with_nul(), GFP_KERNEL)?;          // INVARIANT: The `CStr` and `CString` types have the same invariants for          // the string data, and we copied it over without changes. diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs index bae4a5179c72..1eab7ebf25fd 100644 --- a/rust/kernel/sync.rs +++ b/rust/kernel/sync.rs @@ -15,6 +15,7 @@ pub mod poll;  pub use arc::{Arc, ArcBorrow, UniqueArc};  pub use condvar::{new_condvar, CondVar, CondVarTimeoutResult}; +pub use lock::global::{global_lock, GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};  pub use lock::mutex::{new_mutex, Mutex};  pub use lock::spinlock::{new_spinlock, SpinLock};  pub use locked_by::LockedBy; diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs index 3021f30fd822..fa4509406ee9 100644 --- a/rust/kernel/sync/arc.rs +++ b/rust/kernel/sync/arc.rs @@ -17,13 +17,12 @@  //! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html  use crate::{ -    alloc::{box_ext::BoxExt, AllocError, Flags}, +    alloc::{AllocError, Flags, KBox},      bindings,      init::{self, InPlaceInit, Init, PinInit},      try_init,      types::{ForeignOwnable, Opaque},  }; -use alloc::boxed::Box;  use core::{      alloc::Layout,      fmt, @@ -171,9 +170,6 @@ impl<T: ?Sized> ArcInner<T> {      }  } -// This is to allow [`Arc`] (and variants) to be used as the type of `self`. -impl<T: ?Sized> core::ops::Receiver for Arc<T> {} -  // This is to allow coercion from `Arc<T>` to `Arc<U>` if `T` can be converted to the  // dynamically-sized type (DST) `U`.  impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::CoerceUnsized<Arc<U>> for Arc<T> {} @@ -204,11 +200,11 @@ impl<T> Arc<T> {              data: contents,          }; -        let inner = <Box<_> as BoxExt<_>>::new(value, flags)?; +        let inner = KBox::new(value, flags)?;          // SAFETY: We just created `inner` with a reference count of 1, which is owned by the new          // `Arc` object. -        Ok(unsafe { Self::from_inner(Box::leak(inner).into()) }) +        Ok(unsafe { Self::from_inner(KBox::leak(inner).into()) })      }  } @@ -336,12 +332,12 @@ impl<T: ?Sized> Arc<T> {  impl<T: 'static> ForeignOwnable for Arc<T> {      type Borrowed<'a> = ArcBorrow<'a, T>; -    fn into_foreign(self) -> *const core::ffi::c_void { +    fn into_foreign(self) -> *const crate::ffi::c_void {          ManuallyDrop::new(self).ptr.as_ptr() as _      } -    unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> ArcBorrow<'a, T> { -        // SAFETY: By the safety requirement of this function, we know that `ptr` came from +    unsafe fn borrow<'a>(ptr: *const crate::ffi::c_void) -> ArcBorrow<'a, T> { +        // By the safety requirement of this function, we know that `ptr` came from          // a previous call to `Arc::into_foreign`.          
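        // The `unwrap` below cannot fail: `into_foreign` returned `self.ptr.as_ptr()`, and
        // `self.ptr` is a `NonNull`, so a pointer obtained from `into_foreign` is never null.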
let inner = NonNull::new(ptr as *mut ArcInner<T>).unwrap(); @@ -350,7 +346,7 @@ impl<T: 'static> ForeignOwnable for Arc<T> {          unsafe { ArcBorrow::new(inner) }      } -    unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self { +    unsafe fn from_foreign(ptr: *const crate::ffi::c_void) -> Self {          // SAFETY: By the safety requirement of this function, we know that `ptr` came from          // a previous call to `Arc::into_foreign`, which guarantees that `ptr` is valid and          // holds a reference count increment that is transferrable to us. @@ -401,8 +397,8 @@ impl<T: ?Sized> Drop for Arc<T> {          if is_zero {              // The count reached zero, we must free the memory.              // -            // SAFETY: The pointer was initialised from the result of `Box::leak`. -            unsafe { drop(Box::from_raw(self.ptr.as_ptr())) }; +            // SAFETY: The pointer was initialised from the result of `KBox::leak`. +            unsafe { drop(KBox::from_raw(self.ptr.as_ptr())) };          }      }  } @@ -480,9 +476,6 @@ pub struct ArcBorrow<'a, T: ?Sized + 'a> {      _p: PhantomData<&'a ()>,  } -// This is to allow [`ArcBorrow`] (and variants) to be used as the type of `self`. -impl<T: ?Sized> core::ops::Receiver for ArcBorrow<'_, T> {} -  // This is to allow `ArcBorrow<U>` to be dispatched on when `ArcBorrow<T>` can be coerced into  // `ArcBorrow<U>`.  impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<ArcBorrow<'_, U>> @@ -647,7 +640,7 @@ impl<T> UniqueArc<T> {      /// Tries to allocate a new [`UniqueArc`] instance whose contents are not initialised yet.      pub fn new_uninit(flags: Flags) -> Result<UniqueArc<MaybeUninit<T>>, AllocError> {          // INVARIANT: The refcount is initialised to a non-zero value. -        let inner = Box::try_init::<AllocError>( +        let inner = KBox::try_init::<AllocError>(              try_init!(ArcInner {                  // SAFETY: There are no safety requirements for this FFI call.                  refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }), @@ -657,8 +650,8 @@ impl<T> UniqueArc<T> {          )?;          Ok(UniqueArc {              // INVARIANT: The newly-created object has a refcount of 1. -            // SAFETY: The pointer from the `Box` is valid. -            inner: unsafe { Arc::from_inner(Box::leak(inner).into()) }, +            // SAFETY: The pointer from the `KBox` is valid. +            inner: unsafe { Arc::from_inner(KBox::leak(inner).into()) },          })      }  } diff --git a/rust/kernel/sync/arc/std_vendor.rs b/rust/kernel/sync/arc/std_vendor.rs index a66a0c2831b3..11b3f4ecca5f 100644 --- a/rust/kernel/sync/arc/std_vendor.rs +++ b/rust/kernel/sync/arc/std_vendor.rs @@ -1,5 +1,7 @@  // SPDX-License-Identifier: Apache-2.0 OR MIT +//! Rust standard library vendored code. +//!  //! The contents of this file come from the Rust standard library, hosted in  //! the <https://github.com/rust-lang/rust> repository, licensed under  //! "Apache-2.0 OR MIT" and adapted for kernel use. 
For copyright details, diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs index 2b306afbe56d..7df565038d7d 100644 --- a/rust/kernel/sync/condvar.rs +++ b/rust/kernel/sync/condvar.rs @@ -7,6 +7,7 @@  use super::{lock::Backend, lock::Guard, LockClassKey};  use crate::{ +    ffi::{c_int, c_long},      init::PinInit,      pin_init,      str::CStr, @@ -14,7 +15,6 @@ use crate::{      time::Jiffies,      types::Opaque,  }; -use core::ffi::{c_int, c_long};  use core::marker::PhantomPinned;  use core::ptr;  use macros::pin_data; @@ -70,8 +70,8 @@ pub use new_condvar;  /// }  ///  /// /// Allocates a new boxed `Example`. -/// fn new_example() -> Result<Pin<Box<Example>>> { -///     Box::pin_init(pin_init!(Example { +/// fn new_example() -> Result<Pin<KBox<Example>>> { +///     KBox::pin_init(pin_init!(Example {  ///         value <- new_mutex!(0),  ///         value_changed <- new_condvar!(),  ///     }), GFP_KERNEL) @@ -93,7 +93,6 @@ pub struct CondVar {  }  // SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on any thread. -#[allow(clippy::non_send_fields_in_send_ty)]  unsafe impl Send for CondVar {}  // SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on multiple threads diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs index d6e9bab114b8..41dcddac69e2 100644 --- a/rust/kernel/sync/lock.rs +++ b/rust/kernel/sync/lock.rs @@ -18,6 +18,9 @@ use macros::pin_data;  pub mod mutex;  pub mod spinlock; +pub(super) mod global; +pub use global::{GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy}; +  /// The "backend" of a lock.  ///  /// It is the actual implementation of the lock, without the need to repeat patterns used in all @@ -51,7 +54,7 @@ pub unsafe trait Backend {      /// remain valid for read indefinitely.      unsafe fn init(          ptr: *mut Self::State, -        name: *const core::ffi::c_char, +        name: *const crate::ffi::c_char,          key: *mut bindings::lock_class_key,      ); @@ -63,6 +66,13 @@ pub unsafe trait Backend {      #[must_use]      unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState; +    /// Tries to acquire the lock. +    /// +    /// # Safety +    /// +    /// Callers must ensure that [`Backend::init`] has been previously called. +    unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState>; +      /// Releases the lock, giving up its ownership.      ///      /// # Safety @@ -133,6 +143,15 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {          // SAFETY: The lock was just acquired.          unsafe { Guard::new(self, state) }      } + +    /// Tries to acquire the lock. +    /// +    /// Returns a guard that can be used to access the data protected by the lock if successful. +    pub fn try_lock(&self) -> Option<Guard<'_, T, B>> { +        // SAFETY: The constructor of the type calls `init`, so the existence of the object proves +        // that `init` was called. +        unsafe { B::try_lock(self.state.get()).map(|state| Guard::new(self, state)) } +    }  }  /// A lock guard. @@ -155,9 +174,9 @@ impl<T: ?Sized, B: Backend> Guard<'_, T, B> {          // SAFETY: The caller owns the lock, so it is safe to unlock it.          unsafe { B::unlock(self.lock.state.get(), &self.state) }; -        // SAFETY: The lock was just unlocked above and is being relocked now. 
-        let _relock = -            ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) }); +        let _relock = ScopeGuard::new(|| +                // SAFETY: The lock was just unlocked above and is being relocked now. +                unsafe { B::relock(self.lock.state.get(), &mut self.state) });          cb()      } diff --git a/rust/kernel/sync/lock/global.rs b/rust/kernel/sync/lock/global.rs new file mode 100644 index 000000000000..480ee724e3cc --- /dev/null +++ b/rust/kernel/sync/lock/global.rs @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Copyright (C) 2024 Google LLC. + +//! Support for defining statics containing locks. + +use crate::{ +    str::CStr, +    sync::lock::{Backend, Guard, Lock}, +    sync::{LockClassKey, LockedBy}, +    types::Opaque, +}; +use core::{ +    cell::UnsafeCell, +    marker::{PhantomData, PhantomPinned}, +}; + +/// Trait implemented for marker types for global locks. +/// +/// See [`global_lock!`] for examples. +pub trait GlobalLockBackend { +    /// The name for this global lock. +    const NAME: &'static CStr; +    /// Item type stored in this global lock. +    type Item: 'static; +    /// The backend used for this global lock. +    type Backend: Backend + 'static; +    /// The class for this global lock. +    fn get_lock_class() -> &'static LockClassKey; +} + +/// Type used for global locks. +/// +/// See [`global_lock!`] for examples. +pub struct GlobalLock<B: GlobalLockBackend> { +    inner: Lock<B::Item, B::Backend>, +} + +impl<B: GlobalLockBackend> GlobalLock<B> { +    /// Creates a global lock. +    /// +    /// # Safety +    /// +    /// * Before any other method on this lock is called, [`Self::init`] must be called. +    /// * The type `B` must not be used with any other lock. +    pub const unsafe fn new(data: B::Item) -> Self { +        Self { +            inner: Lock { +                state: Opaque::uninit(), +                data: UnsafeCell::new(data), +                _pin: PhantomPinned, +            }, +        } +    } + +    /// Initializes a global lock. +    /// +    /// # Safety +    /// +    /// Must not be called more than once on a given lock. +    pub unsafe fn init(&'static self) { +        // SAFETY: The pointer to `state` is valid for the duration of this call, and both `name` +        // and `key` are valid indefinitely. The `state` is pinned since we have a `'static` +        // reference to `self`. +        // +        // We have exclusive access to the `state` since the caller of `new` promised to call +        // `init` before using any other methods. As `init` can only be called once, all other +        // uses of this lock must happen after this call. +        unsafe { +            B::Backend::init( +                self.inner.state.get(), +                B::NAME.as_char_ptr(), +                B::get_lock_class().as_ptr(), +            ) +        } +    } + +    /// Lock this global lock. +    pub fn lock(&'static self) -> GlobalGuard<B> { +        GlobalGuard { +            inner: self.inner.lock(), +        } +    } + +    /// Try to lock this global lock. +    pub fn try_lock(&'static self) -> Option<GlobalGuard<B>> { +        Some(GlobalGuard { +            inner: self.inner.try_lock()?, +        }) +    } +} + +/// A guard for a [`GlobalLock`]. +/// +/// See [`global_lock!`] for examples. 
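+///
+/// An illustrative sketch (assuming a `MY_COUNTER` global as in the [`global_lock!`]
+/// examples below):
+///
+/// ```ignore
+/// let mut guard = MY_COUNTER.lock();
+/// *guard += 1;
+/// drop(guard);
+///
+/// // `try_lock` returns `None` instead of blocking when the lock is already held.
+/// if let Some(guard) = MY_COUNTER.try_lock() {
+///     pr_info!("counter: {}\n", *guard);
+/// }
+/// ```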
+pub struct GlobalGuard<B: GlobalLockBackend> { +    inner: Guard<'static, B::Item, B::Backend>, +} + +impl<B: GlobalLockBackend> core::ops::Deref for GlobalGuard<B> { +    type Target = B::Item; + +    fn deref(&self) -> &Self::Target { +        &self.inner +    } +} + +impl<B: GlobalLockBackend> core::ops::DerefMut for GlobalGuard<B> { +    fn deref_mut(&mut self) -> &mut Self::Target { +        &mut self.inner +    } +} + +/// A version of [`LockedBy`] for a [`GlobalLock`]. +/// +/// See [`global_lock!`] for examples. +pub struct GlobalLockedBy<T: ?Sized, B: GlobalLockBackend> { +    _backend: PhantomData<B>, +    value: UnsafeCell<T>, +} + +// SAFETY: The same thread-safety rules as `LockedBy` apply to `GlobalLockedBy`. +unsafe impl<T, B> Send for GlobalLockedBy<T, B> +where +    T: ?Sized, +    B: GlobalLockBackend, +    LockedBy<T, B::Item>: Send, +{ +} + +// SAFETY: The same thread-safety rules as `LockedBy` apply to `GlobalLockedBy`. +unsafe impl<T, B> Sync for GlobalLockedBy<T, B> +where +    T: ?Sized, +    B: GlobalLockBackend, +    LockedBy<T, B::Item>: Sync, +{ +} + +impl<T, B: GlobalLockBackend> GlobalLockedBy<T, B> { +    /// Create a new [`GlobalLockedBy`]. +    /// +    /// The provided value will be protected by the global lock indicated by `B`. +    pub fn new(val: T) -> Self { +        Self { +            value: UnsafeCell::new(val), +            _backend: PhantomData, +        } +    } +} + +impl<T: ?Sized, B: GlobalLockBackend> GlobalLockedBy<T, B> { +    /// Access the value immutably. +    /// +    /// The caller must prove shared access to the lock. +    pub fn as_ref<'a>(&'a self, _guard: &'a GlobalGuard<B>) -> &'a T { +        // SAFETY: The lock is globally unique, so there can only be one guard. +        unsafe { &*self.value.get() } +    } + +    /// Access the value mutably. +    /// +    /// The caller must prove exclusive access to the lock. +    pub fn as_mut<'a>(&'a self, _guard: &'a mut GlobalGuard<B>) -> &'a mut T { +        // SAFETY: The lock is globally unique, so there can only be one guard. +        unsafe { &mut *self.value.get() } +    } + +    /// Access the value mutably directly. +    /// +    /// The caller has exclusive access to this `GlobalLockedBy`, so they do not need to hold the +    /// lock. +    pub fn get_mut(&mut self) -> &mut T { +        self.value.get_mut() +    } +} + +/// Defines a global lock. +/// +/// The global mutex must be initialized before first use. Usually this is done by calling +/// [`GlobalLock::init`] in the module initializer. +/// +/// # Examples +/// +/// A global counter: +/// +/// ``` +/// # mod ex { +/// # use kernel::prelude::*; +/// kernel::sync::global_lock! { +///     // SAFETY: Initialized in module initializer before first use. +///     unsafe(uninit) static MY_COUNTER: Mutex<u32> = 0; +/// } +/// +/// fn increment_counter() -> u32 { +///     let mut guard = MY_COUNTER.lock(); +///     *guard += 1; +///     *guard +/// } +/// +/// impl kernel::Module for MyModule { +///     fn init(_module: &'static ThisModule) -> Result<Self> { +///         // SAFETY: Called exactly once. +///         unsafe { MY_COUNTER.init() }; +/// +///         Ok(MyModule {}) +///     } +/// } +/// # struct MyModule {} +/// # } +/// ``` +/// +/// A global mutex used to protect all instances of a given struct: +/// +/// ``` +/// # mod ex { +/// # use kernel::prelude::*; +/// use kernel::sync::{GlobalGuard, GlobalLockedBy}; +/// +/// kernel::sync::global_lock!
{ +///     // SAFETY: Initialized in module initializer before first use. +///     unsafe(uninit) static MY_MUTEX: Mutex<()> = (); +/// } +/// +/// /// All instances of this struct are protected by `MY_MUTEX`. +/// struct MyStruct { +///     my_counter: GlobalLockedBy<u32, MY_MUTEX>, +/// } +/// +/// impl MyStruct { +///     /// Increment the counter in this instance. +///     /// +///     /// The caller must hold the `MY_MUTEX` mutex. +///     fn increment(&self, guard: &mut GlobalGuard<MY_MUTEX>) -> u32 { +///         let my_counter = self.my_counter.as_mut(guard); +///         *my_counter += 1; +///         *my_counter +///     } +/// } +/// +/// impl kernel::Module for MyModule { +///     fn init(_module: &'static ThisModule) -> Result<Self> { +///         // SAFETY: Called exactly once. +///         unsafe { MY_MUTEX.init() }; +/// +///         Ok(MyModule {}) +///     } +/// } +/// # struct MyModule {} +/// # } +/// ``` +#[macro_export] +macro_rules! global_lock { +    { +        $(#[$meta:meta])* $pub:vis +        unsafe(uninit) static $name:ident: $kind:ident<$valuety:ty> = $value:expr; +    } => { +        #[doc = ::core::concat!( +            "Backend type used by [`", +            ::core::stringify!($name), +            "`](static@", +            ::core::stringify!($name), +            ")." +        )] +        #[allow(non_camel_case_types, unreachable_pub)] +        $pub enum $name {} + +        impl $crate::sync::lock::GlobalLockBackend for $name { +            const NAME: &'static $crate::str::CStr = $crate::c_str!(::core::stringify!($name)); +            type Item = $valuety; +            type Backend = $crate::global_lock_inner!(backend $kind); + +            fn get_lock_class() -> &'static $crate::sync::LockClassKey { +                $crate::static_lock_class!() +            } +        } + +        $(#[$meta])* +        $pub static $name: $crate::sync::lock::GlobalLock<$name> = { +            // Defined here to be outside the unsafe scope. +            let init: $valuety = $value; + +            // SAFETY: +            // * The user of this macro promises to initialize the macro before use. +            // * We are only generating one static with this backend type. +            unsafe { $crate::sync::lock::GlobalLock::new(init) } +        }; +    }; +} +pub use global_lock; + +#[doc(hidden)] +#[macro_export] +macro_rules! global_lock_inner { +    (backend Mutex) => { +        $crate::sync::lock::mutex::MutexBackend +    }; +    (backend SpinLock) => { +        $crate::sync::lock::spinlock::SpinLockBackend +    }; +} diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs index 30632070ee67..0e946ebefce1 100644 --- a/rust/kernel/sync/lock/mutex.rs +++ b/rust/kernel/sync/lock/mutex.rs @@ -58,7 +58,7 @@ pub use new_mutex;  /// }  ///  /// // Allocate a boxed `Example`. -/// let e = Box::pin_init(Example::new(), GFP_KERNEL)?; +/// let e = KBox::pin_init(Example::new(), GFP_KERNEL)?;  /// assert_eq!(e.c, 10);  /// assert_eq!(e.d.lock().a, 20);  /// assert_eq!(e.d.lock().b, 30); @@ -96,7 +96,7 @@ unsafe impl super::Backend for MutexBackend {      unsafe fn init(          ptr: *mut Self::State, -        name: *const core::ffi::c_char, +        name: *const crate::ffi::c_char,          key: *mut bindings::lock_class_key,      ) {          // SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and @@ -115,4 +115,15 @@ unsafe impl super::Backend for MutexBackend {          // caller is the owner of the mutex.          
unsafe { bindings::mutex_unlock(ptr) };      } + +    unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> { +        // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use. +        let result = unsafe { bindings::mutex_trylock(ptr) }; + +        if result != 0 { +            Some(()) +        } else { +            None +        } +    }  } diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs index ea5c5bc1ce12..9f4d128bed98 100644 --- a/rust/kernel/sync/lock/spinlock.rs +++ b/rust/kernel/sync/lock/spinlock.rs @@ -56,7 +56,7 @@ pub use new_spinlock;  /// }  ///  /// // Allocate a boxed `Example`. -/// let e = Box::pin_init(Example::new(), GFP_KERNEL)?; +/// let e = KBox::pin_init(Example::new(), GFP_KERNEL)?;  /// assert_eq!(e.c, 10);  /// assert_eq!(e.d.lock().a, 20);  /// assert_eq!(e.d.lock().b, 30); @@ -95,7 +95,7 @@ unsafe impl super::Backend for SpinLockBackend {      unsafe fn init(          ptr: *mut Self::State, -        name: *const core::ffi::c_char, +        name: *const crate::ffi::c_char,          key: *mut bindings::lock_class_key,      ) {          // SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and @@ -114,4 +114,15 @@ unsafe impl super::Backend for SpinLockBackend {          // caller is the owner of the spinlock.          unsafe { bindings::spin_unlock(ptr) }      } + +    unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> { +        // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use. +        let result = unsafe { bindings::spin_trylock(ptr) }; + +        if result != 0 { +            Some(()) +        } else { +            None +        } +    }  } diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs index ce2ee8d87865..a7b244675c2b 100644 --- a/rust/kernel/sync/locked_by.rs +++ b/rust/kernel/sync/locked_by.rs @@ -43,7 +43,7 @@ use core::{cell::UnsafeCell, mem::size_of, ptr};  /// struct InnerDirectory {  ///     /// The sum of the bytes used by all files.  ///     bytes_used: u64, -///     _files: Vec<File>, +///     _files: KVec<File>,  /// }  ///  /// struct Directory { diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs index c1163237a4aa..7a76be583126 100644 --- a/rust/kernel/task.rs +++ b/rust/kernel/task.rs @@ -9,12 +9,8 @@ use crate::{      pid_namespace::PidNamespace,      types::{ARef, NotThreadSafe, Opaque},  }; -use core::{ -    cmp::{Eq, PartialEq}, -    ffi::{c_int, c_long, c_uint}, -    ops::Deref, -    ptr, -}; +use crate::ffi::{c_int, c_long, c_uint}; +use core::{cmp::{Eq, PartialEq}, ops::Deref, ptr};  /// A sentinel value used for infinite timeouts.  pub const MAX_SCHEDULE_TIMEOUT: c_long = c_long::MAX; diff --git a/rust/kernel/time.rs b/rust/kernel/time.rs index e3bb5e89f88d..379c0f5772e5 100644 --- a/rust/kernel/time.rs +++ b/rust/kernel/time.rs @@ -12,10 +12,10 @@  pub const NSEC_PER_MSEC: i64 = bindings::NSEC_PER_MSEC as i64;  /// The time unit of Linux kernel. One jiffy equals (1/HZ) second. -pub type Jiffies = core::ffi::c_ulong; +pub type Jiffies = crate::ffi::c_ulong;  /// The millisecond time unit. -pub type Msecs = core::ffi::c_uint; +pub type Msecs = crate::ffi::c_uint;  /// Converts milliseconds to jiffies.  #[inline] diff --git a/rust/kernel/transmute.rs b/rust/kernel/transmute.rs new file mode 100644 index 000000000000..1c7d43771a37 --- /dev/null +++ b/rust/kernel/transmute.rs @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 + +//!
Traits for transmuting types. + +/// Types for which any bit pattern is valid. +/// +/// Not all types are valid for all values. For example, a `bool` must be either zero or one, so +/// reading arbitrary bytes into something that contains a `bool` is not okay. +/// +/// It's okay for the type to have padding, as initializing those bytes has no effect. +/// +/// # Safety +/// +/// All bit-patterns must be valid for this type. This type must not have interior mutability. +pub unsafe trait FromBytes {} + +macro_rules! impl_frombytes { +    ($($({$($generics:tt)*})? $t:ty, )*) => { +        // SAFETY: Safety comments written in the macro invocation. +        $(unsafe impl$($($generics)*)? FromBytes for $t {})* +    }; +} + +impl_frombytes! { +    // SAFETY: All bit patterns are acceptable values of the types below. +    u8, u16, u32, u64, usize, +    i8, i16, i32, i64, isize, + +    // SAFETY: If all bit patterns are acceptable for individual values in an array, then all bit +    // patterns are also acceptable for arrays of that type. +    {<T: FromBytes>} [T], +    {<T: FromBytes, const N: usize>} [T; N], +} + +/// Types that can be viewed as an immutable slice of initialized bytes. +/// +/// If a struct implements this trait, then it is okay to copy it byte-for-byte to userspace. This +/// means that it should not have any padding, as padding bytes are uninitialized. Reading +/// uninitialized memory is not just undefined behavior, it may even lead to leaking sensitive +/// information on the stack to userspace. +/// +/// The struct should also not hold kernel pointers, as kernel pointer addresses are also considered +/// sensitive. However, leaking kernel pointers is not considered undefined behavior by Rust, so +/// this is a correctness requirement, but not a safety requirement. +/// +/// # Safety +/// +/// Values of this type may not contain any uninitialized bytes. This type must not have interior +/// mutability. +pub unsafe trait AsBytes {} + +macro_rules! impl_asbytes { +    ($($({$($generics:tt)*})? $t:ty, )*) => { +        // SAFETY: Safety comments written in the macro invocation. +        $(unsafe impl$($($generics)*)? AsBytes for $t {})* +    }; +} + +impl_asbytes! { +    // SAFETY: Instances of the following types have no uninitialized portions. +    u8, u16, u32, u64, usize, +    i8, i16, i32, i64, isize, +    bool, +    char, +    str, + +    // SAFETY: If individual values in an array have no uninitialized portions, then the array +    // itself does not have any uninitialized portions either. +    {<T: AsBytes>} [T], +    {<T: AsBytes, const N: usize>} [T; N], +} diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs index 3238ffaab031..7a133d938551 100644 --- a/rust/kernel/types.rs +++ b/rust/kernel/types.rs @@ -3,13 +3,11 @@  //! Kernel types.  use crate::init::{self, PinInit}; -use alloc::boxed::Box;  use core::{      cell::UnsafeCell,      marker::{PhantomData, PhantomPinned},      mem::{ManuallyDrop, MaybeUninit},      ops::{Deref, DerefMut}, -    pin::Pin,      ptr::NonNull,  }; @@ -31,7 +29,7 @@ pub trait ForeignOwnable: Sized {      /// For example, it might be invalid, dangling or pointing to uninitialized memory. Using it in      /// any way except for [`ForeignOwnable::from_foreign`], [`ForeignOwnable::borrow`],      /// [`ForeignOwnable::try_from_foreign`] can result in undefined behavior. -    fn into_foreign(self) -> *const core::ffi::c_void; +    fn into_foreign(self) -> *const crate::ffi::c_void;      /// Borrows a foreign-owned object.      
/// @@ -39,7 +37,7 @@ pub trait ForeignOwnable: Sized {      ///      /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for      /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet. -    unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Self::Borrowed<'a>; +    unsafe fn borrow<'a>(ptr: *const crate::ffi::c_void) -> Self::Borrowed<'a>;      /// Converts a foreign-owned object back to a Rust-owned one.      /// @@ -49,7 +47,7 @@ pub trait ForeignOwnable: Sized {      /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet.      /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] for      /// this object must have been dropped. -    unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self; +    unsafe fn from_foreign(ptr: *const crate::ffi::c_void) -> Self;      /// Tries to convert a foreign-owned object back to a Rust-owned one.      /// @@ -60,7 +58,7 @@ pub trait ForeignOwnable: Sized {      ///      /// `ptr` must either be null or satisfy the safety requirements for      /// [`ForeignOwnable::from_foreign`]. -    unsafe fn try_from_foreign(ptr: *const core::ffi::c_void) -> Option<Self> { +    unsafe fn try_from_foreign(ptr: *const crate::ffi::c_void) -> Option<Self> {          if ptr.is_null() {              None          } else { @@ -71,64 +69,16 @@ pub trait ForeignOwnable: Sized {      }  } -impl<T: 'static> ForeignOwnable for Box<T> { -    type Borrowed<'a> = &'a T; - -    fn into_foreign(self) -> *const core::ffi::c_void { -        Box::into_raw(self) as _ -    } - -    unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> &'a T { -        // SAFETY: The safety requirements for this function ensure that the object is still alive, -        // so it is safe to dereference the raw pointer. -        // The safety requirements of `from_foreign` also ensure that the object remains alive for -        // the lifetime of the returned value. -        unsafe { &*ptr.cast() } -    } - -    unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self { -        // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous -        // call to `Self::into_foreign`. -        unsafe { Box::from_raw(ptr as _) } -    } -} - -impl<T: 'static> ForeignOwnable for Pin<Box<T>> { -    type Borrowed<'a> = Pin<&'a T>; - -    fn into_foreign(self) -> *const core::ffi::c_void { -        // SAFETY: We are still treating the box as pinned. -        Box::into_raw(unsafe { Pin::into_inner_unchecked(self) }) as _ -    } - -    unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Pin<&'a T> { -        // SAFETY: The safety requirements for this function ensure that the object is still alive, -        // so it is safe to dereference the raw pointer. -        // The safety requirements of `from_foreign` also ensure that the object remains alive for -        // the lifetime of the returned value. -        let r = unsafe { &*ptr.cast() }; - -        // SAFETY: This pointer originates from a `Pin<Box<T>>`. -        unsafe { Pin::new_unchecked(r) } -    } - -    unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self { -        // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous -        // call to `Self::into_foreign`. 
-        unsafe { Pin::new_unchecked(Box::from_raw(ptr as _)) }
-    }
-}
-
 impl ForeignOwnable for () {
     type Borrowed<'a> = ();
 
-    fn into_foreign(self) -> *const core::ffi::c_void {
+    fn into_foreign(self) -> *const crate::ffi::c_void {
         core::ptr::NonNull::dangling().as_ptr()
     }
 
-    unsafe fn borrow<'a>(_: *const core::ffi::c_void) -> Self::Borrowed<'a> {}
+    unsafe fn borrow<'a>(_: *const crate::ffi::c_void) -> Self::Borrowed<'a> {}
 
-    unsafe fn from_foreign(_: *const core::ffi::c_void) -> Self {}
+    unsafe fn from_foreign(_: *const crate::ffi::c_void) -> Self {}
 }
 
 /// Runs a cleanup function/closure when dropped.
@@ -185,7 +135,7 @@ impl ForeignOwnable for () {
 /// # use kernel::types::ScopeGuard;
 /// fn example3(arg: bool) -> Result {
 ///     let mut vec =
-///         ScopeGuard::new_with_data(Vec::new(), |v| pr_info!("vec had {} elements\n", v.len()));
+///         ScopeGuard::new_with_data(KVec::new(), |v| pr_info!("vec had {} elements\n", v.len()));
 ///
 ///     vec.push(10u8, GFP_KERNEL)?;
 ///     if arg {
@@ -225,7 +175,7 @@ impl<T, F: FnOnce(T)> ScopeGuard<T, F> {
 impl ScopeGuard<(), fn(())> {
     /// Creates a new guarded object with the given cleanup function.
     pub fn new(cleanup: impl FnOnce()) -> ScopeGuard<(), impl FnOnce(())> {
-        ScopeGuard::new_with_data((), move |_| cleanup())
+        ScopeGuard::new_with_data((), move |()| cleanup())
     }
 }
 
@@ -256,7 +206,58 @@ impl<T, F: FnOnce(T)> Drop for ScopeGuard<T, F> {
 /// Stores an opaque value.
 ///
-/// This is meant to be used with FFI objects that are never interpreted by Rust code.
+/// `Opaque<T>` is meant to be used with FFI objects that are never interpreted by Rust code.
+///
+/// It is used to wrap structs from the C side, for example `Opaque<bindings::mutex>`.
+/// It gets rid of all the usual assumptions that Rust has for a value:
+///
+/// * The value is allowed to be uninitialized (for example have invalid bit patterns: `3` for a
+///   [`bool`]).
+/// * The value is allowed to be mutated while a `&Opaque<T>` exists on the Rust side.
+/// * No uniqueness for mutable references: it is fine to have multiple `&mut Opaque<T>` point to
+///   the same value.
+/// * The value is not allowed to be shared with other threads (i.e. it is `!Sync`).
+///
+/// This has to be used for all values that the C side has access to, because it cannot be ensured
+/// that the C side adheres to the usual constraints that Rust needs.
+///
+/// Using `Opaque<T>` makes it possible to keep using references on the Rust side even for values
+/// shared with C.
+///
+/// # Examples
+///
+/// ```
+/// # #![expect(unreachable_pub, clippy::disallowed_names)]
+/// use kernel::types::Opaque;
+/// # // Emulate a C struct binding which is from C, maybe uninitialized or not, only the C side
+/// # // knows.
+/// # mod bindings {
+/// #     pub struct Foo {
+/// #         pub val: u8,
+/// #     }
+/// # }
+///
+/// // `foo.val` is assumed to be handled on the C side, so we use `Opaque` to wrap it.
+/// pub struct Foo {
+///     foo: Opaque<bindings::Foo>,
+/// }
+///
+/// impl Foo {
+///     pub fn get_val(&self) -> u8 {
+///         let ptr = Opaque::get(&self.foo);
+///
+///         // SAFETY: `Self` is valid from the C side.
+///         unsafe { (*ptr).val }
+///     }
+/// }
+///
+/// // Create an instance of `Foo` with the `Opaque` wrapper.
+/// let foo = Foo { +///     foo: Opaque::new(bindings::Foo { val: 0xdb }), +/// }; +/// +/// assert_eq!(foo.get_val(), 0xdb); +/// ```  #[repr(transparent)]  pub struct Opaque<T> {      value: UnsafeCell<MaybeUninit<T>>, @@ -410,6 +411,7 @@ impl<T: AlwaysRefCounted> ARef<T> {      ///      /// struct Empty {}      /// +    /// # // SAFETY: TODO.      /// unsafe impl AlwaysRefCounted for Empty {      ///     fn inc_ref(&self) {}      ///     unsafe fn dec_ref(_obj: NonNull<Self>) {} @@ -417,6 +419,7 @@ impl<T: AlwaysRefCounted> ARef<T> {      ///      /// let mut data = Empty {};      /// let ptr = NonNull::<Empty>::new(&mut data as *mut _).unwrap(); +    /// # // SAFETY: TODO.      /// let data_ref: ARef<Empty> = unsafe { ARef::from_raw(ptr) };      /// let raw_ptr: NonNull<Empty> = ARef::into_raw(data_ref);      /// @@ -461,6 +464,15 @@ impl<T: AlwaysRefCounted> Drop for ARef<T> {  }  /// A sum type that always holds either a value of type `L` or `R`. +/// +/// # Examples +/// +/// ``` +/// use kernel::types::Either; +/// +/// let left_value: Either<i32, &str> = Either::Left(7); +/// let right_value: Either<i32, &str> = Either::Right("right value"); +/// ```  pub enum Either<L, R> {      /// Constructs an instance of [`Either`] containing a value of type `L`.      Left(L), @@ -469,70 +481,6 @@ pub enum Either<L, R> {      Right(R),  } -/// Types for which any bit pattern is valid. -/// -/// Not all types are valid for all values. For example, a `bool` must be either zero or one, so -/// reading arbitrary bytes into something that contains a `bool` is not okay. -/// -/// It's okay for the type to have padding, as initializing those bytes has no effect. -/// -/// # Safety -/// -/// All bit-patterns must be valid for this type. This type must not have interior mutability. -pub unsafe trait FromBytes {} - -// SAFETY: All bit patterns are acceptable values of the types below. -unsafe impl FromBytes for u8 {} -unsafe impl FromBytes for u16 {} -unsafe impl FromBytes for u32 {} -unsafe impl FromBytes for u64 {} -unsafe impl FromBytes for usize {} -unsafe impl FromBytes for i8 {} -unsafe impl FromBytes for i16 {} -unsafe impl FromBytes for i32 {} -unsafe impl FromBytes for i64 {} -unsafe impl FromBytes for isize {} -// SAFETY: If all bit patterns are acceptable for individual values in an array, then all bit -// patterns are also acceptable for arrays of that type. -unsafe impl<T: FromBytes> FromBytes for [T] {} -unsafe impl<T: FromBytes, const N: usize> FromBytes for [T; N] {} - -/// Types that can be viewed as an immutable slice of initialized bytes. -/// -/// If a struct implements this trait, then it is okay to copy it byte-for-byte to userspace. This -/// means that it should not have any padding, as padding bytes are uninitialized. Reading -/// uninitialized memory is not just undefined behavior, it may even lead to leaking sensitive -/// information on the stack to userspace. -/// -/// The struct should also not hold kernel pointers, as kernel pointer addresses are also considered -/// sensitive. However, leaking kernel pointers is not considered undefined behavior by Rust, so -/// this is a correctness requirement, but not a safety requirement. -/// -/// # Safety -/// -/// Values of this type may not contain any uninitialized bytes. This type must not have interior -/// mutability. -pub unsafe trait AsBytes {} - -// SAFETY: Instances of the following types have no uninitialized portions. 
-unsafe impl AsBytes for u8 {} -unsafe impl AsBytes for u16 {} -unsafe impl AsBytes for u32 {} -unsafe impl AsBytes for u64 {} -unsafe impl AsBytes for usize {} -unsafe impl AsBytes for i8 {} -unsafe impl AsBytes for i16 {} -unsafe impl AsBytes for i32 {} -unsafe impl AsBytes for i64 {} -unsafe impl AsBytes for isize {} -unsafe impl AsBytes for bool {} -unsafe impl AsBytes for char {} -unsafe impl AsBytes for str {} -// SAFETY: If individual values in an array have no uninitialized portions, then the array itself -// does not have any uninitialized portions either. -unsafe impl<T: AsBytes> AsBytes for [T] {} -unsafe impl<T: AsBytes, const N: usize> AsBytes for [T; N] {} -  /// Zero-sized type to mark types not [`Send`].  ///  /// Add this type as a field to your struct if your type should not be sent to a different task. diff --git a/rust/kernel/uaccess.rs b/rust/kernel/uaccess.rs index e9347cff99ab..05b0b8d13b10 100644 --- a/rust/kernel/uaccess.rs +++ b/rust/kernel/uaccess.rs @@ -8,11 +8,10 @@ use crate::{      alloc::Flags,      bindings,      error::Result, +    ffi::{c_ulong, c_void},      prelude::*, -    types::{AsBytes, FromBytes}, +    transmute::{AsBytes, FromBytes},  }; -use alloc::vec::Vec; -use core::ffi::{c_ulong, c_void};  use core::mem::{size_of, MaybeUninit};  /// The type used for userspace addresses. @@ -46,15 +45,14 @@ pub type UserPtr = usize;  /// every byte in the region.  ///  /// ```no_run -/// use alloc::vec::Vec; -/// use core::ffi::c_void; +/// use kernel::ffi::c_void;  /// use kernel::error::Result;  /// use kernel::uaccess::{UserPtr, UserSlice};  ///  /// fn bytes_add_one(uptr: UserPtr, len: usize) -> Result<()> {  ///     let (read, mut write) = UserSlice::new(uptr, len).reader_writer();  /// -///     let mut buf = Vec::new(); +///     let mut buf = KVec::new();  ///     read.read_all(&mut buf, GFP_KERNEL)?;  ///  ///     for b in &mut buf { @@ -69,8 +67,7 @@ pub type UserPtr = usize;  /// Example illustrating a TOCTOU (time-of-check to time-of-use) bug.  ///  /// ```no_run -/// use alloc::vec::Vec; -/// use core::ffi::c_void; +/// use kernel::ffi::c_void;  /// use kernel::error::{code::EINVAL, Result};  /// use kernel::uaccess::{UserPtr, UserSlice};  /// @@ -78,21 +75,21 @@ pub type UserPtr = usize;  /// fn is_valid(uptr: UserPtr, len: usize) -> Result<bool> {  ///     let read = UserSlice::new(uptr, len).reader();  /// -///     let mut buf = Vec::new(); +///     let mut buf = KVec::new();  ///     read.read_all(&mut buf, GFP_KERNEL)?;  ///  ///     todo!()  /// }  ///  /// /// Returns the bytes behind this user pointer if they are valid. -/// fn get_bytes_if_valid(uptr: UserPtr, len: usize) -> Result<Vec<u8>> { +/// fn get_bytes_if_valid(uptr: UserPtr, len: usize) -> Result<KVec<u8>> {  ///     if !is_valid(uptr, len)? {  ///         return Err(EINVAL);  ///     }  ///  ///     let read = UserSlice::new(uptr, len).reader();  /// -///     let mut buf = Vec::new(); +///     let mut buf = KVec::new();  ///     read.read_all(&mut buf, GFP_KERNEL)?;  ///  ///     // THIS IS A BUG! The bytes could have changed since we checked them. @@ -130,7 +127,7 @@ impl UserSlice {      /// Reads the entirety of the user slice, appending it to the end of the provided buffer.      ///      /// Fails with [`EFAULT`] if the read happens on a bad address. 
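Besides `read_all`, `UserSliceReader::read::<T>` pairs with the `FromBytes` marker relocated above: any `T: FromBytes` can be materialized directly from userspace memory, since every bit pattern the user supplies is a valid value. A sketch with a hypothetical `ReqHeader`:

```
use kernel::prelude::*;
use kernel::transmute::FromBytes;
use kernel::uaccess::{UserPtr, UserSlice};

/// A hypothetical request header copied in from userspace.
#[repr(C)]
struct ReqHeader {
    cmd: u32,
    len: u32,
}

// SAFETY: `ReqHeader` is two `u32`s; all bit patterns are valid and there is
// no interior mutability.
unsafe impl FromBytes for ReqHeader {}

fn read_header(uptr: UserPtr) -> Result<ReqHeader> {
    let mut reader = UserSlice::new(uptr, core::mem::size_of::<ReqHeader>()).reader();
    // Copies exactly `size_of::<ReqHeader>()` bytes, failing with `EFAULT` on
    // a bad address.
    reader.read::<ReqHeader>()
}
```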
-    pub fn read_all(self, buf: &mut Vec<u8>, flags: Flags) -> Result {
+    pub fn read_all(self, buf: &mut KVec<u8>, flags: Flags) -> Result {
         self.reader().read_all(buf, flags)
     }
 
@@ -291,9 +288,9 @@ impl UserSliceReader {
     /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
     ///
     /// Fails with [`EFAULT`] if the read happens on a bad address.
-    pub fn read_all(mut self, buf: &mut Vec<u8>, flags: Flags) -> Result {
+    pub fn read_all(mut self, buf: &mut KVec<u8>, flags: Flags) -> Result {
         let len = self.length;
-        VecExt::<u8>::reserve(buf, len, flags)?;
+        buf.reserve(len, flags)?;
 
         // The call to `reserve` was successful, so the spare capacity is at least `len` bytes
         // long.
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index 553a5cba2adc..4d1d2062f6eb 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -216,7 +216,7 @@ impl Queue {
             func: Some(func),
         });
 
-        self.enqueue(Box::pin_init(init, flags).map_err(|_| AllocError)?);
+        self.enqueue(KBox::pin_init(init, flags).map_err(|_| AllocError)?);
         Ok(())
     }
 }
@@ -239,9 +239,9 @@ impl<T> ClosureWork<T> {
 }
 
 impl<T: FnOnce()> WorkItem for ClosureWork<T> {
-    type Pointer = Pin<Box<Self>>;
+    type Pointer = Pin<KBox<Self>>;
 
-    fn run(mut this: Pin<Box<Self>>) {
+    fn run(mut this: Pin<KBox<Self>>) {
         if let Some(func) = this.as_mut().project().take() {
             (func)()
         }
@@ -297,7 +297,7 @@ pub unsafe trait RawWorkItem<const ID: u64> {
 
 /// Defines the method that should be called directly when a work item is executed.
 ///
-/// This trait is implemented by `Pin<Box<T>>` and [`Arc<T>`], and is mainly intended to be
+/// This trait is implemented by `Pin<KBox<T>>` and [`Arc<T>`], and is mainly intended to be
 /// implemented for smart pointer types. For your own structs, you would implement [`WorkItem`]
 /// instead. The [`run`] method on this trait will usually just perform the appropriate
 /// `container_of` translation and then call into the [`run`][WorkItem::run] method from the
@@ -329,7 +329,7 @@ pub unsafe trait WorkItemPointer<const ID: u64>: RawWorkItem<ID> {
 /// This trait is used when the `work_struct` field is defined using the [`Work`] helper.
 pub trait WorkItem<const ID: u64 = 0> {
     /// The pointer type that this struct is wrapped in. This will typically be `Arc<Self>` or
-    /// `Pin<Box<Self>>`.
+    /// `Pin<KBox<Self>>`.
     type Pointer: WorkItemPointer<ID>;
 
     /// The method that should be called when this work item is executed.
@@ -366,7 +366,6 @@ unsafe impl<T: ?Sized, const ID: u64> Sync for Work<T, ID> {}
 impl<T: ?Sized, const ID: u64> Work<T, ID> {
     /// Creates a new instance of [`Work`].
     #[inline]
-    #[allow(clippy::new_ret_no_self)]
     pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self>
     where
         T: WorkItem<ID>,
@@ -520,13 +519,14 @@ impl_has_work! {
     impl{T} HasWork<Self> for ClosureWork<T> { self.work }
 }
 
+// SAFETY: TODO.
 unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Arc<T>
 where
     T: WorkItem<ID, Pointer = Self>,
     T: HasWork<T, ID>,
 {
     unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
-        // SAFETY: The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
+        // The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
let ptr = ptr as *mut Work<T, ID>;
         // SAFETY: This computes the pointer that `__enqueue` got from `Arc::into_raw`.
         let ptr = unsafe { T::work_container_of(ptr) };
@@ -537,6 +537,7 @@ where
     }
 }
 
+// SAFETY: TODO.
 unsafe impl<T, const ID: u64> RawWorkItem<ID> for Arc<T>
 where
     T: WorkItem<ID, Pointer = Self>,
@@ -565,18 +566,19 @@ where
     }
 }
 
-unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Pin<Box<T>>
+// SAFETY: TODO.
+unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Pin<KBox<T>>
 where
     T: WorkItem<ID, Pointer = Self>,
     T: HasWork<T, ID>,
 {
     unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
-        // SAFETY: The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
+        // The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
         let ptr = ptr as *mut Work<T, ID>;
         // SAFETY: This computes the pointer that `__enqueue` got from `KBox::into_raw`.
         let ptr = unsafe { T::work_container_of(ptr) };
         // SAFETY: This pointer comes from `KBox::into_raw` and we've been given back ownership.
-        let boxed = unsafe { Box::from_raw(ptr) };
+        let boxed = unsafe { KBox::from_raw(ptr) };
         // SAFETY: The box was already pinned when it was enqueued.
         let pinned = unsafe { Pin::new_unchecked(boxed) };
@@ -584,7 +586,8 @@ where
     }
 }
 
-unsafe impl<T, const ID: u64> RawWorkItem<ID> for Pin<Box<T>>
+// SAFETY: TODO.
+unsafe impl<T, const ID: u64> RawWorkItem<ID> for Pin<KBox<T>>
 where
     T: WorkItem<ID, Pointer = Self>,
     T: HasWork<T, ID>,
@@ -598,9 +601,9 @@ where
         // SAFETY: We're not going to move `self` or any of its fields, so it's okay to temporarily
         // remove the `Pin` wrapper.
         let boxed = unsafe { Pin::into_inner_unchecked(self) };
-        let ptr = Box::into_raw(boxed);
+        let ptr = KBox::into_raw(boxed);
 
-        // SAFETY: Pointers into a `Box` point at a valid value.
+        // SAFETY: Pointers into a `KBox` point at a valid value.
         let work_ptr = unsafe { T::raw_get_work(ptr) };
         // SAFETY: `raw_get_work` returns a pointer to a valid value.
         let work_ptr = unsafe { Work::raw_get(work_ptr) };
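Taken together, the `Arc` path above follows the usual pattern from the module-level docs: embed a `Work` field, wire it up with `impl_has_work!`, and pick `Arc<Self>` as `WorkItem::Pointer`. A condensed sketch (`MyWork` and `schedule` are illustrative names):

```
use kernel::prelude::*;
use kernel::sync::Arc;
use kernel::workqueue::{self, impl_has_work, new_work, Work, WorkItem};

#[pin_data]
struct MyWork {
    value: i32,
    #[pin]
    work: Work<MyWork>,
}

impl_has_work! {
    impl HasWork<Self> for MyWork { self.work }
}

impl WorkItem for MyWork {
    type Pointer = Arc<MyWork>;

    fn run(this: Arc<MyWork>) {
        pr_info!("work item ran with value {}\n", this.value);
    }
}

fn schedule(value: i32) -> Result {
    let w = Arc::pin_init(
        pin_init!(MyWork {
            value,
            work <- new_work!("MyWork::work"),
        }),
        GFP_KERNEL,
    )?;
    // `enqueue` consumes the `Arc` via `RawWorkItem::__enqueue`; it hands the
    // `Arc` back in the error case if the work item is already queued.
    let _ = workqueue::system().enqueue(w);
    Ok(())
}
```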

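For one-off closures there is no need to define a work struct at all: the queue's spawning helper (whose allocation now goes through `KBox::pin_init`, as changed above) wraps the closure in a `ClosureWork` behind exactly the `Pin<KBox<_>>` pointer type annotated in this hunk. A sketch, assuming the existing `Queue::try_spawn` helper:

```
use kernel::alloc::AllocError;
use kernel::prelude::*;
use kernel::workqueue;

fn defer_log() -> Result<(), AllocError> {
    // The closure is boxed into a `ClosureWork` and queued on the system
    // workqueue; it runs (at most once) from workqueue context.
    workqueue::system().try_spawn(GFP_KERNEL, || pr_info!("deferred hello\n"))
}
```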