author    Linus Torvalds <torvalds@linux-foundation.org>  2024-07-27 13:44:54 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-07-27 13:44:54 -0700
commit    910bfc26d16d07df5a2bfcbc63f0aa9d1397e2ef (patch)
tree      e2cc9fc3c860bfe26c7a5c2479b82594b41b87bf /rust/kernel
parent    ff30564411ffdcee49d579cb15eb13185a36e253 (diff)
parent    b1263411112305acf2af728728591465becb45b0 (diff)
Merge tag 'rust-6.11' of https://github.com/Rust-for-Linux/linux
Pull Rust updates from Miguel Ojeda:
 "The highlight is the establishment of a minimum version for the Rust
  toolchain, including 'rustc' (and bundled tools) and 'bindgen'. The
  initial minimum will be the pinned version we currently have, i.e. we
  are just widening the allowed versions. That covers three stable Rust
  releases: 1.78.0, 1.79.0, 1.80.0 (getting released tomorrow), plus
  beta, plus nightly.

  This should already be enough for kernel developers in distributions
  that provide recent Rust compiler versions routinely, such as Arch
  Linux, Debian Unstable (outside the freeze period), Fedora Linux,
  Gentoo Linux (especially the testing channel), Nix (unstable) and
  openSUSE Slowroll and Tumbleweed.

  In addition, the kernel is now being built-tested by Rust's pre-merge
  CI. That is, every change that is attempting to land into the Rust
  compiler is tested against the kernel, and it is merged only if it
  passes. Similarly, the bindgen tool has agreed to build the kernel in
  their CI too.

  Thus, with the pre-merge CI in place, both projects hope to avoid
  unintentional changes to Rust that break the kernel. This means that,
  in general, apart from intentional changes on their side (that we
  will need to workaround conditionally on our side), the upcoming Rust
  compiler versions should generally work.

  In addition, the Rust project has proposed getting the kernel into
  stable Rust (at least solving the main blockers) as one of its three
  flagship goals for 2024H2 [1].

  I would like to thank Niko, Sid, Emilio et al. for their help
  promoting the collaboration between Rust and the kernel.

  Toolchain and infrastructure:

   - Support several Rust toolchain versions.

   - Support several bindgen versions.

   - Remove 'cargo' requirement and simplify 'rusttest', thanks to
     'alloc' having been dropped last cycle.

   - Provide proper error reporting for the 'rust-analyzer' target.

  'kernel' crate:

   - Add 'uaccess' module with a safe userspace pointers abstraction.

   - Add 'page' module with a 'struct page' abstraction.

   - Support more complex generics in workqueue's 'impl_has_work!'
     macro.

  'macros' crate:

   - Add 'firmware' field support to the 'module!' macro.

   - Improve 'module!' macro documentation.

  Documentation:

   - Provide instructions on what packages should be installed to
     build the kernel in some popular Linux distributions.

   - Introduce the new kernel.org LLVM+Rust toolchains.

   - Explain '#[no_std]'.

  And a few other small bits"

Link: https://rust-lang.github.io/rust-project-goals/2024h2/index.html#flagship-goals [1]

* tag 'rust-6.11' of https://github.com/Rust-for-Linux/linux: (26 commits)
  docs: rust: quick-start: add section on Linux distributions
  rust: warn about `bindgen` versions 0.66.0 and 0.66.1
  rust: start supporting several `bindgen` versions
  rust: work around `bindgen` 0.69.0 issue
  rust: avoid assuming a particular `bindgen` build
  rust: start supporting several compiler versions
  rust: simplify Clippy warning flags set
  rust: relax most deny-level lints to warnings
  rust: allow `dead_code` for never constructed bindings
  rust: init: simplify from `map_err` to `inspect_err`
  rust: macros: indent list item in `paste!`'s docs
  rust: add abstraction for `struct page`
  rust: uaccess: add typed accessors for userspace pointers
  uaccess: always export _copy_[from|to]_user with CONFIG_RUST
  rust: uaccess: add userspace pointers
  kbuild: rust-analyzer: improve comment documentation
  kbuild: rust-analyzer: better error handling
  docs: rust: no_std is used
  rust: alloc: add __GFP_HIGHMEM flag
  rust: alloc: fix typo in docs for GFP_NOWAIT
  ...
Diffstat (limited to 'rust/kernel')
 rust/kernel/alloc.rs     |  17
 rust/kernel/init.rs      |  13
 rust/kernel/lib.rs       |   2
 rust/kernel/page.rs      | 250
 rust/kernel/types.rs     |  64
 rust/kernel/uaccess.rs   | 388
 rust/kernel/workqueue.rs |  16
 7 files changed, 733 insertions(+), 17 deletions(-)
diff --git a/rust/kernel/alloc.rs b/rust/kernel/alloc.rs
index 531b5e471cb1..1966bd407017 100644
--- a/rust/kernel/alloc.rs
+++ b/rust/kernel/alloc.rs
@@ -20,6 +20,13 @@ pub struct AllocError;
#[derive(Clone, Copy)]
pub struct Flags(u32);
+impl Flags {
+ /// Get the raw representation of this flag.
+ pub(crate) fn as_raw(self) -> u32 {
+ self.0
+ }
+}
+
impl core::ops::BitOr for Flags {
type Output = Self;
fn bitor(self, rhs: Self) -> Self::Output {
@@ -52,6 +59,14 @@ pub mod flags {
/// This is normally or'd with other flags.
pub const __GFP_ZERO: Flags = Flags(bindings::__GFP_ZERO);
+ /// Allow the allocation to be in high memory.
+ ///
+ /// Allocations in high memory may not be mapped into the kernel's address space, so this can't
+ /// be used with `kmalloc` and other similar methods.
+ ///
+ /// This is normally or'd with other flags.
+ pub const __GFP_HIGHMEM: Flags = Flags(bindings::__GFP_HIGHMEM);
+
/// Users can not sleep and need the allocation to succeed.
///
/// A lower watermark is applied to allow access to "atomic reserves". The current
@@ -66,7 +81,7 @@ pub mod flags {
/// The same as [`GFP_KERNEL`], except the allocation is accounted to kmemcg.
pub const GFP_KERNEL_ACCOUNT: Flags = Flags(bindings::GFP_KERNEL_ACCOUNT);
- /// Ror kernel allocations that should not stall for direct reclaim, start physical IO or
+ /// For kernel allocations that should not stall for direct reclaim, start physical IO or
/// use any filesystem callback. It is very likely to fail to allocate memory, even for very
/// small allocations.
pub const GFP_NOWAIT: Flags = Flags(bindings::GFP_NOWAIT);
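To show how the new flag composes, here is a minimal sketch (the helper name is illustrative; `Page::alloc_page` is the page-level allocator added later in this merge, and `__GFP_HIGHMEM` is deliberately kept away from `kmalloc`-style methods, as the doc comment requires):

```
// Sketch only: `alloc_highmem_zeroed` is a made-up helper. It combines the new
// `__GFP_HIGHMEM` flag with existing ones via the `BitOr` impl above and hands
// the result to `Page::alloc_page` from the new `page` module.
use kernel::alloc::{flags::*, AllocError};
use kernel::page::Page;

fn alloc_highmem_zeroed() -> Result<Page, AllocError> {
    Page::alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_HIGHMEM)
}
```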
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 68605b633e73..495c09ebe3a3 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -843,11 +843,8 @@ where
let val = unsafe { &mut *slot };
// SAFETY: `slot` is considered pinned.
let val = unsafe { Pin::new_unchecked(val) };
- (self.1)(val).map_err(|e| {
- // SAFETY: `slot` was initialized above.
- unsafe { core::ptr::drop_in_place(slot) };
- e
- })
+ // SAFETY: `slot` was initialized above.
+ (self.1)(val).inspect_err(|_| unsafe { core::ptr::drop_in_place(slot) })
}
}
@@ -941,11 +938,9 @@ where
// SAFETY: All requirements fulfilled since this function is `__init`.
unsafe { self.0.__pinned_init(slot)? };
// SAFETY: The above call initialized `slot` and we still have unique access.
- (self.1)(unsafe { &mut *slot }).map_err(|e| {
+ (self.1)(unsafe { &mut *slot }).inspect_err(|_|
// SAFETY: `slot` was initialized above.
- unsafe { core::ptr::drop_in_place(slot) };
- e
- })
+ unsafe { core::ptr::drop_in_place(slot) })
}
}
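The init.rs hunks above replace the `map_err(|e| { ...; e })` pattern with `inspect_err`. A plain-Rust illustration of the equivalence (not kernel code; `cleanup` stands in for the `drop_in_place` call in the hunks):

```
// `inspect_err` runs the side effect only on the error path and forwards the
// error unchanged, so the manual "return `e` by hand" dance is not needed.
fn cleanup() {
    // e.g. drop a partially initialized value, as the hunks above do
}

fn old_style(res: Result<(), i32>) -> Result<(), i32> {
    res.map_err(|e| {
        cleanup();
        e
    })
}

fn new_style(res: Result<(), i32>) -> Result<(), i32> {
    res.inspect_err(|_| cleanup())
}
```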
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index e6b7d3a80bbc..274bdc1b0a82 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -40,6 +40,7 @@ pub mod ioctl;
pub mod kunit;
#[cfg(CONFIG_NET)]
pub mod net;
+pub mod page;
pub mod prelude;
pub mod print;
mod static_assert;
@@ -50,6 +51,7 @@ pub mod sync;
pub mod task;
pub mod time;
pub mod types;
+pub mod uaccess;
pub mod workqueue;
#[doc(hidden)]
diff --git a/rust/kernel/page.rs b/rust/kernel/page.rs
new file mode 100644
index 000000000000..208a006d587c
--- /dev/null
+++ b/rust/kernel/page.rs
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel page allocation and management.
+
+use crate::{
+ alloc::{AllocError, Flags},
+ bindings,
+ error::code::*,
+ error::Result,
+ uaccess::UserSliceReader,
+};
+use core::ptr::{self, NonNull};
+
+/// A bitwise shift for the page size.
+pub const PAGE_SHIFT: usize = bindings::PAGE_SHIFT as usize;
+
+/// The number of bytes in a page.
+pub const PAGE_SIZE: usize = bindings::PAGE_SIZE;
+
+/// A bitmask that gives the page containing a given address.
+pub const PAGE_MASK: usize = !(PAGE_SIZE - 1);
+
+/// A pointer to a page that owns the page allocation.
+///
+/// # Invariants
+///
+/// The pointer is valid, and has ownership over the page.
+pub struct Page {
+ page: NonNull<bindings::page>,
+}
+
+// SAFETY: Pages have no logic that relies on them staying on a given thread, so moving them across
+// threads is safe.
+unsafe impl Send for Page {}
+
+// SAFETY: Pages have no logic that relies on them not being accessed concurrently, so accessing
+// them concurrently is safe.
+unsafe impl Sync for Page {}
+
+impl Page {
+ /// Allocates a new page.
+ ///
+ /// # Examples
+ ///
+ /// Allocate memory for a page.
+ ///
+ /// ```
+ /// use kernel::page::Page;
+ ///
+ /// # fn dox() -> Result<(), kernel::alloc::AllocError> {
+ /// let page = Page::alloc_page(GFP_KERNEL)?;
+ /// # Ok(()) }
+ /// ```
+ ///
+ /// Allocate memory for a page and zero its contents.
+ ///
+ /// ```
+ /// use kernel::page::Page;
+ ///
+ /// # fn dox() -> Result<(), kernel::alloc::AllocError> {
+ /// let page = Page::alloc_page(GFP_KERNEL | __GFP_ZERO)?;
+ /// # Ok(()) }
+ /// ```
+ pub fn alloc_page(flags: Flags) -> Result<Self, AllocError> {
+ // SAFETY: Depending on the value of `gfp_flags`, this call may sleep. Other than that, it
+ // is always safe to call this method.
+ let page = unsafe { bindings::alloc_pages(flags.as_raw(), 0) };
+ let page = NonNull::new(page).ok_or(AllocError)?;
+ // INVARIANT: We just successfully allocated a page, so we now have ownership of the newly
+ // allocated page. We transfer that ownership to the new `Page` object.
+ Ok(Self { page })
+ }
+
+ /// Returns a raw pointer to the page.
+ pub fn as_ptr(&self) -> *mut bindings::page {
+ self.page.as_ptr()
+ }
+
+ /// Runs a piece of code with this page mapped to an address.
+ ///
+ /// The page is unmapped when this call returns.
+ ///
+ /// # Using the raw pointer
+ ///
+ /// It is up to the caller to use the provided raw pointer correctly. The pointer is valid for
+ /// `PAGE_SIZE` bytes and for the duration in which the closure is called. The pointer might
+ /// only be mapped on the current thread, and when that is the case, dereferencing it on other
+ /// threads is UB. Other than that, the usual rules for dereferencing a raw pointer apply: don't
+ /// cause data races, the memory may be uninitialized, and so on.
+ ///
+ /// If multiple threads map the same page at the same time, then they may reference it with
+ /// different addresses. However, even if the addresses are different, the underlying memory is
+ /// still the same for these purposes (e.g., it's still a data race if they both write to the
+ /// same underlying byte at the same time).
+ fn with_page_mapped<T>(&self, f: impl FnOnce(*mut u8) -> T) -> T {
+ // SAFETY: `page` is valid due to the type invariants on `Page`.
+ let mapped_addr = unsafe { bindings::kmap_local_page(self.as_ptr()) };
+
+ let res = f(mapped_addr.cast());
+
+ // This unmaps the page mapped above.
+ //
+ // SAFETY: Since this API takes the user code as a closure, it can only be used in a manner
+ // where the pages are unmapped in reverse order. This is as required by `kunmap_local`.
+ //
+ // In other words, if this call to `kunmap_local` happens when a different page should be
+ // unmapped first, then there must necessarily be a call to `kmap_local_page` other than the
+ // call just above in `with_page_mapped` that made that possible. In this case, it is the
+ // unsafe block that wraps that other call that is incorrect.
+ unsafe { bindings::kunmap_local(mapped_addr) };
+
+ res
+ }
+
+ /// Runs a piece of code with a raw pointer to a slice of this page, with bounds checking.
+ ///
+ /// If `f` is called, then it will be called with a pointer that points at `off` bytes into the
+ /// page, and the pointer will be valid for at least `len` bytes. The pointer is only valid on
+ /// this task, as this method uses a local mapping.
+ ///
+ /// If `off` and `len` refers to a region outside of this page, then this method returns
+ /// [`EINVAL`] and does not call `f`.
+ ///
+ /// # Using the raw pointer
+ ///
+ /// It is up to the caller to use the provided raw pointer correctly. The pointer is valid for
+ /// `len` bytes and for the duration in which the closure is called. The pointer might only be
+ /// mapped on the current thread, and when that is the case, dereferencing it on other threads
+ /// is UB. Other than that, the usual rules for dereferencing a raw pointer apply: don't cause
+ /// data races, the memory may be uninitialized, and so on.
+ ///
+ /// If multiple threads map the same page at the same time, then they may reference it with
+ /// different addresses. However, even if the addresses are different, the underlying memory is
+ /// still the same for these purposes (e.g., it's still a data race if they both write to the
+ /// same underlying byte at the same time).
+ fn with_pointer_into_page<T>(
+ &self,
+ off: usize,
+ len: usize,
+ f: impl FnOnce(*mut u8) -> Result<T>,
+ ) -> Result<T> {
+ let bounds_ok = off <= PAGE_SIZE && len <= PAGE_SIZE && (off + len) <= PAGE_SIZE;
+
+ if bounds_ok {
+ self.with_page_mapped(move |page_addr| {
+ // SAFETY: The `off` integer is at most `PAGE_SIZE`, so this pointer offset will
+ // result in a pointer that is in bounds or one off the end of the page.
+ f(unsafe { page_addr.add(off) })
+ })
+ } else {
+ Err(EINVAL)
+ }
+ }
+
+ /// Maps the page and reads from it into the given buffer.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// # Safety
+ ///
+ /// * Callers must ensure that `dst` is valid for writing `len` bytes.
+ /// * Callers must ensure that this call does not race with a write to the same page that
+ /// overlaps with this read.
+ pub unsafe fn read_raw(&self, dst: *mut u8, offset: usize, len: usize) -> Result {
+ self.with_pointer_into_page(offset, len, move |src| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then
+ // it has performed a bounds check and guarantees that `src` is
+ // valid for `len` bytes.
+ //
+ // The caller guarantees that there is no data race.
+ unsafe { ptr::copy_nonoverlapping(src, dst, len) };
+ Ok(())
+ })
+ }
+
+ /// Maps the page and writes into it from the given buffer.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// # Safety
+ ///
+ /// * Callers must ensure that `src` is valid for reading `len` bytes.
+ /// * Callers must ensure that this call does not race with a read or write to the same page
+ /// that overlaps with this write.
+ pub unsafe fn write_raw(&self, src: *const u8, offset: usize, len: usize) -> Result {
+ self.with_pointer_into_page(offset, len, move |dst| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then it has performed a
+ // bounds check and guarantees that `dst` is valid for `len` bytes.
+ //
+ // The caller guarantees that there is no data race.
+ unsafe { ptr::copy_nonoverlapping(src, dst, len) };
+ Ok(())
+ })
+ }
+
+ /// Maps the page and zeroes the given slice.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that this call does not race with a read or write to the same page that
+ /// overlaps with this write.
+ pub unsafe fn fill_zero_raw(&self, offset: usize, len: usize) -> Result {
+ self.with_pointer_into_page(offset, len, move |dst| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then it has performed a
+ // bounds check and guarantees that `dst` is valid for `len` bytes.
+ //
+ // The caller guarantees that there is no data race.
+ unsafe { ptr::write_bytes(dst, 0u8, len) };
+ Ok(())
+ })
+ }
+
+ /// Copies data from userspace into this page.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// Like the other `UserSliceReader` methods, data races are allowed on the userspace address.
+ /// However, they are not allowed on the page you are copying into.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that this call does not race with a read or write to the same page that
+ /// overlaps with this write.
+ pub unsafe fn copy_from_user_slice_raw(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ len: usize,
+ ) -> Result {
+ self.with_pointer_into_page(offset, len, move |dst| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then it has performed a
+ // bounds check and guarantees that `dst` is valid for `len` bytes. Furthermore, we have
+ // exclusive access to the slice since the caller guarantees that there are no races.
+ reader.read_raw(unsafe { core::slice::from_raw_parts_mut(dst.cast(), len) })
+ })
+ }
+}
+
+impl Drop for Page {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, we have ownership of the page and can free it.
+ unsafe { bindings::__free_pages(self.page.as_ptr(), 0) };
+ }
+}
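A hedged usage sketch of the new `Page` API (the function is illustrative, and it assumes the usual `AllocError` to `Error` conversion; the safety comment restates the requirement documented on `fill_zero_raw`):

```
// Illustrative sketch, not part of the patch: allocate a page and zero its
// first 16 bytes with the unsafe accessor documented above.
use kernel::alloc::flags::GFP_KERNEL;
use kernel::error::Result;
use kernel::page::Page;

fn zeroed_prefix_page() -> Result<Page> {
    let page = Page::alloc_page(GFP_KERNEL)?;
    // SAFETY: `page` was just allocated and is owned exclusively by this
    // function, so no read or write can race with this zeroing.
    unsafe { page.fill_zero_raw(0, 16)? };
    Ok(page)
}
```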
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index 2e7c9008621f..bd189d646adb 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -409,3 +409,67 @@ pub enum Either<L, R> {
/// Constructs an instance of [`Either`] containing a value of type `R`.
Right(R),
}
+
+/// Types for which any bit pattern is valid.
+///
+/// Not all types are valid for all values. For example, a `bool` must be either zero or one, so
+/// reading arbitrary bytes into something that contains a `bool` is not okay.
+///
+/// It's okay for the type to have padding, as initializing those bytes has no effect.
+///
+/// # Safety
+///
+/// All bit-patterns must be valid for this type. This type must not have interior mutability.
+pub unsafe trait FromBytes {}
+
+// SAFETY: All bit patterns are acceptable values of the types below.
+unsafe impl FromBytes for u8 {}
+unsafe impl FromBytes for u16 {}
+unsafe impl FromBytes for u32 {}
+unsafe impl FromBytes for u64 {}
+unsafe impl FromBytes for usize {}
+unsafe impl FromBytes for i8 {}
+unsafe impl FromBytes for i16 {}
+unsafe impl FromBytes for i32 {}
+unsafe impl FromBytes for i64 {}
+unsafe impl FromBytes for isize {}
+// SAFETY: If all bit patterns are acceptable for individual values in an array, then all bit
+// patterns are also acceptable for arrays of that type.
+unsafe impl<T: FromBytes> FromBytes for [T] {}
+unsafe impl<T: FromBytes, const N: usize> FromBytes for [T; N] {}
+
+/// Types that can be viewed as an immutable slice of initialized bytes.
+///
+/// If a struct implements this trait, then it is okay to copy it byte-for-byte to userspace. This
+/// means that it should not have any padding, as padding bytes are uninitialized. Reading
+/// uninitialized memory is not just undefined behavior, it may even lead to leaking sensitive
+/// information on the stack to userspace.
+///
+/// The struct should also not hold kernel pointers, as kernel pointer addresses are also considered
+/// sensitive. However, leaking kernel pointers is not considered undefined behavior by Rust, so
+/// this is a correctness requirement, but not a safety requirement.
+///
+/// # Safety
+///
+/// Values of this type may not contain any uninitialized bytes. This type must not have interior
+/// mutability.
+pub unsafe trait AsBytes {}
+
+// SAFETY: Instances of the following types have no uninitialized portions.
+unsafe impl AsBytes for u8 {}
+unsafe impl AsBytes for u16 {}
+unsafe impl AsBytes for u32 {}
+unsafe impl AsBytes for u64 {}
+unsafe impl AsBytes for usize {}
+unsafe impl AsBytes for i8 {}
+unsafe impl AsBytes for i16 {}
+unsafe impl AsBytes for i32 {}
+unsafe impl AsBytes for i64 {}
+unsafe impl AsBytes for isize {}
+unsafe impl AsBytes for bool {}
+unsafe impl AsBytes for char {}
+unsafe impl AsBytes for str {}
+// SAFETY: If individual values in an array have no uninitialized portions, then the array itself
+// does not have any uninitialized portions either.
+unsafe impl<T: AsBytes> AsBytes for [T] {}
+unsafe impl<T: AsBytes, const N: usize> AsBytes for [T; N] {}
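A sketch of how a driver might opt a plain-old-data type into these traits so the typed userspace accessors in the next file can move it across the boundary (`MyConfig` is a made-up example, not part of the patch):

```
use kernel::types::{AsBytes, FromBytes};

#[repr(C)]
struct MyConfig {
    id: u32,
    len: u32,
}

// SAFETY: both fields accept any bit pattern and the `#[repr(C)]` layout of
// two `u32`s has no padding, so arbitrary bytes form a valid `MyConfig`.
unsafe impl FromBytes for MyConfig {}

// SAFETY: there are no padding or uninitialized bytes and no interior
// mutability, so the byte representation may be copied to userspace.
unsafe impl AsBytes for MyConfig {}
```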
diff --git a/rust/kernel/uaccess.rs b/rust/kernel/uaccess.rs
new file mode 100644
index 000000000000..e9347cff99ab
--- /dev/null
+++ b/rust/kernel/uaccess.rs
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Slices to user space memory regions.
+//!
+//! C header: [`include/linux/uaccess.h`](srctree/include/linux/uaccess.h)
+
+use crate::{
+ alloc::Flags,
+ bindings,
+ error::Result,
+ prelude::*,
+ types::{AsBytes, FromBytes},
+};
+use alloc::vec::Vec;
+use core::ffi::{c_ulong, c_void};
+use core::mem::{size_of, MaybeUninit};
+
+/// The type used for userspace addresses.
+pub type UserPtr = usize;
+
+/// A pointer to an area in userspace memory, which can be either read-only or read-write.
+///
+/// All methods on this struct are safe: attempting to read or write on bad addresses (either out of
+/// the bound of the slice or unmapped addresses) will return [`EFAULT`]. Concurrent access,
+/// *including data races to/from userspace memory*, is permitted, because fundamentally another
+/// userspace thread/process could always be modifying memory at the same time (in the same way that
+/// userspace Rust's [`std::io`] permits data races with the contents of files on disk). In the
+/// presence of a race, the exact byte values read/written are unspecified but the operation is
+/// well-defined. Kernelspace code should validate its copy of data after completing a read, and not
+/// expect that multiple reads of the same address will return the same value.
+///
+/// These APIs are designed to make it difficult to accidentally write TOCTOU (time-of-check to
+/// time-of-use) bugs. Every time a memory location is read, the reader's position is advanced by
+/// the read length and the next read will start from there. This helps prevent accidentally reading
+/// the same location twice and causing a TOCTOU bug.
+///
+/// Creating a [`UserSliceReader`] and/or [`UserSliceWriter`] consumes the `UserSlice`, helping
+/// ensure that there aren't multiple readers or writers to the same location.
+///
+/// If double-fetching a memory location is necessary for some reason, then that is done by creating
+/// multiple readers to the same memory location, e.g. using [`clone_reader`].
+///
+/// # Examples
+///
+/// Takes a region of userspace memory from the current process, and modifies it by adding one to
+/// every byte in the region.
+///
+/// ```no_run
+/// use alloc::vec::Vec;
+/// use core::ffi::c_void;
+/// use kernel::error::Result;
+/// use kernel::uaccess::{UserPtr, UserSlice};
+///
+/// fn bytes_add_one(uptr: UserPtr, len: usize) -> Result<()> {
+/// let (read, mut write) = UserSlice::new(uptr, len).reader_writer();
+///
+/// let mut buf = Vec::new();
+/// read.read_all(&mut buf, GFP_KERNEL)?;
+///
+/// for b in &mut buf {
+/// *b = b.wrapping_add(1);
+/// }
+///
+/// write.write_slice(&buf)?;
+/// Ok(())
+/// }
+/// ```
+///
+/// Example illustrating a TOCTOU (time-of-check to time-of-use) bug.
+///
+/// ```no_run
+/// use alloc::vec::Vec;
+/// use core::ffi::c_void;
+/// use kernel::error::{code::EINVAL, Result};
+/// use kernel::uaccess::{UserPtr, UserSlice};
+///
+/// /// Returns whether the data in this region is valid.
+/// fn is_valid(uptr: UserPtr, len: usize) -> Result<bool> {
+/// let read = UserSlice::new(uptr, len).reader();
+///
+/// let mut buf = Vec::new();
+/// read.read_all(&mut buf, GFP_KERNEL)?;
+///
+/// todo!()
+/// }
+///
+/// /// Returns the bytes behind this user pointer if they are valid.
+/// fn get_bytes_if_valid(uptr: UserPtr, len: usize) -> Result<Vec<u8>> {
+/// if !is_valid(uptr, len)? {
+/// return Err(EINVAL);
+/// }
+///
+/// let read = UserSlice::new(uptr, len).reader();
+///
+/// let mut buf = Vec::new();
+/// read.read_all(&mut buf, GFP_KERNEL)?;
+///
+/// // THIS IS A BUG! The bytes could have changed since we checked them.
+/// //
+/// // To avoid this kind of bug, don't call `UserSlice::new` multiple
+/// // times with the same address.
+/// Ok(buf)
+/// }
+/// ```
+///
+/// [`std::io`]: https://doc.rust-lang.org/std/io/index.html
+/// [`clone_reader`]: UserSliceReader::clone_reader
+pub struct UserSlice {
+ ptr: UserPtr,
+ length: usize,
+}
+
+impl UserSlice {
+ /// Constructs a user slice from a raw pointer and a length in bytes.
+ ///
+ /// Constructing a [`UserSlice`] performs no checks on the provided address and length, it can
+ /// safely be constructed inside a kernel thread with no current userspace process. Reads and
+ /// writes wrap the kernel APIs `copy_from_user` and `copy_to_user`, which check the memory map
+ /// of the current process and enforce that the address range is within the user range (no
+ /// additional calls to `access_ok` are needed). Validity of the pointer is checked when you
+ /// attempt to read or write, not in the call to `UserSlice::new`.
+ ///
+ /// Callers must be careful to avoid time-of-check-time-of-use (TOCTOU) issues. The simplest way
+ /// is to create a single instance of [`UserSlice`] per user memory block as it reads each byte
+ /// at most once.
+ pub fn new(ptr: UserPtr, length: usize) -> Self {
+ UserSlice { ptr, length }
+ }
+
+ /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address.
+ pub fn read_all(self, buf: &mut Vec<u8>, flags: Flags) -> Result {
+ self.reader().read_all(buf, flags)
+ }
+
+ /// Constructs a [`UserSliceReader`].
+ pub fn reader(self) -> UserSliceReader {
+ UserSliceReader {
+ ptr: self.ptr,
+ length: self.length,
+ }
+ }
+
+ /// Constructs a [`UserSliceWriter`].
+ pub fn writer(self) -> UserSliceWriter {
+ UserSliceWriter {
+ ptr: self.ptr,
+ length: self.length,
+ }
+ }
+
+ /// Constructs both a [`UserSliceReader`] and a [`UserSliceWriter`].
+ ///
+ /// Usually when this is used, you will first read the data, and then overwrite it afterwards.
+ pub fn reader_writer(self) -> (UserSliceReader, UserSliceWriter) {
+ (
+ UserSliceReader {
+ ptr: self.ptr,
+ length: self.length,
+ },
+ UserSliceWriter {
+ ptr: self.ptr,
+ length: self.length,
+ },
+ )
+ }
+}
+
+/// A reader for [`UserSlice`].
+///
+/// Used to incrementally read from the user slice.
+pub struct UserSliceReader {
+ ptr: UserPtr,
+ length: usize,
+}
+
+impl UserSliceReader {
+ /// Skip the provided number of bytes.
+ ///
+ /// Returns an error if skipping more than the length of the buffer.
+ pub fn skip(&mut self, num_skip: usize) -> Result {
+ // Update `self.length` first since that's the fallible part of this operation.
+ self.length = self.length.checked_sub(num_skip).ok_or(EFAULT)?;
+ self.ptr = self.ptr.wrapping_add(num_skip);
+ Ok(())
+ }
+
+ /// Create a reader that can access the same range of data.
+ ///
+ /// Reading from the clone does not advance the current reader.
+ ///
+ /// The caller should take care to not introduce TOCTOU issues, as described in the
+ /// documentation for [`UserSlice`].
+ pub fn clone_reader(&self) -> UserSliceReader {
+ UserSliceReader {
+ ptr: self.ptr,
+ length: self.length,
+ }
+ }
+
+ /// Returns the number of bytes left to be read from this reader.
+ ///
+ /// Note that even reading less than this number of bytes may fail.
+ pub fn len(&self) -> usize {
+ self.length
+ }
+
+ /// Returns `true` if no data is available in the io buffer.
+ pub fn is_empty(&self) -> bool {
+ self.length == 0
+ }
+
+ /// Reads raw data from the user slice into a kernel buffer.
+ ///
+ /// For a version that uses `&mut [u8]`, please see [`UserSliceReader::read_slice`].
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
+ /// bounds of this [`UserSliceReader`]. This call may modify `out` even if it returns an error.
+ ///
+ /// # Guarantees
+ ///
+ /// After a successful call to this method, all bytes in `out` are initialized.
+ pub fn read_raw(&mut self, out: &mut [MaybeUninit<u8>]) -> Result {
+ let len = out.len();
+ let out_ptr = out.as_mut_ptr().cast::<c_void>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ let Ok(len_ulong) = c_ulong::try_from(len) else {
+ return Err(EFAULT);
+ };
+ // SAFETY: `out_ptr` points into a mutable slice of length `len_ulong`, so we may write
+ // that many bytes to it.
+ let res =
+ unsafe { bindings::copy_from_user(out_ptr, self.ptr as *const c_void, len_ulong) };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ Ok(())
+ }
+
+ /// Reads raw data from the user slice into a kernel buffer.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
+ /// bounds of this [`UserSliceReader`]. This call may modify `out` even if it returns an error.
+ pub fn read_slice(&mut self, out: &mut [u8]) -> Result {
+ // SAFETY: The types are compatible and `read_raw` doesn't write uninitialized bytes to
+ // `out`.
+ let out = unsafe { &mut *(out as *mut [u8] as *mut [MaybeUninit<u8>]) };
+ self.read_raw(out)
+ }
+
+ /// Reads a value of the specified type.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
+ /// bounds of this [`UserSliceReader`].
+ pub fn read<T: FromBytes>(&mut self) -> Result<T> {
+ let len = size_of::<T>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ let Ok(len_ulong) = c_ulong::try_from(len) else {
+ return Err(EFAULT);
+ };
+ let mut out: MaybeUninit<T> = MaybeUninit::uninit();
+ // SAFETY: The local variable `out` is valid for writing `size_of::<T>()` bytes.
+ //
+ // By using the _copy_from_user variant, we skip the check_object_size check that verifies
+ // the kernel pointer. This mirrors the logic on the C side that skips the check when the
+ // length is a compile-time constant.
+ let res = unsafe {
+ bindings::_copy_from_user(
+ out.as_mut_ptr().cast::<c_void>(),
+ self.ptr as *const c_void,
+ len_ulong,
+ )
+ };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ // SAFETY: The read above has initialized all bytes in `out`, and since `T` implements
+ // `FromBytes`, any bit-pattern is a valid value for this type.
+ Ok(unsafe { out.assume_init() })
+ }
+
+ /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address.
+ pub fn read_all(mut self, buf: &mut Vec<u8>, flags: Flags) -> Result {
+ let len = self.length;
+ VecExt::<u8>::reserve(buf, len, flags)?;
+
+ // The call to `reserve` was successful, so the spare capacity is at least `len` bytes
+ // long.
+ self.read_raw(&mut buf.spare_capacity_mut()[..len])?;
+
+ // SAFETY: Since the call to `read_raw` was successful, the next `len` bytes of the
+ // vector have been initialized.
+ unsafe { buf.set_len(buf.len() + len) };
+ Ok(())
+ }
+}
+
+/// A writer for [`UserSlice`].
+///
+/// Used to incrementally write into the user slice.
+pub struct UserSliceWriter {
+ ptr: UserPtr,
+ length: usize,
+}
+
+impl UserSliceWriter {
+ /// Returns the amount of space remaining in this buffer.
+ ///
+ /// Note that even writing less than this number of bytes may fail.
+ pub fn len(&self) -> usize {
+ self.length
+ }
+
+ /// Returns `true` if no more data can be written to this buffer.
+ pub fn is_empty(&self) -> bool {
+ self.length == 0
+ }
+
+ /// Writes raw data to this user pointer from a kernel buffer.
+ ///
+ /// Fails with [`EFAULT`] if the write happens on a bad address, or if the write goes out of
+ /// bounds of this [`UserSliceWriter`]. This call may modify the associated userspace slice even
+ /// if it returns an error.
+ pub fn write_slice(&mut self, data: &[u8]) -> Result {
+ let len = data.len();
+ let data_ptr = data.as_ptr().cast::<c_void>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ let Ok(len_ulong) = c_ulong::try_from(len) else {
+ return Err(EFAULT);
+ };
+ // SAFETY: `data_ptr` points into an immutable slice of length `len_ulong`, so we may read
+ // that many bytes from it.
+ let res = unsafe { bindings::copy_to_user(self.ptr as *mut c_void, data_ptr, len_ulong) };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ Ok(())
+ }
+
+ /// Writes the provided Rust value to this userspace pointer.
+ ///
+ /// Fails with [`EFAULT`] if the write happens on a bad address, or if the write goes out of
+ /// bounds of this [`UserSliceWriter`]. This call may modify the associated userspace slice even
+ /// if it returns an error.
+ pub fn write<T: AsBytes>(&mut self, value: &T) -> Result {
+ let len = size_of::<T>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ let Ok(len_ulong) = c_ulong::try_from(len) else {
+ return Err(EFAULT);
+ };
+ // SAFETY: The reference points to a value of type `T`, so it is valid for reading
+ // `size_of::<T>()` bytes.
+ //
+ // By using the _copy_to_user variant, we skip the check_object_size check that verifies the
+ // kernel pointer. This mirrors the logic on the C side that skips the check when the length
+ // is a compile-time constant.
+ let res = unsafe {
+ bindings::_copy_to_user(
+ self.ptr as *mut c_void,
+ (value as *const T).cast::<c_void>(),
+ len_ulong,
+ )
+ };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ Ok(())
+ }
+}
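A hedged sketch of the typed accessors in use (function name and flow are illustrative): the value is fetched exactly once through the reader/writer pair, matching the TOCTOU guidance in the module docs above.

```
use core::mem::size_of;
use kernel::error::Result;
use kernel::uaccess::{UserPtr, UserSlice};

// Read a `u64` from userspace and write it back incremented. The reader and
// writer cover the same user range, so the data is read once and overwritten.
fn increment_u64(uptr: UserPtr) -> Result {
    let (mut reader, mut writer) = UserSlice::new(uptr, size_of::<u64>()).reader_writer();
    let val: u64 = reader.read()?;
    writer.write(&val.wrapping_add(1))
}
```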
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index 1cec63a2aea8..553a5cba2adc 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -482,24 +482,26 @@ pub unsafe trait HasWork<T, const ID: u64 = 0> {
/// use kernel::sync::Arc;
/// use kernel::workqueue::{self, impl_has_work, Work};
///
-/// struct MyStruct {
-/// work_field: Work<MyStruct, 17>,
+/// struct MyStruct<'a, T, const N: usize> {
+/// work_field: Work<MyStruct<'a, T, N>, 17>,
+/// f: fn(&'a [T; N]),
/// }
///
/// impl_has_work! {
-/// impl HasWork<MyStruct, 17> for MyStruct { self.work_field }
+/// impl{'a, T, const N: usize} HasWork<MyStruct<'a, T, N>, 17>
+/// for MyStruct<'a, T, N> { self.work_field }
/// }
/// ```
#[macro_export]
macro_rules! impl_has_work {
- ($(impl$(<$($implarg:ident),*>)?
+ ($(impl$({$($generics:tt)*})?
HasWork<$work_type:ty $(, $id:tt)?>
- for $self:ident $(<$($selfarg:ident),*>)?
+ for $self:ty
{ self.$field:ident }
)*) => {$(
// SAFETY: The implementation of `raw_get_work` only compiles if the field has the right
// type.
- unsafe impl$(<$($implarg),*>)? $crate::workqueue::HasWork<$work_type $(, $id)?> for $self $(<$($selfarg),*>)? {
+ unsafe impl$(<$($generics)+>)? $crate::workqueue::HasWork<$work_type $(, $id)?> for $self {
const OFFSET: usize = ::core::mem::offset_of!(Self, $field) as usize;
#[inline]
@@ -515,7 +517,7 @@ macro_rules! impl_has_work {
pub use impl_has_work;
impl_has_work! {
- impl<T> HasWork<Self> for ClosureWork<T> { self.work }
+ impl{T} HasWork<Self> for ClosureWork<T> { self.work }
}
unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Arc<T>