author     Linus Torvalds <torvalds@linux-foundation.org>  2025-09-30 19:12:49 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-09-30 19:12:49 -0700
commit     f4e0ff7e45c30f4665cfbbe2f0538e9c5789bebc (patch)
tree       6703541ef1fec9d20dafc751d2e971879c429884 /rust/kernel/time.rs
parent     ae28ed4578e6d5a481e39c5a9827f27048661fdd (diff)
parent     f3f6b3664302e16ef1c6b91034a72df5564d6b8a (diff)
Merge tag 'rust-6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/ojeda/linux
Pull rust updates from Miguel Ojeda:
 "Toolchain and infrastructure:

   - Derive 'Zeroable' for all structs and unions generated by 'bindgen'
     where possible and corresponding cleanups. To do so, add the
     'pin-init' crate as a dependency to 'bindings' and 'uapi'.

     It also includes its first use in the 'cpufreq' module, with more
     to come in the next cycle.

   - Add warning to the 'rustdoc' target to detect broken 'srctree/'
     links and fix existing cases.

   - Remove support for unused (since v6.16) host '#[test]'s,
     simplifying the 'rusttest' target. Tests should generally run
     within KUnit.

  'kernel' crate:

   - Add 'ptr' module with a new 'Alignment' type, which is always a
     power of two and is used to validate that a given value is a valid
     alignment and to perform masking and alignment operations:

         // Checked at build time.
         assert_eq!(Alignment::new::<16>().as_usize(), 16);

         // Checked at runtime.
         assert_eq!(Alignment::new_checked(15), None);

         assert_eq!(Alignment::of::<u8>().log2(), 0);
         assert_eq!(0x25u8.align_down(Alignment::new::<0x10>()), 0x20);
         assert_eq!(0x5u8.align_up(Alignment::new::<0x10>()), Some(0x10));
         assert_eq!(u8::MAX.align_up(Alignment::new::<0x10>()), None);

     It also includes its first use in Nova.

   - Add 'core::mem::{align,size}_of{,_val}' to the prelude, matching
     Rust 1.80.0.

   - Keep going with the steps on our migration to the standard library
     'core::ffi::CStr' type (use 'kernel::{fmt, prelude::fmt!}' and use
     upstream method names).

   - 'error' module: improve 'Error::from_errno' and 'to_result'
     documentation, including examples/tests.

   - 'sync' module: extend 'aref' submodule documentation now that it
     exists, and more updates to complete the ongoing move of 'ARef'
     and 'AlwaysRefCounted' to 'sync::aref'.

   - 'list' module: add an example/test for 'ListLinksSelfPtr' usage.

   - 'alloc' module:

       - Implement 'Box::pin_slice()', which constructs a pinned slice
         of elements.
       - Provide information about the minimum alignment guarantees of
         'Kmalloc', 'Vmalloc' and 'KVmalloc'.
       - Take minimum alignment guarantees of allocators for
         'ForeignOwnable' into account.
       - Remove the 'allocator_test' (including 'Cmalloc').
       - Add doctest for 'Vec::as_slice()'.
       - Constify various methods.

   - 'time' module:

       - Add methods on 'HrTimer' that can only be called with
         exclusive access to an unarmed timer, or from timer callback
         context.
       - Add arithmetic operations to 'Instant' and 'Delta'.
       - Add a few convenience and access methods to 'HrTimer' and
         'Instant'.

  'macros' crate:

   - Reduce collections in 'quote!' macro.
  And a few other cleanups and improvements"

* tag 'rust-6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/ojeda/linux: (58 commits)
  gpu: nova-core: use Alignment for alignment-related operations
  rust: add `Alignment` type
  rust: macros: reduce collections in `quote!` macro
  rust: acpi: use `core::ffi::CStr` method names
  rust: of: use `core::ffi::CStr` method names
  rust: net: use `core::ffi::CStr` method names
  rust: miscdevice: use `core::ffi::CStr` method names
  rust: kunit: use `core::ffi::CStr` method names
  rust: firmware: use `core::ffi::CStr` method names
  rust: drm: use `core::ffi::CStr` method names
  rust: cpufreq: use `core::ffi::CStr` method names
  rust: configfs: use `core::ffi::CStr` method names
  rust: auxiliary: use `core::ffi::CStr` method names
  drm/panic: use `core::ffi::CStr` method names
  rust: device: use `kernel::{fmt,prelude::fmt!}`
  rust: sync: use `kernel::{fmt,prelude::fmt!}`
  rust: seq_file: use `kernel::{fmt,prelude::fmt!}`
  rust: kunit: use `kernel::{fmt,prelude::fmt!}`
  rust: file: use `kernel::{fmt,prelude::fmt!}`
  rust: device: use `kernel::{fmt,prelude::fmt!}`
  ...
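As a rough illustration of the 'time' module arithmetic this merge brings in (and which the diff below adds), a driver-side sketch might look as follows. It assumes the in-kernel 'kernel::time' API ('Monotonic', 'Instant::now()', 'Delta::from_millis()'), the function name is made up, and it only builds inside the kernel tree:

    use kernel::time::{Delta, Instant, Monotonic};

    fn deadline_example() {
        // `Instant::now()` with the `Monotonic` clock source, as already
        // provided by the `time` module.
        let start: Instant<Monotonic> = Instant::now();

        // New in this cycle: `Instant + Delta` and `Instant - Delta`.
        let timeout = Delta::from_millis(20);
        let deadline = start + timeout;

        // `Delta` now supports `+`, `-`, `*`, `/` and the assigning forms.
        let mut budget = Delta::from_millis(100);
        budget -= timeout * 2;

        // `Delta / Delta` yields how many whole `timeout`s fit into `budget`.
        let slots: i64 = budget / timeout;

        // Elapsed time since `start`, as before.
        let elapsed: Delta = Instant::<Monotonic>::now() - start;

        let _ = (deadline, slots, elapsed);
    }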
Diffstat (limited to 'rust/kernel/time.rs')
-rw-r--r--  rust/kernel/time.rs  163
1 file changed, 162 insertions(+), 1 deletion(-)
diff --git a/rust/kernel/time.rs b/rust/kernel/time.rs
index 64c8dcf548d6..6ea98dfcd027 100644
--- a/rust/kernel/time.rs
+++ b/rust/kernel/time.rs
@@ -25,6 +25,7 @@
//! C header: [`include/linux/ktime.h`](srctree/include/linux/ktime.h).
use core::marker::PhantomData;
+use core::ops;
pub mod delay;
pub mod hrtimer;
@@ -200,9 +201,31 @@ impl<C: ClockSource> Instant<C> {
pub(crate) fn as_nanos(&self) -> i64 {
self.inner
}
+
+ /// Create an [`Instant`] from a `ktime_t` without checking if it is non-negative.
+ ///
+ /// # Panics
+ ///
+ /// On debug builds, this function will panic if `ktime` is not in the range from 0 to
+ /// `KTIME_MAX`.
+ ///
+ /// # Safety
+ ///
+ /// The caller promises that `ktime` is in the range from 0 to `KTIME_MAX`.
+ #[inline]
+ pub(crate) unsafe fn from_ktime(ktime: bindings::ktime_t) -> Self {
+ debug_assert!(ktime >= 0);
+
+ // INVARIANT: Our safety contract ensures that `ktime` is in the range from 0 to
+ // `KTIME_MAX`.
+ Self {
+ inner: ktime,
+ _c: PhantomData,
+ }
+ }
}
-impl<C: ClockSource> core::ops::Sub for Instant<C> {
+impl<C: ClockSource> ops::Sub for Instant<C> {
type Output = Delta;
// By the type invariant, it never overflows.
@@ -214,6 +237,46 @@ impl<C: ClockSource> core::ops::Sub for Instant<C> {
}
}
+impl<T: ClockSource> ops::Add<Delta> for Instant<T> {
+ type Output = Self;
+
+ #[inline]
+ fn add(self, rhs: Delta) -> Self::Output {
+ // INVARIANT: With arithmetic over/underflow checks enabled, this will panic if we overflow
+ // (e.g. go above `KTIME_MAX`)
+ let res = self.inner + rhs.nanos;
+
+ // INVARIANT: With overflow checks enabled, we verify here that the value is >= 0
+ #[cfg(CONFIG_RUST_OVERFLOW_CHECKS)]
+ assert!(res >= 0);
+
+ Self {
+ inner: res,
+ _c: PhantomData,
+ }
+ }
+}
+
+impl<T: ClockSource> ops::Sub<Delta> for Instant<T> {
+ type Output = Self;
+
+ #[inline]
+ fn sub(self, rhs: Delta) -> Self::Output {
+ // INVARIANT: With arithmetic over/underflow checks enabled, this will panic if we overflow
+ // (e.g. go above `KTIME_MAX`)
+ let res = self.inner - rhs.nanos;
+
+ // INVARIANT: With overflow checks enabled, we verify here that the value is >= 0
+ #[cfg(CONFIG_RUST_OVERFLOW_CHECKS)]
+ assert!(res >= 0);
+
+ Self {
+ inner: res,
+ _c: PhantomData,
+ }
+ }
+}
+
/// A span of time.
///
/// This struct represents a span of time, with its value stored as nanoseconds.
@@ -224,6 +287,78 @@ pub struct Delta {
nanos: i64,
}
+impl ops::Add for Delta {
+ type Output = Self;
+
+ #[inline]
+ fn add(self, rhs: Self) -> Self {
+ Self {
+ nanos: self.nanos + rhs.nanos,
+ }
+ }
+}
+
+impl ops::AddAssign for Delta {
+ #[inline]
+ fn add_assign(&mut self, rhs: Self) {
+ self.nanos += rhs.nanos;
+ }
+}
+
+impl ops::Sub for Delta {
+ type Output = Self;
+
+ #[inline]
+ fn sub(self, rhs: Self) -> Self::Output {
+ Self {
+ nanos: self.nanos - rhs.nanos,
+ }
+ }
+}
+
+impl ops::SubAssign for Delta {
+ #[inline]
+ fn sub_assign(&mut self, rhs: Self) {
+ self.nanos -= rhs.nanos;
+ }
+}
+
+impl ops::Mul<i64> for Delta {
+ type Output = Self;
+
+ #[inline]
+ fn mul(self, rhs: i64) -> Self::Output {
+ Self {
+ nanos: self.nanos * rhs,
+ }
+ }
+}
+
+impl ops::MulAssign<i64> for Delta {
+ #[inline]
+ fn mul_assign(&mut self, rhs: i64) {
+ self.nanos *= rhs;
+ }
+}
+
+impl ops::Div for Delta {
+ type Output = i64;
+
+ #[inline]
+ fn div(self, rhs: Self) -> Self::Output {
+ #[cfg(CONFIG_64BIT)]
+ {
+ self.nanos / rhs.nanos
+ }
+
+ #[cfg(not(CONFIG_64BIT))]
+ {
+ // SAFETY: This function is always safe to call regardless of the input values
+ unsafe { bindings::div64_s64(self.nanos, rhs.nanos) }
+ }
+ }
+}
+
impl Delta {
/// A span of time equal to zero.
pub const ZERO: Self = Self { nanos: 0 };
@@ -312,4 +447,30 @@ impl Delta {
bindings::ktime_to_ms(self.as_nanos())
}
}
+
+ /// Return `self % dividend` where `dividend` is in nanoseconds.
+ ///
+ /// The kernel doesn't have any emulation for `s64 % s64` on 32 bit platforms, so this is
+ /// limited to 32 bit dividends.
+ #[inline]
+ pub fn rem_nanos(self, dividend: i32) -> Self {
+ #[cfg(CONFIG_64BIT)]
+ {
+ Self {
+ nanos: self.as_nanos() % i64::from(dividend),
+ }
+ }
+
+ #[cfg(not(CONFIG_64BIT))]
+ {
+ let mut rem = 0;
+
+ // SAFETY: `rem` is in the stack, so we can always provide a valid pointer to it.
+ unsafe { bindings::div_s64_rem(self.as_nanos(), dividend, &mut rem) };
+
+ Self {
+ nanos: i64::from(rem),
+ }
+ }
+ }
}
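
For the division helpers at the end of the hunk, here is a hedged sketch of how the new 'Delta / Delta' operator and 'rem_nanos()' might be combined; the 10 ms period and the helper name are made up for illustration:

    use kernel::time::Delta;

    // Hypothetical helper: splits `total` into whole 10 ms periods plus a
    // remainder, exercising the new `Div` impl and `rem_nanos()`.
    fn split_into_periods(total: Delta) -> (i64, Delta) {
        let period = Delta::from_millis(10);

        // `Delta / Delta` falls back to `div64_s64()` on 32-bit targets, so
        // this is fine to use from portable code.
        let full_periods = total / period;

        // `rem_nanos()` only takes an `i32` dividend because the kernel has
        // no `s64 % s64` emulation on 32-bit platforms; 10 ms fits easily.
        let leftover = total.rem_nanos(10_000_000);

        (full_periods, leftover)
    }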