author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-05-27 10:48:36 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-05-27 10:48:36 -0700 |
commit | 97851c601636a0e40f8237b83a6b70fc5e231e0c (patch) | |
tree | a4702d401e65fe10b9edce07172c89f22299f612 /lib | |
parent | dd3922cf9d4d1421e5883614d1a6add912131c00 (diff) | |
parent | ba575cea29fd82a0e6836fefcd51db36f1ff8a92 (diff) | |
Merge tag 'ratelimit.2025.05.25a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu
Pull rate-limit updates from Paul McKenney:
"lib/ratelimit: Reduce false-positive and silent misses:
- Reduce open-coded use of ratelimit_state structure fields.
- Convert the ->missed field to atomic_t.
- Count misses that are due to lock contention.
- Eliminate jiffies=0 special case.
- Reduce ___ratelimit() false-positive rate limiting (Petr Mladek).
- Allow zero ->burst to hard-disable rate limiting.
- Optimize away atomic operations when a miss is guaranteed.
- Warn if ->interval or ->burst are negative (Petr Mladek).
- Simplify the resulting code.
A smoke test and stress test have been created, but they are not yet
ready for mainline. With luck, we will offer them for the v6.17 merge
window"
* tag 'ratelimit.2025.05.25a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
ratelimit: Drop redundant accesses to burst
ratelimit: Use nolock_ret restructuring to collapse common case code
ratelimit: Use nolock_ret label to collapse lock-failure code
ratelimit: Use nolock_ret label to save a couple of lines of code
ratelimit: Simplify common-case exit path
ratelimit: Warn if ->interval or ->burst are negative
ratelimit: Avoid atomic decrement under lock if already rate-limited
ratelimit: Avoid atomic decrement if already rate-limited
ratelimit: Don't flush misses counter if RATELIMIT_MSG_ON_RELEASE
ratelimit: Force re-initialization when rate-limiting re-enabled
ratelimit: Allow zero ->burst to disable ratelimiting
ratelimit: Reduce ___ratelimit() false-positive rate limiting
ratelimit: Avoid jiffies=0 special case
ratelimit: Count misses due to lock contention
ratelimit: Convert the ->missed field to atomic_t
drm/amd/pm: Avoid open-coded use of ratelimit_state structure's internals
drm/i915: Avoid open-coded use of ratelimit_state structure's ->missed field
random: Avoid open-coded use of ratelimit_state structure's ->missed field
ratelimit: Create functions to handle ratelimit_state internals
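For readers unfamiliar with the interface being reworked above, here is a minimal sketch of the usual calling pattern; the `foo_*` names and the 5-second/10-message budget are hypothetical, not taken from this series.

```c
/* Hypothetical caller; names and the 5 s / 10 message budget are illustrative. */
#include <linux/ratelimit.h>
#include <linux/printk.h>

/* Allow at most 10 messages every 5 seconds from this call site. */
static DEFINE_RATELIMIT_STATE(foo_rs, 5 * HZ, 10);

static void foo_report_error(int err)
{
	/* __ratelimit() returns nonzero while the current burst allows output. */
	if (__ratelimit(&foo_rs))
		pr_warn("foo: error %d\n", err);
	/* Otherwise the miss is counted and summarized once per interval. */
}
```

With the changes in this series, initializing such a state with a zero ->burst (for example via ratelimit_state_init(&foo_rs, 5 * HZ, 0)) hard-disables the output entirely, while a zero ->interval still means "never limit".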
Diffstat (limited to 'lib')
-rw-r--r-- | lib/ratelimit.c | 75 |
1 file changed, 52 insertions, 23 deletions
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index ce945c17980b9..859c251b23ce2 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -33,44 +33,73 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
         int interval = READ_ONCE(rs->interval);
         int burst = READ_ONCE(rs->burst);
         unsigned long flags;
-        int ret;
+        int ret = 0;
 
-        if (!interval)
-                return 1;
+        /*
+         * Zero interval says never limit, otherwise, non-positive burst
+         * says always limit.
+         */
+        if (interval <= 0 || burst <= 0) {
+                WARN_ONCE(interval < 0 || burst < 0, "Negative interval (%d) or burst (%d): Uninitialized ratelimit_state structure?\n", interval, burst);
+                ret = interval == 0 || burst > 0;
+                if (!(READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED) || (!interval && !burst) ||
+                    !raw_spin_trylock_irqsave(&rs->lock, flags))
+                        goto nolock_ret;
+
+                /* Force re-initialization once re-enabled. */
+                rs->flags &= ~RATELIMIT_INITIALIZED;
+                goto unlock_ret;
+        }
 
         /*
-         * If we contend on this state's lock then almost
-         * by definition we are too busy to print a message,
-         * in addition to the one that will be printed by
-         * the entity that is holding the lock already:
+         * If we contend on this state's lock then just check if
+         * the current burst is used or not. It might cause
+         * false positive when we are past the interval and
+         * the current lock owner is just about to reset it.
          */
-        if (!raw_spin_trylock_irqsave(&rs->lock, flags))
-                return 0;
+        if (!raw_spin_trylock_irqsave(&rs->lock, flags)) {
+                if (READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED &&
+                    atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
+                        ret = 1;
+                goto nolock_ret;
+        }
 
-        if (!rs->begin)
+        if (!(rs->flags & RATELIMIT_INITIALIZED)) {
                 rs->begin = jiffies;
+                rs->flags |= RATELIMIT_INITIALIZED;
+                atomic_set(&rs->rs_n_left, rs->burst);
+        }
 
         if (time_is_before_jiffies(rs->begin + interval)) {
-                if (rs->missed) {
-                        if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
+                int m;
+
+                /*
+                 * Reset rs_n_left ASAP to reduce false positives
+                 * in parallel calls, see above.
+                 */
+                atomic_set(&rs->rs_n_left, rs->burst);
+                rs->begin = jiffies;
+
+                if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
+                        m = ratelimit_state_reset_miss(rs);
+                        if (m) {
                                 printk_deferred(KERN_WARNING
-                                                "%s: %d callbacks suppressed\n",
-                                                func, rs->missed);
-                                rs->missed = 0;
+                                                "%s: %d callbacks suppressed\n", func, m);
                         }
                 }
-                rs->begin = jiffies;
-                rs->printed = 0;
         }
-        if (burst && burst > rs->printed) {
-                rs->printed++;
+
+        /* Note that the burst might be taken by a parallel call. */
+        if (atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
                 ret = 1;
-        } else {
-                rs->missed++;
-                ret = 0;
-        }
+
+unlock_ret:
         raw_spin_unlock_irqrestore(&rs->lock, flags);
 
+nolock_ret:
+        if (!ret)
+                ratelimit_state_inc_miss(rs);
+
         return ret;
 }
 EXPORT_SYMBOL(___ratelimit);
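Several commits in the series (drm/amd/pm, drm/i915, random) replace open-coded reads and writes of ->missed with the accessors visible in the diff above, ratelimit_state_inc_miss() and ratelimit_state_reset_miss(). Below is a minimal sketch of that pattern, assuming only those accessors; the `bar_*` subsystem and its teardown path are hypothetical.

```c
/* Hypothetical subsystem; only the ratelimit accessors come from this series. */
#include <linux/ratelimit.h>
#include <linux/printk.h>

static DEFINE_RATELIMIT_STATE(bar_rs, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void bar_log_fault(int code)
{
	if (__ratelimit(&bar_rs))
		pr_err("bar: fault %d\n", code);
	/* A suppressed call has already bumped the miss counter internally. */
}

static void bar_teardown(void)
{
	/* Read and clear the accumulated miss count in one step. */
	int missed = ratelimit_state_reset_miss(&bar_rs);

	if (missed)
		pr_info("bar: %d fault reports suppressed\n", missed);
}
```

The point of the accessors is that callers no longer touch ratelimit_state internals directly, which is what allowed the ->missed field to become an atomic_t without changing every user.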