author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-03-24 20:55:03 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-03-24 20:55:03 -0700
commit | 23608993bb224968a17d6db0df435ddb8e77412b (patch)
tree | f606373c11942b9ad75a89cf43a9ecb00172b0c1 /kernel/locking/rtmutex.c
parent | 3ba7dfb8da62c43ea02bc278863367c2b0427cc1 (diff)
parent | 35e6b537af85d97e0aafd8f2829dfa884a22df20 (diff)
Merge tag 'locking-core-2025-03-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"Locking primitives:
- Micro-optimize percpu_{,try_}cmpxchg{64,128}_op() and
{,try_}cmpxchg{64,128} on x86 (Uros Bizjak)
- mutexes: extend debug checks in mutex_lock() (Yunhui Cui)
- Misc cleanups (Uros Bizjak)
Lockdep:
- Fix might_fault() lockdep check of current->mm->mmap_lock (Peter
Zijlstra)
- Don't disable interrupts on RT in disable_irq_nosync_lockdep.*()
(Sebastian Andrzej Siewior)
- Disable KASAN instrumentation of lockdep.c (Waiman Long)
- Add kasan_check_byte() check in lock_acquire() (Waiman Long)
- Misc cleanups (Sebastian Andrzej Siewior)
Rust runtime integration:
- Use Pin for all LockClassKey usages (Mitchell Levy)
- sync: Add accessor for the lock behind a given guard (Alice Ryhl)
- sync: condvar: Add wait_interruptible_freezable() (Alice Ryhl)
- sync: lock: Add an example for Guard::lock_ref() (Boqun Feng)
Split-lock detection feature (x86):
- Fix warning mode with disabled mitigation mode (Maksim Davydov)
Locking events:
- Add locking events for rtmutex slow paths (Waiman Long)
- Add locking events for lockdep (Waiman Long)"
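As background for the kasan_check_byte() item in the Lockdep section above: the point of such a check is to let KASAN validate the lockdep_map object itself before lockdep updates its bookkeeping, so that locking operations on already-freed memory surface as a clear KASAN report instead of silent corruption. The helper below is only an illustrative sketch of that pattern; the helper name and the bail-out policy are assumptions, not the actual lock_acquire() change.

#include <linux/kasan.h>
#include <linux/lockdep.h>

/*
 * Illustrative sketch: skip lock bookkeeping when the lock object itself is
 * invalid memory.  kasan_check_byte() returns false (and emits a KASAN
 * report) if the byte at the given address is poisoned, e.g. because the
 * structure embedding the lock has already been freed.
 */
static bool lock_object_is_valid(const struct lockdep_map *lock)
{
        return kasan_check_byte(lock);
}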
* tag 'locking-core-2025-03-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
lockdep: Remove disable_irq_lockdep()
lockdep: Don't disable interrupts on RT in disable_irq_nosync_lockdep.*()
rust: lockdep: Use Pin for all LockClassKey usages
rust: sync: condvar: Add wait_interruptible_freezable()
rust: sync: lock: Add an example for Guard::lock_ref()
rust: sync: Add accessor for the lock behind a given guard
locking/lockdep: Add kasan_check_byte() check in lock_acquire()
locking/lockdep: Disable KASAN instrumentation of lockdep.c
locking/lock_events: Add locking events for lockdep
locking/lock_events: Add locking events for rtmutex slow paths
x86/split_lock: Fix the delayed detection logic
lockdep/mm: Fix might_fault() lockdep check of current->mm->mmap_lock
x86/locking: Remove semicolon from "lock" prefix
locking/mutex: Add MUTEX_WARN_ON() into fast path
x86/locking: Use asm_inline for {,try_}cmpxchg{64,128} emulations
x86/locking: Use ALT_OUTPUT_SP() for percpu_{,try_}cmpxchg{64,128}_op()
Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r-- | kernel/locking/rtmutex.c | 29 |
1 files changed, 24 insertions, 5 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 4a8df1800cbb..c80902eacd79 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -27,6 +27,7 @@
 #include <trace/events/lock.h>
 
 #include "rtmutex_common.h"
+#include "lock_events.h"
 
 #ifndef WW_RT
 # define build_ww_mutex()       (false)
@@ -1612,10 +1613,13 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
        struct task_struct *owner;
        int ret = 0;
 
+       lockevent_inc(rtmutex_slow_block);
        for (;;) {
                /* Try to acquire the lock: */
-               if (try_to_take_rt_mutex(lock, current, waiter))
+               if (try_to_take_rt_mutex(lock, current, waiter)) {
+                       lockevent_inc(rtmutex_slow_acq3);
                        break;
+               }
 
                if (timeout && !timeout->task) {
                        ret = -ETIMEDOUT;
@@ -1638,8 +1642,10 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
                        owner = NULL;
                raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
-               if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
+               if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) {
+                       lockevent_inc(rtmutex_slow_sleep);
                        rt_mutex_schedule();
+               }
 
                raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(state);
@@ -1694,6 +1700,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
        int ret;
 
        lockdep_assert_held(&lock->wait_lock);
+       lockevent_inc(rtmutex_slowlock);
 
        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
@@ -1701,6 +1708,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
                        __ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
                        ww_mutex_lock_acquired(ww, ww_ctx);
                }
+               lockevent_inc(rtmutex_slow_acq1);
                return 0;
        }
 
@@ -1719,10 +1727,12 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
                        __ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
                        ww_mutex_lock_acquired(ww, ww_ctx);
                }
+               lockevent_inc(rtmutex_slow_acq2);
        } else {
                __set_current_state(TASK_RUNNING);
                remove_waiter(lock, waiter);
                rt_mutex_handle_deadlock(ret, chwalk, lock, waiter);
+               lockevent_inc(rtmutex_deadlock);
        }
 
        /*
@@ -1751,6 +1761,7 @@ static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
                                  &waiter, wake_q);
 
        debug_rt_mutex_free_waiter(&waiter);
+       lockevent_cond_inc(rtmutex_slow_wake, !wake_q_empty(wake_q));
        return ret;
 }
 
@@ -1823,9 +1834,12 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
        struct task_struct *owner;
 
        lockdep_assert_held(&lock->wait_lock);
+       lockevent_inc(rtlock_slowlock);
 
-       if (try_to_take_rt_mutex(lock, current, NULL))
+       if (try_to_take_rt_mutex(lock, current, NULL)) {
+               lockevent_inc(rtlock_slow_acq1);
                return;
+       }
 
        rt_mutex_init_rtlock_waiter(&waiter);
 
@@ -1838,8 +1852,10 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 
        for (;;) {
                /* Try to acquire the lock again */
-               if (try_to_take_rt_mutex(lock, current, &waiter))
+               if (try_to_take_rt_mutex(lock, current, &waiter)) {
+                       lockevent_inc(rtlock_slow_acq2);
                        break;
+               }
 
                if (&waiter == rt_mutex_top_waiter(lock))
                        owner = rt_mutex_owner(lock);
@@ -1847,8 +1863,10 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
                        owner = NULL;
                raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
-               if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
+               if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) {
+                       lockevent_inc(rtlock_slow_sleep);
                        schedule_rtlock();
+               }
 
                raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(TASK_RTLOCK_WAIT);
@@ -1865,6 +1883,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
        debug_rt_mutex_free_waiter(&waiter);
 
        trace_contention_end(lock, 0);
+       lockevent_cond_inc(rtlock_slow_wake, !wake_q_empty(wake_q));
 }
 
 static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
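The lockevent_inc()/lockevent_cond_inc() calls added in the diff above come from the lock-event counting machinery in kernel/locking/lock_events.h, which the new "lock_events.h" include pulls in. The snippet below is a simplified sketch of the general shape of that machinery (names and details abridged, not the verbatim kernel source): each named event becomes a per-CPU counter when CONFIG_LOCK_EVENT_COUNTS is enabled, and the instrumentation compiles away entirely when it is disabled.

#include <linux/percpu.h>

/* Simplified sketch of the lock-event counters; not the verbatim kernel code. */
#ifdef CONFIG_LOCK_EVENT_COUNTS

enum lock_events {
        LOCKEVENT_rtmutex_slowlock,     /* one enumerator per named event */
        LOCKEVENT_rtmutex_slow_block,
        LOCKEVENT_rtlock_slowlock,
        /* ... */
        lockevent_num,                  /* total number of events */
};

DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);

/* Unconditionally bump the per-CPU counter for event 'ev'. */
#define lockevent_inc(ev)       this_cpu_inc(lockevents[LOCKEVENT_ ## ev])

/* Bump the counter only when condition 'c' holds. */
#define lockevent_cond_inc(ev, c)                               \
do {                                                            \
        if (c)                                                  \
                this_cpu_inc(lockevents[LOCKEVENT_ ## ev]);     \
} while (0)

#else /* !CONFIG_LOCK_EVENT_COUNTS */

/* With event counting disabled, the calls compile to nothing. */
#define lockevent_inc(ev)               do { } while (0)
#define lockevent_cond_inc(ev, c)       do { (void)(c); } while (0)

#endif /* CONFIG_LOCK_EVENT_COUNTS */

When the config option is enabled, the summed per-CPU counts are normally readable through debugfs (the lock_event_counts directory), which is how the new rtmutex_* and rtlock_* events become observable at runtime.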