author     Jiri Kosina <jkosina@suse.com>              2025-03-26 13:42:07 +0100
committer  Jiri Kosina <jkosina@suse.com>              2025-03-26 13:42:07 +0100
commit     b3cc7428a32202936904b5b07cf9f135025bafd6 (patch)
tree       d4a1a6180ac5939fccd92acd6f8d7d1388575c4a /lib/atomic64.c
parent     db52926fb0be40e1d588a346df73f5ea3a34a4c6 (diff)
parent     01601fdd40ecf4467c8ae4d215dbb7d2a0599a2c (diff)
Merge branch 'for-6.15/amd_sfh' into for-linus
From: Mario Limonciello <mario.limonciello@amd.com>
Some platforms include a human presence detection (HPD) sensor. When
it is enabled and a user is detected, the sensor fusion hub emits a
wake event that software can react to. Example use cases are "wake
from suspend on approach" or "lock when leaving".

This is currently enabled by default on supported systems, but users
have no way to control it. In effect, wake on approach is always on,
which is very surprising behavior to users who don't expect it.

Instead of defaulting to enabled, add a sysfs knob that users can use
to enable the feature if desired, and set it to disabled by default.
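The knob described above is a sysfs attribute, so enabling HPD from user
space amounts to writing "1" to the attribute file (and "0" to disable it
again). The message does not name the exact attribute path, so the sketch
below deliberately takes the path as a command-line argument instead of
hard-coding one; it only illustrates the usage pattern and is not part of
this patch.

/*
 * Illustrative sketch only: write "1" (enable) or "0" (disable) to a sysfs
 * attribute whose path is supplied by the caller.  The actual HPD attribute
 * name and location come from the amd_sfh driver's ABI documentation and
 * are not hard-coded here.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *val = (argc > 2) ? argv[2] : "1";
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <sysfs-attribute-path> [0|1]\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "w");
	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%s\n", val);	/* e.g. "1" to enable HPD wake events */
	return fclose(f) == 0 ? 0 : 1;
}

With the attribute path known from the driver's sysfs ABI documentation,
this would be run as, for example, ./hpd-toggle <path> 1.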
Diffstat (limited to 'lib/atomic64.c')
-rw-r--r--  lib/atomic64.c  78
1 file changed, 48 insertions(+), 30 deletions(-)
diff --git a/lib/atomic64.c b/lib/atomic64.c
index caf895789a1ee..1a72bba36d243 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -25,15 +25,15 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
 	[0 ... (NR_LOCKS - 1)] = {
-		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+		.lock = __ARCH_SPIN_LOCK_UNLOCKED,
 	},
 };
 
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline arch_spinlock_t *lock_addr(const atomic64_t *v)
 {
 	unsigned long addr = (unsigned long) v;
 
@@ -45,12 +45,14 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 s64 generic_atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	arch_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	local_irq_save(flags);
+	arch_spin_lock(lock);
 	val = v->counter;
-	raw_spin_unlock_irqrestore(lock, flags);
+	arch_spin_unlock(lock);
+	local_irq_restore(flags);
 	return val;
 }
 EXPORT_SYMBOL(generic_atomic64_read);
@@ -58,11 +60,13 @@ EXPORT_SYMBOL(generic_atomic64_read);
 void generic_atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	arch_spinlock_t *lock = lock_addr(v);
 
-	raw_spin_lock_irqsave(lock, flags);
+	local_irq_save(flags);
+	arch_spin_lock(lock);
 	v->counter = i;
-	raw_spin_unlock_irqrestore(lock, flags);
+	arch_spin_unlock(lock);
+	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(generic_atomic64_set);
 
@@ -70,11 +74,13 @@ EXPORT_SYMBOL(generic_atomic64_set);
 void generic_atomic64_##op(s64 a, atomic64_t *v) \
 { \
 	unsigned long flags; \
-	raw_spinlock_t *lock = lock_addr(v); \
+	arch_spinlock_t *lock = lock_addr(v); \
 	\
-	raw_spin_lock_irqsave(lock, flags); \
+	local_irq_save(flags); \
+	arch_spin_lock(lock); \
 	v->counter c_op a; \
-	raw_spin_unlock_irqrestore(lock, flags); \
+	arch_spin_unlock(lock); \
+	local_irq_restore(flags); \
 } \
 EXPORT_SYMBOL(generic_atomic64_##op);
 
@@ -82,12 +88,14 @@ EXPORT_SYMBOL(generic_atomic64_##op);
 s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
 { \
 	unsigned long flags; \
-	raw_spinlock_t *lock = lock_addr(v); \
+	arch_spinlock_t *lock = lock_addr(v); \
 	s64 val; \
 	\
-	raw_spin_lock_irqsave(lock, flags); \
+	local_irq_save(flags); \
+	arch_spin_lock(lock); \
 	val = (v->counter c_op a); \
-	raw_spin_unlock_irqrestore(lock, flags); \
+	arch_spin_unlock(lock); \
+	local_irq_restore(flags); \
 	return val; \
 } \
 EXPORT_SYMBOL(generic_atomic64_##op##_return);
@@ -96,13 +104,15 @@ EXPORT_SYMBOL(generic_atomic64_##op##_return);
 s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
 { \
 	unsigned long flags; \
-	raw_spinlock_t *lock = lock_addr(v); \
+	arch_spinlock_t *lock = lock_addr(v); \
 	s64 val; \
 	\
-	raw_spin_lock_irqsave(lock, flags); \
+	local_irq_save(flags); \
+	arch_spin_lock(lock); \
 	val = v->counter; \
 	v->counter c_op a; \
-	raw_spin_unlock_irqrestore(lock, flags); \
+	arch_spin_unlock(lock); \
+	local_irq_restore(flags); \
 	return val; \
 } \
 EXPORT_SYMBOL(generic_atomic64_fetch_##op);
@@ -131,14 +141,16 @@ ATOMIC64_OPS(xor, ^=)
 s64 generic_atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	arch_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	local_irq_save(flags);
+	arch_spin_lock(lock);
 	val = v->counter - 1;
 	if (val >= 0)
 		v->counter = val;
-	raw_spin_unlock_irqrestore(lock, flags);
+	arch_spin_unlock(lock);
+	local_irq_restore(flags);
 	return val;
 }
 EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
@@ -146,14 +158,16 @@ EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
 s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	arch_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	local_irq_save(flags);
+	arch_spin_lock(lock);
 	val = v->counter;
 	if (val == o)
 		v->counter = n;
-	raw_spin_unlock_irqrestore(lock, flags);
+	arch_spin_unlock(lock);
+	local_irq_restore(flags);
 	return val;
 }
 EXPORT_SYMBOL(generic_atomic64_cmpxchg);
@@ -161,13 +175,15 @@ EXPORT_SYMBOL(generic_atomic64_cmpxchg);
 s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	arch_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	local_irq_save(flags);
+	arch_spin_lock(lock);
 	val = v->counter;
 	v->counter = new;
-	raw_spin_unlock_irqrestore(lock, flags);
+	arch_spin_unlock(lock);
+	local_irq_restore(flags);
 	return val;
 }
 EXPORT_SYMBOL(generic_atomic64_xchg);
@@ -175,14 +191,16 @@ EXPORT_SYMBOL(generic_atomic64_xchg);
 s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	arch_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	local_irq_save(flags);
+	arch_spin_lock(lock);
 	val = v->counter;
 	if (val != u)
 		v->counter += a;
-	raw_spin_unlock_irqrestore(lock, flags);
+	arch_spin_unlock(lock);
+	local_irq_restore(flags);
 	return val;
 }
 EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
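For readers unfamiliar with the file being patched: lib/atomic64.c provides
generic 64-bit atomics for architectures without native ones by serializing
each operation on one of NR_LOCKS cacheline-padded spinlocks, chosen by
hashing the counter's address (lock_addr() above). The diff keeps that
structure and only switches the lock type from raw_spinlock_t to
arch_spinlock_t, with explicit local_irq_save()/arch_spin_lock() pairs. The
fragment below is a minimal user-space sketch of the same address-hashed
lock-array idea, using pthread spinlocks as a stand-in for the kernel
primitives; the names, lock count, and hash are illustrative assumptions,
not the kernel's exact code.

/*
 * User-space sketch of lock striping as used by lib/atomic64.c:
 * a small array of spinlocks, each padded to its own cacheline,
 * selected by hashing the address of the 64-bit counter.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_LOCKS   16
#define CACHE_LINE 64

/* One spinlock per stripe, padded so each sits in its own cacheline. */
static union {
	pthread_spinlock_t lock;
	char pad[CACHE_LINE];
} stripe[NR_LOCKS];

typedef struct { int64_t counter; } my_atomic64;

/* Hash the counter's address to pick a stripe (illustrative hash). */
static pthread_spinlock_t *lock_for(const my_atomic64 *v)
{
	uintptr_t addr = (uintptr_t)v >> 6;	/* drop the within-line offset */

	return &stripe[addr & (NR_LOCKS - 1)].lock;
}

/* Analogue of generic_atomic64_add_return(): serialize via the stripe lock. */
static int64_t add_return(int64_t a, my_atomic64 *v)
{
	pthread_spinlock_t *lock = lock_for(v);
	int64_t val;

	pthread_spin_lock(lock);	/* the kernel version also masks IRQs */
	val = (v->counter += a);
	pthread_spin_unlock(lock);
	return val;
}

int main(void)
{
	my_atomic64 x = { 0 };

	for (int i = 0; i < NR_LOCKS; i++)
		pthread_spin_init(&stripe[i].lock, PTHREAD_PROCESS_PRIVATE);

	printf("%lld\n", (long long)add_return(5, &x));	/* prints 5 */
	return 0;
}

Build with cc -pthread; the point is only to show how any given 64-bit
counter is guarded by exactly one lock out of a small striped array.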