author | Agustina Arzille <avarzille@riseup.net> | 2017-04-03 16:09:51 +0200
---|---|---
committer | Richard Braun <rbraun@sceen.net> | 2017-04-04 22:07:06 +0200
commit | b1730c99f882fc2662c6b64371a4b11a8231bb9f (patch) |
tree | c4fa5fa51287aee6d6cb372f1cfa8f6413ababd6 /kern |
parent | d5bb14cf6a8305bda2a5a73ce727e5309996a20a (diff) |
Use the new atomic operations interface
Stick to a sequentially consistent model for most atomic operations as it
matches the semantics of the existing code. Each call site will have to be
reevaluated in order to switch to more relaxed accesses where possible.
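None of the hunks below touch <kern/atomic.h> itself, so the shape of the new interface has to be read off its call sites: generic operations taking the memory order as a trailing argument, plus swap/CAS helpers with the order baked into the name. A minimal sketch of such a header, assuming (the diff does not show this) that it wraps the C11/GCC __atomic builtins:

```c
/*
 * Illustrative sketch only: <kern/atomic.h> is not part of this diff.
 * It is assumed to map onto the C11/GCC __atomic builtins, with the
 * memory order passed explicitly by each caller.
 */
#define ATOMIC_RELAXED  __ATOMIC_RELAXED
#define ATOMIC_ACQUIRE  __ATOMIC_ACQUIRE
#define ATOMIC_RELEASE  __ATOMIC_RELEASE
#define ATOMIC_SEQ_CST  __ATOMIC_SEQ_CST

/* Generic over integer types, unlike the old _uint/_ulong/_uintptr variants. */
#define atomic_load(ptr, mo)            __atomic_load_n((ptr), (mo))
#define atomic_fetch_add(ptr, val, mo)  __atomic_fetch_add((ptr), (val), (mo))
#define atomic_fetch_sub(ptr, val, mo)  __atomic_fetch_sub((ptr), (val), (mo))
#define atomic_or(ptr, val, mo)         ((void)__atomic_fetch_or((ptr), (val), (mo)))
#define atomic_and(ptr, val, mo)        ((void)__atomic_fetch_and((ptr), (val), (mo)))

/* The swap and CAS helpers used below bake the order into the name instead. */
#define atomic_swap_seq_cst(ptr, val)   __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)

/* CAS that returns the previous value, which is what the call sites test. */
#define atomic_cas_seq_cst(ptr, oval, nval)                           \
({                                                                    \
    __typeof__(*(ptr)) prev_ = (oval);                                \
    __atomic_compare_exchange_n((ptr), &prev_, (nval), false,         \
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);  \
    prev_;                                                            \
})
```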
Diffstat (limited to 'kern')
-rw-r--r-- | kern/bitmap.h | 6
-rw-r--r-- | kern/mutex.c | 4
-rw-r--r-- | kern/mutex_i.h | 6
-rw-r--r-- | kern/panic.c | 4
-rw-r--r-- | kern/rtmutex.c | 16
-rw-r--r-- | kern/rtmutex_i.h | 6
-rw-r--r-- | kern/semaphore_i.h | 6
-rw-r--r-- | kern/spinlock.c | 8
-rw-r--r-- | kern/spinlock_i.h | 7
-rw-r--r-- | kern/sref.c | 8
-rw-r--r-- | kern/syscnt.c | 5
-rw-r--r-- | kern/syscnt.h | 12
-rw-r--r-- | kern/syscnt_types.h | 3
-rw-r--r-- | kern/thread.c | 4
-rw-r--r-- | kern/thread.h | 6
-rw-r--r-- | kern/thread_i.h | 9
16 files changed, 53 insertions, 57 deletions
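The diffstat is dominated by mechanical rewrites: each type-suffixed call becomes the corresponding generic call with an explicit ordering argument. Condensed from the thread_i.h hunks below, with illustrative _old/_new suffixes added only for the comparison:

```c
struct thread {
    unsigned long flags;    /* other fields elided */
};

/* Old interface: type in the name, full ordering implied. */
static inline void
thread_set_flag_old(struct thread *thread, unsigned long flag)
{
    atomic_or_ulong(&thread->flags, flag);
}

/* New interface: generic operation, order spelled out at the call site. */
static inline void
thread_set_flag_new(struct thread *thread, unsigned long flag)
{
    atomic_or(&thread->flags, flag, ATOMIC_SEQ_CST);
}
```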
diff --git a/kern/bitmap.h b/kern/bitmap.h
index 77dfac1b..6b7f2d79 100644
--- a/kern/bitmap.h
+++ b/kern/bitmap.h
@@ -26,9 +26,9 @@
 #include <string.h>
 
+#include <kern/atomic.h>
 #include <kern/bitmap_i.h>
 #include <kern/limits.h>
-#include <machine/atomic.h>
 
 #define BITMAP_DECLARE(name, nr_bits) unsigned long name[BITMAP_LONGS(nr_bits)]
 
@@ -78,7 +78,7 @@ bitmap_set_atomic(unsigned long *bm, int bit)
         bitmap_lookup(bm, bit);
     }
 
-    atomic_or_ulong(bm, bitmap_mask(bit));
+    atomic_or(bm, bitmap_mask(bit), ATOMIC_SEQ_CST);
 }
 
 static inline void
@@ -98,7 +98,7 @@ bitmap_clear_atomic(unsigned long *bm, int bit)
         bitmap_lookup(bm, bit);
     }
 
-    atomic_and_ulong(bm, ~bitmap_mask(bit));
+    atomic_and(bm, ~bitmap_mask(bit), ATOMIC_SEQ_CST);
 }
 
 static inline int
diff --git a/kern/mutex.c b/kern/mutex.c
index 353d94a7..00077f26 100644
--- a/kern/mutex.c
+++ b/kern/mutex.c
@@ -34,7 +34,7 @@ mutex_lock_slow(struct mutex *mutex)
     sleepq = sleepq_lend(mutex, false, &flags);
 
     for (;;) {
-        state = atomic_swap_uint(&mutex->state, MUTEX_CONTENDED);
+        state = atomic_swap_seq_cst(&mutex->state, MUTEX_CONTENDED);
 
         if (state == MUTEX_UNLOCKED) {
             break;
@@ -44,7 +44,7 @@
     }
 
     if (sleepq_empty(sleepq)) {
-        state = atomic_swap_uint(&mutex->state, MUTEX_LOCKED);
+        state = atomic_swap_seq_cst(&mutex->state, MUTEX_LOCKED);
         assert(state == MUTEX_CONTENDED);
     }
diff --git a/kern/mutex_i.h b/kern/mutex_i.h
index 158d8f7f..69131cf8 100644
--- a/kern/mutex_i.h
+++ b/kern/mutex_i.h
@@ -21,8 +21,8 @@
 #ifndef X15_MUTEX_PI
 
 #include <kern/assert.h>
+#include <kern/atomic.h>
 #include <kern/mutex_types.h>
-#include <machine/atomic.h>
 
 #define MUTEX_UNLOCKED 0
 #define MUTEX_LOCKED 1
@@ -31,7 +31,7 @@
 static inline unsigned int
 mutex_tryacquire(struct mutex *mutex)
 {
-    return atomic_cas_uint(&mutex->state, MUTEX_UNLOCKED, MUTEX_LOCKED);
+    return atomic_cas_seq_cst(&mutex->state, MUTEX_UNLOCKED, MUTEX_LOCKED);
 }
 
 static inline unsigned int
@@ -39,7 +39,7 @@
 mutex_release(struct mutex *mutex)
 {
     unsigned int state;
 
-    state = atomic_swap_uint(&mutex->state, MUTEX_UNLOCKED);
+    state = atomic_swap_seq_cst(&mutex->state, MUTEX_UNLOCKED);
     assert((state == MUTEX_LOCKED) || (state == MUTEX_CONTENDED));
     return state;
 }
diff --git a/kern/panic.c b/kern/panic.c
index e0bf30cc..a9599f26 100644
--- a/kern/panic.c
+++ b/kern/panic.c
@@ -17,9 +17,9 @@
 #include <stdarg.h>
 
+#include <kern/atomic.h>
 #include <kern/panic.h>
 #include <kern/printk.h>
-#include <machine/atomic.h>
 #include <machine/cpu.h>
 #include <machine/strace.h>
 
@@ -31,7 +31,7 @@ panic(const char *format, ...)
     va_list list;
     unsigned long already_done;
 
-    already_done = atomic_swap_uint(&panic_done, 1);
+    already_done = atomic_swap_seq_cst(&panic_done, 1);
 
     if (already_done) {
         for (;;) {
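The panic() hunk is the simplest illustration of why the commit defaults to sequential consistency: panic_done is a run-once latch, and the swap must both publish the flag and tell concurrent callers, on any CPU, that they lost the race. The same pattern in isolation (the helper name is hypothetical):

```c
static unsigned long panic_done;

/* Hypothetical restatement of the latch pattern used by panic() above. */
static int
enter_once(void)
{
    /* Returns nonzero exactly once, however many CPUs race here. */
    return atomic_swap_seq_cst(&panic_done, 1) == 0;
}
```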
diff --git a/kern/rtmutex.c b/kern/rtmutex.c
index f79f8a88..3f4251f0 100644
--- a/kern/rtmutex.c
+++ b/kern/rtmutex.c
@@ -19,23 +19,17 @@
 #include <stdint.h>
 
 #include <kern/assert.h>
+#include <kern/atomic.h>
 #include <kern/rtmutex.h>
 #include <kern/rtmutex_i.h>
 #include <kern/rtmutex_types.h>
 #include <kern/thread.h>
 #include <kern/turnstile.h>
-#include <machine/atomic.h>
 
 static void
 rtmutex_set_contended(struct rtmutex *rtmutex)
 {
-    uintptr_t owner, prev_owner;
-
-    do {
-        owner = rtmutex->owner;
-        prev_owner = atomic_cas_uintptr(&rtmutex->owner, owner,
-                                        owner | RTMUTEX_CONTENDED);
-    } while (prev_owner != owner);
+    atomic_or(&rtmutex->owner, RTMUTEX_CONTENDED, ATOMIC_SEQ_CST);
 }
 
 void
@@ -55,7 +49,7 @@ rtmutex_lock_slow(struct rtmutex *rtmutex)
     bits = RTMUTEX_CONTENDED;
 
     for (;;) {
-        prev_owner = atomic_cas_uintptr(&rtmutex->owner, bits, owner | bits);
+        prev_owner = atomic_cas_seq_cst(&rtmutex->owner, bits, owner | bits);
         assert((prev_owner & bits) == bits);
 
         if (prev_owner == bits) {
@@ -70,7 +64,7 @@
     turnstile_own(turnstile);
 
     if (turnstile_empty(turnstile)) {
-        prev_owner = atomic_swap_uintptr(&rtmutex->owner, owner);
+        prev_owner = atomic_swap_seq_cst(&rtmutex->owner, owner);
         assert(prev_owner == (owner | bits));
     }
 
@@ -95,7 +89,7 @@ rtmutex_unlock_slow(struct rtmutex *rtmutex)
     turnstile = turnstile_acquire(rtmutex);
     assert(turnstile != NULL);
 
-    prev_owner = atomic_swap_uintptr(&rtmutex->owner,
+    prev_owner = atomic_swap_seq_cst(&rtmutex->owner,
                                      RTMUTEX_FORCE_WAIT | RTMUTEX_CONTENDED);
 
     assert((prev_owner & RTMUTEX_OWNER_MASK) == owner);
diff --git a/kern/rtmutex_i.h b/kern/rtmutex_i.h
index b99b5bd0..8825b047 100644
--- a/kern/rtmutex_i.h
+++ b/kern/rtmutex_i.h
@@ -22,9 +22,9 @@
 #include <stdint.h>
 
 #include <kern/assert.h>
+#include <kern/atomic.h>
 #include <kern/rtmutex_types.h>
 #include <kern/thread.h>
-#include <machine/atomic.h>
 
 /*
  * Real-time mutex flags.
@@ -57,7 +57,7 @@ rtmutex_tryacquire(struct rtmutex *rtmutex)
     owner = (uintptr_t)thread_self();
     rtmutex_assert_owner_aligned(owner);
-    return atomic_cas_uintptr(&rtmutex->owner, 0, owner);
+    return atomic_cas_seq_cst(&rtmutex->owner, 0, owner);
 }
 
 static inline uintptr_t
@@ -67,7 +67,7 @@ rtmutex_tryrelease(struct rtmutex *rtmutex)
     owner = (uintptr_t)thread_self();
     rtmutex_assert_owner_aligned(owner);
-    prev_owner = atomic_cas_uintptr(&rtmutex->owner, owner, 0);
+    prev_owner = atomic_cas_seq_cst(&rtmutex->owner, owner, 0);
     assert((prev_owner & RTMUTEX_OWNER_MASK) == owner);
     return prev_owner;
 }
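The rtmutex_set_contended() change above is the one place in this file where the conversion is more than renaming: a hand-rolled CAS retry loop collapses into a single atomic OR, which is exactly what the loop was emulating. Side by side (the old body is reproduced from the hunk; the equivalence assumes atomic_or is a single read-modify-write, as its seq_cst argument implies):

```c
/* Old: emulate "set one bit" by retrying a full-word CAS until no
 * concurrent update slips in between the read and the CAS. */
do {
    owner = rtmutex->owner;
    prev_owner = atomic_cas_uintptr(&rtmutex->owner, owner,
                                    owner | RTMUTEX_CONTENDED);
} while (prev_owner != owner);

/* New: the builtin performs the read-modify-write atomically. */
atomic_or(&rtmutex->owner, RTMUTEX_CONTENDED, ATOMIC_SEQ_CST);
```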
diff --git a/kern/semaphore_i.h b/kern/semaphore_i.h
index c6769c12..9aa68e07 100644
--- a/kern/semaphore_i.h
+++ b/kern/semaphore_i.h
@@ -19,7 +19,7 @@
 #define _KERN_SEMAPHORE_I_H
 
 #include <kern/assert.h>
-#include <machine/atomic.h>
+#include <kern/atomic.h>
 
 struct semaphore {
     unsigned int value;
@@ -37,7 +37,7 @@ semaphore_dec(struct semaphore *semaphore)
             break;
         }
 
-        prev = atomic_cas_uint(&semaphore->value, value, value - 1);
+        prev = atomic_cas_seq_cst(&semaphore->value, value, value - 1);
     } while (prev != value);
 
     return value;
@@ -48,7 +48,7 @@
 semaphore_inc(struct semaphore *semaphore)
 {
     unsigned int prev;
 
-    prev = atomic_fetchadd_uint(&semaphore->value, 1);
+    prev = atomic_fetch_add(&semaphore->value, 1, ATOMIC_SEQ_CST);
     assert(prev != SEMAPHORE_VALUE_MAX);
     return prev;
 }
diff --git a/kern/spinlock.c b/kern/spinlock.c
index 948d93cf..0e6f2fa8 100644
--- a/kern/spinlock.c
+++ b/kern/spinlock.c
@@ -56,6 +56,7 @@
 #include <stddef.h>
 
 #include <kern/assert.h>
+#include <kern/atomic.h>
 #include <kern/error.h>
 #include <kern/macros.h>
 #include <kern/percpu.h>
@@ -63,7 +64,6 @@
 #include <kern/spinlock_i.h>
 #include <kern/spinlock_types.h>
 #include <kern/thread.h>
-#include <machine/atomic.h>
 #include <machine/cpu.h>
 #include <machine/mb.h>
 
@@ -196,7 +196,7 @@ spinlock_store_first_qid(struct spinlock *lock, unsigned int newqid)
     do {
         oldval = read_once(lock->value);
         newval = newqid | (oldval & SPINLOCK_QID_MASK);
-        prev = atomic_cas_uint(&lock->value, oldval, newval);
+        prev = atomic_cas_seq_cst(&lock->value, oldval, newval);
     } while (prev != oldval);
 }
 
@@ -220,7 +220,7 @@ spinlock_swap_last_qid(struct spinlock *lock, unsigned int newqid)
         oldval = read_once(lock->value);
         newval = (oldval & (SPINLOCK_QID_MASK << SPINLOCK_QID_MAX_BITS))
                  | newqid;
-        prev = atomic_cas_uint(&lock->value, oldval, newval);
+        prev = atomic_cas_seq_cst(&lock->value, oldval, newval);
     } while (prev != oldval);
 
     return prev & SPINLOCK_QID_MASK;
@@ -231,7 +231,7 @@ spinlock_try_downgrade(struct spinlock *lock, unsigned int oldqid)
 {
     unsigned int prev;
 
-    prev = atomic_cas_uint(&lock->value, oldqid, SPINLOCK_QID_LOCKED);
+    prev = atomic_cas_seq_cst(&lock->value, oldqid, SPINLOCK_QID_LOCKED);
     assert((prev >> SPINLOCK_QID_MAX_BITS) == 0);
     assert(prev != SPINLOCK_QID_NULL);
diff --git a/kern/spinlock_i.h b/kern/spinlock_i.h
index 69de0b01..bf9bf2c7 100644
--- a/kern/spinlock_i.h
+++ b/kern/spinlock_i.h
@@ -21,9 +21,10 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <kern/assert.h>
+#include <kern/atomic.h>
 #include <kern/error.h>
 #include <kern/spinlock_types.h>
-#include <machine/atomic.h>
 #include <machine/cpu.h>
 
 /*
@@ -39,7 +40,7 @@ spinlock_lock_fast(struct spinlock *lock)
 {
     unsigned int prev;
 
-    prev = atomic_cas_uint(&lock->value, SPINLOCK_UNLOCKED, SPINLOCK_LOCKED);
+    prev = atomic_cas_seq_cst(&lock->value, SPINLOCK_UNLOCKED, SPINLOCK_LOCKED);
 
     if (prev != SPINLOCK_UNLOCKED) {
         return ERROR_BUSY;
@@ -53,7 +54,7 @@
 spinlock_unlock_fast(struct spinlock *lock)
 {
     unsigned int prev;
 
-    prev = atomic_cas_uint(&lock->value, SPINLOCK_LOCKED, SPINLOCK_UNLOCKED);
+    prev = atomic_cas_seq_cst(&lock->value, SPINLOCK_LOCKED, SPINLOCK_UNLOCKED);
 
     if (prev != SPINLOCK_LOCKED) {
         return ERROR_BUSY;
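Both spinlock fast paths above follow one convention: a single seq_cst CAS, with the previous value deciding between success and ERROR_BUSY so that a contended path can take over. A hedged sketch of a caller; the int return type and the spinlock_lock_slow() name are assumptions, as neither appears in this diff:

```c
/* Illustrative only: spinlock_lock_slow() is not shown in this commit. */
static inline void
spinlock_lock_sketch(struct spinlock *lock)
{
    int error;

    error = spinlock_lock_fast(lock);   /* one CAS, as defined above */

    if (error == ERROR_BUSY) {
        spinlock_lock_slow(lock);       /* queued, contended path */
    }
}
```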
diff --git a/kern/sref.c b/kern/sref.c
index 4ae6ae16..23fb0ad6 100644
--- a/kern/sref.c
+++ b/kern/sref.c
@@ -259,13 +259,13 @@ sref_weakref_init(struct sref_weakref *weakref, struct sref_counter *counter)
 static void
 sref_weakref_mark_dying(struct sref_weakref *weakref)
 {
-    atomic_or_uintptr(&weakref->addr, SREF_WEAKREF_DYING);
+    atomic_or(&weakref->addr, SREF_WEAKREF_DYING, ATOMIC_SEQ_CST);
 }
 
 static void
 sref_weakref_clear_dying(struct sref_weakref *weakref)
 {
-    atomic_and_uintptr(&weakref->addr, SREF_WEAKREF_MASK);
+    atomic_and(&weakref->addr, SREF_WEAKREF_MASK, ATOMIC_SEQ_CST);
 }
 
 static int
@@ -274,7 +274,7 @@ sref_weakref_kill(struct sref_weakref *weakref)
     uintptr_t addr, oldval;
 
     addr = weakref->addr | SREF_WEAKREF_DYING;
-    oldval = atomic_cas_uintptr(&weakref->addr, addr, (uintptr_t)NULL);
+    oldval = atomic_cas_seq_cst(&weakref->addr, addr, (uintptr_t)NULL);
 
     if (oldval != addr) {
         assert((oldval & SREF_WEAKREF_MASK) == (addr & SREF_WEAKREF_MASK));
@@ -292,7 +292,7 @@ sref_weakref_tryget(struct sref_weakref *weakref)
     do {
         addr = weakref->addr;
         newval = addr & SREF_WEAKREF_MASK;
-        oldval = atomic_cas_uintptr(&weakref->addr, addr, newval);
+        oldval = atomic_cas_seq_cst(&weakref->addr, addr, newval);
     } while (oldval != addr);
 
     return (struct sref_counter *)newval;
diff --git a/kern/syscnt.c b/kern/syscnt.c
index 5c4c3608..4336ef1e 100644
--- a/kern/syscnt.c
+++ b/kern/syscnt.c
@@ -17,6 +17,7 @@
 #include <string.h>
 
+#include <kern/atomic.h>
 #include <kern/init.h>
 #include <kern/list.h>
 #include <kern/mutex.h>
@@ -40,9 +41,9 @@ syscnt_setup(void)
 void __init
 syscnt_register(struct syscnt *syscnt, const char *name)
 {
-#ifndef __LP64__
+#ifndef ATOMIC_HAVE_64B_OPS
     spinlock_init(&syscnt->lock);
-#endif /* __LP64__ */
+#endif
     syscnt->value = 0;
     strlcpy(syscnt->name, name, sizeof(syscnt->name));
diff --git a/kern/syscnt.h b/kern/syscnt.h
index fef8115c..c28f3b52 100644
--- a/kern/syscnt.h
+++ b/kern/syscnt.h
@@ -26,9 +26,9 @@
 #include <stdint.h>
 
+#include <kern/atomic.h>
 #include <kern/macros.h>
 #include <kern/spinlock.h>
-#include <machine/atomic.h>
 
 /*
  * Size of the buffer storing a system counter name.
@@ -57,21 +57,21 @@ void syscnt_setup(void);
  */
 void syscnt_register(struct syscnt *syscnt, const char *name);
 
-#ifdef __LP64__
+#ifdef ATOMIC_HAVE_64B_OPS
 
 static inline void
 syscnt_add(struct syscnt *syscnt, int64_t delta)
 {
-    atomic_add_ulong(&syscnt->value, delta);
+    atomic_add(&syscnt->value, delta, ATOMIC_SEQ_CST);
 }
 
 static inline uint64_t
 syscnt_read(const struct syscnt *syscnt)
 {
-    return read_once(syscnt->value);
+    return atomic_load((uint64_t *)&syscnt->value, ATOMIC_RELAXED);
 }
 
-#else /* __LP64__ */
+#else /* ATOMIC_HAVE_64B_OPS */
 
 static inline void
 syscnt_add(struct syscnt *syscnt, int64_t delta)
@@ -96,7 +96,7 @@ syscnt_read(struct syscnt *syscnt)
     return value;
 }
 
-#endif /* __LP64__ */
+#endif /* ATOMIC_HAVE_64B_OPS */
 
 static inline void
 syscnt_inc(struct syscnt *syscnt)
diff --git a/kern/syscnt_types.h b/kern/syscnt_types.h
index 5b429b76..81fb7986 100644
--- a/kern/syscnt_types.h
+++ b/kern/syscnt_types.h
@@ -23,6 +23,7 @@
 #include <stdint.h>
 
+#include <kern/atomic.h>
 #include <kern/list_types.h>
 #include <kern/spinlock_types.h>
 
@@ -31,7 +32,7 @@
  * on 32-bits ones.
  */
 struct syscnt {
-#ifndef __LP64__
+#ifndef ATOMIC_HAVE_64B_OPS
     struct spinlock lock;
 #endif /* __LP64__ */
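The syscnt conversion also fixes the feature test: whether a 64-bit counter can be updated atomically depends on the availability of 64-bit atomic operations, not on __LP64__ directly, hence the new ATOMIC_HAVE_64B_OPS guard. The resulting structure, condensed; the 32-bit fallback body is a sketch, since this diff only shows its surroundings, and the interrupt-saving spinlock calls are assumptions:

```c
#ifdef ATOMIC_HAVE_64B_OPS

static inline void
syscnt_add(struct syscnt *syscnt, int64_t delta)
{
    atomic_add(&syscnt->value, delta, ATOMIC_SEQ_CST);
}

#else /* ATOMIC_HAVE_64B_OPS */

/* Sketch of the fallback: serialize updates with the per-counter lock. */
static inline void
syscnt_add(struct syscnt *syscnt, int64_t delta)
{
    unsigned long flags;

    spinlock_lock_intr_save(&syscnt->lock, &flags);
    syscnt->value += delta;
    spinlock_unlock_intr_restore(&syscnt->lock, flags);
}

#endif /* ATOMIC_HAVE_64B_OPS */
```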
diff --git a/kern/thread.c b/kern/thread.c
index 4d8ce74a..8a87acaa 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -86,6 +86,7 @@
 #include <string.h>
 
 #include <kern/assert.h>
+#include <kern/atomic.h>
 #include <kern/condition.h>
 #include <kern/cpumap.h>
 #include <kern/error.h>
@@ -107,7 +108,6 @@
 #include <kern/thread.h>
 #include <kern/turnstile.h>
 #include <kern/work.h>
-#include <machine/atomic.h>
 #include <machine/cpu.h>
 #include <machine/mb.h>
 #include <machine/pmap.h>
@@ -2723,7 +2723,7 @@ thread_key_create(unsigned int *keyp, thread_dtor_fn_t dtor)
 {
     unsigned int key;
 
-    key = atomic_fetchadd_uint(&thread_nr_keys, 1);
+    key = atomic_fetch_add(&thread_nr_keys, 1, ATOMIC_SEQ_CST);
 
     if (key >= THREAD_KEYS_MAX) {
         panic("thread: maximum number of keys exceeded");
diff --git a/kern/thread.h b/kern/thread.h
index c7f68f14..3fa2a1f8 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -37,12 +37,12 @@
 #include <stddef.h>
 
 #include <kern/assert.h>
+#include <kern/atomic.h>
 #include <kern/condition.h>
 #include <kern/cpumap.h>
 #include <kern/macros.h>
 #include <kern/spinlock_types.h>
 #include <kern/turnstile_types.h>
-#include <machine/atomic.h>
 #include <machine/cpu.h>
 #include <machine/tcb.h>
 
@@ -270,7 +270,7 @@ thread_ref(struct thread *thread)
 {
     unsigned long nr_refs;
 
-    nr_refs = atomic_fetchadd_ulong(&thread->nr_refs, 1);
+    nr_refs = atomic_fetch_add(&thread->nr_refs, 1, ATOMIC_SEQ_CST);
     assert(nr_refs != (unsigned long)-1);
 }
 
@@ -279,7 +279,7 @@ thread_unref(struct thread *thread)
 {
     unsigned long nr_refs;
 
-    nr_refs = atomic_fetchadd_ulong(&thread->nr_refs, -1);
+    nr_refs = atomic_fetch_sub(&thread->nr_refs, 1, ATOMIC_SEQ_CST);
     assert(nr_refs != 0);
 
     if (nr_refs == 1) {
diff --git a/kern/thread_i.h b/kern/thread_i.h
index b0e0074f..34761e56 100644
--- a/kern/thread_i.h
+++ b/kern/thread_i.h
@@ -20,6 +20,7 @@
 #include <stdbool.h>
 
+#include <kern/atomic.h>
 #include <kern/condition_types.h>
 #include <kern/cpumap.h>
 #include <kern/list_types.h>
@@ -27,7 +28,6 @@
 #include <kern/mutex_types.h>
 #include <kern/param.h>
 #include <kern/turnstile_types.h>
-#include <machine/atomic.h>
 #include <machine/tcb.h>
 
 /*
@@ -195,20 +195,19 @@ void thread_destroy(struct thread *thread);
 static inline void
 thread_set_flag(struct thread *thread, unsigned long flag)
 {
-    atomic_or_ulong(&thread->flags, flag);
+    atomic_or(&thread->flags, flag, ATOMIC_SEQ_CST);
 }
 
 static inline void
 thread_clear_flag(struct thread *thread, unsigned long flag)
 {
-    atomic_and_ulong(&thread->flags, ~flag);
+    atomic_and(&thread->flags, ~flag, ATOMIC_SEQ_CST);
 }
 
 static inline int
 thread_test_flag(struct thread *thread, unsigned long flag)
 {
-    barrier();
-    return ((thread->flags & flag) != 0);
+    return (atomic_load(&thread->flags, ATOMIC_ACQUIRE) & flag) != 0;
 }
 
 #endif /* _KERN_THREAD_I_H */
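One last change that is easy to miss: thread_test_flag() previously paired a plain load with barrier(), which only constrains the compiler, not the hardware; it now performs an explicit acquire load, which additionally orders the read against subsequent memory accesses on the reading CPU. Spelled out with the builtin the wrapper presumably maps to:

```c
/* What the new thread_test_flag() amounts to, under the same builtin
 * mapping assumed earlier: an acquire load, then a plain bit test. */
static inline int
thread_test_flag_sketch(struct thread *thread, unsigned long flag)
{
    unsigned long flags;

    flags = __atomic_load_n(&thread->flags, __ATOMIC_ACQUIRE);
    return (flags & flag) != 0;
}
```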