| author | Agustina Arzille <avarzille@riseup.net> | 2017-05-05 04:01:13 -1000 |
|---|---|---|
| committer | Richard Braun <rbraun@sceen.net> | 2017-05-09 21:17:58 +0200 |
| commit | bcbe8dc991a9ec63f30d50ed4cca89fd9e6b7ff7 | |
| tree | 389517510b8d19082d02f36453dbf68356b548fb | |
| parent | b5864f82b9c0d8ea2e825e7d86bd8cdeb825f870 | |
Replace sequential consistency with more efficient orders
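Most of the hunks below follow one pattern: the operation that takes a lock (or otherwise gains ownership) is weakened from sequentially consistent to acquire ordering, the matching operation that hands ownership back is weakened to release ordering, and read-modify-write updates of shared flag words use acquire-release. As a rough sketch of why that is enough, here is a minimal lock fast path written with standard C11 atomics rather than the kernel's atomic_* wrappers; the toy_lock type and function names are hypothetical and only illustrate the ordering choice.

```c
/* Minimal illustration, not X15 code: hypothetical toy_lock names, written
 * with standard C11 atomics instead of the kernel's atomic_* wrappers. */
#include <stdatomic.h>
#include <stdbool.h>

struct toy_lock {
    atomic_uint state;              /* 0 = unlocked, 1 = locked */
};

static bool
toy_lock_tryacquire(struct toy_lock *lock)
{
    unsigned int prev = 0;

    /* Acquire on success: nothing in the critical section may be
     * reordered before the lock is taken.  Relaxed on failure: nothing
     * was acquired, so no ordering is needed. */
    return atomic_compare_exchange_strong_explicit(&lock->state, &prev, 1,
                                                   memory_order_acquire,
                                                   memory_order_relaxed);
}

static void
toy_lock_release(struct toy_lock *lock)
{
    /* Release: every write made while holding the lock becomes visible
     * to the next thread whose acquire operation observes this store. */
    atomic_store_explicit(&lock->state, 0, memory_order_release);
}
```

Acquire on the locking side keeps the critical section from being hoisted above the CAS; release on the unlocking side keeps it from sinking below the store. Neither path needs a single total order over all atomic operations, which is what ATOMIC_SEQ_CST was paying for.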
| -rw-r--r-- | kern/bitmap.h | 4 |
|---|---|---|
| -rw-r--r-- | kern/mutex.c | 4 |
| -rw-r--r-- | kern/mutex_i.h | 4 |
| -rw-r--r-- | kern/rtmutex.c | 8 |
| -rw-r--r-- | kern/rtmutex_i.h | 4 |
| -rw-r--r-- | kern/semaphore_i.h | 4 |
| -rw-r--r-- | kern/spinlock.c | 6 |
| -rw-r--r-- | kern/spinlock_i.h | 4 |
| -rw-r--r-- | kern/sref.c | 8 |
| -rw-r--r-- | kern/syscnt.h | 2 |
| -rw-r--r-- | kern/thread.c | 2 |
| -rw-r--r-- | kern/thread.h | 4 |
| -rw-r--r-- | kern/thread_i.h | 4 |
13 files changed, 29 insertions, 29 deletions
diff --git a/kern/bitmap.h b/kern/bitmap.h
index 6b7f2d79..a10fb512 100644
--- a/kern/bitmap.h
+++ b/kern/bitmap.h
@@ -78,7 +78,7 @@ bitmap_set_atomic(unsigned long *bm, int bit)
         bitmap_lookup(bm, bit);
     }

-    atomic_or(bm, bitmap_mask(bit), ATOMIC_SEQ_CST);
+    atomic_or_acq_rel(bm, bitmap_mask(bit));
 }

 static inline void
@@ -98,7 +98,7 @@ bitmap_clear_atomic(unsigned long *bm, int bit)
         bitmap_lookup(bm, bit);
     }

-    atomic_and(bm, ~bitmap_mask(bit), ATOMIC_SEQ_CST);
+    atomic_and_acq_rel(bm, ~bitmap_mask(bit));
 }

 static inline int
diff --git a/kern/mutex.c b/kern/mutex.c
index 00077f26..7899bef9 100644
--- a/kern/mutex.c
+++ b/kern/mutex.c
@@ -34,7 +34,7 @@ mutex_lock_slow(struct mutex *mutex)
     sleepq = sleepq_lend(mutex, false, &flags);

     for (;;) {
-        state = atomic_swap_seq_cst(&mutex->state, MUTEX_CONTENDED);
+        state = atomic_swap_acquire(&mutex->state, MUTEX_CONTENDED);

         if (state == MUTEX_UNLOCKED) {
             break;
@@ -44,7 +44,7 @@ mutex_lock_slow(struct mutex *mutex)
     }

     if (sleepq_empty(sleepq)) {
-        state = atomic_swap_seq_cst(&mutex->state, MUTEX_LOCKED);
+        state = atomic_swap_acquire(&mutex->state, MUTEX_LOCKED);
         assert(state == MUTEX_CONTENDED);
     }

diff --git a/kern/mutex_i.h b/kern/mutex_i.h
index d80cab21..a4a40eb5 100644
--- a/kern/mutex_i.h
+++ b/kern/mutex_i.h
@@ -31,7 +31,7 @@
 static inline unsigned int
 mutex_lock_fast(struct mutex *mutex)
 {
-    return atomic_cas_seq_cst(&mutex->state, MUTEX_UNLOCKED, MUTEX_LOCKED);
+    return atomic_cas_acquire(&mutex->state, MUTEX_UNLOCKED, MUTEX_LOCKED);
 }

 static inline unsigned int
@@ -39,7 +39,7 @@ mutex_unlock_fast(struct mutex *mutex)
 {
     unsigned int state;

-    state = atomic_swap_seq_cst(&mutex->state, MUTEX_UNLOCKED);
+    state = atomic_swap_release(&mutex->state, MUTEX_UNLOCKED);
     assert((state == MUTEX_LOCKED) || (state == MUTEX_CONTENDED));
     return state;
 }
diff --git a/kern/rtmutex.c b/kern/rtmutex.c
index 3f4251f0..6f639ddf 100644
--- a/kern/rtmutex.c
+++ b/kern/rtmutex.c
@@ -29,7 +29,7 @@
 static void
 rtmutex_set_contended(struct rtmutex *rtmutex)
 {
-    atomic_or(&rtmutex->owner, RTMUTEX_CONTENDED, ATOMIC_SEQ_CST);
+    atomic_or_acq_rel(&rtmutex->owner, RTMUTEX_CONTENDED);
 }

 void
@@ -49,7 +49,7 @@ rtmutex_lock_slow(struct rtmutex *rtmutex)
     bits = RTMUTEX_CONTENDED;

     for (;;) {
-        prev_owner = atomic_cas_seq_cst(&rtmutex->owner, bits, owner | bits);
+        prev_owner = atomic_cas_acquire(&rtmutex->owner, bits, owner | bits);
         assert((prev_owner & bits) == bits);

         if (prev_owner == bits) {
@@ -64,7 +64,7 @@ rtmutex_lock_slow(struct rtmutex *rtmutex)
     turnstile_own(turnstile);

     if (turnstile_empty(turnstile)) {
-        prev_owner = atomic_swap_seq_cst(&rtmutex->owner, owner);
+        prev_owner = atomic_swap_acquire(&rtmutex->owner, owner);
         assert(prev_owner == (owner | bits));
     }

@@ -89,7 +89,7 @@ rtmutex_unlock_slow(struct rtmutex *rtmutex)
     turnstile = turnstile_acquire(rtmutex);
     assert(turnstile != NULL);

-    prev_owner = atomic_swap_seq_cst(&rtmutex->owner,
+    prev_owner = atomic_swap_release(&rtmutex->owner,
                                      RTMUTEX_FORCE_WAIT | RTMUTEX_CONTENDED);
     assert((prev_owner & RTMUTEX_OWNER_MASK) == owner);

diff --git a/kern/rtmutex_i.h b/kern/rtmutex_i.h
index d34fb5f4..2f2cc17f 100644
--- a/kern/rtmutex_i.h
+++ b/kern/rtmutex_i.h
@@ -57,7 +57,7 @@ rtmutex_lock_fast(struct rtmutex *rtmutex)

     owner = (uintptr_t)thread_self();
     rtmutex_assert_owner_aligned(owner);
-    return atomic_cas_seq_cst(&rtmutex->owner, 0, owner);
+    return atomic_cas_acquire(&rtmutex->owner, 0, owner);
 }

 static inline uintptr_t
@@ -67,7 +67,7 @@ rtmutex_unlock_fast(struct rtmutex *rtmutex)

     owner = (uintptr_t)thread_self();
     rtmutex_assert_owner_aligned(owner);
-    prev_owner = atomic_cas_seq_cst(&rtmutex->owner, owner, 0);
+    prev_owner = atomic_cas_release(&rtmutex->owner, owner, 0);
     assert((prev_owner & RTMUTEX_OWNER_MASK) == owner);
     return prev_owner;
 }
diff --git a/kern/semaphore_i.h b/kern/semaphore_i.h
index 9aa68e07..54985062 100644
--- a/kern/semaphore_i.h
+++ b/kern/semaphore_i.h
@@ -37,7 +37,7 @@ semaphore_dec(struct semaphore *semaphore)
             break;
         }

-        prev = atomic_cas_seq_cst(&semaphore->value, value, value - 1);
+        prev = atomic_cas_acquire(&semaphore->value, value, value - 1);
     } while (prev != value);

     return value;
@@ -48,7 +48,7 @@ semaphore_inc(struct semaphore *semaphore)
 {
     unsigned int prev;

-    prev = atomic_fetch_add(&semaphore->value, 1, ATOMIC_SEQ_CST);
+    prev = atomic_fetch_add(&semaphore->value, 1, ATOMIC_RELEASE);
     assert(prev != SEMAPHORE_VALUE_MAX);
     return prev;
 }
diff --git a/kern/spinlock.c b/kern/spinlock.c
index 8f42e9c2..f341406f 100644
--- a/kern/spinlock.c
+++ b/kern/spinlock.c
@@ -196,7 +196,7 @@ spinlock_store_first_qid(struct spinlock *lock, unsigned int newqid)
     do {
         oldval = read_once(lock->value);
         newval = newqid | (oldval & SPINLOCK_QID_MASK);
-        prev = atomic_cas_seq_cst(&lock->value, oldval, newval);
+        prev = atomic_cas_acquire(&lock->value, oldval, newval);
     } while (prev != oldval);
 }

@@ -220,7 +220,7 @@ spinlock_swap_last_qid(struct spinlock *lock, unsigned int newqid)
         oldval = read_once(lock->value);
         newval = (oldval & (SPINLOCK_QID_MASK << SPINLOCK_QID_MAX_BITS))
                  | newqid;
-        prev = atomic_cas_seq_cst(&lock->value, oldval, newval);
+        prev = atomic_cas_acquire(&lock->value, oldval, newval);
     } while (prev != oldval);

     return prev & SPINLOCK_QID_MASK;
@@ -231,7 +231,7 @@ spinlock_try_downgrade(struct spinlock *lock, unsigned int oldqid)
 {
     unsigned int prev;

-    prev = atomic_cas_seq_cst(&lock->value, oldqid, SPINLOCK_QID_LOCKED);
+    prev = atomic_cas_acquire(&lock->value, oldqid, SPINLOCK_QID_LOCKED);
     assert((prev >> SPINLOCK_QID_MAX_BITS) == 0);
     assert(prev != SPINLOCK_QID_NULL);

diff --git a/kern/spinlock_i.h b/kern/spinlock_i.h
index 444ced94..45018033 100644
--- a/kern/spinlock_i.h
+++ b/kern/spinlock_i.h
@@ -41,7 +41,7 @@ spinlock_lock_fast(struct spinlock *lock)
 {
     unsigned int prev;

-    prev = atomic_cas_seq_cst(&lock->value, SPINLOCK_UNLOCKED, SPINLOCK_LOCKED);
+    prev = atomic_cas_acquire(&lock->value, SPINLOCK_UNLOCKED, SPINLOCK_LOCKED);

     if (unlikely(prev != SPINLOCK_UNLOCKED)) {
         return ERROR_BUSY;
@@ -55,7 +55,7 @@ spinlock_unlock_fast(struct spinlock *lock)
 {
     unsigned int prev;

-    prev = atomic_cas_seq_cst(&lock->value, SPINLOCK_LOCKED, SPINLOCK_UNLOCKED);
+    prev = atomic_cas_release(&lock->value, SPINLOCK_LOCKED, SPINLOCK_UNLOCKED);

     if (unlikely(prev != SPINLOCK_LOCKED)) {
         return ERROR_BUSY;
diff --git a/kern/sref.c b/kern/sref.c
index 9ce7a837..3f399a30 100644
--- a/kern/sref.c
+++ b/kern/sref.c
@@ -259,13 +259,13 @@ sref_weakref_init(struct sref_weakref *weakref, struct sref_counter *counter)
 static void
 sref_weakref_mark_dying(struct sref_weakref *weakref)
 {
-    atomic_or(&weakref->addr, SREF_WEAKREF_DYING, ATOMIC_SEQ_CST);
+    atomic_or_acq_rel(&weakref->addr, SREF_WEAKREF_DYING);
 }

 static void
 sref_weakref_clear_dying(struct sref_weakref *weakref)
 {
-    atomic_and(&weakref->addr, SREF_WEAKREF_MASK, ATOMIC_SEQ_CST);
+    atomic_and_acq_rel(&weakref->addr, SREF_WEAKREF_MASK);
 }

 static int
@@ -274,7 +274,7 @@ sref_weakref_kill(struct sref_weakref *weakref)
     uintptr_t addr, oldval;

     addr = weakref->addr | SREF_WEAKREF_DYING;
-    oldval = atomic_cas_seq_cst(&weakref->addr, addr, (uintptr_t)NULL);
+    oldval = atomic_cas_release(&weakref->addr, addr, (uintptr_t)NULL);

     if (oldval != addr) {
         assert((oldval & SREF_WEAKREF_MASK) == (addr & SREF_WEAKREF_MASK));
@@ -292,7 +292,7 @@ sref_weakref_tryget(struct sref_weakref *weakref)
     do {
         addr = weakref->addr;
         newval = addr & SREF_WEAKREF_MASK;
-        oldval = atomic_cas_seq_cst(&weakref->addr, addr, newval);
+        oldval = atomic_cas_acquire(&weakref->addr, addr, newval);
     } while (oldval != addr);

     return (struct sref_counter *)newval;
diff --git a/kern/syscnt.h b/kern/syscnt.h
index c28f3b52..a4bae3d7 100644
--- a/kern/syscnt.h
+++ b/kern/syscnt.h
@@ -62,7 +62,7 @@ void syscnt_register(struct syscnt *syscnt, const char *name);
 static inline void
 syscnt_add(struct syscnt *syscnt, int64_t delta)
 {
-    atomic_add(&syscnt->value, delta, ATOMIC_SEQ_CST);
+    atomic_add(&syscnt->value, delta, ATOMIC_RELAXED);
 }

 static inline uint64_t
diff --git a/kern/thread.c b/kern/thread.c
index 42150e6d..12d42453 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -2724,7 +2724,7 @@ thread_key_create(unsigned int *keyp, thread_dtor_fn_t dtor)
 {
     unsigned int key;

-    key = atomic_fetch_add(&thread_nr_keys, 1, ATOMIC_SEQ_CST);
+    key = atomic_fetch_add(&thread_nr_keys, 1, ATOMIC_RELAXED);

     if (key >= THREAD_KEYS_MAX) {
         panic("thread: maximum number of keys exceeded");
diff --git a/kern/thread.h b/kern/thread.h
index 3fa2a1f8..ee58c43f 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -270,7 +270,7 @@ thread_ref(struct thread *thread)
 {
     unsigned long nr_refs;

-    nr_refs = atomic_fetch_add(&thread->nr_refs, 1, ATOMIC_SEQ_CST);
+    nr_refs = atomic_fetch_add(&thread->nr_refs, 1, ATOMIC_RELAXED);
     assert(nr_refs != (unsigned long)-1);
 }

@@ -279,7 +279,7 @@ thread_unref(struct thread *thread)
 {
     unsigned long nr_refs;

-    nr_refs = atomic_fetch_sub(&thread->nr_refs, 1, ATOMIC_SEQ_CST);
+    nr_refs = atomic_fetch_sub_acq_rel(&thread->nr_refs, 1);
     assert(nr_refs != 0);

     if (nr_refs == 1) {
diff --git a/kern/thread_i.h b/kern/thread_i.h
index 34761e56..2e1b88aa 100644
--- a/kern/thread_i.h
+++ b/kern/thread_i.h
@@ -195,13 +195,13 @@ void thread_destroy(struct thread *thread);
 static inline void
 thread_set_flag(struct thread *thread, unsigned long flag)
 {
-    atomic_or(&thread->flags, flag, ATOMIC_SEQ_CST);
+    atomic_or_acq_rel(&thread->flags, flag);
 }

 static inline void
 thread_clear_flag(struct thread *thread, unsigned long flag)
 {
-    atomic_and(&thread->flags, ~flag, ATOMIC_SEQ_CST);
+    atomic_and_acq_rel(&thread->flags, ~flag);
 }

 static inline int
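The counter hunks above (syscnt_add(), thread_key_create() and thread_ref()) weaken plain counters to relaxed ordering: the value is only ever consumed as a number, so atomicity alone is needed. thread_unref() keeps acquire-release because the final decrement must both publish this thread's accesses to the object (release) and synchronize with every other thread's prior accesses before destruction may begin (acquire). A minimal illustration with standard C11 atomics follows; the toy_* names are hypothetical and not part of the kernel.

```c
/* Minimal illustration, not X15 code: hypothetical toy_* names.
 * Shows when relaxed ordering is enough and why a reference-count
 * release still needs acquire-release. */
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

struct toy_stats {
    atomic_uint_fast64_t nr_events;     /* only ever read as a number */
};

static void
toy_stats_add(struct toy_stats *stats, uint64_t delta)
{
    /* No other memory location is published through this counter,
     * so atomicity alone is required: relaxed is enough. */
    atomic_fetch_add_explicit(&stats->nr_events, delta, memory_order_relaxed);
}

struct toy_object {
    atomic_uint nr_refs;
};

static void
toy_object_unref(struct toy_object *object,
                 void (*destroy)(struct toy_object *))
{
    unsigned int nr_refs;

    /* Release: this thread's accesses to the object happen before the
     * decrement.  Acquire: destruction happens after every other
     * thread's accesses.  Acq_rel suffices; seq_cst is not needed. */
    nr_refs = atomic_fetch_sub_explicit(&object->nr_refs, 1,
                                        memory_order_acq_rel);
    assert(nr_refs != 0);

    if (nr_refs == 1) {
        destroy(object);
    }
}
```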