From 8790d75d93c7e06b7b93c1078260aaf1e4ea25ca Mon Sep 17 00:00:00 2001
From: Richard Braun
Date: Wed, 18 Apr 2018 21:45:12 +0200
Subject: kern/atomic: remove shortcuts

These shortcuts just don't bring enough value.
---
 kern/atomic.h                 | 32 +-------------------------------
 kern/clock.c                  |  4 ++--
 kern/clock.h                  |  4 ++--
 kern/mutex/mutex_adaptive.c   |  8 ++++----
 kern/mutex/mutex_adaptive_i.h |  6 ++++--
 kern/mutex/mutex_plain.c      |  2 +-
 kern/mutex/mutex_plain_i.h    |  5 +++--
 kern/rcu.c                    |  2 +-
 kern/rtmutex.c                |  7 ++++---
 kern/rtmutex_i.h              |  4 ++--
 kern/semaphore_i.h            |  2 +-
 kern/spinlock.c               |  2 +-
 kern/spinlock_i.h             |  3 ++-
 kern/task.h                   |  2 +-
 kern/thread.h                 |  2 +-
 vm/vm_page.h                  |  5 +++--
 16 files changed, 33 insertions(+), 57 deletions(-)

diff --git a/kern/atomic.h b/kern/atomic.h
index 5d99da9..d37a28b 100644
--- a/kern/atomic.h
+++ b/kern/atomic.h
@@ -104,36 +104,6 @@ MACRO_END
 #define atomic_store(ptr, val, mo) __atomic_store_n(ptr, val, mo)
 #endif
 
-/*
- * Thread fences.
- */
-
-#define atomic_fence_acquire() __atomic_thread_fence(ATOMIC_ACQUIRE)
-#define atomic_fence_release() __atomic_thread_fence(ATOMIC_RELEASE)
-#define atomic_fence_acq_rel() __atomic_thread_fence(ATOMIC_ACQ_REL)
-#define atomic_fence_seq_cst() __atomic_thread_fence(ATOMIC_SEQ_CST)
-
-/*
- * Common shortcuts.
- */
-
-#define atomic_load_acquire(ptr) atomic_load(ptr, ATOMIC_ACQUIRE)
-#define atomic_store_release(ptr, val) atomic_store(ptr, val, ATOMIC_RELEASE)
-
-#define atomic_cas_acquire(ptr, oval, nval) \
-    atomic_cas(ptr, oval, nval, ATOMIC_ACQUIRE)
-
-#define atomic_cas_release(ptr, oval, nval) \
-    atomic_cas(ptr, oval, nval, ATOMIC_RELEASE)
-
-#define atomic_cas_acq_rel(ptr, oval, nval) \
-    atomic_cas(ptr, oval, nval, ATOMIC_ACQ_REL)
-
-#define atomic_swap_acquire(ptr, val) atomic_swap(ptr, val, ATOMIC_ACQUIRE)
-#define atomic_swap_release(ptr, val) atomic_swap(ptr, val, ATOMIC_RELEASE)
-#define atomic_swap_acq_rel(ptr, val) atomic_swap(ptr, val, ATOMIC_ACQ_REL)
-
-#define atomic_fetch_sub_acq_rel(ptr, val) \
-    atomic_fetch_sub(ptr, val, ATOMIC_ACQ_REL)
+#define atomic_fence(mo) __atomic_thread_fence(mo)
 
 #endif /* KERN_ATOMIC_H */
diff --git a/kern/clock.c b/kern/clock.c
index 0b72a8f..27fb9a2 100644
--- a/kern/clock.c
+++ b/kern/clock.c
@@ -82,8 +82,8 @@ void clock_tick_intr(void)
     t.ticks++;
 
     atomic_store(&clock_global_time.high2, t.high1, ATOMIC_RELAXED);
-    atomic_store_release(&clock_global_time.low, t.low);
-    atomic_store_release(&clock_global_time.high1, t.high1);
+    atomic_store(&clock_global_time.low, t.low, ATOMIC_RELEASE);
+    atomic_store(&clock_global_time.high1, t.high1, ATOMIC_RELEASE);
 
 #endif /* ATOMIC_HAVE_64B_OPS */
 }
diff --git a/kern/clock.h b/kern/clock.h
index 7beae9c..a5b1f69 100644
--- a/kern/clock.h
+++ b/kern/clock.h
@@ -80,8 +80,8 @@ clock_get_time(void)
      */
 
     do {
-        high1 = atomic_load_acquire(&clock_global_time.high1);
-        low = atomic_load_acquire(&clock_global_time.low);
+        high1 = atomic_load(&clock_global_time.high1, ATOMIC_ACQUIRE);
+        low = atomic_load(&clock_global_time.low, ATOMIC_ACQUIRE);
         high2 = atomic_load(&clock_global_time.high2, ATOMIC_RELAXED);
     } while (high1 != high2);
 
diff --git a/kern/mutex/mutex_adaptive.c b/kern/mutex/mutex_adaptive.c
index b2af456..d17ad9d 100644
--- a/kern/mutex/mutex_adaptive.c
+++ b/kern/mutex/mutex_adaptive.c
@@ -135,8 +135,8 @@ mutex_adaptive_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
     mutex_adaptive_set_contended(mutex);
 
     do {
-        owner = atomic_cas_acquire(&mutex->owner, MUTEX_ADAPTIVE_CONTENDED,
-                                   self | MUTEX_ADAPTIVE_CONTENDED);
+        owner = atomic_cas(&mutex->owner, MUTEX_ADAPTIVE_CONTENDED,
+                           self | MUTEX_ADAPTIVE_CONTENDED, ATOMIC_ACQUIRE);
         assert(owner & MUTEX_ADAPTIVE_CONTENDED);
 
         if (mutex_adaptive_get_thread(owner) == NULL) {
@@ -254,8 +254,8 @@ mutex_adaptive_unlock_slow(struct mutex *mutex)
     self = (uintptr_t)thread_self() | MUTEX_ADAPTIVE_CONTENDED;
 
     for (;;) {
-        owner = atomic_cas_release(&mutex->owner, self,
-                                   MUTEX_ADAPTIVE_CONTENDED);
+        owner = atomic_cas(&mutex->owner, self,
+                           MUTEX_ADAPTIVE_CONTENDED, ATOMIC_RELEASE);
 
         if (owner == self) {
             break;
diff --git a/kern/mutex/mutex_adaptive_i.h b/kern/mutex/mutex_adaptive_i.h
index 05e9764..b123251 100644
--- a/kern/mutex/mutex_adaptive_i.h
+++ b/kern/mutex/mutex_adaptive_i.h
@@ -55,7 +55,8 @@ mutex_adaptive_lock_fast(struct mutex *mutex)
 {
     uintptr_t owner;
 
-    owner = atomic_cas_acquire(&mutex->owner, 0, (uintptr_t)thread_self());
+    owner = atomic_cas(&mutex->owner, 0,
+                       (uintptr_t)thread_self(), ATOMIC_ACQUIRE);
 
     if (unlikely(owner != 0)) {
         return EBUSY;
@@ -69,7 +70,8 @@ mutex_adaptive_unlock_fast(struct mutex *mutex)
 {
     uintptr_t owner;
 
-    owner = atomic_cas_release(&mutex->owner, (uintptr_t)thread_self(), 0);
+    owner = atomic_cas(&mutex->owner, (uintptr_t)thread_self(),
+                       0, ATOMIC_RELEASE);
 
     if (unlikely(owner & MUTEX_ADAPTIVE_CONTENDED)) {
         return EBUSY;
diff --git a/kern/mutex/mutex_plain.c b/kern/mutex/mutex_plain.c
index f12f13f..55e7a25 100644
--- a/kern/mutex/mutex_plain.c
+++ b/kern/mutex/mutex_plain.c
@@ -84,7 +84,7 @@ mutex_plain_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
     sleepq = sleepq_lend(mutex, false, &flags);
 
     for (;;) {
-        state = atomic_swap_release(&mutex->state, MUTEX_CONTENDED);
+        state = atomic_swap(&mutex->state, MUTEX_CONTENDED, ATOMIC_RELEASE);
 
         if (state == MUTEX_UNLOCKED) {
             break;
diff --git a/kern/mutex/mutex_plain_i.h b/kern/mutex/mutex_plain_i.h
index d28fd92..a792829 100644
--- a/kern/mutex/mutex_plain_i.h
+++ b/kern/mutex/mutex_plain_i.h
@@ -49,7 +49,8 @@ mutex_plain_lock_fast(struct mutex *mutex)
 {
     unsigned int state;
 
-    state = atomic_cas_acquire(&mutex->state, MUTEX_UNLOCKED, MUTEX_LOCKED);
+    state = atomic_cas(&mutex->state, MUTEX_UNLOCKED,
+                       MUTEX_LOCKED, ATOMIC_ACQUIRE);
 
     if (unlikely(state != MUTEX_UNLOCKED)) {
         return EBUSY;
@@ -63,7 +64,7 @@ mutex_plain_unlock_fast(struct mutex *mutex)
 {
     unsigned int state;
 
-    state = atomic_swap_release(&mutex->state, MUTEX_UNLOCKED);
+    state = atomic_swap(&mutex->state, MUTEX_UNLOCKED, ATOMIC_RELEASE);
 
     if (unlikely(state == MUTEX_CONTENDED)) {
         return EBUSY;
diff --git a/kern/rcu.c b/kern/rcu.c
index 834a99b..9f2b993 100644
--- a/kern/rcu.c
+++ b/kern/rcu.c
@@ -292,7 +292,7 @@ rcu_data_check_gp_state(const struct rcu_data *data,
     *global_gp_state = atomic_load(&data->gp_state, ATOMIC_RELAXED);
 
     if (unlikely(local_gp_state != *global_gp_state)) {
-        atomic_fence_acquire();
+        atomic_fence(ATOMIC_ACQUIRE);
         return true;
     }
 
diff --git a/kern/rtmutex.c b/kern/rtmutex.c
index 55d17cd..6567b06 100644
--- a/kern/rtmutex.c
+++ b/kern/rtmutex.c
@@ -107,7 +107,7 @@ rtmutex_lock_slow_common(struct rtmutex *rtmutex, bool timed, uint64_t ticks)
     bits = RTMUTEX_CONTENDED;
 
     for (;;) {
-        owner = atomic_cas_acquire(&rtmutex->owner, bits, self | bits);
+        owner = atomic_cas(&rtmutex->owner, bits, self | bits, ATOMIC_ACQUIRE);
         assert((owner & bits) == bits);
 
         if (owner == bits) {
@@ -211,8 +211,9 @@ rtmutex_unlock_slow(struct rtmutex *rtmutex)
         }
     }
 
-    owner = atomic_swap_release(&rtmutex->owner,
-                                RTMUTEX_FORCE_WAIT | RTMUTEX_CONTENDED);
+    owner = atomic_swap(&rtmutex->owner,
+                        RTMUTEX_FORCE_WAIT | RTMUTEX_CONTENDED,
+                        ATOMIC_RELEASE);
     assert(rtmutex_get_thread(owner) == thread_self());
 
     turnstile_disown(turnstile);
diff --git a/kern/rtmutex_i.h b/kern/rtmutex_i.h
index 9d3689d..64ff69a 100644
--- a/kern/rtmutex_i.h
+++ b/kern/rtmutex_i.h
@@ -57,7 +57,7 @@ rtmutex_lock_fast(struct rtmutex *rtmutex)
 
     owner = (uintptr_t)thread_self();
     rtmutex_assert_owner_aligned(owner);
-    return atomic_cas_acquire(&rtmutex->owner, 0, owner);
+    return atomic_cas(&rtmutex->owner, 0, owner, ATOMIC_ACQUIRE);
 }
 
 static inline uintptr_t
@@ -67,7 +67,7 @@ rtmutex_unlock_fast(struct rtmutex *rtmutex)
 
     owner = (uintptr_t)thread_self();
     rtmutex_assert_owner_aligned(owner);
-    prev_owner = atomic_cas_release(&rtmutex->owner, owner, 0);
+    prev_owner = atomic_cas(&rtmutex->owner, owner, 0, ATOMIC_RELEASE);
     assert((prev_owner & RTMUTEX_OWNER_MASK) == owner);
     return prev_owner;
 }
diff --git a/kern/semaphore_i.h b/kern/semaphore_i.h
index a4c50b8..d58ad0b 100644
--- a/kern/semaphore_i.h
+++ b/kern/semaphore_i.h
@@ -39,7 +39,7 @@ semaphore_dec(struct semaphore *semaphore)
             break;
         }
 
-        prev = atomic_cas_acquire(&semaphore->value, value, value - 1);
+        prev = atomic_cas(&semaphore->value, value, value - 1, ATOMIC_ACQUIRE);
     } while (prev != value);
 
     return value;
diff --git a/kern/spinlock.c b/kern/spinlock.c
index a1c013c..71e60cb 100644
--- a/kern/spinlock.c
+++ b/kern/spinlock.c
@@ -260,7 +260,7 @@ spinlock_get_remote_qnode(uint32_t qid)
     unsigned int ctx, cpu;
 
     /* This fence synchronizes with queueing */
-    atomic_fence_acquire();
+    atomic_fence(ATOMIC_ACQUIRE);
 
     ctx = spinlock_qid_ctx(qid);
     cpu = spinlock_qid_cpu(qid);
diff --git a/kern/spinlock_i.h b/kern/spinlock_i.h
index da1233b..a3e2fe1 100644
--- a/kern/spinlock_i.h
+++ b/kern/spinlock_i.h
@@ -65,7 +65,8 @@ spinlock_lock_fast(struct spinlock *lock)
 {
     uint32_t prev;
 
-    prev = atomic_cas_acquire(&lock->value, SPINLOCK_UNLOCKED, SPINLOCK_LOCKED);
+    prev = atomic_cas(&lock->value, SPINLOCK_UNLOCKED,
+                      SPINLOCK_LOCKED, ATOMIC_ACQUIRE);
 
     if (unlikely(prev != SPINLOCK_UNLOCKED)) {
         return EBUSY;
diff --git a/kern/task.h b/kern/task.h
index 5355c47..d6e9eb4 100644
--- a/kern/task.h
+++ b/kern/task.h
@@ -64,7 +64,7 @@ task_unref(struct task *task)
 {
     unsigned long nr_refs;
 
-    nr_refs = atomic_fetch_sub_acq_rel(&task->nr_refs, 1);
+    nr_refs = atomic_fetch_sub(&task->nr_refs, 1, ATOMIC_ACQ_REL);
     assert(nr_refs != 0);
 
     if (nr_refs == 1) {
diff --git a/kern/thread.h b/kern/thread.h
index 3f1ced3..eba9bf2 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -297,7 +297,7 @@ thread_unref(struct thread *thread)
 {
     unsigned long nr_refs;
 
-    nr_refs = atomic_fetch_sub_acq_rel(&thread->nr_refs, 1);
+    nr_refs = atomic_fetch_sub(&thread->nr_refs, 1, ATOMIC_ACQ_REL);
     assert(nr_refs != 0);
 
     if (nr_refs == 1) {
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 8cd7a11..4f2aaf9 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -252,7 +252,7 @@ vm_page_unref(struct vm_page *page)
 {
     unsigned int nr_refs;
 
-    nr_refs = atomic_fetch_sub_acq_rel(&page->nr_refs, 1);
+    nr_refs = atomic_fetch_sub(&page->nr_refs, 1, ATOMIC_ACQ_REL);
     assert(nr_refs != 0);
 
     if (nr_refs == 1) {
@@ -272,7 +272,8 @@ vm_page_tryref(struct vm_page *page)
             return EAGAIN;
         }
 
-        prev = atomic_cas_acquire(&page->nr_refs, nr_refs, nr_refs + 1);
+        prev = atomic_cas(&page->nr_refs, nr_refs,
+                          nr_refs + 1, ATOMIC_ACQUIRE);
     } while (prev != nr_refs);
 
     return 0;
--
cgit v1.2.3
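
For reference, below is a minimal standalone C11 sketch of the reference-count
pattern this patch converts in task_unref(), thread_unref() and vm_page_unref().
It is not X15 code: the struct and function names are invented, and it uses
<stdatomic.h> names where the kernel uses its own atomic_*()/ATOMIC_* wrappers
from kern/atomic.h (which, as the diff shows, expand to the __atomic_* builtins
with the same memory orders).

/*
 * Illustration only: obj/obj_unref are hypothetical. The kernel form after
 * this patch would be atomic_fetch_sub(&obj->nr_refs, 1, ATOMIC_ACQ_REL)
 * and atomic_fence(ATOMIC_ACQUIRE), with the order explicit at each call site.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
    _Atomic unsigned long nr_refs;
};

static bool
obj_unref(struct obj *obj)
{
    unsigned long nr_refs;

    /* Returns the previous value; acq_rel orders the release of this
       reference against whoever frees the object. */
    nr_refs = atomic_fetch_sub_explicit(&obj->nr_refs, 1,
                                        memory_order_acq_rel);
    assert(nr_refs != 0);
    return nr_refs == 1;    /* true when the last reference was dropped */
}

int
main(void)
{
    struct obj obj;

    atomic_init(&obj.nr_refs, 2);
    printf("last ref: %d\n", obj_unref(&obj));   /* 0 */
    printf("last ref: %d\n", obj_unref(&obj));   /* 1 */

    /* Standalone equivalent of the new atomic_fence(ATOMIC_ACQUIRE),
       which replaces the removed atomic_fence_acquire() shortcut. */
    atomic_thread_fence(memory_order_acquire);
    return 0;
}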