author    Remy Noel <mocramis@gmail.com>  2018-04-21 00:03:24 +0200
committer Remy Noel <mocramis@gmail.com>  2018-04-21 00:03:24 +0200
commit    6b98275f3864ee6a413e99b9bddc7a3c029af2d2 (patch)
tree      e30c300f8e12e2468f5ca1d778cbc3ce9c11a975
parent    6b3df26a8822b08f87a2aab5085d4e51c5168583 (diff)
parent    89ebd57e0bb3a63f2f794ce41dd4c831b4aeb35c (diff)
Merge branch 'master' into perfmon
-rw-r--r--  kern/atomic.h                  | 32
-rw-r--r--  kern/clock.c                   |  4
-rw-r--r--  kern/clock.h                   |  4
-rw-r--r--  kern/mutex/mutex_adaptive.c    |  8
-rw-r--r--  kern/mutex/mutex_adaptive_i.h  |  6
-rw-r--r--  kern/mutex/mutex_plain.c       |  2
-rw-r--r--  kern/mutex/mutex_plain_i.h     |  5
-rw-r--r--  kern/rcu.c                     |  2
-rw-r--r--  kern/rtmutex.c                 |  7
-rw-r--r--  kern/rtmutex_i.h               |  4
-rw-r--r--  kern/semaphore_i.h             |  2
-rw-r--r--  kern/spinlock.c                |  2
-rw-r--r--  kern/spinlock_i.h              |  3
-rw-r--r--  kern/task.h                    |  2
-rw-r--r--  kern/thread.h                  |  2
-rwxr-xr-x  tools/build_configs.py         |  4
-rw-r--r--  vm/vm_page.h                   |  5
17 files changed, 35 insertions, 59 deletions
diff --git a/kern/atomic.h b/kern/atomic.h
index 5d99da9..d37a28b 100644
--- a/kern/atomic.h
+++ b/kern/atomic.h
@@ -104,36 +104,6 @@ MACRO_END
#define atomic_store(ptr, val, mo) __atomic_store_n(ptr, val, mo)
#endif
-/*
- * Thread fences.
- */
-
-#define atomic_fence_acquire() __atomic_thread_fence(ATOMIC_ACQUIRE)
-#define atomic_fence_release() __atomic_thread_fence(ATOMIC_RELEASE)
-#define atomic_fence_acq_rel() __atomic_thread_fence(ATOMIC_ACQ_REL)
-#define atomic_fence_seq_cst() __atomic_thread_fence(ATOMIC_SEQ_CST)
-
-/*
- * Common shortcuts.
- */
-
-#define atomic_load_acquire(ptr) atomic_load(ptr, ATOMIC_ACQUIRE)
-#define atomic_store_release(ptr, val) atomic_store(ptr, val, ATOMIC_RELEASE)
-
-#define atomic_cas_acquire(ptr, oval, nval) \
- atomic_cas(ptr, oval, nval, ATOMIC_ACQUIRE)
-
-#define atomic_cas_release(ptr, oval, nval) \
- atomic_cas(ptr, oval, nval, ATOMIC_RELEASE)
-
-#define atomic_cas_acq_rel(ptr, oval, nval) \
- atomic_cas(ptr, oval, nval, ATOMIC_ACQ_REL)
-
-#define atomic_swap_acquire(ptr, val) atomic_swap(ptr, val, ATOMIC_ACQUIRE)
-#define atomic_swap_release(ptr, val) atomic_swap(ptr, val, ATOMIC_RELEASE)
-#define atomic_swap_acq_rel(ptr, val) atomic_swap(ptr, val, ATOMIC_ACQ_REL)
-
-#define atomic_fetch_sub_acq_rel(ptr, val) \
- atomic_fetch_sub(ptr, val, ATOMIC_ACQ_REL)
+#define atomic_fence(mo) __atomic_thread_fence(mo)
#endif /* KERN_ATOMIC_H */
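Note on the hunk above: the acquire/release/acq_rel shortcut macros and the per-order
fence macros are removed, and every call site now passes the memory order explicitly.
A minimal sketch of the resulting call style, using a hypothetical counter
example_refs and assuming kern/atomic.h is available:

    #include <kern/atomic.h>

    static unsigned long example_refs;

    static void
    example_acquire_ref(void)
    {
        unsigned long prev;

        /* Previously: prev = atomic_cas_acquire(&example_refs, 0, 1); */
        prev = atomic_cas(&example_refs, 0, 1, ATOMIC_ACQUIRE);
        (void)prev;

        /* Previously: atomic_fence_acquire(); */
        atomic_fence(ATOMIC_ACQUIRE);
    }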
diff --git a/kern/clock.c b/kern/clock.c
index 0b72a8f..27fb9a2 100644
--- a/kern/clock.c
+++ b/kern/clock.c
@@ -82,8 +82,8 @@ void clock_tick_intr(void)
t.ticks++;
atomic_store(&clock_global_time.high2, t.high1, ATOMIC_RELAXED);
- atomic_store_release(&clock_global_time.low, t.low);
- atomic_store_release(&clock_global_time.high1, t.high1);
+ atomic_store(&clock_global_time.low, t.low, ATOMIC_RELEASE);
+ atomic_store(&clock_global_time.high1, t.high1, ATOMIC_RELEASE);
#endif /* ATOMIC_HAVE_64B_OPS */
}
diff --git a/kern/clock.h b/kern/clock.h
index 7beae9c..a5b1f69 100644
--- a/kern/clock.h
+++ b/kern/clock.h
@@ -80,8 +80,8 @@ clock_get_time(void)
*/
do {
- high1 = atomic_load_acquire(&clock_global_time.high1);
- low = atomic_load_acquire(&clock_global_time.low);
+ high1 = atomic_load(&clock_global_time.high1, ATOMIC_ACQUIRE);
+ low = atomic_load(&clock_global_time.low, ATOMIC_ACQUIRE);
high2 = atomic_load(&clock_global_time.high2, ATOMIC_RELAXED);
} while (high1 != high2);
diff --git a/kern/mutex/mutex_adaptive.c b/kern/mutex/mutex_adaptive.c
index b2af456..d17ad9d 100644
--- a/kern/mutex/mutex_adaptive.c
+++ b/kern/mutex/mutex_adaptive.c
@@ -135,8 +135,8 @@ mutex_adaptive_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
mutex_adaptive_set_contended(mutex);
do {
- owner = atomic_cas_acquire(&mutex->owner, MUTEX_ADAPTIVE_CONTENDED,
- self | MUTEX_ADAPTIVE_CONTENDED);
+ owner = atomic_cas(&mutex->owner, MUTEX_ADAPTIVE_CONTENDED,
+ self | MUTEX_ADAPTIVE_CONTENDED, ATOMIC_ACQUIRE);
assert(owner & MUTEX_ADAPTIVE_CONTENDED);
if (mutex_adaptive_get_thread(owner) == NULL) {
@@ -254,8 +254,8 @@ mutex_adaptive_unlock_slow(struct mutex *mutex)
self = (uintptr_t)thread_self() | MUTEX_ADAPTIVE_CONTENDED;
for (;;) {
- owner = atomic_cas_release(&mutex->owner, self,
- MUTEX_ADAPTIVE_CONTENDED);
+ owner = atomic_cas(&mutex->owner, self,
+ MUTEX_ADAPTIVE_CONTENDED, ATOMIC_RELEASE);
if (owner == self) {
break;
diff --git a/kern/mutex/mutex_adaptive_i.h b/kern/mutex/mutex_adaptive_i.h
index 05e9764..b123251 100644
--- a/kern/mutex/mutex_adaptive_i.h
+++ b/kern/mutex/mutex_adaptive_i.h
@@ -55,7 +55,8 @@ mutex_adaptive_lock_fast(struct mutex *mutex)
{
uintptr_t owner;
- owner = atomic_cas_acquire(&mutex->owner, 0, (uintptr_t)thread_self());
+ owner = atomic_cas(&mutex->owner, 0,
+ (uintptr_t)thread_self(), ATOMIC_ACQUIRE);
if (unlikely(owner != 0)) {
return EBUSY;
@@ -69,7 +70,8 @@ mutex_adaptive_unlock_fast(struct mutex *mutex)
{
uintptr_t owner;
- owner = atomic_cas_release(&mutex->owner, (uintptr_t)thread_self(), 0);
+ owner = atomic_cas(&mutex->owner, (uintptr_t)thread_self(),
+ 0, ATOMIC_RELEASE);
if (unlikely(owner & MUTEX_ADAPTIVE_CONTENDED)) {
return EBUSY;
diff --git a/kern/mutex/mutex_plain.c b/kern/mutex/mutex_plain.c
index f12f13f..55e7a25 100644
--- a/kern/mutex/mutex_plain.c
+++ b/kern/mutex/mutex_plain.c
@@ -84,7 +84,7 @@ mutex_plain_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
sleepq = sleepq_lend(mutex, false, &flags);
for (;;) {
- state = atomic_swap_release(&mutex->state, MUTEX_CONTENDED);
+ state = atomic_swap(&mutex->state, MUTEX_CONTENDED, ATOMIC_RELEASE);
if (state == MUTEX_UNLOCKED) {
break;
diff --git a/kern/mutex/mutex_plain_i.h b/kern/mutex/mutex_plain_i.h
index d28fd92..a792829 100644
--- a/kern/mutex/mutex_plain_i.h
+++ b/kern/mutex/mutex_plain_i.h
@@ -49,7 +49,8 @@ mutex_plain_lock_fast(struct mutex *mutex)
{
unsigned int state;
- state = atomic_cas_acquire(&mutex->state, MUTEX_UNLOCKED, MUTEX_LOCKED);
+ state = atomic_cas(&mutex->state, MUTEX_UNLOCKED,
+ MUTEX_LOCKED, ATOMIC_ACQUIRE);
if (unlikely(state != MUTEX_UNLOCKED)) {
return EBUSY;
@@ -63,7 +64,7 @@ mutex_plain_unlock_fast(struct mutex *mutex)
{
unsigned int state;
- state = atomic_swap_release(&mutex->state, MUTEX_UNLOCKED);
+ state = atomic_swap(&mutex->state, MUTEX_UNLOCKED, ATOMIC_RELEASE);
if (unlikely(state == MUTEX_CONTENDED)) {
return EBUSY;
diff --git a/kern/rcu.c b/kern/rcu.c
index 834a99b..9f2b993 100644
--- a/kern/rcu.c
+++ b/kern/rcu.c
@@ -292,7 +292,7 @@ rcu_data_check_gp_state(const struct rcu_data *data,
*global_gp_state = atomic_load(&data->gp_state, ATOMIC_RELAXED);
if (unlikely(local_gp_state != *global_gp_state)) {
- atomic_fence_acquire();
+ atomic_fence(ATOMIC_ACQUIRE);
return true;
}
diff --git a/kern/rtmutex.c b/kern/rtmutex.c
index 55d17cd..6567b06 100644
--- a/kern/rtmutex.c
+++ b/kern/rtmutex.c
@@ -107,7 +107,7 @@ rtmutex_lock_slow_common(struct rtmutex *rtmutex, bool timed, uint64_t ticks)
bits = RTMUTEX_CONTENDED;
for (;;) {
- owner = atomic_cas_acquire(&rtmutex->owner, bits, self | bits);
+ owner = atomic_cas(&rtmutex->owner, bits, self | bits, ATOMIC_ACQUIRE);
assert((owner & bits) == bits);
if (owner == bits) {
@@ -211,8 +211,9 @@ rtmutex_unlock_slow(struct rtmutex *rtmutex)
}
}
- owner = atomic_swap_release(&rtmutex->owner,
- RTMUTEX_FORCE_WAIT | RTMUTEX_CONTENDED);
+ owner = atomic_swap(&rtmutex->owner,
+ RTMUTEX_FORCE_WAIT | RTMUTEX_CONTENDED,
+ ATOMIC_RELEASE);
assert(rtmutex_get_thread(owner) == thread_self());
turnstile_disown(turnstile);
diff --git a/kern/rtmutex_i.h b/kern/rtmutex_i.h
index 9d3689d..64ff69a 100644
--- a/kern/rtmutex_i.h
+++ b/kern/rtmutex_i.h
@@ -57,7 +57,7 @@ rtmutex_lock_fast(struct rtmutex *rtmutex)
owner = (uintptr_t)thread_self();
rtmutex_assert_owner_aligned(owner);
- return atomic_cas_acquire(&rtmutex->owner, 0, owner);
+ return atomic_cas(&rtmutex->owner, 0, owner, ATOMIC_ACQUIRE);
}
static inline uintptr_t
@@ -67,7 +67,7 @@ rtmutex_unlock_fast(struct rtmutex *rtmutex)
owner = (uintptr_t)thread_self();
rtmutex_assert_owner_aligned(owner);
- prev_owner = atomic_cas_release(&rtmutex->owner, owner, 0);
+ prev_owner = atomic_cas(&rtmutex->owner, owner, 0, ATOMIC_RELEASE);
assert((prev_owner & RTMUTEX_OWNER_MASK) == owner);
return prev_owner;
}
diff --git a/kern/semaphore_i.h b/kern/semaphore_i.h
index a4c50b8..d58ad0b 100644
--- a/kern/semaphore_i.h
+++ b/kern/semaphore_i.h
@@ -39,7 +39,7 @@ semaphore_dec(struct semaphore *semaphore)
break;
}
- prev = atomic_cas_acquire(&semaphore->value, value, value - 1);
+ prev = atomic_cas(&semaphore->value, value, value - 1, ATOMIC_ACQUIRE);
} while (prev != value);
return value;
diff --git a/kern/spinlock.c b/kern/spinlock.c
index a1c013c..71e60cb 100644
--- a/kern/spinlock.c
+++ b/kern/spinlock.c
@@ -260,7 +260,7 @@ spinlock_get_remote_qnode(uint32_t qid)
unsigned int ctx, cpu;
/* This fence synchronizes with queueing */
- atomic_fence_acquire();
+ atomic_fence(ATOMIC_ACQUIRE);
ctx = spinlock_qid_ctx(qid);
cpu = spinlock_qid_cpu(qid);
diff --git a/kern/spinlock_i.h b/kern/spinlock_i.h
index da1233b..a3e2fe1 100644
--- a/kern/spinlock_i.h
+++ b/kern/spinlock_i.h
@@ -65,7 +65,8 @@ spinlock_lock_fast(struct spinlock *lock)
{
uint32_t prev;
- prev = atomic_cas_acquire(&lock->value, SPINLOCK_UNLOCKED, SPINLOCK_LOCKED);
+ prev = atomic_cas(&lock->value, SPINLOCK_UNLOCKED,
+ SPINLOCK_LOCKED, ATOMIC_ACQUIRE);
if (unlikely(prev != SPINLOCK_UNLOCKED)) {
return EBUSY;
diff --git a/kern/task.h b/kern/task.h
index 5355c47..d6e9eb4 100644
--- a/kern/task.h
+++ b/kern/task.h
@@ -64,7 +64,7 @@ task_unref(struct task *task)
{
unsigned long nr_refs;
- nr_refs = atomic_fetch_sub_acq_rel(&task->nr_refs, 1);
+ nr_refs = atomic_fetch_sub(&task->nr_refs, 1, ATOMIC_ACQ_REL);
assert(nr_refs != 0);
if (nr_refs == 1) {
diff --git a/kern/thread.h b/kern/thread.h
index 4953035..b97dbed 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -305,7 +305,7 @@ thread_unref(struct thread *thread)
{
unsigned long nr_refs;
- nr_refs = atomic_fetch_sub_acq_rel(&thread->nr_refs, 1);
+ nr_refs = atomic_fetch_sub(&thread->nr_refs, 1, ATOMIC_ACQ_REL);
assert(nr_refs != 0);
if (nr_refs == 1) {
diff --git a/tools/build_configs.py b/tools/build_configs.py
index dab95b7..ba872d4 100755
--- a/tools/build_configs.py
+++ b/tools/build_configs.py
@@ -88,6 +88,8 @@ all_cc_options_dict = {
# option when building a configuration.
small_options_dict = {
+ 'CONFIG_CC_EXE' : ['gcc', 'clang'],
+ 'CONFIG_64BITS' : ['y', 'n'],
'CONFIG_CC_OPTIONS' : gen_cc_options_list(all_cc_options_dict),
'CONFIG_SMP' : ['y', 'n'],
'CONFIG_MAX_CPUS' : ['1', '128'],
@@ -97,8 +99,6 @@ small_options_dict = {
large_options_dict = dict(small_options_dict)
large_options_dict.update({
- 'CONFIG_CC_EXE' : ['gcc', 'clang'],
- 'CONFIG_64BITS' : ['y', 'n'],
'CONFIG_X86_PAE' : ['y', 'n'],
'CONFIG_MUTEX_ADAPTIVE' : ['y', 'n'],
'CONFIG_MUTEX_PI' : ['y', 'n'],
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 8cd7a11..4f2aaf9 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -252,7 +252,7 @@ vm_page_unref(struct vm_page *page)
{
unsigned int nr_refs;
- nr_refs = atomic_fetch_sub_acq_rel(&page->nr_refs, 1);
+ nr_refs = atomic_fetch_sub(&page->nr_refs, 1, ATOMIC_ACQ_REL);
assert(nr_refs != 0);
if (nr_refs == 1) {
@@ -272,7 +272,8 @@ vm_page_tryref(struct vm_page *page)
return EAGAIN;
}
- prev = atomic_cas_acquire(&page->nr_refs, nr_refs, nr_refs + 1);
+ prev = atomic_cas(&page->nr_refs, nr_refs,
+ nr_refs + 1, ATOMIC_ACQUIRE);
} while (prev != nr_refs);
return 0;
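The unref paths touched by this merge (task_unref, thread_unref, vm_page_unref) all
follow the same reference-release idiom: an acquire-release fetch-and-sub, so the
thread dropping the last reference observes every prior write before freeing the
object. A minimal sketch of that idiom with hypothetical names (struct obj,
obj_destroy), again assuming kern/atomic.h:

    #include <assert.h>
    #include <kern/atomic.h>

    struct obj {
        unsigned long nr_refs;
    };

    static void obj_destroy(struct obj *obj);   /* hypothetical destructor */

    static void
    obj_unref(struct obj *obj)
    {
        unsigned long nr_refs;

        /* Release orders this thread's prior writes before the decrement;
           acquire lets the last holder see them before destruction. */
        nr_refs = atomic_fetch_sub(&obj->nr_refs, 1, ATOMIC_ACQ_REL);
        assert(nr_refs != 0);

        if (nr_refs == 1) {
            obj_destroy(obj);
        }
    }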