summaryrefslogtreecommitdiff
path: root/kern/rtmutex.c
diff options
context:
space:
mode:
authorAgustina Arzille <avarzille@riseup.net>2017-04-03 16:09:51 +0200
committerRichard Braun <rbraun@sceen.net>2017-04-04 22:07:06 +0200
commitb1730c99f882fc2662c6b64371a4b11a8231bb9f (patch)
treec4fa5fa51287aee6d6cb372f1cfa8f6413ababd6 /kern/rtmutex.c
parentd5bb14cf6a8305bda2a5a73ce727e5309996a20a (diff)
Use the new atomic operations interface
Stick to a sequentially consistent model for most atomic operations as it matches the semantics of the existing code. Each call site will have to be reevaluated in order to switch to more relaxed accesses where possible.
Diffstat (limited to 'kern/rtmutex.c')
-rw-r--r--kern/rtmutex.c16
1 file changed, 5 insertions, 11 deletions
diff --git a/kern/rtmutex.c b/kern/rtmutex.c
index f79f8a8..3f4251f 100644
--- a/kern/rtmutex.c
+++ b/kern/rtmutex.c
@@ -19,23 +19,17 @@
#include <stdint.h>
#include <kern/assert.h>
+#include <kern/atomic.h>
#include <kern/rtmutex.h>
#include <kern/rtmutex_i.h>
#include <kern/rtmutex_types.h>
#include <kern/thread.h>
#include <kern/turnstile.h>
-#include <machine/atomic.h>
static void
rtmutex_set_contended(struct rtmutex *rtmutex)
{
- uintptr_t owner, prev_owner;
-
- do {
- owner = rtmutex->owner;
- prev_owner = atomic_cas_uintptr(&rtmutex->owner, owner,
- owner | RTMUTEX_CONTENDED);
- } while (prev_owner != owner);
+ atomic_or(&rtmutex->owner, RTMUTEX_CONTENDED, ATOMIC_SEQ_CST);
}
void
@@ -55,7 +49,7 @@ rtmutex_lock_slow(struct rtmutex *rtmutex)
bits = RTMUTEX_CONTENDED;
for (;;) {
- prev_owner = atomic_cas_uintptr(&rtmutex->owner, bits, owner | bits);
+ prev_owner = atomic_cas_seq_cst(&rtmutex->owner, bits, owner | bits);
assert((prev_owner & bits) == bits);
if (prev_owner == bits) {
@@ -70,7 +64,7 @@ rtmutex_lock_slow(struct rtmutex *rtmutex)
turnstile_own(turnstile);
if (turnstile_empty(turnstile)) {
- prev_owner = atomic_swap_uintptr(&rtmutex->owner, owner);
+ prev_owner = atomic_swap_seq_cst(&rtmutex->owner, owner);
assert(prev_owner == (owner | bits));
}
@@ -95,7 +89,7 @@ rtmutex_unlock_slow(struct rtmutex *rtmutex)
turnstile = turnstile_acquire(rtmutex);
assert(turnstile != NULL);
- prev_owner = atomic_swap_uintptr(&rtmutex->owner,
+ prev_owner = atomic_swap_seq_cst(&rtmutex->owner,
RTMUTEX_FORCE_WAIT | RTMUTEX_CONTENDED);
assert((prev_owner & RTMUTEX_OWNER_MASK) == owner);