summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--  kern/atomic.h   |  3 ---
-rw-r--r--  kern/bitmap.h   | 14 ++++++++++++--
-rw-r--r--  kern/bitmap_i.h |  2 ++
-rw-r--r--  kern/rtmutex.c  |  4 ++--
-rw-r--r--  kern/sref.c     | 10 +++++-----
-rw-r--r--  kern/thread_i.h |  4 ++--
6 files changed, 23 insertions(+), 14 deletions(-)
diff --git a/kern/atomic.h b/kern/atomic.h
index 63f0ac73..6c0105dd 100644
--- a/kern/atomic.h
+++ b/kern/atomic.h
@@ -135,7 +135,4 @@ MACRO_END
#define atomic_fetch_sub_acq_rel(ptr, val) \
atomic_fetch_sub(ptr, val, ATOMIC_ACQ_REL)
-#define atomic_or_acq_rel(ptr, val) atomic_or(ptr, val, ATOMIC_ACQ_REL)
-#define atomic_and_acq_rel(ptr, val) atomic_and(ptr, val, ATOMIC_ACQ_REL)
-
#endif /* _KERN_ATOMIC_H */
diff --git a/kern/bitmap.h b/kern/bitmap.h
index a604d17c..c206944a 100644
--- a/kern/bitmap.h
+++ b/kern/bitmap.h
@@ -78,7 +78,7 @@ bitmap_set_atomic(unsigned long *bm, int bit)
bitmap_lookup(bm, bit);
}
- atomic_or_acq_rel(bm, bitmap_mask(bit));
+ atomic_or(bm, bitmap_mask(bit), ATOMIC_RELEASE);
}
static inline void
@@ -98,7 +98,7 @@ bitmap_clear_atomic(unsigned long *bm, int bit)
bitmap_lookup(bm, bit);
}
- atomic_and_acq_rel(bm, ~bitmap_mask(bit));
+ atomic_and(bm, ~bitmap_mask(bit), ATOMIC_RELEASE);
}
static inline int
@@ -111,6 +111,16 @@ bitmap_test(const unsigned long *bm, int bit)
return ((*bm & bitmap_mask(bit)) != 0);
}
+static inline int
+bitmap_test_atomic(const unsigned long *bm, int bit)
+{
+ if (bit >= LONG_BIT) {
+ bitmap_lookup(bm, bit);
+ }
+
+ return ((atomic_load(bm, ATOMIC_ACQUIRE) & bitmap_mask(bit)) != 0);
+}
+
static inline void
bitmap_and(unsigned long *a, const unsigned long *b, int nr_bits)
{
diff --git a/kern/bitmap_i.h b/kern/bitmap_i.h
index 9b79d3ce..11c23ebb 100644
--- a/kern/bitmap_i.h
+++ b/kern/bitmap_i.h
@@ -29,6 +29,8 @@
* to a bit inside the word pointed by the former.
*
* Implemented as a macro for const-correctness.
+ *
+ * TODO Fix interface.
*/
#define bitmap_lookup(bm, bit) \
MACRO_BEGIN \
diff --git a/kern/rtmutex.c b/kern/rtmutex.c
index 09f011c4..e78c3899 100644
--- a/kern/rtmutex.c
+++ b/kern/rtmutex.c
@@ -29,7 +29,7 @@
static void
rtmutex_set_contended(struct rtmutex *rtmutex)
{
- atomic_or_acq_rel(&rtmutex->owner, RTMUTEX_CONTENDED);
+ atomic_or(&rtmutex->owner, RTMUTEX_CONTENDED, ATOMIC_RELEASE);
}
void
@@ -64,7 +64,7 @@ rtmutex_lock_slow(struct rtmutex *rtmutex)
turnstile_own(turnstile);
if (turnstile_empty(turnstile)) {
- prev_owner = atomic_swap_acquire(&rtmutex->owner, owner);
+ prev_owner = atomic_swap(&rtmutex->owner, owner, ATOMIC_RELAXED);
assert(prev_owner == (owner | bits));
}
diff --git a/kern/sref.c b/kern/sref.c
index 5aad8dab..bc3f9e98 100644
--- a/kern/sref.c
+++ b/kern/sref.c
@@ -259,13 +259,13 @@ sref_weakref_init(struct sref_weakref *weakref, struct sref_counter *counter)
static void
sref_weakref_mark_dying(struct sref_weakref *weakref)
{
- atomic_or_acq_rel(&weakref->addr, SREF_WEAKREF_DYING);
+ atomic_or(&weakref->addr, SREF_WEAKREF_DYING, ATOMIC_RELEASE);
}
static void
sref_weakref_clear_dying(struct sref_weakref *weakref)
{
- atomic_and_acq_rel(&weakref->addr, SREF_WEAKREF_MASK);
+ atomic_and(&weakref->addr, SREF_WEAKREF_MASK, ATOMIC_RELEASE);
}
static int
@@ -273,8 +273,8 @@ sref_weakref_kill(struct sref_weakref *weakref)
{
uintptr_t addr, oldval;
- addr = weakref->addr | SREF_WEAKREF_DYING;
- oldval = atomic_cas_release(&weakref->addr, addr, (uintptr_t)NULL);
+ addr = atomic_load(&weakref->addr, ATOMIC_RELAXED) | SREF_WEAKREF_DYING;
+ oldval = atomic_cas(&weakref->addr, addr, (uintptr_t)NULL, ATOMIC_RELAXED);
if (oldval != addr) {
assert((oldval & SREF_WEAKREF_MASK) == (addr & SREF_WEAKREF_MASK));
@@ -290,7 +290,7 @@ sref_weakref_tryget(struct sref_weakref *weakref)
uintptr_t addr, oldval, newval;
do {
- addr = weakref->addr;
+ addr = atomic_load(&weakref->addr, ATOMIC_RELAXED);
newval = addr & SREF_WEAKREF_MASK;
oldval = atomic_cas_acquire(&weakref->addr, addr, newval);
} while (oldval != addr);
diff --git a/kern/thread_i.h b/kern/thread_i.h
index 2e1b88aa..6f08c335 100644
--- a/kern/thread_i.h
+++ b/kern/thread_i.h
@@ -195,13 +195,13 @@ void thread_destroy(struct thread *thread);
static inline void
thread_set_flag(struct thread *thread, unsigned long flag)
{
- atomic_or_acq_rel(&thread->flags, flag);
+ atomic_or(&thread->flags, flag, ATOMIC_RELEASE);
}
static inline void
thread_clear_flag(struct thread *thread, unsigned long flag)
{
- atomic_and_acq_rel(&thread->flags, ~flag);
+ atomic_and(&thread->flags, ~flag, ATOMIC_RELEASE);
}
static inline int