path: root/kern/thread.h
author     Agustina Arzille <avarzille@riseup.net>  2017-04-03 16:09:51 +0200
committer  Richard Braun <rbraun@sceen.net>         2017-04-04 22:07:06 +0200
commit     b1730c99f882fc2662c6b64371a4b11a8231bb9f (patch)
tree       c4fa5fa51287aee6d6cb372f1cfa8f6413ababd6 /kern/thread.h
parent     d5bb14cf6a8305bda2a5a73ce727e5309996a20a (diff)
Use the new atomic operations interface
Stick to a sequentially consistent model for most atomic operations as it matches the semantics of the existing code. Each call site will have to be reevaluated in order to switch to more relaxed accesses where possible.
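For context: the new kern/atomic.h calls take an explicit memory-order
argument, in the style of the C11 <stdatomic.h> fetch-and-modify
operations. Below is a minimal standalone C11 sketch of the two
operations used in this diff; it mirrors the call sites but uses the
standard atomics rather than the kernel's own macros, whose definitions
are not part of this commit.

    #include <assert.h>
    #include <stdatomic.h>

    int main(void)
    {
        atomic_ulong nr_refs = 1;
        unsigned long prev;

        /* Like atomic_fetch_add(&thread->nr_refs, 1, ATOMIC_SEQ_CST):
           atomically add 1 and return the previous value, with
           sequentially consistent ordering. */
        prev = atomic_fetch_add_explicit(&nr_refs, 1, memory_order_seq_cst);
        assert(prev == 1);

        /* Like atomic_fetch_sub(&thread->nr_refs, 1, ATOMIC_SEQ_CST). */
        prev = atomic_fetch_sub_explicit(&nr_refs, 1, memory_order_seq_cst);
        assert(prev == 2);

        return 0;
    }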
Diffstat (limited to 'kern/thread.h')
-rw-r--r--  kern/thread.h  |  6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kern/thread.h b/kern/thread.h
index c7f68f14..3fa2a1f8 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -37,12 +37,12 @@
#include <stddef.h>
#include <kern/assert.h>
+#include <kern/atomic.h>
#include <kern/condition.h>
#include <kern/cpumap.h>
#include <kern/macros.h>
#include <kern/spinlock_types.h>
#include <kern/turnstile_types.h>
-#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/tcb.h>
@@ -270,7 +270,7 @@ thread_ref(struct thread *thread)
{
unsigned long nr_refs;
- nr_refs = atomic_fetchadd_ulong(&thread->nr_refs, 1);
+ nr_refs = atomic_fetch_add(&thread->nr_refs, 1, ATOMIC_SEQ_CST);
assert(nr_refs != (unsigned long)-1);
}
@@ -279,7 +279,7 @@ thread_unref(struct thread *thread)
{
unsigned long nr_refs;
- nr_refs = atomic_fetchadd_ulong(&thread->nr_refs, -1);
+ nr_refs = atomic_fetch_sub(&thread->nr_refs, 1, ATOMIC_SEQ_CST);
assert(nr_refs != 0);
if (nr_refs == 1) {
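The hunk is truncated here, but the shape of thread_unref is the
standard fetch-and-sub reference count: the caller that observes the
count dropping from 1 released the last reference and is responsible
for teardown. A minimal standalone sketch of that pattern in plain C11
follows; struct object, object_unref, and object_destroy are
hypothetical stand-ins, since the real thread destruction path is not
part of this diff.

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct object {
        atomic_ulong nr_refs;
    };

    /* Hypothetical teardown hook, standing in for whatever
       thread_unref calls once the last reference is gone. */
    static void
    object_destroy(struct object *obj)
    {
        free(obj);
    }

    static void
    object_unref(struct object *obj)
    {
        unsigned long nr_refs;

        /* Returns the value before the subtraction, so 1 means this
           caller just dropped the last reference. */
        nr_refs = atomic_fetch_sub_explicit(&obj->nr_refs, 1,
                                            memory_order_seq_cst);
        assert(nr_refs != 0);

        if (nr_refs == 1) {
            object_destroy(obj);
        }
    }

    int main(void)
    {
        struct object *obj = malloc(sizeof(*obj));
        atomic_init(&obj->nr_refs, 1);
        object_unref(obj);   /* count 1 -> 0, destroys the object */
        return 0;
    }

If the call sites are later relaxed, as the commit message anticipates,
the usual refactoring for a reference count is memory_order_relaxed for
the increment and memory_order_release for the decrement, with an
acquire fence before teardown.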