summary refs log tree commit diff
diff options
context:
space:
mode:
author	Richard Braun <rbraun@sceen.net>	2019-08-16 20:02:37 +0200
committer	Richard Braun <rbraun@sceen.net>	2019-08-16 20:30:48 +0200
commit	84d47c358b1d07b9155474d7075e082c52147ebc (patch)
tree	0a97fbcfe7f56637e18afc9e1ba4cefa3cea1aa5
parent	27dbd3b2fdafd6e12bdef5f653532b2ca752a6f9 (diff)
Replace some barrier()s with local atomic fences
-rw-r--r--	kern/thread.c	4
-rw-r--r--	kern/thread.h	13
2 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/kern/thread.c b/kern/thread.c
index 71943c04..0bad0752 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -96,6 +96,7 @@
#include <kern/cpumap.h>
#include <kern/init.h>
#include <kern/kmem.h>
+#include <kern/latomic.h>
#include <kern/list.h>
#include <kern/macros.h>
#include <kern/panic.h>
@@ -674,7 +675,9 @@ thread_runq_schedule(struct thread_runq *runq)
* and locking the run queue again is equivalent to a full memory
* barrier.
*/
+ latomic_fence(LATOMIC_ACQ_REL);
tcb_switch(&prev->tcb, &next->tcb);
+ latomic_fence(LATOMIC_ACQ_REL);
/*
* The thread is dispatched on a processor once again.
@@ -685,7 +688,6 @@ thread_runq_schedule(struct thread_runq *runq)
* referenced any more.
* - The current thread may have been migrated to another processor.
*/
- barrier();
thread_runq_schedule_load(prev);
next = NULL;
diff --git a/kern/thread.h b/kern/thread.h
index 0c8c1014..d77b35f3 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -43,6 +43,7 @@
#include <kern/init.h>
#include <kern/cpumap.h>
#include <kern/kernel.h>
+#include <kern/latomic.h>
#include <kern/macros.h>
#include <kern/spinlock_types.h>
#include <kern/turnstile_types.h>
@@ -548,7 +549,7 @@ thread_pin(void)
thread = thread_self();
thread->pin_level++;
assert(thread->pin_level != 0);
- barrier();
+ latomic_fence(LATOMIC_ACQ_REL);
}
/*
@@ -561,7 +562,7 @@ thread_unpin(void)
{
struct thread *thread;
- barrier();
+ latomic_fence(LATOMIC_ACQ_REL);
thread = thread_self();
assert(thread->pin_level != 0);
thread->pin_level--;
@@ -595,7 +596,7 @@ thread_preempt_disable(void)
thread = thread_self();
thread->preempt_level++;
assert(thread->preempt_level != 0);
- barrier();
+ latomic_fence(LATOMIC_ACQ_REL);
}
/*
@@ -613,7 +614,7 @@ thread_preempt_enable_no_resched(void)
{
struct thread *thread;
- barrier();
+ latomic_fence(LATOMIC_ACQ_REL);
thread = thread_self();
assert(thread->preempt_level != 0);
thread->preempt_level--;
@@ -713,7 +714,7 @@ thread_intr_enter(void)
thread->intr_level++;
assert(thread->intr_level != 0);
- barrier();
+ latomic_fence(LATOMIC_ACQ_REL);
}
/*
@@ -726,7 +727,7 @@ thread_intr_leave(void)
{
struct thread *thread;
- barrier();
+ latomic_fence(LATOMIC_ACQ_REL);
thread = thread_self();
assert(thread->intr_level != 0);
thread->intr_level--;