summaryrefslogtreecommitdiff
path: root/kern/thread.c
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2014-06-18 23:35:45 +0200
committerRichard Braun <rbraun@sceen.net>2014-06-18 23:35:45 +0200
commitc3c302a2041e05765d97b1c8d44735a56da0f1cd (patch)
tree86d79fba374c6a8e4638182124aac5fa260743d0 /kern/thread.c
parentf6dd434b4b37ca7bcabed3d219bcdf7cd6fce815 (diff)
kern/thread: support thread_sleep without interlock
The thread_sleep function takes a spin lock as a parameter. This lock is used as an interlock allowing either the thread waking up or the thread being awakened to run. But when threads don't need a lock to provide that guarantee, the interlock is only overhead. This change makes thread_sleep assume disabling preemption is used as a synchronization mechanism when the interlock is NULL.
Diffstat (limited to 'kern/thread.c')
-rw-r--r--kern/thread.c19
1 file changed, 15 insertions, 4 deletions
diff --git a/kern/thread.c b/kern/thread.c
index b660a15d..4449fe8c 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -516,6 +516,9 @@ thread_runq_schedule(struct thread_runq *runq, struct thread *prev)
runq = thread_runq_local();
}
+ assert(prev->preempt == 2);
+ assert(!cpu_intr_enabled());
+ spinlock_assert_locked(&runq->lock);
return runq;
}
@@ -1871,11 +1874,15 @@ thread_sleep(struct spinlock *interlock)
unsigned long flags;
thread = thread_self();
+ assert(thread->preempt == 1);
- thread_preempt_disable();
runq = thread_runq_local();
spinlock_lock_intr_save(&runq->lock, &flags);
- spinlock_unlock(interlock);
+
+ if (interlock != NULL) {
+ thread_preempt_disable();
+ spinlock_unlock(interlock);
+ }
thread->state = THREAD_SLEEPING;
@@ -1883,9 +1890,13 @@ thread_sleep(struct spinlock *interlock)
assert(thread->state == THREAD_RUNNING);
spinlock_unlock_intr_restore(&runq->lock, flags);
- thread_preempt_enable();
- spinlock_lock(interlock);
+ if (interlock != NULL) {
+ spinlock_lock(interlock);
+ thread_preempt_enable_no_resched();
+ }
+
+ assert(thread->preempt == 1);
}
void