Diffstat (limited to 'kern/thread.c')
-rw-r--r--  kern/thread.c  163
1 file changed, 113 insertions, 50 deletions
diff --git a/kern/thread.c b/kern/thread.c
index fba95e06..e4d8f7cf 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -91,6 +91,7 @@
#include <string.h>
#include <kern/atomic.h>
+#include <kern/clock.h>
#include <kern/condition.h>
#include <kern/cpumap.h>
#include <kern/error.h>
@@ -108,6 +109,7 @@
#include <kern/syscnt.h>
#include <kern/task.h>
#include <kern/thread.h>
+#include <kern/timer.h>
#include <kern/turnstile.h>
#include <kern/work.h>
#include <machine/cpu.h>
@@ -160,7 +162,7 @@
/*
* Default time slice for real-time round-robin scheduling.
*/
-#define THREAD_DEFAULT_RR_TIME_SLICE (THREAD_TICK_FREQ / 10)
+#define THREAD_DEFAULT_RR_TIME_SLICE (CLOCK_FREQ / 10)
/*
* Maximum number of threads which can be pulled from a remote run queue
@@ -171,7 +173,7 @@
/*
* Delay (in ticks) between two balance attempts when a run queue is idle.
*/
-#define THREAD_IDLE_BALANCE_TICKS (THREAD_TICK_FREQ / 2)
+#define THREAD_IDLE_BALANCE_TICKS (CLOCK_FREQ / 2)
/*
* Run queue properties for real-time threads.
@@ -191,7 +193,7 @@ struct thread_rt_runq {
/*
* Round slice base unit for fair-scheduling threads.
*/
-#define THREAD_FS_ROUND_SLICE_BASE (THREAD_TICK_FREQ / 10)
+#define THREAD_FS_ROUND_SLICE_BASE (CLOCK_FREQ / 10)
/*
* Group of threads sharing the same weight.
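
Note on the three changes above: the time-slice and balance-delay macros now derive from CLOCK_FREQ rather than the old THREAD_TICK_FREQ. A minimal sketch of the unit conversion this implies, assuming CLOCK_FREQ is the number of clock ticks per second (the value and the helper name below are illustrative, not part of the kernel):

#include <stdint.h>

/* Assumed value for illustration only; the real constant comes from kern/clock.h. */
#define CLOCK_FREQ 1000

/* Hypothetical helper: convert milliseconds to clock ticks, rounding up. */
static inline uint64_t
clock_ticks_from_ms(uint64_t ms)
{
    return (ms * CLOCK_FREQ + (1000 - 1)) / 1000;
}

/* With CLOCK_FREQ == 1000, (CLOCK_FREQ / 10) is a 100 ms time slice. */
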
@@ -257,7 +259,6 @@ struct thread_runq {
unsigned int idle_balance_ticks;
struct syscnt sc_schedule_intrs;
- struct syscnt sc_tick_intrs;
struct syscnt sc_boosts;
};
@@ -451,8 +452,6 @@ thread_runq_init(struct thread_runq *runq, unsigned int cpu,
runq->idle_balance_ticks = (unsigned int)-1;
snprintf(name, sizeof(name), "thread_schedule_intrs/%u", cpu);
syscnt_register(&runq->sc_schedule_intrs, name);
- snprintf(name, sizeof(name), "thread_tick_intrs/%u", cpu);
- syscnt_register(&runq->sc_tick_intrs, name);
snprintf(name, sizeof(name), "thread_boosts/%u", cpu);
syscnt_register(&runq->sc_boosts, name);
}
@@ -1862,7 +1861,7 @@ thread_lock_runq(struct thread *thread, unsigned long *flags)
struct thread_runq *runq;
for (;;) {
- runq = thread->runq;
+ runq = thread->runq; /* TODO Atomic access */
spinlock_lock_intr_save(&runq->lock, flags);
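
For context, the loop this hunk touches implements the usual check/lock/re-check pattern for locking the run queue a thread is currently attached to. A sketch of that pattern (illustration only; the unlocked read is what the new TODO refers to):

for (;;) {
    runq = thread->runq;                          /* unlocked read (see TODO) */
    spinlock_lock_intr_save(&runq->lock, flags);

    /* The thread may have migrated between the read and the lock. */
    if (runq == thread->runq) {
        return runq;
    }

    spinlock_unlock_intr_restore(&runq->lock, *flags);
}
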
@@ -2421,17 +2420,94 @@ thread_join(struct thread *thread)
thread_join_common(thread);
}
-void
-thread_sleep(struct spinlock *interlock, const void *wchan_addr,
- const char *wchan_desc)
+static int
+thread_wakeup_common(struct thread *thread, int error)
{
struct thread_runq *runq;
+ unsigned long flags;
+
+ if ((thread == NULL) || (thread == thread_self())) {
+ return ERROR_INVAL;
+ }
+
+ /*
+ * There is at most one reference on threads that were never dispatched,
+ * in which case there is no need to lock anything.
+ */
+ if (thread->runq == NULL) {
+ assert(thread->state != THREAD_RUNNING);
+ thread_clear_wchan(thread);
+ thread->state = THREAD_RUNNING;
+ } else {
+ runq = thread_lock_runq(thread, &flags);
+
+ if (thread->state == THREAD_RUNNING) {
+ thread_unlock_runq(runq, flags);
+ return ERROR_INVAL;
+ }
+
+ thread_clear_wchan(thread);
+ thread->state = THREAD_RUNNING;
+ thread_unlock_runq(runq, flags);
+ }
+
+ thread_preempt_disable();
+ cpu_intr_save(&flags);
+
+ if (!thread->pinned) {
+ runq = thread_get_real_sched_ops(thread)->select_runq(thread);
+ } else {
+ runq = thread->runq;
+ spinlock_lock(&runq->lock);
+ }
+
+ thread->wakeup_error = error;
+ thread_runq_wakeup(runq, thread);
+ spinlock_unlock(&runq->lock);
+ cpu_intr_restore(flags);
+ thread_preempt_enable();
+
+ return 0;
+}
+
+int
+thread_wakeup(struct thread *thread)
+{
+ return thread_wakeup_common(thread, 0);
+}
+
+struct thread_timeout_waiter {
+ struct thread *thread;
+ struct timer timer;
+};
+
+static void
+thread_timeout(struct timer *timer)
+{
+ struct thread_timeout_waiter *waiter;
+
+ waiter = structof(timer, struct thread_timeout_waiter, timer);
+ thread_wakeup_common(waiter->thread, ERROR_TIMEDOUT);
+}
+
+static int
+thread_sleep_common(struct spinlock *interlock, const void *wchan_addr,
+ const char *wchan_desc, bool timed, uint64_t ticks)
+{
+ struct thread_timeout_waiter waiter;
+ struct thread_runq *runq;
struct thread *thread;
unsigned long flags;
thread = thread_self();
assert(thread->preempt == 1);
+ if (timed) {
+ waiter.thread = thread;
+ timer_init(&waiter.timer, thread_timeout, TIMER_INTR);
+ timer_schedule(&waiter.timer, ticks);
+ }
+
runq = thread_runq_local();
spinlock_lock_intr_save(&runq->lock, &flags);
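
The timeout path above arms an on-stack timer whose callback only receives the timer pointer, recovers the enclosing waiter with structof(), and wakes the sleeping thread with ERROR_TIMEDOUT. A standalone sketch of that container-of idiom, with illustrative type names (timer_example, waiter_example) that are not part of the kernel:

#include <stddef.h>

#define structof(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_example {
    unsigned long ticks;
};

struct waiter_example {
    void *thread;
    struct timer_example timer;
};

static void
timeout_example(struct timer_example *timer)
{
    struct waiter_example *waiter;

    /* Walk back from the embedded timer to its containing waiter. */
    waiter = structof(timer, struct waiter_example, timer);
    (void)waiter; /* e.g. wake waiter->thread with a timeout error */
}
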
@@ -2448,58 +2524,49 @@ thread_sleep(struct spinlock *interlock, const void *wchan_addr,
spinlock_unlock_intr_restore(&runq->lock, flags);
+ if (timed) {
+ timer_cancel(&waiter.timer);
+ }
+
if (interlock != NULL) {
spinlock_lock(interlock);
thread_preempt_enable_no_resched();
}
assert(thread->preempt == 1);
+
+ return thread->wakeup_error;
}
void
-thread_wakeup(struct thread *thread)
+thread_sleep(struct spinlock *interlock, const void *wchan_addr,
+ const char *wchan_desc)
{
- struct thread_runq *runq;
- unsigned long flags;
-
- if ((thread == NULL) || (thread == thread_self())) {
- return;
- }
-
- /*
- * There is at most one reference on threads that were never dispatched,
- * in which case there is no need to lock anything.
- */
- if (thread->runq == NULL) {
- assert(thread->state != THREAD_RUNNING);
- thread_clear_wchan(thread);
- thread->state = THREAD_RUNNING;
- } else {
- runq = thread_lock_runq(thread, &flags);
+ int error;
- if (thread->state == THREAD_RUNNING) {
- thread_unlock_runq(runq, flags);
- return;
- }
+ error = thread_sleep_common(interlock, wchan_addr, wchan_desc, false, 0);
+ assert(!error);
+}
- thread_clear_wchan(thread);
- thread->state = THREAD_RUNNING;
- thread_unlock_runq(runq, flags);
- }
+int
+thread_timedsleep(struct spinlock *interlock, const void *wchan_addr,
+ const char *wchan_desc, uint64_t ticks)
+{
+ return thread_sleep_common(interlock, wchan_addr, wchan_desc, true, ticks);
+}
+void
+thread_delay(uint64_t ticks, bool absolute)
+{
thread_preempt_disable();
- cpu_intr_save(&flags);
- if (!thread->pinned) {
- runq = thread_get_real_sched_ops(thread)->select_runq(thread);
- } else {
- runq = thread->runq;
- spinlock_lock(&runq->lock);
+ if (!absolute) {
+ /* Add a tick to avoid quantization errors */
+ ticks += clock_get_time() + 1;
}
- thread_runq_wakeup(runq, thread);
- spinlock_unlock(&runq->lock);
- cpu_intr_restore(flags);
+ thread_timedsleep(NULL, thread_self(), "delay", ticks);
+
thread_preempt_enable();
}
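
Putting the new interfaces together: a hypothetical caller that waits for a flag with a timeout. It assumes the convention visible in thread_sleep_common() (the interlock is held on entry and re-acquired before returning) and, as thread_delay() above suggests, that the ticks argument is an absolute clock time. All example_* names are illustrative; a waker would set the flag and call thread_wakeup() on the sleeping thread.

static struct spinlock example_lock;
static bool example_done;

static int
example_wait(uint64_t timeout_ticks)
{
    int error = 0;

    spinlock_lock(&example_lock);

    while (!example_done && !error) {
        error = thread_timedsleep(&example_lock, &example_done, "example",
                                  clock_get_time() + timeout_ticks);
    }

    spinlock_unlock(&example_lock);

    /* error is 0 if the flag was set, ERROR_TIMEDOUT on expiry. */
    return error;
}
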
@@ -2570,7 +2637,7 @@ thread_schedule_intr(void)
}
void
-thread_tick_intr(void)
+thread_report_periodic_event(void)
{
const struct thread_sched_ops *ops;
struct thread_runq *runq;
@@ -2579,10 +2646,6 @@ thread_tick_intr(void)
thread_assert_interrupted();
runq = thread_runq_local();
- syscnt_inc(&runq->sc_tick_intrs);
- llsync_report_periodic_event();
- sref_report_periodic_event();
- work_report_periodic_event();
thread = thread_self();
spinlock_lock(&runq->lock);
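
Beyond the rename, this hunk drops the per-tick counter and the llsync/sref/work periodic reports from the thread module. A heavily hedged sketch of where they presumably move, assuming a consolidated tick handler in the new clock module (the name clock_tick_intr and its call site are assumptions, not shown in this diff):

void
clock_tick_intr(void)   /* hypothetical name */
{
    /* Periodic reports formerly issued from thread_tick_intr(). */
    llsync_report_periodic_event();
    sref_report_periodic_event();
    work_report_periodic_event();

    /* Renamed scheduler tick processing. */
    thread_report_periodic_event();
}
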