Diffstat (limited to 'kern/thread.c')
-rw-r--r--  kern/thread.c  68
1 file changed, 34 insertions(+), 34 deletions(-)
diff --git a/kern/thread.c b/kern/thread.c
index d8f0de1..9fe6434 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 Richard Braun.
+ * Copyright (c) 2012-2018 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -92,17 +92,16 @@
#include <kern/atomic.h>
#include <kern/clock.h>
-#include <kern/condition.h>
#include <kern/cpumap.h>
#include <kern/error.h>
#include <kern/init.h>
#include <kern/kmem.h>
#include <kern/list.h>
-#include <kern/llsync.h>
#include <kern/macros.h>
#include <kern/panic.h>
#include <kern/percpu.h>
#include <kern/perfmon.h>
+#include <kern/rcu.h>
#include <kern/shell.h>
#include <kern/sleepq.h>
#include <kern/spinlock.h>
@@ -637,8 +636,6 @@ thread_runq_schedule(struct thread_runq *runq)
assert(!cpu_intr_enabled());
spinlock_assert_locked(&runq->lock);
- llsync_report_context_switch();
-
thread_clear_flag(prev, THREAD_YIELD);
thread_runq_put_prev(runq, prev);
@@ -657,6 +654,9 @@ thread_runq_schedule(struct thread_runq *runq)
if (likely(prev != next)) {
thread_runq_schedule_unload(prev);
+ rcu_report_context_switch(thread_rcu_reader(prev));
+ spinlock_transfer_owner(&runq->lock, next);
+
/*
* That's where the true context switch occurs. The next thread must
* unlock the run queue and reenable preemption. Note that unlocking
@@ -669,15 +669,18 @@ thread_runq_schedule(struct thread_runq *runq)
* The thread is dispatched on a processor once again.
*
* Keep in mind the system state may have changed a lot since this
- * function was called. In particular, the next thread may have been
- * destroyed, and must not be referenced any more.
+ * function was called. In particular :
+ * - The next thread may have been destroyed, and must not be
+ * referenced any more.
+ * - The current thread may have been migrated to another processor.
*/
barrier();
-
thread_runq_schedule_load(prev);
- /* The thread might have been moved to another processor */
+ next = NULL;
runq = thread_runq_local();
+ } else {
+ next = NULL;
}
assert(prev->preempt_level == THREAD_SUSPEND_PREEMPT_LEVEL);
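
The two lines added above tie the scheduler into the new RCU machinery: every context switch is reported for the outgoing thread's reader, and ownership of the run queue lock is handed to the thread that will actually release it after the switch. As a rough sketch of what such reporting can look like, assuming a per-thread reader that merely tracks read-side nesting (the structure layout and the rcu_quiescent_report() helper below are illustrative assumptions, not the actual x15 implementation):

/* Illustrative per-thread RCU reader state; the field is an assumption. */
struct rcu_reader {
    unsigned int level;     /* read-side critical section nesting */
};

/* Assumed hook into the grace-period machinery. */
void rcu_quiescent_report(struct rcu_reader *reader);

static inline void
rcu_reader_init(struct rcu_reader *reader)
{
    reader->level = 0;
}

/*
 * Called by the scheduler with preemption disabled. The switch only
 * counts as a quiescent state when the outgoing thread is outside any
 * read-side critical section.
 */
void
rcu_report_context_switch(struct rcu_reader *reader)
{
    if (reader->level == 0) {
        rcu_quiescent_report(reader);   /* assumed grace-period hook */
    }
}

In such a scheme the call sits right before the switch to the next thread so that the grace-period state is updated for the thread that is being scheduled out, not the one about to run.
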
@@ -1713,6 +1716,7 @@ thread_init_booter(unsigned int cpu)
booter->flags = 0;
booter->intr_level = 0;
booter->preempt_level = 1;
+ rcu_reader_init(&booter->rcu_reader);
cpumap_fill(&booter->cpumap);
thread_set_user_sched_policy(booter, THREAD_SCHED_POLICY_IDLE);
thread_set_user_sched_class(booter, THREAD_SCHED_CLASS_IDLE);
@@ -1836,12 +1840,11 @@ thread_init(struct thread *thread, void *stack,
}
turnstile_td_init(&thread->turnstile_td);
- thread->last_cond = NULL;
thread->propagate_priority = false;
thread->preempt_level = THREAD_SUSPEND_PREEMPT_LEVEL;
thread->pin_level = 0;
thread->intr_level = 0;
- thread->llsync_level = 0;
+ rcu_reader_init(&thread->rcu_reader);
cpumap_copy(&thread->cpumap, cpumap);
thread_set_user_sched_policy(thread, attr->policy);
thread_set_user_sched_class(thread, thread_policy_to_class(attr->policy));
@@ -2137,7 +2140,6 @@ static void
thread_idle(void *arg)
{
struct thread *self;
- int error;
(void)arg;
@@ -2145,14 +2147,6 @@ thread_idle(void *arg)
for (;;) {
thread_preempt_disable();
- error = sref_unregister();
-
- if (error) {
- assert(error == ERROR_BUSY);
- goto error_sref;
- }
-
- llsync_unregister();
for (;;) {
cpu_intr_disable();
@@ -2165,10 +2159,6 @@ thread_idle(void *arg)
cpu_idle();
}
- llsync_register();
- sref_register();
-
-error_sref:
thread_preempt_enable();
}
}
@@ -2340,23 +2330,33 @@ thread_setup(void)
return 0;
}
+#ifdef CONFIG_THREAD_STACK_GUARD
+#define THREAD_STACK_GUARD_INIT_OP_DEPS \
+ INIT_OP_DEP(vm_kmem_setup, true), \
+ INIT_OP_DEP(vm_map_setup, true), \
+ INIT_OP_DEP(vm_page_setup, true),
+#else /* CONFIG_THREAD_STACK_GUARD */
+#define THREAD_STACK_GUARD_INIT_OP_DEPS
+#endif /* CONFIG_THREAD_STACK_GUARD */
+
+#ifdef CONFIG_PERFMON
+#define THREAD_PERFMON_INIT_OP_DEPS \
+ INIT_OP_DEP(perfmon_bootstrap, true),
+#else /* CONFIG_PERFMON */
+#define THREAD_PERFMON_INIT_OP_DEPS
+#endif /* CONFIG_PERFMON */
+
INIT_OP_DEFINE(thread_setup,
INIT_OP_DEP(cpumap_setup, true),
INIT_OP_DEP(kmem_setup, true),
INIT_OP_DEP(pmap_setup, true),
-#ifdef CONFIG_PERFMON
- INIT_OP_DEP(perfmon_bootstrap, true),
-#endif
INIT_OP_DEP(sleepq_setup, true),
INIT_OP_DEP(task_setup, true),
INIT_OP_DEP(thread_bootstrap, true),
INIT_OP_DEP(turnstile_setup, true),
-#ifdef CONFIG_THREAD_STACK_GUARD
- INIT_OP_DEP(vm_kmem_setup, true),
- INIT_OP_DEP(vm_map_setup, true),
- INIT_OP_DEP(vm_page_setup, true),
-#endif
- );
+ THREAD_PERFMON_INIT_OP_DEPS
+ THREAD_STACK_GUARD_INIT_OP_DEPS
+);
void __init
thread_ap_setup(void)
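
Hoisting the optional dependencies out of the INIT_OP_DEFINE() argument list is presumably a portability fix: a preprocessor directive appearing inside the argument list of a macro invocation is undefined behavior (C11 6.10.3p11), so the conditional entries are now folded into helper macros that expand either to extra INIT_OP_DEP() entries, each carrying its own trailing comma, or to nothing at all. With CONFIG_PERFMON enabled and CONFIG_THREAD_STACK_GUARD disabled, for example, the new definition expands to roughly:

INIT_OP_DEFINE(thread_setup,
               INIT_OP_DEP(cpumap_setup, true),
               INIT_OP_DEP(kmem_setup, true),
               INIT_OP_DEP(pmap_setup, true),
               INIT_OP_DEP(sleepq_setup, true),
               INIT_OP_DEP(task_setup, true),
               INIT_OP_DEP(thread_bootstrap, true),
               INIT_OP_DEP(turnstile_setup, true),
               INIT_OP_DEP(perfmon_bootstrap, true),  /* from THREAD_PERFMON_INIT_OP_DEPS */
);

Either way the last expansion leaves a trailing comma before the closing parenthesis, i.e. an empty final argument, which a variadic macro invocation accepts.
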
@@ -2654,11 +2654,11 @@ thread_run_scheduler(void)
assert(thread == runq->current);
assert(thread->preempt_level == (THREAD_SUSPEND_PREEMPT_LEVEL - 1));
- llsync_register();
sref_register();
spinlock_lock(&runq->lock);
thread = thread_runq_get_next(thread_runq_local());
+ spinlock_transfer_owner(&runq->lock, thread);
tcb_load(&thread->tcb);
}
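
The same ownership hand-off appears here: the scheduler context locks the run queue, but the thread loaded by tcb_load() is the one that eventually unlocks it, so the lock's recorded owner is updated first. Assuming spinlock_transfer_owner() exists for ownership/debug checks rather than for the lock's fast path, a minimal sketch could be (the struct spinlock layout shown is an assumption, not x15's):

#include <assert.h>

struct thread;

/* Illustrative lock layout with an owner field for debug checks. */
struct spinlock {
    unsigned long locked;
    struct thread *owner;           /* meaningful only while held */
};

static inline void
spinlock_transfer_owner(struct spinlock *lock, struct thread *new_owner)
{
    assert(lock->locked);
    lock->owner = new_owner;        /* new_owner performs the eventual unlock */
}
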