path: root/kern/thread.c
author     Richard Braun <rbraun@sceen.net>    2018-01-07 23:57:23 +0100
committer  Richard Braun <rbraun@sceen.net>    2018-01-07 23:57:23 +0100
commit     d63c9bbc67a60567b384e066583d5e5428a6ea1f (patch)
tree       94e9384f2a47890b0f62ea1c5c1472a44b727935 /kern/thread.c
parent     9ed76fef2de5bd094559bf4a85ffcecb5474136a (diff)
kern/thread: fix initialization race
Diffstat (limited to 'kern/thread.c')
-rw-r--r--  kern/thread.c  | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+), 0 deletions(-)
diff --git a/kern/thread.c b/kern/thread.c
index 5bc643c..6f2f2d8 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -340,6 +340,15 @@ static unsigned int thread_nr_keys __read_mostly;
*/
static thread_dtor_fn_t thread_dtors[THREAD_KEYS_MAX] __read_mostly;
+/*
+ * Number of processors which have requested the scheduler to run.
+ *
+ * This value is used to implement a global barrier across the entire
+ * system at boot time, so that inter-processor requests aren't lost
+ * when a processor is slower to initialize.
+ */
+static unsigned int thread_nr_boot_cpus __initdata;
+
struct thread_zombie {
struct work work;
struct thread *thread;
@@ -2578,6 +2587,26 @@ thread_delay(uint64_t ticks, bool absolute)
thread_preempt_enable();
}
+static void __init
+thread_boot_barrier(void)
+{
+ unsigned int nr_cpus;
+
+ assert(!cpu_intr_enabled());
+
+ atomic_add(&thread_nr_boot_cpus, 1, ATOMIC_RELAXED);
+
+ for (;;) {
+ nr_cpus = atomic_load(&thread_nr_boot_cpus, ATOMIC_SEQ_CST);
+
+ if (nr_cpus == cpu_count()) {
+ break;
+ }
+
+ cpu_pause();
+ }
+}
+
void __init
thread_run_scheduler(void)
{
@@ -2586,6 +2615,8 @@ thread_run_scheduler(void)
assert(!cpu_intr_enabled());
+ thread_boot_barrier();
+
runq = thread_runq_local();
thread = thread_self();
assert(thread == runq->current);
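
Note on the change (illustrative, not part of the commit): thread_boot_barrier()
is a counting rendezvous. Each CPU atomically announces its arrival, then spins
until the arrival count matches cpu_count(), so no CPU starts scheduling (and
possibly sending inter-processor requests) before every CPU is ready to receive
them. Below is a minimal sketch of the same pattern in portable C11 atomics.
The demo_* names and the fixed four-CPU count are hypothetical stand-ins for
the kernel's cpu_count() and cpu_pause(), and it is assumed that X15's
atomic_add/atomic_load wrappers map onto the corresponding C11 operations.

    #include <stdatomic.h>

    /* Hypothetical stand-in for the number of CPUs, fixed by boot time. */
    #define DEMO_NR_CPUS 4

    static atomic_uint demo_nr_boot_cpus;

    static unsigned int
    demo_cpu_count(void)
    {
        return DEMO_NR_CPUS;
    }

    static void
    demo_cpu_pause(void)
    {
        /* On real hardware, a spin-wait hint such as x86 PAUSE. */
    }

    /* Called once per CPU; returns only once every CPU has arrived. */
    static void
    demo_boot_barrier(void)
    {
        /* Publish this CPU's arrival. A relaxed increment suffices to
         * keep the count itself correct, since the counter is the only
         * data being communicated here. */
        atomic_fetch_add_explicit(&demo_nr_boot_cpus, 1,
                                  memory_order_relaxed);

        /* Spin until the count covers all CPUs. */
        while (atomic_load_explicit(&demo_nr_boot_cpus,
                                    memory_order_seq_cst)
               != demo_cpu_count()) {
            demo_cpu_pause();
        }
    }

The relaxed increment mirrors the patch. The sequentially consistent load is
also reproduced from the patch; the commit does not say whether the strongest
ordering is strictly required, and it reads as a conservative choice for a
one-shot boot path where the cost of the ordering is irrelevant.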