summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  arch/x86/machine/boot.c   4
-rw-r--r--  arch/x86/machine/cpu.c   71
-rw-r--r--  arch/x86/machine/cpu.h   13
-rw-r--r--  arch/x86/machine/pmap.c  12
-rw-r--r--  arch/x86/machine/pmap.h   9
-rw-r--r--  kern/kernel.c            28
-rw-r--r--  kern/thread.c            22
-rw-r--r--  kern/thread.h             4
8 files changed, 69 insertions, 94 deletions
diff --git a/arch/x86/machine/boot.c b/arch/x86/machine/boot.c
index 02a4050f..5e8b735f 100644
--- a/arch/x86/machine/boot.c
+++ b/arch/x86/machine/boot.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2012, 2013 Richard Braun.
+ * Copyright (c) 2010-2014 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -281,7 +281,7 @@ boot_main(void)
vm_page_info();
pic_setup();
pit_setup();
- cpu_mp_setup();
+ cpu_mp_probe();
kernel_main();
/* Never reached */
diff --git a/arch/x86/machine/cpu.c b/arch/x86/machine/cpu.c
index 5cdd85ff..5e18eb54 100644
--- a/arch/x86/machine/cpu.c
+++ b/arch/x86/machine/cpu.c
@@ -71,20 +71,10 @@ static struct cpu cpu_array[MAX_CPUS];
/*
* Number of configured processors.
- *
- * The boot version is used until all processors are configured, since some
- * modules depend on cpu_count() to adjust their behaviour when several
- * processors are present.
*/
-static unsigned int cpu_boot_array_size __initdata;
unsigned int cpu_array_size __read_mostly;
/*
- * Barrier for processor synchronization on kernel entry.
- */
-static unsigned int cpu_mp_synced __initdata;
-
-/*
* Interrupt descriptor table.
*/
static struct cpu_gate_desc cpu_idt[CPU_IDT_SIZE] __aligned(8) __read_mostly;
@@ -418,7 +408,6 @@ cpu_setup(void)
cpu_array[i].state = CPU_STATE_OFF;
}
- cpu_boot_array_size = 1;
cpu_array_size = 1;
cpu_array[0].double_fault_stack = (unsigned long)cpu_double_fault_stack;
cpu_init(&cpu_array[0]);
@@ -459,6 +448,8 @@ cpu_info(const struct cpu *cpu)
void __init
cpu_mp_register_lapic(unsigned int apic_id, int is_bsp)
{
+ static int skip_warning __initdata;
+
if (is_bsp) {
if (cpu_array[0].apic_id != CPU_INVALID_APIC_ID)
panic("cpu: another processor pretends to be the BSP");
@@ -467,17 +458,28 @@ cpu_mp_register_lapic(unsigned int apic_id, int is_bsp)
return;
}
- if (cpu_boot_array_size == ARRAY_SIZE(cpu_array)) {
- printk("cpu: ignoring processor beyond id %u\n", MAX_CPUS - 1);
+ if (cpu_array_size == ARRAY_SIZE(cpu_array)) {
+ if (!skip_warning) {
+ printk("cpu: ignoring processor beyond id %u\n", MAX_CPUS - 1);
+ skip_warning = 1;
+ }
+
return;
}
- cpu_array[cpu_boot_array_size].apic_id = apic_id;
- cpu_boot_array_size++;
+ cpu_array[cpu_array_size].apic_id = apic_id;
+ cpu_array_size++;
}
-static void __init
-cpu_mp_start_aps(void)
+void __init
+cpu_mp_probe(void)
+{
+ acpimp_setup();
+ printk("cpu: %u processor(s) configured\n", cpu_array_size);
+}
+
+void __init
+cpu_mp_setup(void)
{
uint16_t reset_vector[2];
struct cpu *cpu;
@@ -486,7 +488,7 @@ cpu_mp_start_aps(void)
size_t map_size;
unsigned int i;
- if (cpu_boot_array_size == 1)
+ if (cpu_array_size == 1)
return;
assert(BOOT_MP_TRAMPOLINE_ADDR < BIOSMEM_BASE);
@@ -522,7 +524,7 @@ cpu_mp_start_aps(void)
* Preallocate stacks now, as the kernel mappings shouldn't change while
* the APs are starting.
*/
- for (i = 1; i < cpu_boot_array_size; i++) {
+ for (i = 1; i < cpu_array_size; i++) {
cpu = &cpu_array[i];
cpu->double_fault_stack = vm_kmem_alloc(STACK_SIZE);
@@ -530,7 +532,7 @@ cpu_mp_start_aps(void)
panic("cpu: unable to allocate double fault stack for cpu%u", i);
}
- for (i = 1; i < cpu_boot_array_size; i++) {
+ for (i = 1; i < cpu_array_size; i++) {
cpu = &cpu_array[i];
boot_ap_id = i;
@@ -548,27 +550,7 @@ cpu_mp_start_aps(void)
cpu_pause();
}
- cpu_array_size = cpu_boot_array_size;
-}
-
-static void __init
-cpu_mp_info(void)
-{
- printk("cpu: %u processor(s) configured\n", cpu_array_size);
-}
-
-void __init
-cpu_mp_setup(void)
-{
- acpimp_setup();
- cpu_mp_start_aps();
- cpu_mp_info();
-}
-
-void __init
-cpu_mp_sync(void)
-{
- cpu_mp_synced = 1;
+ pmap_mp_setup();
}
void __init
@@ -579,13 +561,6 @@ cpu_ap_setup(void)
lapic_ap_setup();
}
-void __init
-cpu_ap_sync(void)
-{
- while (!cpu_mp_synced)
- cpu_pause();
-}
-
void
cpu_halt_broadcast(void)
{
diff --git a/arch/x86/machine/cpu.h b/arch/x86/machine/cpu.h
index 3c13911b..0147c357 100644
--- a/arch/x86/machine/cpu.h
+++ b/arch/x86/machine/cpu.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2011, 2012, 2013 Richard Braun.
+ * Copyright (c) 2010-2014 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -603,12 +603,12 @@ void cpu_mp_register_lapic(unsigned int apic_id, int is_bsp);
*
* On return, cpu_count() gives the actual number of managed processors.
*/
-void cpu_mp_setup(void);
+void cpu_mp_probe(void);
/*
- * Synchronize with APs on kernel entry.
+ * Start application processors.
*/
-void cpu_mp_sync(void);
+void cpu_mp_setup(void);
/*
* CPU initialization on APs.
@@ -616,11 +616,6 @@ void cpu_mp_sync(void);
void cpu_ap_setup(void);
/*
- * Synchronize with BSP on kernel entry.
- */
-void cpu_ap_sync(void);
-
-/*
* Send a scheduling interrupt to a remote processor.
*/
static inline void
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index 4a8c437a..673c6794 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -195,6 +195,7 @@ struct pmap_update_data {
} __aligned(CPU_L1_SIZE);
static struct pmap_update_data pmap_update_data[MAX_CPUS];
+static int pmap_allow_remote_updates __read_mostly;
/*
* Global list of physical maps.
@@ -707,7 +708,7 @@ pmap_update(struct pmap *pmap, unsigned long start, unsigned long end)
pmap_assert_range(pmap, start, end);
- if (cpu_count() == 1) {
+ if (!pmap_allow_remote_updates) {
pmap_update_local(pmap, start, end);
return;
}
@@ -875,6 +876,15 @@ pmap_setup(void)
pmap_ready = 1;
}
+void __init
+pmap_mp_setup(void)
+{
+ if (cpu_count() == 1)
+ return;
+
+ pmap_allow_remote_updates = 1;
+}
+
int
pmap_create(struct pmap **pmapp)
{
diff --git a/arch/x86/machine/pmap.h b/arch/x86/machine/pmap.h
index ea5692d7..2a34a8f5 100644
--- a/arch/x86/machine/pmap.h
+++ b/arch/x86/machine/pmap.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2011, 2012, 2013 Richard Braun.
+ * Copyright (c) 2010-2014 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -222,6 +222,13 @@ void pmap_update_intr(struct trap_frame *frame);
void pmap_setup(void);
/*
+ * Set up the pmap module for multiprocessor operations.
+ *
+ * This function basically enables pmap updates across processors.
+ */
+void pmap_mp_setup(void);
+
+/*
* Create a pmap for a user task.
*/
int pmap_create(struct pmap **pmapp);
diff --git a/kern/kernel.c b/kern/kernel.c
index d636d54a..ea11bf2b 100644
--- a/kern/kernel.c
+++ b/kern/kernel.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2012, 2013 Richard Braun.
+ * Copyright (c) 2011-2014 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,10 +31,6 @@ kernel_main(void)
{
assert(!cpu_intr_enabled());
- /* Enable interrupts to allow inter-processor pmap updates */
- cpu_intr_enable();
-
- /* Initialize the kernel */
rdxtree_setup();
cpumap_setup();
task_setup();
@@ -42,11 +38,13 @@ kernel_main(void)
work_setup();
llsync_setup();
- /* Rendezvous with APs */
- cpu_mp_sync();
+ /*
+ * Enabling application processors must be the last step before starting
+ * the scheduler.
+ */
+ cpu_mp_setup();
- /* Run the scheduler */
- thread_run();
+ thread_run_scheduler();
/* Never reached */
}
@@ -56,17 +54,7 @@ kernel_ap_main(void)
{
assert(!cpu_intr_enabled());
- /*
- * Enable interrupts to allow inter-processor pmap updates while the BSP
- * is initializing the kernel.
- */
- cpu_intr_enable();
-
- /* Wait for the BSP to complete kernel initialization */
- cpu_ap_sync();
-
- /* Run the scheduler */
- thread_run();
+ thread_run_scheduler();
/* Never reached */
}
diff --git a/kern/thread.c b/kern/thread.c
index 1d679bb6..3606279d 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -1324,17 +1324,14 @@ thread_bootstrap_common(unsigned int cpu)
cpumap_set(&thread_active_runqs, cpu);
- booter = &thread_booters[cpu];
-
/* Initialize only what's needed during bootstrap */
+ booter = &thread_booters[cpu];
booter->flags = 0;
booter->preempt = 1;
booter->sched_class = THREAD_SCHED_CLASS_IDLE;
cpumap_fill(&booter->cpumap);
booter->task = kernel_task;
-
thread_runq_init(&thread_runqs[cpu], booter);
- tcb_set_current(&booter->tcb);
}
void __init
@@ -1380,12 +1377,13 @@ thread_bootstrap(void)
thread_ts_highest_round = THREAD_TS_INITIAL_ROUND;
thread_bootstrap_common(0);
+ tcb_set_current(&thread_booters[0].tcb);
}
void __init
thread_ap_bootstrap(void)
{
- thread_bootstrap_common(cpu_id());
+ tcb_set_current(&thread_booters[cpu_id()].tcb);
}
static void
@@ -1706,7 +1704,10 @@ thread_setup_runq(struct thread_runq *runq)
void __init
thread_setup(void)
{
- int i;
+ int cpu;
+
+ for (cpu = 1; (unsigned int)cpu < cpu_count(); cpu++)
+ thread_bootstrap_common(cpu);
kmem_cache_init(&thread_cache, "thread", sizeof(struct thread),
CPU_L1_SIZE, NULL, NULL, NULL, 0);
@@ -1715,8 +1716,8 @@ thread_setup(void)
thread_setup_reaper();
- cpumap_for_each(&thread_active_runqs, i)
- thread_setup_runq(&thread_runqs[i]);
+ cpumap_for_each(&thread_active_runqs, cpu)
+ thread_setup_runq(&thread_runqs[cpu]);
}
int
@@ -1878,12 +1879,12 @@ thread_wakeup(struct thread *thread)
}
void __init
-thread_run(void)
+thread_run_scheduler(void)
{
struct thread_runq *runq;
struct thread *thread;
- assert(cpu_intr_enabled());
+ assert(!cpu_intr_enabled());
runq = thread_runq_local();
llsync_register_cpu(thread_runq_id(runq));
@@ -1891,7 +1892,6 @@ thread_run(void)
assert(thread == runq->current);
assert(thread->preempt == 1);
- cpu_intr_disable();
spinlock_lock(&runq->lock);
thread = thread_runq_get_next(thread_runq_local());
diff --git a/kern/thread.h b/kern/thread.h
index ea6c82e4..0efb8268 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -239,9 +239,9 @@ void thread_wakeup(struct thread *thread);
/*
* Start running threads on the local processor.
*
- * Interrupts must be enabled when calling this function.
+ * Interrupts must be disabled when calling this function.
*/
-void __noreturn thread_run(void);
+void __noreturn thread_run_scheduler(void);
/*
* Make the calling thread release the processor.