 arch/x86/machine/boot.c |  2 ++
 kern/spinlock.h         | 17 +++++++++++++++--
 kern/thread.c           | 23 ++++++++++++++++++-----
 kern/thread.h           |  8 ++++++++
 4 files changed, 43 insertions(+), 7 deletions(-)
diff --git a/arch/x86/machine/boot.c b/arch/x86/machine/boot.c
index f25606a0..98c3b9ce 100644
--- a/arch/x86/machine/boot.c
+++ b/arch/x86/machine/boot.c
@@ -52,6 +52,7 @@
 #include <kern/stddef.h>
 #include <kern/stdint.h>
 #include <kern/string.h>
+#include <kern/thread.h>
 #include <machine/biosmem.h>
 #include <machine/boot.h>
 #include <machine/cga.h>
@@ -268,6 +269,7 @@ boot_main(void)
 {
     trap_setup();
     cpu_setup();
+    thread_bootstrap();
     pmap_bootstrap();
     cga_setup();
     boot_show_version();
diff --git a/kern/spinlock.h b/kern/spinlock.h
index 53e83932..2767d995 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -25,6 +25,7 @@
 #define _KERN_SPINLOCK_H
 
 #include <kern/assert.h>
+#include <kern/thread.h>
 #include <machine/atomic.h>
 #include <machine/cpu.h>
 
@@ -55,12 +56,21 @@ spinlock_assert_locked(struct spinlock *lock)
 /*
  * Attempt to acquire a spin lock.
  *
- * Return false if acquired, true if busy.
+ * Return true if acquired, false if busy.
  */
 static inline int
 spinlock_trylock(struct spinlock *lock)
 {
-    return atomic_cas(&lock->locked, 0, 1);
+    unsigned long busy;
+
+    thread_preempt_disable();
+    busy = atomic_cas(&lock->locked, 0, 1);
+
+    if (!busy)
+        return 1;
+
+    thread_preempt_enable();
+    return 0;
 }
 
 /*
@@ -69,6 +79,8 @@ spinlock_trylock(struct spinlock *lock)
 static inline void
 spinlock_lock(struct spinlock *lock)
 {
+    thread_preempt_disable();
+
     while (atomic_cas(&lock->locked, 0, 1))
         cpu_pause();
 }
@@ -80,6 +92,7 @@ static inline void
 spinlock_unlock(struct spinlock *lock)
 {
     atomic_swap(&lock->locked, 0);
+    thread_preempt_enable();
 }
 
 #endif /* _KERN_SPINLOCK_H */
diff --git a/kern/thread.c b/kern/thread.c
index 959208ab..236c6145 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -31,6 +31,12 @@
 struct thread_runq thread_runqs[MAX_CPUS];
 
 /*
+ * Statically allocated thread which prevents initialization code from
+ * crashing when implicitly using preemption control operations.
+ */
+static struct thread thread_dummy __initdata;
+
+/*
  * Caches for allocated threads and their stacks.
  */
 static struct kmem_cache thread_cache;
@@ -39,7 +45,7 @@ static struct kmem_cache thread_stack_cache;
 static void __init
 thread_runq_init(struct thread_runq *runq)
 {
-    runq->current = NULL;
+    runq->current = &thread_dummy;
     list_init(&runq->threads);
 }
 
@@ -65,17 +71,24 @@ thread_runq_dequeue(struct thread_runq *runq)
 }
 
 void __init
-thread_setup(void)
+thread_bootstrap(void)
 {
     size_t i;
 
+    thread_dummy.flags = 0;
+    thread_dummy.preempt = 0;
+
+    for (i = 0; i < ARRAY_SIZE(thread_runqs); i++)
+        thread_runq_init(&thread_runqs[i]);
+}
+
+void __init
+thread_setup(void)
+{
     kmem_cache_init(&thread_cache, "thread", sizeof(struct thread),
                     CPU_L1_SIZE, NULL, NULL, NULL, 0);
     kmem_cache_init(&thread_stack_cache, "thread_stack", STACK_SIZE,
                     CPU_L1_SIZE, NULL, NULL, NULL, 0);
-
-    for (i = 0; i < ARRAY_SIZE(thread_runqs); i++)
-        thread_runq_init(&thread_runqs[i]);
 }
 
 static void
diff --git a/kern/thread.h b/kern/thread.h
index 3c6395c8..755f3056 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -63,6 +63,14 @@ struct thread_runq {
 extern struct thread_runq thread_runqs[MAX_CPUS];
 
 /*
+ * Early initialization of the thread module.
+ *
+ * This function makes it possible to use preemption control operations while
+ * the system is initializing itself.
+ */
+void thread_bootstrap(void);
+
+/*
  * Initialize the thread module.
  */
 void thread_setup(void);
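
Note on the reworked locking primitives: both acquisition paths now disable
preemption before touching the lock word, and preemption stays disabled for as
long as the lock is held, so a lock holder can no longer be preempted while
another thread on the same processor spins on the lock. The sketch below is
hypothetical caller code, not part of this change; it assumes a zero-initialized
struct spinlock is unlocked, which matches the atomic_cas(&lock->locked, 0, 1)
acquisition path in the patch.

    #include <kern/spinlock.h>

    static struct spinlock example_lock;    /* in .bss, zeroed, i.e. unlocked */
    static unsigned long example_count;

    static void
    example_increment(void)
    {
        /* spinlock_lock() disables preemption before spinning, so this
         * critical section cannot be preempted on the local processor. */
        spinlock_lock(&example_lock);
        example_count++;
        spinlock_unlock(&example_lock);     /* re-enables preemption */
    }

    static int
    example_try_increment(void)
    {
        /* Per the corrected comment, spinlock_trylock() returns true
         * (nonzero) on success. On failure it has already re-enabled
         * preemption itself, so the caller has nothing to undo. */
        if (!spinlock_trylock(&example_lock))
            return 0;

        example_count++;
        spinlock_unlock(&example_lock);
        return 1;
    }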
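
The thread_dummy change matters because preemption control presumably operates
on the current thread of the local run queue: before the scheduler has a real
current thread, runq->current must still point at something with a valid
preempt counter, which is why thread_bootstrap() installs the dummy thread with
preempt set to 0 before spinlock.h's operations can be used. The following is a
rough, hypothetical sketch of what the operations might look like; they are not
shown in this diff, the thread_current() accessor is an assumption, and a real
implementation would also need compiler/memory barriers.

    /* Hypothetical sketch, assuming thread_current() returns
     * runq->current for the local processor. */
    static inline void
    thread_preempt_disable(void)
    {
        thread_current()->preempt++;    /* nests: a counter, not a flag */
    }

    static inline void
    thread_preempt_enable(void)
    {
        thread_current()->preempt--;
    }

Under that reading, a spinlock taken in boot_main() after thread_bootstrap()
merely bumps the dummy thread's counter instead of dereferencing a NULL
current pointer, which is what the comment on thread_dummy alludes to.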