author | Richard Braun <rbraun@sceen.net> | 2014-06-10 21:14:51 +0200
---|---|---
committer | Richard Braun <rbraun@sceen.net> | 2014-06-10 21:14:51 +0200
commit | 73a935a3e8f12447d455bcf4a1a01c51907a53a0 |
tree | 7fbde09a59bcb4f2193a905e91ef9b805dd49746 /kern/thread.c |
parent | f0e77fb79581c9227f758ad014a3c2778ae9d2f5 |
kern/llsync: rework lockless synchronization
Use a global checkpoint identifier as a generation counter and remove
reset interrupts.
For some reason I can't remember, using reset interrupts was thought to
be more efficient, perhaps because accessing a global variable on each
checkpoint looked expensive. But reset interrupts really don't scale,
whereas a read-mostly global variable can be cached locally and read
without incurring expensive accesses.
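To illustrate the "global checkpoint identifier as a generation counter" idea, here is a minimal sketch in C; it is not the actual kern/llsync implementation, and every name except llsync_report_context_switch() (which appears in the diff below) is an assumption made for the example.

```c
/*
 * Illustrative sketch of generation-counter checkpoints, not the
 * actual kern/llsync code. llsync_gcid, llsync_cpu_gcid, NR_CPUS and
 * cpu_id() are assumptions for the example.
 */

#include <stdbool.h>

#define NR_CPUS 8   /* arbitrary for the example */

/* Read-mostly global checkpoint identifier (the generation counter). */
static unsigned int llsync_gcid;

/* Last global identifier acknowledged by each processor. */
static unsigned int llsync_cpu_gcid[NR_CPUS];

/* Current processor id, assumed to be provided by the machine layer. */
unsigned int cpu_id(void);

/*
 * Checkpoint: called on every context switch from thread_runq_schedule()
 * (see the diff below). Reading the read-mostly global is cheap because
 * it stays cached locally, unlike the reset interrupts it replaces.
 */
void
llsync_report_context_switch(void)
{
    llsync_cpu_gcid[cpu_id()] = llsync_gcid;
}

/* A generation is complete once every processor has acknowledged it. */
static bool
llsync_generation_complete(void)
{
    for (unsigned int i = 0; i < NR_CPUS; i++)
        if (llsync_cpu_gcid[i] != llsync_gcid)
            return false;

    return true;
}
```

In the real code, idle processors additionally unregister themselves (llsync_unregister() in the diff below) so that they don't delay generation completion while they sleep.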
In addition, add a decent amount of documentation about the semantics
with regard to the rest of the system. Explicitly state that checkpoints
are triggered by context switches and that blocking inside read-side
critical sections is not allowed. Make periodic events attempt to trigger
checkpoints too. Add a thread-local read-side critical section nesting
counter so that it can be reliably determined whether the processor is
currently running a read-side critical section.
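As a rough illustration of the thread-local nesting counter, here is a hedged sketch of how read-side critical sections could maintain thread->llsync_read; it is not the actual kern/llsync implementation, and the llsync_read_enter(), llsync_read_exit() and llsync_in_read_section() names are assumptions, since this commit only shows the counter being initialized in thread_init() and the llsync_report_*() calls made from kern/thread.c.

```c
/*
 * Minimal illustrative sketch, not the actual kern/llsync code.
 * Assumed names: llsync_read_enter(), llsync_read_exit(),
 * llsync_in_read_section().
 */

#include <assert.h>
#include <stdbool.h>

struct thread {
    unsigned int llsync_read;   /* read-side critical section nesting level */
    /* ... other members ... */
};

/* Provided by kern/thread, as used elsewhere in this diff. */
struct thread * thread_self(void);
void thread_preempt_disable(void);
void thread_preempt_enable(void);

static inline void
llsync_read_enter(void)
{
    /*
     * Readers may not block; disabling preemption for the duration of
     * the section is one way to enforce that (an assumption, not shown
     * in this commit).
     */
    thread_preempt_disable();
    thread_self()->llsync_read++;
}

static inline void
llsync_read_exit(void)
{
    struct thread *thread;

    thread = thread_self();
    assert(thread->llsync_read != 0);
    thread->llsync_read--;
    thread_preempt_enable();
}

/*
 * With the nesting counter, an interrupt-time path such as
 * llsync_report_periodic_event() can reliably tell whether it
 * interrupted a read-side critical section before treating the
 * event as a checkpoint.
 */
static inline bool
llsync_in_read_section(void)
{
    return thread_self()->llsync_read != 0;
}
```

A reader simply brackets its accesses with llsync_read_enter()/llsync_read_exit(); since the counter nests, recursive readers need no special handling.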
Diffstat (limited to 'kern/thread.c')
-rw-r--r-- | kern/thread.c | 17 |
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/kern/thread.c b/kern/thread.c
index 60f57904..9ed55d3c 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -482,7 +482,7 @@ thread_runq_schedule(struct thread_runq *runq, struct thread *prev)
     assert(!cpu_intr_enabled());
     spinlock_assert_locked(&runq->lock);
 
-    llsync_checkin(thread_runq_id(runq));
+    llsync_report_context_switch();
 
     thread_clear_flag(prev, THREAD_YIELD);
     thread_runq_put_prev(runq, prev);
@@ -1463,6 +1463,7 @@ thread_init(struct thread *thread, void *stack, const struct thread_attr *attr,
     thread->state = THREAD_SLEEPING;
     thread->preempt = 2;
     thread->pinned = 0;
+    thread->llsync_read = 0;
     thread->sched_policy = attr->policy;
     thread->sched_class = thread_policy_table[attr->policy];
     cpumap_copy(&thread->cpumap, cpumap);
@@ -1679,14 +1680,14 @@ static void
 thread_idle(void *arg)
 {
     struct thread *self;
-    unsigned int cpu;
+
+    (void)arg;
 
     self = thread_self();
-    cpu = thread_runq_id(arg);
 
     for (;;) {
         thread_preempt_disable();
-        llsync_unregister_cpu(cpu);
+        llsync_unregister();
 
         for (;;) {
             cpu_intr_disable();
@@ -1699,7 +1700,7 @@ thread_idle(void *arg)
             cpu_idle();
         }
 
-        llsync_register_cpu(cpu);
+        llsync_register();
         thread_preempt_enable();
     }
 }
@@ -1735,7 +1736,7 @@ thread_setup_idler(struct thread_runq *runq)
     thread_attr_init(&attr, name);
     thread_attr_set_cpumap(&attr, cpumap);
     thread_attr_set_policy(&attr, THREAD_SCHED_POLICY_IDLE);
-    error = thread_init(idler, stack, &attr, thread_idle, runq);
+    error = thread_init(idler, stack, &attr, thread_idle, NULL);
 
     if (error)
         panic("thread: unable to initialize idler thread");
@@ -1946,7 +1947,7 @@ thread_run_scheduler(void)
     assert(!cpu_intr_enabled());
 
     runq = thread_runq_local();
-    llsync_register_cpu(thread_runq_id(runq));
+    llsync_register();
     thread = thread_self();
     assert(thread == runq->current);
     assert(thread->preempt == 1);
@@ -2003,7 +2004,7 @@ thread_tick_intr(void)
 
     runq = thread_runq_local();
     evcnt_inc(&runq->ev_tick);
-    llsync_commit_checkpoint(thread_runq_id(runq));
+    llsync_report_periodic_event();
     thread = thread_self();
 
     spinlock_lock(&runq->lock);