-rw-r--r--   kern/thread.c     35
-rw-r--r--   kern/thread.h     47
-rw-r--r--   kern/thread_i.h   16

3 files changed, 48 insertions, 50 deletions
diff --git a/kern/thread.c b/kern/thread.c
index 6f2f2d8f..601cc3fe 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -128,7 +128,7 @@
  * - preemption disabled
  * - run queue locked
  *
- * Locking the run queue increases the preemption counter once more,
+ * Locking the run queue increases the preemption level once more,
  * making its value 2.
  */
 #define THREAD_SUSPEND_PREEMPT_LEVEL 2
@@ -620,7 +620,7 @@ thread_runq_schedule(struct thread_runq *runq)
     assert((__builtin_frame_address(0) >= prev->stack)
            && (__builtin_frame_address(0) < (prev->stack + TCB_STACK_SIZE)));
-    assert(prev->preempt == THREAD_SUSPEND_PREEMPT_LEVEL);
+    assert(prev->preempt_level == THREAD_SUSPEND_PREEMPT_LEVEL);
     assert(!cpu_intr_enabled());
     spinlock_assert_locked(&runq->lock);
@@ -639,7 +639,7 @@ thread_runq_schedule(struct thread_runq *runq)
     next = thread_runq_get_next(runq);
     assert((next != runq->idler) || (runq->nr_threads == 0));
-    assert(next->preempt == THREAD_SUSPEND_PREEMPT_LEVEL);
+    assert(next->preempt_level == THREAD_SUSPEND_PREEMPT_LEVEL);

     if (likely(prev != next)) {
         /*
@@ -665,7 +665,7 @@ thread_runq_schedule(struct thread_runq *runq)
         thread_runq_schedule_prepare(prev);
     }

-    assert(prev->preempt == THREAD_SUSPEND_PREEMPT_LEVEL);
+    assert(prev->preempt_level == THREAD_SUSPEND_PREEMPT_LEVEL);
     assert(!cpu_intr_enabled());
     spinlock_assert_locked(&runq->lock);
     return runq;
@@ -1343,15 +1343,17 @@ thread_sched_fs_balance_pull(struct thread_runq *runq,
         }

         /*
-         * The pinned counter is changed without explicit synchronization.
+         * The pin level is changed without explicit synchronization.
          * However, it can only be changed by its owning thread. As threads
          * currently running aren't considered for migration, the thread had
          * to be preempted and invoke the scheduler. Since balancer threads
          * acquire the run queue lock, there is strong ordering between
-         * changing the pinned counter and setting the current thread of a
+         * changing the pin level and setting the current thread of a
          * run queue.
+         *
+         * TODO Review comment.
          */
-        if (thread->pinned) {
+        if (thread->pin_level != 0) {
             continue;
         }
@@ -1694,8 +1696,8 @@ thread_init_booter(unsigned int cpu)
     booter = &thread_booters[cpu];
     booter->nr_refs = 0; /* Make sure booters aren't destroyed */
     booter->flags = 0;
-    booter->intr = 0;
-    booter->preempt = 1;
+    booter->intr_level = 0;
+    booter->preempt_level = 1;
     cpumap_fill(&booter->cpumap);
     thread_set_user_sched_policy(booter, THREAD_SCHED_POLICY_IDLE);
     thread_set_user_sched_class(booter, THREAD_SCHED_CLASS_IDLE);
@@ -1821,10 +1823,10 @@ thread_init(struct thread *thread, void *stack,
     turnstile_td_init(&thread->turnstile_td);
     thread->last_cond = NULL;
     thread->propagate_priority = false;
-    thread->intr = 0;
-    thread->preempt = THREAD_SUSPEND_PREEMPT_LEVEL;
-    thread->pinned = 0;
-    thread->llsync_read = 0;
+    thread->preempt_level = THREAD_SUSPEND_PREEMPT_LEVEL;
+    thread->pin_level = 0;
+    thread->intr_level = 0;
+    thread->llsync_level = 0;
     cpumap_copy(&thread->cpumap, cpumap);
     thread_set_user_sched_policy(thread, attr->policy);
     thread_set_user_sched_class(thread, thread_policy_to_class(attr->policy));
@@ -2468,7 +2470,7 @@ thread_wakeup_common(struct thread *thread, int error)
     thread_preempt_disable_intr_save(&flags);

-    if (!thread->pinned) {
+    if (thread->pin_level == 0) {
         runq = thread_get_real_sched_ops(thread)->select_runq(thread);
     } else {
         /*
@@ -2517,7 +2519,6 @@ thread_sleep_common(struct spinlock *interlock, const void *wchan_addr,
     unsigned long flags;

     thread = thread_self();
-    assert(thread->preempt == 1);

     if (timed) {
         waiter.thread = thread;
@@ -2550,8 +2551,6 @@ thread_sleep_common(struct spinlock *interlock, const void *wchan_addr,
         thread_preempt_enable_no_resched();
     }

-    assert(thread->preempt == 1);
-
     return thread->wakeup_error;
 }
@@ -2620,7 +2619,7 @@ thread_run_scheduler(void)
     runq = thread_runq_local();
     thread = thread_self();
     assert(thread == runq->current);
-    assert(thread->preempt == 1);
+    assert(thread->preempt_level == (THREAD_SUSPEND_PREEMPT_LEVEL - 1));
     llsync_register();
     sref_register();
diff --git a/kern/thread.h b/kern/thread.h
index af8539d2..4761e1ec 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -539,7 +539,7 @@ void thread_propagate_priority(void);
 static inline int
 thread_pinned(void)
 {
-    return (thread_self()->pinned != 0);
+    return thread_self()->pin_level != 0;
 }

 static inline void
@@ -548,8 +548,8 @@ thread_pin(void)
     struct thread *thread;

     thread = thread_self();
-    thread->pinned++;
-    assert(thread->pinned != 0);
+    thread->pin_level++;
+    assert(thread->pin_level != 0);
     barrier();
 }
@@ -560,8 +560,8 @@ thread_unpin(void)
     barrier();
     thread = thread_self();
-    assert(thread->pinned != 0);
-    thread->pinned--;
+    assert(thread->pin_level != 0);
+    thread->pin_level--;
 }

 /*
@@ -573,7 +573,7 @@ thread_unpin(void)
 static inline int
 thread_preempt_enabled(void)
 {
-    return (thread_self()->preempt == 0);
+    return thread_self()->preempt_level == 0;
 }

 static inline void
@@ -582,8 +582,8 @@ thread_preempt_disable(void)
     struct thread *thread;

     thread = thread_self();
-    thread->preempt++;
-    assert(thread->preempt != 0);
+    thread->preempt_level++;
+    assert(thread->preempt_level != 0);
     barrier();
 }
@@ -594,8 +594,8 @@ thread_preempt_enable_no_resched(void)
     barrier();
     thread = thread_self();
-    assert(thread->preempt != 0);
-    thread->preempt--;
+    assert(thread->preempt_level != 0);
+    thread->preempt_level--;

     /*
      * Don't perform priority propagation here, because this function is
@@ -640,7 +640,7 @@ thread_preempt_enable_intr_restore(unsigned long flags)
 static inline bool
 thread_interrupted(void)
 {
-    return (thread_self()->intr != 0);
+    return thread_self()->intr_level != 0;
 }

 static inline bool
@@ -658,12 +658,12 @@ thread_intr_enter(void)
     thread = thread_self();

-    if (thread->intr == 0) {
+    if (thread->intr_level == 0) {
         thread_preempt_disable();
     }

-    thread->intr++;
-    assert(thread->intr != 0);
+    thread->intr_level++;
+    assert(thread->intr_level != 0);
     barrier();
 }
@@ -674,17 +674,16 @@ thread_intr_leave(void)
     barrier();
     thread = thread_self();
-    assert(thread->intr != 0);
-    thread->intr--;
+    assert(thread->intr_level != 0);
+    thread->intr_level--;

-    if (thread->intr == 0) {
+    if (thread->intr_level == 0) {
         thread_preempt_enable_no_resched();
     }
 }

 /*
- * Lockless synchronization read-side critical section nesting counter
- * control functions.
+ * Lockless synchronization read-side critical section level control functions.
  */

 static inline int
@@ -693,7 +692,7 @@ thread_llsync_in_read_cs(void)
     struct thread *thread;

     thread = thread_self();
-    return (thread->llsync_read != 0);
+    return thread->llsync_level != 0;
 }

 static inline void
@@ -702,8 +701,8 @@ thread_llsync_read_inc(void)
     struct thread *thread;

     thread = thread_self();
-    thread->llsync_read++;
-    assert(thread->llsync_read != 0);
+    thread->llsync_level++;
+    assert(thread->llsync_level != 0);
     barrier();
 }
@@ -714,8 +713,8 @@ thread_llsync_read_dec(void)
     barrier();
     thread = thread_self();
-    assert(thread->llsync_read != 0);
-    thread->llsync_read--;
+    assert(thread->llsync_level != 0);
+    thread->llsync_level--;
 }

 /*
diff --git a/kern/thread_i.h b/kern/thread_i.h
index c8a85f57..067b31ed 100644
--- a/kern/thread_i.h
+++ b/kern/thread_i.h
@@ -135,17 +135,17 @@ struct thread {
     /* True if priority must be propagated when preemption is reenabled */
     bool propagate_priority;            /* (-) */

-    /* Preemption counter, preemption is enabled if 0 */
-    unsigned short preempt;             /* (-) */
+    /* Preemption level, preemption is enabled if 0 */
+    unsigned short preempt_level;       /* (-) */

-    /* Pinning counter, migration is allowed if 0 */
-    unsigned short pinned;              /* (-) */
+    /* Pin level, migration is allowed if 0 */
+    unsigned short pin_level;           /* (-) */

-    /* Interrupt level counter, in thread context if 0 */
-    unsigned short intr;                /* (-) */
+    /* Interrupt level, in thread context if 0 */
+    unsigned short intr_level;          /* (-) */

-    /* Read-side critical section counter, not in any if 0 */
-    unsigned short llsync_read;         /* (-) */
+    /* Read-side critical section level, not in any if 0 */
+    unsigned short llsync_level;        /* (-) */

     /* Processors on which this thread is allowed to run */
     struct cpumap cpumap;               /* (r) */
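For reference, a minimal sketch (not part of the commit) of the invariant named by THREAD_SUSPEND_PREEMPT_LEVEL, assuming, as the comment in the first hunk states, that locking a run queue disables preemption one more level. The function name example_suspend_point is hypothetical, and the code is written as if it lived in kern/thread.c, where struct thread_runq and the private preempt_level member are visible.

#include <assert.h>

#include <kern/spinlock.h>
#include <kern/thread.h>

/*
 * Hypothetical sketch: the two increments that make preempt_level
 * equal THREAD_SUSPEND_PREEMPT_LEVEL (2) on the scheduling path.
 */
static void
example_suspend_point(struct thread_runq *runq)
{
    thread_preempt_disable();       /* preempt_level: 0 -> 1 */
    spinlock_lock(&runq->lock);     /* locking the run queue disables
                                       preemption once more: 1 -> 2 */

    assert(thread_self()->preempt_level == THREAD_SUSPEND_PREEMPT_LEVEL);

    spinlock_unlock(&runq->lock);   /* preempt_level: 2 -> 1 */
    thread_preempt_enable();        /* preempt_level: 1 -> 0 */
}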
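In the same spirit, a hedged example of the renamed pin_level member, using only the thread_pin(), thread_unpin() and thread_pinned() inlines changed above: pinning nests, and migration is allowed again only once the level drops back to 0. The function name is invented for illustration.

#include <assert.h>

#include <kern/thread.h>

static void
example_pinned_section(void)
{
    thread_pin();                   /* pin_level: 0 -> 1, migration disabled */
    thread_pin();                   /* pin_level: 1 -> 2, nesting is allowed */
    assert(thread_pinned());

    /* ... work that must stay on the current processor ... */

    thread_unpin();                 /* pin_level: 2 -> 1, still pinned */
    thread_unpin();                 /* pin_level: 1 -> 0, migration allowed */
}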
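Finally, a sketch of the intr_level accounting in thread_intr_enter()/thread_intr_leave(): only the outermost level toggles preemption, and the leave path uses the no_resched variant, so no preemption point is run in interrupt context. The wrapper name and handler signature are hypothetical.

#include <assert.h>

#include <kern/thread.h>

static void
example_interrupt_dispatch(void (*handler)(void *), void *arg)
{
    thread_intr_enter();            /* intr_level: 0 -> 1, the first level
                                       also disables preemption */
    assert(thread_interrupted());

    handler(arg);                   /* runs with preemption disabled */

    thread_intr_leave();            /* intr_level: 1 -> 0, reenables
                                       preemption without rescheduling */
}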