author     Richard Braun <rbraun@sceen.net>  2018-07-30 20:55:20 +0200
committer  Richard Braun <rbraun@sceen.net>  2018-07-30 20:55:20 +0200
commit     5f202c9f744a5d9c5b751038edd2379b3d244227 (patch)
tree       c5bce5b9e1d9c4b01dfed4ff941ad9944814b93c /kern/thread.c
parent     d3e43f5bfda0bdad7a829a7ed8c1272a395b196b (diff)
Rework assertive functions
Instead of combining the check and the assertion into single functions,
rework those helpers into pure checking functions usable with assert().
The combined functions were originally introduced because an earlier
implementation of assert() caused warnings about unused functions and
variables.
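
To illustrate the pattern, here is a minimal, self-contained sketch of the
two styles. The struct spinlock layout below is hypothetical and exists only
to make the example compile; the real definition lives in kern/spinlock.h
and may differ.

    #include <assert.h>
    #include <stdbool.h>

    /* Hypothetical lock layout, for illustration only;
     * the real definition is in kern/spinlock.h. */
    struct spinlock {
        unsigned int value;             /* zero means unlocked */
    };

    /* Old style: an "assertive function" combining the check and the
     * assertion. Helpers like this were introduced to avoid unused
     * function/variable warnings under an earlier assert()
     * implementation. */
    static void
    spinlock_assert_locked(const struct spinlock *lock)
    {
        assert(lock->value != 0);
    }

    /* New style: a pure checking function with no side effects.
     * Callers wrap it in assert(), so the whole check compiles away
     * cleanly when NDEBUG is defined. */
    static inline bool
    spinlock_locked(const struct spinlock *lock)
    {
        return lock->value != 0;
    }

    int
    main(void)
    {
        struct spinlock lock = { .value = 1 };

        spinlock_assert_locked(&lock);    /* old call site */
        assert(spinlock_locked(&lock));   /* new call site, as in this commit */
        return 0;
    }

Beyond the warning issue, the predicate form composes: spinlock_locked() can
appear in any boolean expression, and the call sites in thread.c now read
uniformly as a plain list of assert() lines.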
Diffstat (limited to 'kern/thread.c')
 kern/thread.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kern/thread.c b/kern/thread.c
index 6625163a..b2e360b9 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -485,7 +485,7 @@ thread_runq_add(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    spinlock_assert_locked(&runq->lock);
+    assert(spinlock_locked(&runq->lock));
     assert(!thread->in_runq);
 
     ops = thread_get_real_sched_ops(thread);
@@ -512,7 +512,7 @@ thread_runq_remove(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    spinlock_assert_locked(&runq->lock);
+    assert(spinlock_locked(&runq->lock));
     assert(thread->in_runq);
 
     runq->nr_threads--;
@@ -533,7 +533,7 @@ thread_runq_put_prev(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    spinlock_assert_locked(&runq->lock);
+    assert(spinlock_locked(&runq->lock));
 
     ops = thread_get_real_sched_ops(thread);
 
@@ -549,7 +549,7 @@ thread_runq_get_next(struct thread_runq *runq)
     unsigned int i;
 
     assert(!cpu_intr_enabled());
-    spinlock_assert_locked(&runq->lock);
+    assert(spinlock_locked(&runq->lock));
 
     for (i = 0; i < ARRAY_SIZE(thread_sched_ops); i++) {
         thread = thread_sched_ops[i].get_next(runq);
@@ -582,7 +582,7 @@ static void
 thread_runq_wakeup(struct thread_runq *runq, struct thread *thread)
 {
     assert(!cpu_intr_enabled());
-    spinlock_assert_locked(&runq->lock);
+    assert(spinlock_locked(&runq->lock));
     assert(thread->state == THREAD_RUNNING);
 
     thread_runq_add(runq, thread);
@@ -636,7 +636,7 @@ thread_runq_schedule(struct thread_runq *runq)
            && (__builtin_frame_address(0) < (prev->stack + TCB_STACK_SIZE)));
     assert(prev->preempt_level == THREAD_SUSPEND_PREEMPT_LEVEL);
     assert(!cpu_intr_enabled());
-    spinlock_assert_locked(&runq->lock);
+    assert(spinlock_locked(&runq->lock));
 
     thread_clear_flag(prev, THREAD_YIELD);
     thread_runq_put_prev(runq, prev);
@@ -687,7 +687,7 @@ thread_runq_schedule(struct thread_runq *runq)
 
     assert(prev->preempt_level == THREAD_SUSPEND_PREEMPT_LEVEL);
     assert(!cpu_intr_enabled());
-    spinlock_assert_locked(&runq->lock);
+    assert(spinlock_locked(&runq->lock));
 
     return runq;
 }
@@ -2846,7 +2846,7 @@ thread_pi_setscheduler(struct thread *thread, unsigned char policy,
     bool requeue, current;
 
     td = thread_turnstile_td(thread);
-    turnstile_td_assert_lock(td);
+    assert(turnstile_td_locked(td));
 
     ops = thread_get_sched_ops(thread_policy_to_class(policy));
     global_priority = ops->get_global_priority(priority);