author     Richard Braun <rbraun@sceen.net>  2017-12-03 15:11:23 +0100
committer  Richard Braun <rbraun@sceen.net>  2017-12-03 15:11:23 +0100
commit     c7d2507fc455421429a531d722947ab415259c77 (patch)
tree       52609a956ec9e3bd1f1274aa9d799597866bfcd1 /kern/thread.c
parent     303ed5305f8dae17ab46aa1e0dc6086d4277808c (diff)
Revert "Make assert have no side effects"
This reverts commit d18d0e85596f90e0bd597b33d58209d0b3973c95.
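
Background for the hunks below: an assert() that discards its expression (as the standard <assert.h> macro does when NDEBUG is defined) leaves a variable that is only checked in an assertion set but never read, which trips -Wunused-but-set-variable unless the variable carries an __unused attribute. The reverted commit therefore added __unused tags and open-coded lock checks; this revert drops them again. A minimal standalone sketch of that warning mechanism, using standard C as a stand-in for the kernel's own assert() and assuming __unused expands to __attribute__((unused)):

#include <assert.h>

#define __unused __attribute__((unused))    /* assumed definition, for illustration only */

static int
compute(void)
{
    return 0;
}

static void
example(void)
{
    /*
     * If assert() expands to ((void)0) -- e.g. with NDEBUG defined -- 'error'
     * is written but never read, so the compiler warns unless the variable is
     * tagged __unused.  With an assert() that always evaluates its expression,
     * as after this revert, the tag is unnecessary.
     */
    __unused int error;

    error = compute();
    assert(!error);
}

int
main(void)
{
    example();
    return 0;
}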
Diffstat (limited to 'kern/thread.c')
-rw-r--r--  kern/thread.c  27
1 file changed, 13 insertions, 14 deletions
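
Most hunks below also swap open-coded assert(spinlock_locked(&runq->lock)) checks back to the dedicated spinlock_assert_locked() helper (and turnstile_td_assert_lock() for turnstile data), which keeps call sites independent of how assert() itself is defined. A hypothetical shape for such a helper, not taken from x15's kern/spinlock.h, assuming a lock word that is non-zero while held:

#include <assert.h>

struct spinlock {
    unsigned int value;     /* assumed layout: non-zero while the lock is held */
};

/* Hypothetical sketch; the real x15 helper may be a macro or check differently. */
static inline void
spinlock_assert_locked(struct spinlock *lock)
{
    assert(lock->value != 0);
}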
diff --git a/kern/thread.c b/kern/thread.c
index 09e15aa0..5bc643c9 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -476,7 +476,7 @@ thread_runq_add(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
     assert(!thread->in_runq);
 
     ops = thread_get_real_sched_ops(thread);
@@ -503,7 +503,7 @@ thread_runq_remove(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
     assert(thread->in_runq);
 
     runq->nr_threads--;
@@ -524,7 +524,7 @@ thread_runq_put_prev(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
 
     ops = thread_get_real_sched_ops(thread);
 
@@ -540,7 +540,7 @@ thread_runq_get_next(struct thread_runq *runq)
     unsigned int i;
 
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
 
     for (i = 0; i < ARRAY_SIZE(thread_sched_ops); i++) {
         thread = thread_sched_ops[i].get_next(runq);
@@ -573,7 +573,7 @@ static void
 thread_runq_wakeup(struct thread_runq *runq, struct thread *thread)
 {
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
     assert(thread->state == THREAD_RUNNING);
 
     thread_runq_add(runq, thread);
@@ -613,7 +613,7 @@ thread_runq_schedule(struct thread_runq *runq)
            && (__builtin_frame_address(0) < (prev->stack + TCB_STACK_SIZE)));
     assert(prev->preempt == THREAD_SUSPEND_PREEMPT_LEVEL);
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
 
     llsync_report_context_switch();
 
@@ -658,7 +658,7 @@ thread_runq_schedule(struct thread_runq *runq)
 
     assert(prev->preempt == THREAD_SUSPEND_PREEMPT_LEVEL);
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
 
     return runq;
 }
@@ -763,8 +763,7 @@ thread_sched_rt_get_next(struct thread_runq *runq)
 }
 
 static void
-thread_sched_rt_reset_priority(struct thread *thread,
-                               __unused unsigned short priority)
+thread_sched_rt_reset_priority(struct thread *thread, unsigned short priority)
 {
     assert(priority <= THREAD_SCHED_RT_PRIO_MAX);
     thread->rt_data.time_slice = THREAD_DEFAULT_RR_TIME_SLICE;
@@ -1886,13 +1885,13 @@ thread_unlock_runq(struct thread_runq *runq, unsigned long flags)
 static void *
 thread_alloc_stack(void)
 {
-    __unused struct vm_page *first_page, *last_page;
+    struct vm_page *first_page, *last_page;
     phys_addr_t first_pa, last_pa;
     struct pmap *kernel_pmap;
     size_t stack_size;
     uintptr_t va;
     void *mem;
-    __unused int error;
+    int error;
 
     kernel_pmap = pmap_get_kernel_pmap();
 
@@ -2551,7 +2550,7 @@ void
 thread_sleep(struct spinlock *interlock, const void *wchan_addr,
              const char *wchan_desc)
 {
-    __unused int error;
+    int error;
 
     error = thread_sleep_common(interlock, wchan_addr, wchan_desc, false, 0);
     assert(!error);
@@ -2781,14 +2780,14 @@ thread_pi_setscheduler(struct thread *thread, unsigned char policy,
                        unsigned short priority)
 {
     const struct thread_sched_ops *ops;
-    __unused struct turnstile_td *td;
     struct thread_runq *runq;
+    struct turnstile_td *td;
     unsigned int global_priority;
     unsigned long flags;
     bool requeue, current;
 
     td = thread_turnstile_td(thread);
-    assert(turnstile_td_locked(td));
+    turnstile_td_assert_lock(td);
 
     ops = thread_get_sched_ops(thread_policy_to_class(policy));
     global_priority = ops->get_global_priority(priority);