author    Richard Braun <rbraun@sceen.net>  2017-09-01 23:58:41 +0200
committer Richard Braun <rbraun@sceen.net>  2017-09-02 15:25:37 +0200
commit    d18d0e85596f90e0bd597b33d58209d0b3973c95 (patch)
tree      6c3472f59cf64244ab86d2fc13b220b1c8f61165 /kern
parent    897ad6a062ea2a32a2759613608faf3271211832 (diff)
Make assert have no side effects
This makes sure that symbols referenced only by assert expressions need not be generated when they are otherwise unused. The recently introduced __unused macro is used to suppress the compiler warnings resulting from this change.
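For illustration, the pattern applied throughout this commit reduces to a small standalone sketch. The struct and function names below are hypothetical (not part of the x15 tree), and __unused is assumed to expand to GCC's __attribute__((unused)), consistent with the <kern/macros.h> includes added in the hunks that follow:

/* sketch.c - build with: cc -Wall -Wextra sketch.c */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Stand-in for the macro presumably provided by <kern/macros.h>. */
#define __unused __attribute__((unused))

struct queue {
    void *head;
    size_t size;
};

/* Assertion helpers become predicates: they return the checked state
 * instead of asserting it themselves, so call sites can wrap them in
 * assert(). Once assertions compile to nothing (NDEBUG), nothing
 * references this function; __unused silences the resulting
 * -Wunused-function warning and lets the compiler drop the symbol. */
__unused static bool
queue_state_initialized(const struct queue *queue)
{
    return (queue->head == NULL) && (queue->size == 0);
}

/* Parameters read only by assertions get the same annotation,
 * as with sleepq_bucket_remove and turnstile_bucket_remove below. */
static void
queue_check(__unused const struct queue *queue)
{
    assert(queue_state_initialized(queue));
}

int
main(void)
{
    struct queue queue = { NULL, 0 };
    __unused int error;     /* read only by the assertion below */

    queue_check(&queue);
    error = 0;              /* a real call in the kernel sources */
    assert(!error);
    return 0;
}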
Diffstat (limited to 'kern')
-rw-r--r--  kern/cbuf.c                   2
-rw-r--r--  kern/condition.c              2
-rw-r--r--  kern/kmem.c                   2
-rw-r--r--  kern/log.c                    2
-rw-r--r--  kern/mutex/mutex_adaptive.c   3
-rw-r--r--  kern/mutex/mutex_plain.c      3
-rw-r--r--  kern/rtmutex.c                2
-rw-r--r--  kern/semaphore.c              3
-rw-r--r--  kern/sleepq.c                29
-rw-r--r--  kern/spinlock.h              10
-rw-r--r--  kern/sref.c                   2
-rw-r--r--  kern/task.h                   5
-rw-r--r--  kern/thread.c                27
-rw-r--r--  kern/thread.h                 3
-rw-r--r--  kern/timer.c                  2
-rw-r--r--  kern/turnstile.c             30
-rw-r--r--  kern/turnstile.h              8
17 files changed, 76 insertions(+), 59 deletions(-)
diff --git a/kern/cbuf.c b/kern/cbuf.c
index 2093f428..eca0c33c 100644
--- a/kern/cbuf.c
+++ b/kern/cbuf.c
@@ -72,7 +72,7 @@ cbuf_push(struct cbuf *cbuf, const void *buf, size_t size, bool erase)
int
cbuf_pop(struct cbuf *cbuf, void *buf, size_t *sizep)
{
- int error;
+ __unused int error;
if (cbuf_size(cbuf) == 0) {
return ERROR_AGAIN;
diff --git a/kern/condition.c b/kern/condition.c
index e6d65951..b12068fa 100644
--- a/kern/condition.c
+++ b/kern/condition.c
@@ -88,7 +88,7 @@ condition_wait_common(struct condition *condition, struct mutex *mutex,
void
condition_wait(struct condition *condition, struct mutex *mutex)
{
- int error;
+ __unused int error;
error = condition_wait_common(condition, mutex, false, 0);
assert(!error);
diff --git a/kern/kmem.c b/kern/kmem.c
index d98be0d6..5423c0a0 100644
--- a/kern/kmem.c
+++ b/kern/kmem.c
@@ -646,7 +646,7 @@ kmem_cache_register(struct kmem_cache *cache, struct kmem_slab *slab)
uintptr_t va, end;
phys_addr_t pa;
bool virtual;
- int error;
+ __unused int error;
assert(kmem_cache_registration_required(cache));
assert(slab->nr_refs == 0);
diff --git a/kern/log.c b/kern/log.c
index a48afc5a..10131d12 100644
--- a/kern/log.c
+++ b/kern/log.c
@@ -459,7 +459,7 @@ INIT_OP_DEFINE(log_start,
static void
log_write(const void *s, size_t size)
{
- int error;
+ __unused int error;
error = cbuf_push(&log_cbuf, s, size, true);
assert(!error);
diff --git a/kern/mutex/mutex_adaptive.c b/kern/mutex/mutex_adaptive.c
index 68b006ad..8da55270 100644
--- a/kern/mutex/mutex_adaptive.c
+++ b/kern/mutex/mutex_adaptive.c
@@ -24,6 +24,7 @@
#include <kern/clock.h>
#include <kern/error.h>
#include <kern/init.h>
+#include <kern/macros.h>
#include <kern/mutex.h>
#include <kern/mutex_types.h>
#include <kern/sleepq.h>
@@ -235,7 +236,7 @@ out:
void
mutex_adaptive_lock_slow(struct mutex *mutex)
{
- int error;
+ __unused int error;
error = mutex_adaptive_lock_slow_common(mutex, false, 0);
assert(!error);
diff --git a/kern/mutex/mutex_plain.c b/kern/mutex/mutex_plain.c
index 2c655940..abfa3396 100644
--- a/kern/mutex/mutex_plain.c
+++ b/kern/mutex/mutex_plain.c
@@ -22,6 +22,7 @@
#include <kern/atomic.h>
#include <kern/init.h>
+#include <kern/macros.h>
#include <kern/mutex.h>
#include <kern/mutex_types.h>
#include <kern/sleepq.h>
@@ -133,7 +134,7 @@ out:
void
mutex_plain_lock_slow(struct mutex *mutex)
{
- int error;
+ __unused int error;
error = mutex_plain_lock_slow_common(mutex, false, 0);
assert(!error);
diff --git a/kern/rtmutex.c b/kern/rtmutex.c
index c07bbfef..09637514 100644
--- a/kern/rtmutex.c
+++ b/kern/rtmutex.c
@@ -183,7 +183,7 @@ out:
void
rtmutex_lock_slow(struct rtmutex *rtmutex)
{
- int error;
+ __unused int error;
error = rtmutex_lock_slow_common(rtmutex, false, 0);
assert(!error);
diff --git a/kern/semaphore.c b/kern/semaphore.c
index 72e843a9..003f053c 100644
--- a/kern/semaphore.c
+++ b/kern/semaphore.c
@@ -20,6 +20,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <kern/macros.h>
#include <kern/semaphore.h>
#include <kern/semaphore_i.h>
#include <kern/sleepq.h>
@@ -63,7 +64,7 @@ semaphore_wait_slow_common(struct semaphore *semaphore,
void
semaphore_wait_slow(struct semaphore *semaphore)
{
- int error;
+ __unused int error;
error = semaphore_wait_slow_common(semaphore, false, 0);
assert(!error);
diff --git a/kern/sleepq.c b/kern/sleepq.c
index 170b1a9f..0c20ae42 100644
--- a/kern/sleepq.c
+++ b/kern/sleepq.c
@@ -110,14 +110,14 @@ sleepq_waiter_wakeup(struct sleepq_waiter *waiter)
thread_wakeup(waiter->thread);
}
-static void
-sleepq_assert_init_state(const struct sleepq *sleepq)
+__unused static bool
+sleepq_state_initialized(const struct sleepq *sleepq)
{
- assert(sleepq->bucket == NULL);
- assert(sleepq->sync_obj == NULL);
- assert(list_empty(&sleepq->waiters));
- assert(sleepq->oldest_waiter == NULL);
- assert(sleepq->next_free == NULL);
+ return ((sleepq->bucket == NULL)
+ && (sleepq->sync_obj == NULL)
+ && (list_empty(&sleepq->waiters))
+ && (sleepq->oldest_waiter == NULL)
+ && (sleepq->next_free == NULL));
}
static void
@@ -134,7 +134,7 @@ sleepq_unuse(struct sleepq *sleepq)
sleepq->sync_obj = NULL;
}
-static bool
+__unused static bool
sleepq_in_use(const struct sleepq *sleepq)
{
return sleepq->sync_obj != NULL;
@@ -186,7 +186,8 @@ sleepq_bucket_add(struct sleepq_bucket *bucket, struct sleepq *sleepq)
}
static void
-sleepq_bucket_remove(struct sleepq_bucket *bucket, struct sleepq *sleepq)
+sleepq_bucket_remove(__unused struct sleepq_bucket *bucket,
+ struct sleepq *sleepq)
{
assert(sleepq->bucket == bucket);
sleepq->bucket = NULL;
@@ -253,14 +254,14 @@ sleepq_create(void)
return NULL;
}
- sleepq_assert_init_state(sleepq);
+ assert(sleepq_state_initialized(sleepq));
return sleepq;
}
void
sleepq_destroy(struct sleepq *sleepq)
{
- sleepq_assert_init_state(sleepq);
+ assert(sleepq_state_initialized(sleepq));
kmem_cache_free(&sleepq_cache, sleepq);
}
@@ -352,7 +353,7 @@ sleepq_lend(const void *sync_obj, bool condition, unsigned long *flags)
assert(sync_obj != NULL);
sleepq = thread_sleepq_lend();
- sleepq_assert_init_state(sleepq);
+ assert(sleepq_state_initialized(sleepq));
bucket = sleepq_bucket_get(sync_obj, condition);
@@ -390,7 +391,7 @@ sleepq_return(struct sleepq *sleepq, unsigned long flags)
spinlock_unlock_intr_restore(&bucket->lock, flags);
- sleepq_assert_init_state(free_sleepq);
+ assert(sleepq_state_initialized(free_sleepq));
thread_sleepq_return(free_sleepq);
}
@@ -468,7 +469,7 @@ sleepq_wait_common(struct sleepq *sleepq, const char *wchan,
void
sleepq_wait(struct sleepq *sleepq, const char *wchan)
{
- int error;
+ __unused int error;
error = sleepq_wait_common(sleepq, wchan, false, 0);
assert(!error);
diff --git a/kern/spinlock.h b/kern/spinlock.h
index d4105da0..50ac5401 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -26,6 +26,8 @@
#ifndef _KERN_SPINLOCK_H
#define _KERN_SPINLOCK_H
+#include <stdbool.h>
+
#include <kern/init.h>
#include <kern/macros.h>
#include <kern/spinlock_i.h>
@@ -35,13 +37,17 @@
struct spinlock;
-#define spinlock_assert_locked(lock) assert((lock)->value != SPINLOCK_UNLOCKED)
-
/*
* Initialize a spin lock.
*/
void spinlock_init(struct spinlock *lock);
+static inline bool
+spinlock_locked(const struct spinlock *lock)
+{
+ return lock->value != SPINLOCK_UNLOCKED;
+}
+
/*
* Attempt to lock the given spin lock.
*
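The spinlock.h hunk above shows the other half of the conversion: an assert-wrapping macro is replaced by a plain predicate, so the condition can be asserted, branched on, or ignored at the caller's discretion, and a side-effect-free assert can drop the call entirely in release builds. A minimal sketch of that conversion, with hypothetical stand-ins for struct spinlock and SPINLOCK_UNLOCKED:

#include <assert.h>
#include <stdbool.h>

#define SPINLOCK_UNLOCKED 0

struct spinlock {
    unsigned int value;
};

/* Before: check and assert fused in one macro, so the condition
 * could only ever be asserted, never tested:
 *
 *   #define spinlock_assert_locked(lock) \
 *       assert((lock)->value != SPINLOCK_UNLOCKED)
 */

/* After: a plain predicate; callers such as thread_runq_add below
 * write assert(spinlock_locked(&runq->lock)) instead. */
static inline bool
spinlock_locked(const struct spinlock *lock)
{
    return lock->value != SPINLOCK_UNLOCKED;
}

int
main(void)
{
    struct spinlock lock = { .value = 1 };

    assert(spinlock_locked(&lock));
    return 0;
}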
diff --git a/kern/sref.c b/kern/sref.c
index 3603e781..95daeb0f 100644
--- a/kern/sref.c
+++ b/kern/sref.c
@@ -242,7 +242,7 @@ sref_queue_concat(struct sref_queue *queue1, struct sref_queue *queue2)
queue1->size += queue2->size;
}
-static inline bool
+__unused static inline bool
sref_counter_aligned(const struct sref_counter *counter)
{
return (((uintptr_t)counter & (~SREF_WEAKREF_MASK)) == 0);
diff --git a/kern/task.h b/kern/task.h
index 1711fbee..149ff49d 100644
--- a/kern/task.h
+++ b/kern/task.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 Richard Braun.
+ * Copyright (c) 2012-2017 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,6 +21,7 @@
#include <kern/atomic.h>
#include <kern/init.h>
#include <kern/list.h>
+#include <kern/macros.h>
#include <kern/spinlock.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
@@ -50,7 +51,7 @@ extern struct task *kernel_task;
static inline void
task_ref(struct task *task)
{
- unsigned long nr_refs;
+ __unused unsigned long nr_refs;
nr_refs = atomic_fetch_add(&task->nr_refs, 1, ATOMIC_RELAXED);
assert(nr_refs != (unsigned long)-1);
diff --git a/kern/thread.c b/kern/thread.c
index 4a9cb2a1..5d8ac11d 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -475,7 +475,7 @@ thread_runq_add(struct thread_runq *runq, struct thread *thread)
const struct thread_sched_ops *ops;
assert(!cpu_intr_enabled());
- spinlock_assert_locked(&runq->lock);
+ assert(spinlock_locked(&runq->lock));
assert(!thread->in_runq);
ops = thread_get_real_sched_ops(thread);
@@ -502,7 +502,7 @@ thread_runq_remove(struct thread_runq *runq, struct thread *thread)
const struct thread_sched_ops *ops;
assert(!cpu_intr_enabled());
- spinlock_assert_locked(&runq->lock);
+ assert(spinlock_locked(&runq->lock));
assert(thread->in_runq);
runq->nr_threads--;
@@ -523,7 +523,7 @@ thread_runq_put_prev(struct thread_runq *runq, struct thread *thread)
const struct thread_sched_ops *ops;
assert(!cpu_intr_enabled());
- spinlock_assert_locked(&runq->lock);
+ assert(spinlock_locked(&runq->lock));
ops = thread_get_real_sched_ops(thread);
@@ -539,7 +539,7 @@ thread_runq_get_next(struct thread_runq *runq)
unsigned int i;
assert(!cpu_intr_enabled());
- spinlock_assert_locked(&runq->lock);
+ assert(spinlock_locked(&runq->lock));
for (i = 0; i < ARRAY_SIZE(thread_sched_ops); i++) {
thread = thread_sched_ops[i].get_next(runq);
@@ -572,7 +572,7 @@ static void
thread_runq_wakeup(struct thread_runq *runq, struct thread *thread)
{
assert(!cpu_intr_enabled());
- spinlock_assert_locked(&runq->lock);
+ assert(spinlock_locked(&runq->lock));
assert(thread->state == THREAD_RUNNING);
thread_runq_add(runq, thread);
@@ -612,7 +612,7 @@ thread_runq_schedule(struct thread_runq *runq)
&& (__builtin_frame_address(0) < (prev->stack + TCB_STACK_SIZE)));
assert(prev->preempt == THREAD_SUSPEND_PREEMPT_LEVEL);
assert(!cpu_intr_enabled());
- spinlock_assert_locked(&runq->lock);
+ assert(spinlock_locked(&runq->lock));
llsync_report_context_switch();
@@ -657,7 +657,7 @@ thread_runq_schedule(struct thread_runq *runq)
assert(prev->preempt == THREAD_SUSPEND_PREEMPT_LEVEL);
assert(!cpu_intr_enabled());
- spinlock_assert_locked(&runq->lock);
+ assert(spinlock_locked(&runq->lock));
return runq;
}
@@ -762,7 +762,8 @@ thread_sched_rt_get_next(struct thread_runq *runq)
}
static void
-thread_sched_rt_reset_priority(struct thread *thread, unsigned short priority)
+thread_sched_rt_reset_priority(struct thread *thread,
+ __unused unsigned short priority)
{
assert(priority <= THREAD_SCHED_RT_PRIO_MAX);
thread->rt_data.time_slice = THREAD_DEFAULT_RR_TIME_SLICE;
@@ -1888,12 +1889,12 @@ thread_unlock_runq(struct thread_runq *runq, unsigned long flags)
static void *
thread_alloc_stack(void)
{
- struct vm_page *first_page, *last_page;
+ __unused struct vm_page *first_page, *last_page;
phys_addr_t first_pa, last_pa;
size_t stack_size;
uintptr_t va;
void *mem;
- int error;
+ __unused int error;
stack_size = vm_page_round(TCB_STACK_SIZE);
mem = vm_kmem_alloc((PAGE_SIZE * 2) + stack_size);
@@ -2546,7 +2547,7 @@ void
thread_sleep(struct spinlock *interlock, const void *wchan_addr,
const char *wchan_desc)
{
- int error;
+ __unused int error;
error = thread_sleep_common(interlock, wchan_addr, wchan_desc, false, 0);
assert(!error);
@@ -2776,14 +2777,14 @@ thread_pi_setscheduler(struct thread *thread, unsigned char policy,
unsigned short priority)
{
const struct thread_sched_ops *ops;
+ __unused struct turnstile_td *td;
struct thread_runq *runq;
- struct turnstile_td *td;
unsigned int global_priority;
unsigned long flags;
bool requeue, current;
td = thread_turnstile_td(thread);
- turnstile_td_assert_lock(td);
+ assert(turnstile_td_locked(td));
ops = thread_get_sched_ops(thread_policy_to_class(policy));
global_priority = ops->get_global_priority(priority);
diff --git a/kern/thread.h b/kern/thread.h
index a3f2670d..1a811685 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -43,6 +43,7 @@
#include <kern/init.h>
#include <kern/condition.h>
#include <kern/cpumap.h>
+#include <kern/macros.h>
#include <kern/spinlock_types.h>
#include <kern/turnstile_types.h>
#include <machine/cpu.h>
@@ -281,7 +282,7 @@ void thread_pi_setscheduler(struct thread *thread, unsigned char policy,
static inline void
thread_ref(struct thread *thread)
{
- unsigned long nr_refs;
+ __unused unsigned long nr_refs;
nr_refs = atomic_fetch_add(&thread->nr_refs, 1, ATOMIC_RELAXED);
assert(nr_refs != (unsigned long)-1);
diff --git a/kern/timer.c b/kern/timer.c
index 77a6bb36..57f1765f 100644
--- a/kern/timer.c
+++ b/kern/timer.c
@@ -135,7 +135,7 @@ timer_unlock_cpu_data(struct timer_cpu_data *cpu_data, unsigned long flags)
* Timer state functions.
*/
-static bool
+__unused static bool
timer_ready(const struct timer *timer)
{
return timer->state == TIMER_TS_READY;
diff --git a/kern/turnstile.c b/kern/turnstile.c
index e59a7f3f..a9b994d5 100644
--- a/kern/turnstile.c
+++ b/kern/turnstile.c
@@ -401,14 +401,14 @@ turnstile_td_propagate_priority(struct turnstile_td *td)
turnstile_td_propagate_priority_loop(td);
}
-static void
-turnstile_assert_init_state(const struct turnstile *turnstile)
+__unused static bool
+turnstile_state_initialized(const struct turnstile *turnstile)
{
- assert(turnstile->bucket == NULL);
- assert(turnstile->sync_obj == NULL);
- assert(plist_empty(&turnstile->waiters));
- assert(turnstile->next_free == NULL);
- assert(turnstile->top_waiter == NULL);
+ return ((turnstile->bucket == NULL)
+ && (turnstile->sync_obj == NULL)
+ && (plist_empty(&turnstile->waiters))
+ && (turnstile->next_free == NULL)
+ && (turnstile->top_waiter == NULL));
}
static void
@@ -425,7 +425,7 @@ turnstile_unuse(struct turnstile *turnstile)
turnstile->sync_obj = NULL;
}
-static bool
+__unused static bool
turnstile_in_use(const struct turnstile *turnstile)
{
return turnstile->sync_obj != NULL;
@@ -464,7 +464,7 @@ turnstile_bucket_add(struct turnstile_bucket *bucket,
}
static void
-turnstile_bucket_remove(struct turnstile_bucket *bucket,
+turnstile_bucket_remove(__unused struct turnstile_bucket *bucket,
struct turnstile *turnstile)
{
assert(turnstile->bucket == bucket);
@@ -529,14 +529,14 @@ turnstile_create(void)
return NULL;
}
- turnstile_assert_init_state(turnstile);
+ assert(turnstile_state_initialized(turnstile));
return turnstile;
}
void
turnstile_destroy(struct turnstile *turnstile)
{
- turnstile_assert_init_state(turnstile);
+ assert(turnstile_state_initialized(turnstile));
kmem_cache_free(&turnstile_cache, turnstile);
}
@@ -603,7 +603,7 @@ turnstile_lend(const void *sync_obj)
assert(sync_obj != NULL);
turnstile = thread_turnstile_lend();
- turnstile_assert_init_state(turnstile);
+ assert(turnstile_state_initialized(turnstile));
td = thread_turnstile_td(thread_self());
bucket = turnstile_bucket_get(sync_obj);
@@ -653,7 +653,7 @@ turnstile_return(struct turnstile *turnstile)
spinlock_unlock(&bucket->lock);
- turnstile_assert_init_state(free_turnstile);
+ assert(turnstile_state_initialized(free_turnstile));
thread_turnstile_return(free_turnstile);
}
@@ -774,7 +774,7 @@ void
turnstile_wait(struct turnstile *turnstile, const char *wchan,
struct thread *owner)
{
- int error;
+ __unused int error;
error = turnstile_wait_common(turnstile, wchan, owner, false, 0);
assert(!error);
@@ -806,7 +806,7 @@ turnstile_own(struct turnstile *turnstile)
{
struct turnstile_td *td;
struct thread *owner;
- unsigned int top_priority;
+ __unused unsigned int top_priority;
assert(turnstile->owner == NULL);
diff --git a/kern/turnstile.h b/kern/turnstile.h
index e7b4a5e3..f7ba0847 100644
--- a/kern/turnstile.h
+++ b/kern/turnstile.h
@@ -43,8 +43,6 @@ struct turnstile;
*/
struct turnstile_td;
-#define turnstile_td_assert_lock(td) spinlock_assert_locked(&(td)->lock)
-
/*
* Initialize turnstile thread data.
*/
@@ -62,6 +60,12 @@ turnstile_td_init(struct turnstile_td *td)
* Turnstile thread data locking functions.
*/
+static inline bool
+turnstile_td_locked(struct turnstile_td *td)
+{
+ return spinlock_locked(&(td)->lock);
+}
+
static inline void
turnstile_td_lock(struct turnstile_td *td)
{