author     Richard Braun <rbraun@sceen.net>    2017-12-03 15:11:23 +0100
committer  Richard Braun <rbraun@sceen.net>    2017-12-03 15:11:23 +0100
commit     c7d2507fc455421429a531d722947ab415259c77 (patch)
tree       52609a956ec9e3bd1f1274aa9d799597866bfcd1
parent     303ed5305f8dae17ab46aa1e0dc6086d4277808c (diff)
Revert "Make assert have no side effects"
This reverts commit d18d0e85596f90e0bd597b33d58209d0b3973c95.
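
Background, as an illustrative sketch rather than code from this repository: the reverted change made assert() expand to nothing when NDEBUG is defined, so a variable or helper referenced only by an assertion became unused in release builds and had to carry an __unused annotation. Going back to the ((void)(expression)) form keeps the asserted expression evaluated in all configurations, which is why the hunks below also drop the __unused annotations and the helpers introduced for them. The my_assert macro and the surrounding program are hypothetical stand-ins that only show the difference between the two expansions:

#include <stdio.h>

/*
 * Hypothetical stand-in for the kernel's assert() under NDEBUG.
 * Define MY_ASSERT_NO_SIDE_EFFECTS to get the behavior being reverted.
 */
#ifdef MY_ASSERT_NO_SIDE_EFFECTS
#define my_assert(expression)                       /* expression discarded */
#else
#define my_assert(expression) ((void)(expression))  /* expression still evaluated */
#endif

static int
produce(int *out)
{
    *out = 42;
    return 0;   /* 0 means success */
}

int
main(void)
{
    int value;
    int error;  /* referenced only by the assertion below */

    error = produce(&value);

    /*
     * With the empty expansion, `error` is set but never used and would
     * need an __unused-style annotation to silence compiler warnings;
     * with the evaluating expansion it stays referenced.
     */
    my_assert(!error);

    printf("value: %d\n", value);
    return 0;
}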
 arch/x86/machine/cga.c       |  4
 arch/x86/machine/cpu.c       |  4
 arch/x86/machine/ioapic.c    |  2
 include/assert.h             |  4
 kern/cbuf.c                  |  2
 kern/condition.c             |  2
 kern/kmem.c                  |  2
 kern/log.c                   |  2
 kern/mutex/mutex_adaptive.c  |  3
 kern/mutex/mutex_plain.c     |  3
 kern/rtmutex.c               |  2
 kern/semaphore.c             |  3
 kern/sleepq.c                | 29
 kern/spinlock.h              | 10
 kern/sref.c                  |  2
 kern/task.h                  |  5
 kern/thread.c                | 27
 kern/thread.h                |  2
 kern/timer.c                 |  2
 kern/turnstile.c             | 30
 kern/turnstile.h             |  8
 vm/vm_kmem.c                 |  4
 vm/vm_page.c                 |  2
 vm/vm_page.h                 |  2
 24 files changed, 69 insertions(+), 87 deletions(-)
diff --git a/arch/x86/machine/cga.c b/arch/x86/machine/cga.c
index d086326d..b519ba1f 100644
--- a/arch/x86/machine/cga.c
+++ b/arch/x86/machine/cga.c
@@ -216,7 +216,7 @@ static void
 cga_bbuf_redraw(struct cga_bbuf *bbuf)
 {
     size_t size;
-    __unused int error;
+    int error;
 
     size = CGA_MEMORY_SIZE;
     error = cbuf_read(&bbuf->cbuf, bbuf->view, cga_memory, &size);
@@ -298,7 +298,7 @@ cga_bbuf_newline(struct cga_bbuf *bbuf)
 {
     uint16_t cursor = 0, spaces[CGA_COLUMNS];
     size_t i, nr_spaces, offset, size;
-    __unused int error;
+    int error;
 
     cga_bbuf_reset_view(bbuf);
diff --git a/arch/x86/machine/cpu.c b/arch/x86/machine/cpu.c
index e971d99c..98d3680e 100644
--- a/arch/x86/machine/cpu.c
+++ b/arch/x86/machine/cpu.c
@@ -466,9 +466,7 @@ cpu_init(struct cpu *cpu)
     cpu->phys_addr_width = 0;
     cpu->virt_addr_width = 0;
 
-    if (max_basic == 0) {
-        panic("cpu: unsupported maximum input value for basic information");
-    }
+    assert(max_basic >= 1);
 
     eax = 1;
     cpu_cpuid(&eax, &ebx, &ecx, &edx);
diff --git a/arch/x86/machine/ioapic.c b/arch/x86/machine/ioapic.c
index d70ec92b..82fbd6d5 100644
--- a/arch/x86/machine/ioapic.c
+++ b/arch/x86/machine/ioapic.c
@@ -209,7 +209,7 @@ ioapic_create(unsigned int apic_id, uintptr_t addr, unsigned int gsi_base)
     return ioapic;
 }
 
-__unused static bool
+static bool
 ioapic_has_gsi(const struct ioapic *ioapic, unsigned int gsi)
 {
     return ((gsi >= ioapic->first_gsi) && (gsi <= ioapic->last_gsi));
diff --git a/include/assert.h b/include/assert.h
index 9bf3bdf0..8fdc2fe2 100644
--- a/include/assert.h
+++ b/include/assert.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2017 Richard Braun.
+ * Copyright (c) 2010 Richard Braun.
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -21,7 +21,7 @@
 #define static_assert _Static_assert
 
 #ifdef NDEBUG
-#define assert(expression)
+#define assert(expression) ((void)(expression))
 #else /* NDEBUG */
 
 #include <kern/macros.h>
diff --git a/kern/cbuf.c b/kern/cbuf.c
index eca0c33c..2093f428 100644
--- a/kern/cbuf.c
+++ b/kern/cbuf.c
@@ -72,7 +72,7 @@ cbuf_push(struct cbuf *cbuf, const void *buf, size_t size, bool erase)
 int
 cbuf_pop(struct cbuf *cbuf, void *buf, size_t *sizep)
 {
-    __unused int error;
+    int error;
 
     if (cbuf_size(cbuf) == 0) {
         return ERROR_AGAIN;
diff --git a/kern/condition.c b/kern/condition.c
index b12068fa..e6d65951 100644
--- a/kern/condition.c
+++ b/kern/condition.c
@@ -88,7 +88,7 @@ condition_wait_common(struct condition *condition, struct mutex *mutex,
 void
 condition_wait(struct condition *condition, struct mutex *mutex)
 {
-    __unused int error;
+    int error;
 
     error = condition_wait_common(condition, mutex, false, 0);
     assert(!error);
diff --git a/kern/kmem.c b/kern/kmem.c
index 99664fc1..1d9fe4e0 100644
--- a/kern/kmem.c
+++ b/kern/kmem.c
@@ -646,7 +646,7 @@ kmem_cache_register(struct kmem_cache *cache, struct kmem_slab *slab)
     uintptr_t va, end;
     phys_addr_t pa;
     bool virtual;
-    __unused int error;
+    int error;
 
     assert(kmem_cache_registration_required(cache));
     assert(slab->nr_refs == 0);
diff --git a/kern/log.c b/kern/log.c
--- a/kern/log.c
+++ b/kern/log.c
@@ -472,7 +472,7 @@ INIT_OP_DEFINE(log_start,
 static void
 log_write(const void *s, size_t size)
 {
-    __unused int error;
+    int error;
 
     error = cbuf_push(&log_cbuf, s, size, true);
     assert(!error);
diff --git a/kern/mutex/mutex_adaptive.c b/kern/mutex/mutex_adaptive.c
index 8da55270..68b006ad 100644
--- a/kern/mutex/mutex_adaptive.c
+++ b/kern/mutex/mutex_adaptive.c
@@ -24,7 +24,6 @@
 #include <kern/clock.h>
 #include <kern/error.h>
 #include <kern/init.h>
-#include <kern/macros.h>
 #include <kern/mutex.h>
 #include <kern/mutex_types.h>
 #include <kern/sleepq.h>
@@ -236,7 +235,7 @@ out:
 void
 mutex_adaptive_lock_slow(struct mutex *mutex)
 {
-    __unused int error;
+    int error;
 
     error = mutex_adaptive_lock_slow_common(mutex, false, 0);
     assert(!error);
diff --git a/kern/mutex/mutex_plain.c b/kern/mutex/mutex_plain.c
index abfa3396..2c655940 100644
--- a/kern/mutex/mutex_plain.c
+++ b/kern/mutex/mutex_plain.c
@@ -22,7 +22,6 @@
 
 #include <kern/atomic.h>
 #include <kern/init.h>
-#include <kern/macros.h>
 #include <kern/mutex.h>
 #include <kern/mutex_types.h>
 #include <kern/sleepq.h>
@@ -134,7 +133,7 @@ out:
 void
 mutex_plain_lock_slow(struct mutex *mutex)
 {
-    __unused int error;
+    int error;
 
     error = mutex_plain_lock_slow_common(mutex, false, 0);
     assert(!error);
diff --git a/kern/rtmutex.c b/kern/rtmutex.c
index b6c4645c..00d87dfe 100644
--- a/kern/rtmutex.c
+++ b/kern/rtmutex.c
@@ -183,7 +183,7 @@ out:
 void
 rtmutex_lock_slow(struct rtmutex *rtmutex)
 {
-    __unused int error;
+    int error;
 
     error = rtmutex_lock_slow_common(rtmutex, false, 0);
     assert(!error);
diff --git a/kern/semaphore.c b/kern/semaphore.c
index 003f053c..72e843a9 100644
--- a/kern/semaphore.c
+++ b/kern/semaphore.c
@@ -20,7 +20,6 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include <kern/macros.h>
 #include <kern/semaphore.h>
 #include <kern/semaphore_i.h>
 #include <kern/sleepq.h>
@@ -64,7 +63,7 @@ semaphore_wait_slow_common(struct semaphore *semaphore,
 void
 semaphore_wait_slow(struct semaphore *semaphore)
 {
-    __unused int error;
+    int error;
 
     error = semaphore_wait_slow_common(semaphore, false, 0);
     assert(!error);
diff --git a/kern/sleepq.c b/kern/sleepq.c
index 77bcd021..bd0e363e 100644
--- a/kern/sleepq.c
+++ b/kern/sleepq.c
@@ -111,14 +111,14 @@ sleepq_waiter_wakeup(struct sleepq_waiter *waiter)
     thread_wakeup(waiter->thread);
 }
 
-__unused static bool
-sleepq_state_initialized(const struct sleepq *sleepq)
+static void
+sleepq_assert_init_state(const struct sleepq *sleepq)
 {
-    return ((sleepq->bucket == NULL)
-            && (sleepq->sync_obj == NULL)
-            && (list_empty(&sleepq->waiters))
-            && (sleepq->oldest_waiter == NULL)
-            && (sleepq->next_free == NULL));
+    assert(sleepq->bucket == NULL);
+    assert(sleepq->sync_obj == NULL);
+    assert(list_empty(&sleepq->waiters));
+    assert(sleepq->oldest_waiter == NULL);
+    assert(sleepq->next_free == NULL);
 }
 
 static void
@@ -135,7 +135,7 @@ sleepq_unuse(struct sleepq *sleepq)
     sleepq->sync_obj = NULL;
 }
 
-__unused static bool
+static bool
 sleepq_in_use(const struct sleepq *sleepq)
 {
     return sleepq->sync_obj != NULL;
@@ -187,8 +187,7 @@ sleepq_bucket_add(struct sleepq_bucket *bucket, struct sleepq *sleepq)
 }
 
 static void
-sleepq_bucket_remove(__unused struct sleepq_bucket *bucket,
-                     struct sleepq *sleepq)
+sleepq_bucket_remove(struct sleepq_bucket *bucket, struct sleepq *sleepq)
 {
     assert(sleepq->bucket == bucket);
     sleepq->bucket = NULL;
@@ -255,14 +254,14 @@ sleepq_create(void)
         return NULL;
     }
 
-    assert(sleepq_state_initialized(sleepq));
+    sleepq_assert_init_state(sleepq);
     return sleepq;
 }
 
 void
 sleepq_destroy(struct sleepq *sleepq)
 {
-    assert(sleepq_state_initialized(sleepq));
+    sleepq_assert_init_state(sleepq);
     kmem_cache_free(&sleepq_cache, sleepq);
 }
 
@@ -354,7 +353,7 @@ sleepq_lend(const void *sync_obj, bool condition, unsigned long *flags)
     assert(sync_obj != NULL);
 
     sleepq = thread_sleepq_lend();
-    assert(sleepq_state_initialized(sleepq));
+    sleepq_assert_init_state(sleepq);
 
     bucket = sleepq_bucket_get(sync_obj, condition);
 
@@ -392,7 +391,7 @@ sleepq_return(struct sleepq *sleepq, unsigned long flags)
 
     spinlock_unlock_intr_restore(&bucket->lock, flags);
 
-    assert(sleepq_state_initialized(free_sleepq));
+    sleepq_assert_init_state(free_sleepq);
     thread_sleepq_return(free_sleepq);
 }
@@ -476,7 +475,7 @@ sleepq_wait_common(struct sleepq *sleepq, const char *wchan,
 void
 sleepq_wait(struct sleepq *sleepq, const char *wchan)
 {
-    __unused int error;
+    int error;
 
     error = sleepq_wait_common(sleepq, wchan, false, 0);
     assert(!error);
diff --git a/kern/spinlock.h b/kern/spinlock.h
index 49e6d558..dd98cbf1 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -26,8 +26,6 @@
 #ifndef _KERN_SPINLOCK_H
 #define _KERN_SPINLOCK_H
 
-#include <stdbool.h>
-
 #include <kern/init.h>
 #include <kern/macros.h>
 #include <kern/spinlock_i.h>
@@ -36,17 +34,13 @@
 
 struct spinlock;
 
+#define spinlock_assert_locked(lock) assert((lock)->value != SPINLOCK_UNLOCKED)
+
 /*
  * Initialize a spin lock.
  */
 void spinlock_init(struct spinlock *lock);
 
-static inline bool
-spinlock_locked(const struct spinlock *lock)
-{
-    return lock->value != SPINLOCK_UNLOCKED;
-}
-
 /*
  * Attempt to lock the given spin lock.
  *
diff --git a/kern/sref.c b/kern/sref.c
index 051fe4c1..84417a70 100644
--- a/kern/sref.c
+++ b/kern/sref.c
@@ -243,7 +243,7 @@ sref_queue_concat(struct sref_queue *queue1, struct sref_queue *queue2)
     queue1->size += queue2->size;
 }
 
-__unused static inline bool
+static inline bool
 sref_counter_aligned(const struct sref_counter *counter)
 {
     return (((uintptr_t)counter & (~SREF_WEAKREF_MASK)) == 0);
diff --git a/kern/task.h b/kern/task.h
index 4573979e..4d9142ce 100644
--- a/kern/task.h
+++ b/kern/task.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017 Richard Braun.
+ * Copyright (c) 2012 Richard Braun.
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -21,7 +21,6 @@
 #include <kern/atomic.h>
 #include <kern/init.h>
 #include <kern/list.h>
-#include <kern/macros.h>
 #include <kern/spinlock.h>
 #include <kern/thread.h>
 #include <vm/vm_map.h>
@@ -54,7 +53,7 @@ task_get_kernel_task(void)
 static inline void
 task_ref(struct task *task)
 {
-    __unused unsigned long nr_refs;
+    unsigned long nr_refs;
 
     nr_refs = atomic_fetch_add(&task->nr_refs, 1, ATOMIC_RELAXED);
     assert(nr_refs != (unsigned long)-1);
diff --git a/kern/thread.c b/kern/thread.c
index 09e15aa0..5bc643c9 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -476,7 +476,7 @@ thread_runq_add(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
     assert(!thread->in_runq);
 
     ops = thread_get_real_sched_ops(thread);
@@ -503,7 +503,7 @@ thread_runq_remove(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
     assert(thread->in_runq);
 
     runq->nr_threads--;
@@ -524,7 +524,7 @@ thread_runq_put_prev(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
 
     ops = thread_get_real_sched_ops(thread);
 
@@ -540,7 +540,7 @@ thread_runq_get_next(struct thread_runq *runq)
     unsigned int i;
 
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
 
     for (i = 0; i < ARRAY_SIZE(thread_sched_ops); i++) {
         thread = thread_sched_ops[i].get_next(runq);
@@ -573,7 +573,7 @@ static void
 thread_runq_wakeup(struct thread_runq *runq, struct thread *thread)
 {
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
     assert(thread->state == THREAD_RUNNING);
 
     thread_runq_add(runq, thread);
@@ -613,7 +613,7 @@ thread_runq_schedule(struct thread_runq *runq)
            && (__builtin_frame_address(0) < (prev->stack + TCB_STACK_SIZE)));
     assert(prev->preempt == THREAD_SUSPEND_PREEMPT_LEVEL);
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
 
     llsync_report_context_switch();
 
@@ -658,7 +658,7 @@ thread_runq_schedule(struct thread_runq *runq)
 
     assert(prev->preempt == THREAD_SUSPEND_PREEMPT_LEVEL);
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
 
     return runq;
 }
@@ -763,8 +763,7 @@ thread_sched_rt_get_next(struct thread_runq *runq)
 }
 
 static void
-thread_sched_rt_reset_priority(struct thread *thread,
-                               __unused unsigned short priority)
+thread_sched_rt_reset_priority(struct thread *thread, unsigned short priority)
 {
     assert(priority <= THREAD_SCHED_RT_PRIO_MAX);
     thread->rt_data.time_slice = THREAD_DEFAULT_RR_TIME_SLICE;
@@ -1886,13 +1885,13 @@ thread_unlock_runq(struct thread_runq *runq, unsigned long flags)
 static void *
 thread_alloc_stack(void)
 {
-    __unused struct vm_page *first_page, *last_page;
+    struct vm_page *first_page, *last_page;
     phys_addr_t first_pa, last_pa;
     struct pmap *kernel_pmap;
     size_t stack_size;
     uintptr_t va;
     void *mem;
-    __unused int error;
+    int error;
 
     kernel_pmap = pmap_get_kernel_pmap();
 
@@ -2551,7 +2550,7 @@ void
 thread_sleep(struct spinlock *interlock, const void *wchan_addr,
              const char *wchan_desc)
 {
-    __unused int error;
+    int error;
 
     error = thread_sleep_common(interlock, wchan_addr, wchan_desc, false, 0);
     assert(!error);
@@ -2781,14 +2780,14 @@ thread_pi_setscheduler(struct thread *thread, unsigned char policy,
                        unsigned short priority)
 {
     const struct thread_sched_ops *ops;
-    __unused struct turnstile_td *td;
     struct thread_runq *runq;
+    struct turnstile_td *td;
     unsigned int global_priority;
     unsigned long flags;
     bool requeue, current;
 
     td = thread_turnstile_td(thread);
-    assert(turnstile_td_locked(td));
+    turnstile_td_assert_lock(td);
 
     ops = thread_get_sched_ops(thread_policy_to_class(policy));
     global_priority = ops->get_global_priority(priority);
diff --git a/kern/thread.h b/kern/thread.h
index 29c2dfe5..af8539d2 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -283,7 +283,7 @@ void thread_pi_setscheduler(struct thread *thread, unsigned char policy,
 static inline void
 thread_ref(struct thread *thread)
 {
-    __unused unsigned long nr_refs;
+    unsigned long nr_refs;
 
     nr_refs = atomic_fetch_add(&thread->nr_refs, 1, ATOMIC_RELAXED);
     assert(nr_refs != (unsigned long)-1);
diff --git a/kern/timer.c b/kern/timer.c
index 5a94860e..d6d1d9d2 100644
--- a/kern/timer.c
+++ b/kern/timer.c
@@ -137,7 +137,7 @@ timer_unlock_cpu_data(struct timer_cpu_data *cpu_data, unsigned long flags)
 /*
  * Timer state functions.
  */
-__unused static bool
+static bool
 timer_ready(const struct timer *timer)
 {
     return timer->state == TIMER_TS_READY;
diff --git a/kern/turnstile.c b/kern/turnstile.c
index 157c43f2..f4a1892d 100644
--- a/kern/turnstile.c
+++ b/kern/turnstile.c
@@ -402,14 +402,14 @@ turnstile_td_propagate_priority(struct turnstile_td *td)
     turnstile_td_propagate_priority_loop(td);
 }
 
-__unused static bool
-turnstile_state_initialized(const struct turnstile *turnstile)
+static void
+turnstile_assert_init_state(const struct turnstile *turnstile)
 {
-    return ((turnstile->bucket == NULL)
-            && (turnstile->sync_obj == NULL)
-            && (plist_empty(&turnstile->waiters))
-            && (turnstile->next_free == NULL)
-            && (turnstile->top_waiter == NULL));
+    assert(turnstile->bucket == NULL);
+    assert(turnstile->sync_obj == NULL);
+    assert(plist_empty(&turnstile->waiters));
+    assert(turnstile->next_free == NULL);
+    assert(turnstile->top_waiter == NULL);
 }
 
 static void
@@ -426,7 +426,7 @@ turnstile_unuse(struct turnstile *turnstile)
     turnstile->sync_obj = NULL;
 }
 
-__unused static bool
+static bool
 turnstile_in_use(const struct turnstile *turnstile)
 {
     return turnstile->sync_obj != NULL;
@@ -465,7 +465,7 @@ turnstile_bucket_add(struct turnstile_bucket *bucket,
 }
 
 static void
-turnstile_bucket_remove(__unused struct turnstile_bucket *bucket,
+turnstile_bucket_remove(struct turnstile_bucket *bucket,
                         struct turnstile *turnstile)
 {
     assert(turnstile->bucket == bucket);
@@ -530,14 +530,14 @@ turnstile_create(void)
         return NULL;
     }
 
-    assert(turnstile_state_initialized(turnstile));
+    turnstile_assert_init_state(turnstile);
     return turnstile;
 }
 
 void
 turnstile_destroy(struct turnstile *turnstile)
 {
-    assert(turnstile_state_initialized(turnstile));
+    turnstile_assert_init_state(turnstile);
     kmem_cache_free(&turnstile_cache, turnstile);
 }
 
@@ -604,7 +604,7 @@ turnstile_lend(const void *sync_obj)
     assert(sync_obj != NULL);
 
     turnstile = thread_turnstile_lend();
-    assert(turnstile_state_initialized(turnstile));
+    turnstile_assert_init_state(turnstile);
 
     td = thread_turnstile_td(thread_self());
     bucket = turnstile_bucket_get(sync_obj);
@@ -654,7 +654,7 @@ turnstile_return(struct turnstile *turnstile)
 
     spinlock_unlock(&bucket->lock);
 
-    assert(turnstile_state_initialized(free_turnstile));
+    turnstile_assert_init_state(free_turnstile);
     thread_turnstile_return(free_turnstile);
 }
@@ -778,7 +778,7 @@ void
 turnstile_wait(struct turnstile *turnstile, const char *wchan,
                struct thread *owner)
 {
-    __unused int error;
+    int error;
 
     error = turnstile_wait_common(turnstile, wchan, owner, false, 0);
     assert(!error);
@@ -810,7 +810,7 @@ turnstile_own(struct turnstile *turnstile)
 {
     struct turnstile_td *td;
     struct thread *owner;
-    __unused unsigned int top_priority;
+    unsigned int top_priority;
 
     assert(turnstile->owner == NULL);
 
diff --git a/kern/turnstile.h b/kern/turnstile.h
index f7ba0847..e7b4a5e3 100644
--- a/kern/turnstile.h
+++ b/kern/turnstile.h
@@ -43,6 +43,8 @@ struct turnstile;
  */
 struct turnstile_td;
 
+#define turnstile_td_assert_lock(td) spinlock_assert_locked(&(td)->lock)
+
 /*
  * Initialize turnstile thread data.
  */
@@ -60,12 +62,6 @@ turnstile_td_init(struct turnstile_td *td)
 /*
  * Turnstile thread data locking functions.
  */
-static inline bool
-turnstile_td_locked(struct turnstile_td *td)
-{
-    return spinlock_locked(&(td)->lock);
-}
-
 static inline void
 turnstile_td_lock(struct turnstile_td *td)
 {
diff --git a/vm/vm_kmem.c b/vm/vm_kmem.c
index c0a91f15..07fd4d88 100644
--- a/vm/vm_kmem.c
+++ b/vm/vm_kmem.c
@@ -56,7 +56,7 @@ INIT_OP_DEFINE(vm_kmem_setup,
                INIT_OP_DEP(vm_object_setup, true),
                INIT_OP_DEP(vm_page_setup, true));
 
-__unused static int
+static int
 vm_kmem_alloc_check(size_t size)
 {
     if (!vm_page_aligned(size)
@@ -67,7 +67,7 @@ vm_kmem_alloc_check(size_t size)
     return 0;
 }
 
-__unused static int
+static int
 vm_kmem_free_check(uintptr_t va, size_t size)
 {
     if (!vm_page_aligned(va)) {
diff --git a/vm/vm_page.c b/vm/vm_page.c
index a3844662..38cc8af0 100644
--- a/vm/vm_page.c
+++ b/vm/vm_page.c
@@ -792,7 +792,7 @@ vm_page_lookup(phys_addr_t pa)
     return NULL;
 }
 
-__unused static bool
+static bool
 vm_page_block_referenced(const struct vm_page *page, unsigned int order)
 {
     unsigned int i, nr_pages;
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 844ad066..9c8fa91c 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -240,7 +240,7 @@ vm_page_referenced(const struct vm_page *page)
 static inline void
 vm_page_ref(struct vm_page *page)
 {
-    __unused unsigned int nr_refs;
+    unsigned int nr_refs;
 
     nr_refs = atomic_fetch_add(&page->nr_refs, 1, ATOMIC_RELAXED);
     assert(nr_refs != (unsigned int)-1);
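
A side note on the sleepq and turnstile hunks above: the revert also brings back assert-only helpers such as sleepq_assert_init_state() in place of bool predicates like sleepq_state_initialized() that existed only to be wrapped in assert(). The sketch below is hypothetical (the struct and field names are invented) and only illustrates the shape of the two styles; with one assertion per field, a failure report names the specific field, and the helper stays referenced in every build configuration.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical object; the field names are invented for this sketch. */
struct example {
    void *bucket;
    void *sync_obj;
};

/* Style being reverted away from: a bool predicate used only inside assert(). */
static bool
example_state_initialized(const struct example *obj)
{
    return (obj->bucket == NULL) && (obj->sync_obj == NULL);
}

/* Style restored by the revert: a helper made of individual assertions. */
static void
example_assert_init_state(const struct example *obj)
{
    assert(obj->bucket == NULL);
    assert(obj->sync_obj == NULL);
}

int
main(void)
{
    struct example obj = { NULL, NULL };

    assert(example_state_initialized(&obj));    /* single combined check */
    example_assert_init_state(&obj);            /* one assertion per field */
    return 0;
}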