author    Richard Braun <rbraun@sceen.net>    2018-07-30 20:55:20 +0200
committer Richard Braun <rbraun@sceen.net>    2018-07-30 20:55:20 +0200
commit    5f202c9f744a5d9c5b751038edd2379b3d244227 (patch)
tree      c5bce5b9e1d9c4b01dfed4ff941ad9944814b93c /kern
parent    d3e43f5bfda0bdad7a829a7ed8c1272a395b196b (diff)
Rework assertive functions
Instead of combining checking and assertion in single functions, rework those into pure checking functions usable with assert(). The combined functions had been introduced to work around unused-function/variable warnings caused by an earlier implementation of assert().
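For context, a minimal sketch of the pattern this commit applies throughout kern/ (the example_* names are illustrative, not taken from the tree):

    #include <assert.h>
    #include <stdbool.h>

    struct example {
        int state;
    };

    /* Before: checking and assertion combined in one function. Under
     * NDEBUG, a naive assert() implementation can leave the parameter
     * or helper variables unused, triggering compiler warnings. */
    static inline void
    example_assert_ready(const struct example *ex)
    {
        assert(ex->state != 0);
    }

    /* After: a pure predicate with no side effects. Callers wrap it
     * in assert(), so the check compiles away under NDEBUG while the
     * predicate stays usable in ordinary boolean contexts. */
    static inline bool
    example_ready(const struct example *ex)
    {
        return ex->state != 0;
    }

Call sites change accordingly, from example_assert_ready(ex); to assert(example_ready(ex));.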
Diffstat (limited to 'kern')
-rw-r--r--  kern/condition.c                2
-rw-r--r--  kern/mutex.h                    7
-rw-r--r--  kern/mutex/mutex_adaptive_i.h  12
-rw-r--r--  kern/mutex/mutex_pi_i.h         8
-rw-r--r--  kern/mutex/mutex_plain_i.h     14
-rw-r--r--  kern/rdxtree.c                 19
-rw-r--r--  kern/rtmutex.h                 11
-rw-r--r--  kern/rtmutex_i.h               11
-rw-r--r--  kern/sleepq.c                  22
-rw-r--r--  kern/spinlock.h                 3
-rw-r--r--  kern/thread.c                  16
-rw-r--r--  kern/turnstile.c               22
-rw-r--r--  kern/turnstile.h                6
13 files changed, 96 insertions(+), 57 deletions(-)
diff --git a/kern/condition.c b/kern/condition.c
index e2c26b62..0a8c78d7 100644
--- a/kern/condition.c
+++ b/kern/condition.c
@@ -35,7 +35,7 @@ condition_wait_common(struct condition *condition, struct mutex *mutex,
struct sleepq *sleepq;
int error;
- mutex_assert_locked(mutex);
+ assert(mutex_locked(mutex));
sleepq = sleepq_lend(condition, true);
diff --git a/kern/mutex.h b/kern/mutex.h
index f446f5fe..0e6cd46c 100644
--- a/kern/mutex.h
+++ b/kern/mutex.h
@@ -23,6 +23,7 @@
#ifndef KERN_MUTEX_H
#define KERN_MUTEX_H
+#include <stdbool.h>
#include <stdint.h>
#if defined(CONFIG_MUTEX_ADAPTIVE)
@@ -47,7 +48,11 @@ mutex_init(struct mutex *mutex)
mutex_impl_init(mutex);
}
-#define mutex_assert_locked(mutex) mutex_impl_assert_locked(mutex)
+static inline bool
+mutex_locked(const struct mutex *mutex)
+{
+ return mutex_impl_locked(mutex);
+}
/*
* Attempt to lock the given mutex.
diff --git a/kern/mutex/mutex_adaptive_i.h b/kern/mutex/mutex_adaptive_i.h
index e29fdb43..e171c9f1 100644
--- a/kern/mutex/mutex_adaptive_i.h
+++ b/kern/mutex/mutex_adaptive_i.h
@@ -25,6 +25,7 @@
#include <assert.h>
#include <errno.h>
+#include <stdbool.h>
#include <stdint.h>
#include <kern/atomic.h>
@@ -48,7 +49,14 @@ mutex_adaptive_init(struct mutex *mutex)
mutex->owner = 0;
}
-#define mutex_adaptive_assert_locked(mutex) assert((mutex)->owner != 0)
+static inline bool
+mutex_adaptive_locked(const struct mutex *mutex)
+{
+ uintptr_t owner;
+
+ owner = atomic_load(&mutex->owner, ATOMIC_RELAXED);
+ return (owner != 0);
+}
static inline int
mutex_adaptive_lock_fast(struct mutex *mutex)
@@ -89,7 +97,7 @@ void mutex_adaptive_unlock_slow(struct mutex *mutex);
*/
#define mutex_impl_init mutex_adaptive_init
-#define mutex_impl_assert_locked mutex_adaptive_assert_locked
+#define mutex_impl_locked mutex_adaptive_locked
static inline int
mutex_impl_trylock(struct mutex *mutex)
diff --git a/kern/mutex/mutex_pi_i.h b/kern/mutex/mutex_pi_i.h
index f3bb28fa..1b79c202 100644
--- a/kern/mutex/mutex_pi_i.h
+++ b/kern/mutex/mutex_pi_i.h
@@ -23,6 +23,7 @@
" use <kern/mutex.h> instead"
#endif
+#include <stdbool.h>
#include <stdint.h>
#include <kern/mutex_types.h>
@@ -38,8 +39,11 @@ mutex_impl_init(struct mutex *mutex)
rtmutex_init(&mutex->rtmutex);
}
-#define mutex_impl_assert_locked(mutex) \
- rtmutex_assert_locked(&(mutex)->rtmutex)
+static inline bool
+mutex_impl_locked(const struct mutex *mutex)
+{
+ return rtmutex_locked(&mutex->rtmutex);
+}
static inline int
mutex_impl_trylock(struct mutex *mutex)
diff --git a/kern/mutex/mutex_plain_i.h b/kern/mutex/mutex_plain_i.h
index a7928295..d3ce2d30 100644
--- a/kern/mutex/mutex_plain_i.h
+++ b/kern/mutex/mutex_plain_i.h
@@ -25,12 +25,14 @@
#include <assert.h>
#include <errno.h>
+#include <stdbool.h>
#include <stdint.h>
#include <kern/atomic.h>
#include <kern/init.h>
#include <kern/mutex_types.h>
+/* TODO Fix namespace */
#define MUTEX_UNLOCKED 0
#define MUTEX_LOCKED 1
#define MUTEX_CONTENDED 2
@@ -41,8 +43,14 @@ mutex_plain_init(struct mutex *mutex)
mutex->state = MUTEX_UNLOCKED;
}
-#define mutex_plain_assert_locked(mutex) \
- assert((mutex)->state != MUTEX_UNLOCKED)
+static inline bool
+mutex_plain_locked(const struct mutex *mutex)
+{
+ unsigned int state;
+
+ state = atomic_load(&mutex->state, ATOMIC_RELAXED);
+ return (state != MUTEX_UNLOCKED);
+}
static inline int
mutex_plain_lock_fast(struct mutex *mutex)
@@ -82,7 +90,7 @@ void mutex_plain_unlock_slow(struct mutex *mutex);
*/
#define mutex_impl_init mutex_plain_init
-#define mutex_impl_assert_locked mutex_plain_assert_locked
+#define mutex_impl_locked mutex_plain_locked
static inline int
mutex_impl_trylock(struct mutex *mutex)
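Note that mutex_impl_locked is not a single function: each backend (adaptive and PI above, plain here) defines it as an alias for its own predicate, and kern/mutex.h selects the backend at compile time (see the CONFIG_MUTEX_ADAPTIVE test in the mutex.h hunk). The public mutex_locked() then wraps whichever alias is active.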
diff --git a/kern/rdxtree.c b/kern/rdxtree.c
index e7abaf02..9072ea12 100644
--- a/kern/rdxtree.c
+++ b/kern/rdxtree.c
@@ -109,12 +109,13 @@ struct rdxtree_node {
static struct kmem_cache rdxtree_node_cache;
-static inline void
-rdxtree_assert_alignment(const void *ptr)
+#ifndef NDEBUG
+static bool
+rdxtree_alignment_valid(const void *ptr)
{
- assert(((uintptr_t)ptr & ~RDXTREE_ENTRY_ADDR_MASK) == 0);
- (void)ptr;
+ return (((uintptr_t)ptr & ~RDXTREE_ENTRY_ADDR_MASK) == 0);
}
+#endif /* NDEBUG */
static inline void *
rdxtree_entry_addr(void *entry)
@@ -156,7 +157,7 @@ rdxtree_node_create(struct rdxtree_node **nodep, unsigned short height)
return ENOMEM;
}
- rdxtree_assert_alignment(node);
+ assert(rdxtree_alignment_valid(node));
node->parent = NULL;
node->height = height;
*nodep = node;
@@ -464,7 +465,7 @@ rdxtree_insert_common(struct rdxtree *tree, rdxtree_key_t key,
int error;
assert(ptr != NULL);
- rdxtree_assert_alignment(ptr);
+ assert(rdxtree_alignment_valid(ptr));
if (unlikely(key > rdxtree_max_key(tree->height))) {
error = rdxtree_grow(tree, key);
@@ -552,7 +553,7 @@ rdxtree_insert_alloc_common(struct rdxtree *tree, void *ptr,
assert(rdxtree_key_alloc_enabled(tree));
assert(ptr != NULL);
- rdxtree_assert_alignment(ptr);
+ assert(rdxtree_alignment_valid(ptr));
height = tree->height;
@@ -743,11 +744,11 @@ rdxtree_replace_slot(void **slot, void *ptr)
void *old;
assert(ptr != NULL);
- rdxtree_assert_alignment(ptr);
+ assert(rdxtree_alignment_valid(ptr));
old = *slot;
assert(old != NULL);
- rdxtree_assert_alignment(old);
+ assert(rdxtree_alignment_valid(old));
rcu_store_ptr(*slot, ptr);
return old;
}
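Note on the #ifndef NDEBUG guard in the rdxtree.c hunks above: unlike the static inline predicates in the headers, rdxtree_alignment_valid() is a plain static function in a translation unit, referenced only from assert(). With NDEBUG defined, assert() expands to a no-op, the function becomes unreferenced, and -Wall's -Wunused-function fires, hence the guard around the definition. A sketch of the shape (illustrative names, assumed 4-byte alignment):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #ifndef NDEBUG
    static bool
    ptr_aligned(const void *ptr)
    {
        return (((uintptr_t)ptr & 0x3) == 0);
    }
    #endif /* NDEBUG */

    void
    check(void *ptr)
    {
        /* The whole call disappears under NDEBUG, so the guarded
         * definition above is never left unreferenced. */
        assert(ptr_aligned(ptr));
    }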
diff --git a/kern/rtmutex.h b/kern/rtmutex.h
index 64c09241..99b8ca74 100644
--- a/kern/rtmutex.h
+++ b/kern/rtmutex.h
@@ -26,8 +26,10 @@
#include <assert.h>
#include <errno.h>
+#include <stdbool.h>
#include <stdint.h>
+#include <kern/atomic.h>
#include <kern/init.h>
#include <kern/macros.h>
#include <kern/rtmutex_i.h>
@@ -35,7 +37,14 @@
struct rtmutex;
-#define rtmutex_assert_locked(rtmutex) assert((rtmutex)->owner != 0)
+static inline bool
+rtmutex_locked(const struct rtmutex *rtmutex)
+{
+ uintptr_t owner;
+
+ owner = atomic_load(&rtmutex->owner, ATOMIC_RELAXED);
+ return (owner != 0);
+}
/*
* Initialize a real-time mutex.
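A note on the atomic loads: where the removed macros read the owner word directly, the new predicates (rtmutex_locked() here, mutex_adaptive_locked() and mutex_plain_locked() above) go through atomic_load(..., ATOMIC_RELAXED). Presumably relaxed ordering suffices because these predicates only take an instantaneous snapshot for use in assertions: the load must be atomic to avoid a data race with concurrent lock and unlock operations, but it carries no ordering obligations of its own.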
diff --git a/kern/rtmutex_i.h b/kern/rtmutex_i.h
index 373c180f..83d48a77 100644
--- a/kern/rtmutex_i.h
+++ b/kern/rtmutex_i.h
@@ -47,8 +47,11 @@
#define RTMUTEX_OWNER_MASK (~((uintptr_t)(RTMUTEX_FORCE_WAIT \
| RTMUTEX_CONTENDED)))
-#define rtmutex_assert_owner_aligned(owner) \
- assert(((owner) & ~RTMUTEX_OWNER_MASK) == 0)
+static inline bool
+rtmutex_owner_aligned(uintptr_t owner)
+{
+ return ((owner & ~RTMUTEX_OWNER_MASK) == 0);
+}
static inline uintptr_t
rtmutex_lock_fast(struct rtmutex *rtmutex)
@@ -56,7 +59,7 @@ rtmutex_lock_fast(struct rtmutex *rtmutex)
uintptr_t owner;
owner = (uintptr_t)thread_self();
- rtmutex_assert_owner_aligned(owner);
+ assert(rtmutex_owner_aligned(owner));
return atomic_cas(&rtmutex->owner, 0, owner, ATOMIC_ACQUIRE);
}
@@ -66,7 +69,7 @@ rtmutex_unlock_fast(struct rtmutex *rtmutex)
uintptr_t owner, prev_owner;
owner = (uintptr_t)thread_self();
- rtmutex_assert_owner_aligned(owner);
+ assert(rtmutex_owner_aligned(owner));
prev_owner = atomic_cas(&rtmutex->owner, owner, 0, ATOMIC_RELEASE);
assert((prev_owner & RTMUTEX_OWNER_MASK) == owner);
return prev_owner;
diff --git a/kern/sleepq.c b/kern/sleepq.c
index df37a49d..ca80e07b 100644
--- a/kern/sleepq.c
+++ b/kern/sleepq.c
@@ -111,14 +111,14 @@ sleepq_waiter_wakeup(struct sleepq_waiter *waiter)
thread_wakeup(waiter->thread);
}
-static void
-sleepq_assert_init_state(const struct sleepq *sleepq)
+static bool
+sleepq_init_state_valid(const struct sleepq *sleepq)
{
- assert(sleepq->bucket == NULL);
- assert(sleepq->sync_obj == NULL);
- assert(list_empty(&sleepq->waiters));
- assert(sleepq->oldest_waiter == NULL);
- assert(sleepq->next_free == NULL);
+ return (sleepq->bucket == NULL)
+ && (sleepq->sync_obj == NULL)
+ && (list_empty(&sleepq->waiters))
+ && (sleepq->oldest_waiter == NULL)
+ && (sleepq->next_free == NULL);
}
static void
@@ -254,14 +254,14 @@ sleepq_create(void)
return NULL;
}
- sleepq_assert_init_state(sleepq);
+ assert(sleepq_init_state_valid(sleepq));
return sleepq;
}
void
sleepq_destroy(struct sleepq *sleepq)
{
- sleepq_assert_init_state(sleepq);
+ assert(sleepq_init_state_valid(sleepq));
kmem_cache_free(&sleepq_cache, sleepq);
}
@@ -404,7 +404,7 @@ sleepq_lend_common(const void *sync_obj, bool condition, unsigned long *flags)
assert(sync_obj != NULL);
sleepq = thread_sleepq_lend();
- sleepq_assert_init_state(sleepq);
+ assert(sleepq_init_state_valid(sleepq));
bucket = sleepq_bucket_get(sync_obj, condition);
@@ -450,7 +450,7 @@ sleepq_return_common(struct sleepq *sleepq, unsigned long *flags)
spinlock_unlock(&bucket->lock);
}
- sleepq_assert_init_state(free_sleepq);
+ assert(sleepq_init_state_valid(free_sleepq));
thread_sleepq_return(free_sleepq);
}
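A design-choice note on sleepq_init_state_valid() (and turnstile_init_state_valid() later in this diff): folding five separate assert() statements into one predicate means a failed check now reports only the predicate expression, e.g.

    assert(sleepq_init_state_valid(sleepq));

rather than pinpointing which field held a bad value. In exchange, the check becomes a reusable pure function and compiles away as a unit under NDEBUG.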
diff --git a/kern/spinlock.h b/kern/spinlock.h
index f9e74f56..3060aaa6 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -39,9 +39,6 @@
struct spinlock;
-/* TODO Remove, let users do it instead */
-#define spinlock_assert_locked(lock) assert(spinlock_locked(lock))
-
static inline bool
spinlock_locked(const struct spinlock *lock)
{
diff --git a/kern/thread.c b/kern/thread.c
index 6625163a..b2e360b9 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -485,7 +485,7 @@ thread_runq_add(struct thread_runq *runq, struct thread *thread)
const struct thread_sched_ops *ops;
assert(!cpu_intr_enabled());
- spinlock_assert_locked(&runq->lock);
+ assert(spinlock_locked(&runq->lock));
assert(!thread->in_runq);
ops = thread_get_real_sched_ops(thread);
@@ -512,7 +512,7 @@ thread_runq_remove(struct thread_runq *runq, struct thread *thread)
const struct thread_sched_ops *ops;
assert(!cpu_intr_enabled());
- spinlock_assert_locked(&runq->lock);
+ assert(spinlock_locked(&runq->lock));
assert(thread->in_runq);
runq->nr_threads--;
@@ -533,7 +533,7 @@ thread_runq_put_prev(struct thread_runq *runq, struct thread *thread)
const struct thread_sched_ops *ops;
assert(!cpu_intr_enabled());
- spinlock_assert_locked(&runq->lock);
+ assert(spinlock_locked(&runq->lock));
ops = thread_get_real_sched_ops(thread);
@@ -549,7 +549,7 @@ thread_runq_get_next(struct thread_runq *runq)
unsigned int i;
assert(!cpu_intr_enabled());
- spinlock_assert_locked(&runq->lock);
+ assert(spinlock_locked(&runq->lock));
for (i = 0; i < ARRAY_SIZE(thread_sched_ops); i++) {
thread = thread_sched_ops[i].get_next(runq);
@@ -582,7 +582,7 @@ static void
thread_runq_wakeup(struct thread_runq *runq, struct thread *thread)
{
assert(!cpu_intr_enabled());
- spinlock_assert_locked(&runq->lock);
+ assert(spinlock_locked(&runq->lock));
assert(thread->state == THREAD_RUNNING);
thread_runq_add(runq, thread);
@@ -636,7 +636,7 @@ thread_runq_schedule(struct thread_runq *runq)
&& (__builtin_frame_address(0) < (prev->stack + TCB_STACK_SIZE)));
assert(prev->preempt_level == THREAD_SUSPEND_PREEMPT_LEVEL);
assert(!cpu_intr_enabled());
- spinlock_assert_locked(&runq->lock);
+ assert(spinlock_locked(&runq->lock));
thread_clear_flag(prev, THREAD_YIELD);
thread_runq_put_prev(runq, prev);
@@ -687,7 +687,7 @@ thread_runq_schedule(struct thread_runq *runq)
assert(prev->preempt_level == THREAD_SUSPEND_PREEMPT_LEVEL);
assert(!cpu_intr_enabled());
- spinlock_assert_locked(&runq->lock);
+ assert(spinlock_locked(&runq->lock));
return runq;
}
@@ -2846,7 +2846,7 @@ thread_pi_setscheduler(struct thread *thread, unsigned char policy,
bool requeue, current;
td = thread_turnstile_td(thread);
- turnstile_td_assert_lock(td);
+ assert(turnstile_td_locked(td));
ops = thread_get_sched_ops(thread_policy_to_class(policy));
global_priority = ops->get_global_priority(priority);
diff --git a/kern/turnstile.c b/kern/turnstile.c
index f4a1892d..9d781c40 100644
--- a/kern/turnstile.c
+++ b/kern/turnstile.c
@@ -402,14 +402,14 @@ turnstile_td_propagate_priority(struct turnstile_td *td)
turnstile_td_propagate_priority_loop(td);
}
-static void
-turnstile_assert_init_state(const struct turnstile *turnstile)
+static bool
+turnstile_init_state_valid(const struct turnstile *turnstile)
{
- assert(turnstile->bucket == NULL);
- assert(turnstile->sync_obj == NULL);
- assert(plist_empty(&turnstile->waiters));
- assert(turnstile->next_free == NULL);
- assert(turnstile->top_waiter == NULL);
+ return (turnstile->bucket == NULL)
+ && (turnstile->sync_obj == NULL)
+ && (plist_empty(&turnstile->waiters))
+ && (turnstile->next_free == NULL)
+ && (turnstile->top_waiter == NULL);
}
static void
@@ -530,14 +530,14 @@ turnstile_create(void)
return NULL;
}
- turnstile_assert_init_state(turnstile);
+ assert(turnstile_init_state_valid(turnstile));
return turnstile;
}
void
turnstile_destroy(struct turnstile *turnstile)
{
- turnstile_assert_init_state(turnstile);
+ assert(turnstile_init_state_valid(turnstile));
kmem_cache_free(&turnstile_cache, turnstile);
}
@@ -604,7 +604,7 @@ turnstile_lend(const void *sync_obj)
assert(sync_obj != NULL);
turnstile = thread_turnstile_lend();
- turnstile_assert_init_state(turnstile);
+ assert(turnstile_init_state_valid(turnstile));
td = thread_turnstile_td(thread_self());
bucket = turnstile_bucket_get(sync_obj);
@@ -654,7 +654,7 @@ turnstile_return(struct turnstile *turnstile)
spinlock_unlock(&bucket->lock);
- turnstile_assert_init_state(free_turnstile);
+ assert(turnstile_init_state_valid(free_turnstile));
thread_turnstile_return(free_turnstile);
}
diff --git a/kern/turnstile.h b/kern/turnstile.h
index 6b59dd59..76b80301 100644
--- a/kern/turnstile.h
+++ b/kern/turnstile.h
@@ -43,7 +43,11 @@ struct turnstile;
*/
struct turnstile_td;
-#define turnstile_td_assert_lock(td) spinlock_assert_locked(&(td)->lock)
+static inline bool
+turnstile_td_locked(const struct turnstile_td *td)
+{
+ return spinlock_locked(&td->lock);
+}
/*
* Initialize turnstile thread data.