-rw-r--r--  arch/x86/machine/pmap.c       | 34
-rw-r--r--  kern/condition.c              |  2
-rw-r--r--  kern/mutex.h                  |  7
-rw-r--r--  kern/mutex/mutex_adaptive_i.h | 12
-rw-r--r--  kern/mutex/mutex_pi_i.h       |  8
-rw-r--r--  kern/mutex/mutex_plain_i.h    | 14
-rw-r--r--  kern/rdxtree.c                | 19
-rw-r--r--  kern/rtmutex.h                | 11
-rw-r--r--  kern/rtmutex_i.h              | 11
-rw-r--r--  kern/sleepq.c                 | 22
-rw-r--r--  kern/spinlock.h               |  3
-rw-r--r--  kern/thread.c                 | 16
-rw-r--r--  kern/turnstile.c              | 22
-rw-r--r--  kern/turnstile.h              |  6
-rw-r--r--  vm/vm_map.c                   | 38
15 files changed, 128 insertions, 97 deletions
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index f77c6aa5..064b98ce 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -21,6 +21,7 @@
 #include <assert.h>
 #include <errno.h>
 #include <stdalign.h>
+#include <stdbool.h>
 #include <stddef.h>
 #include <stdint.h>
 #include <stdio.h>
@@ -503,22 +504,17 @@ pmap_ap_setup_paging(unsigned int ap_id)
 #endif /* CONFIG_X86_PAE */
 }
 
-/*
- * Check address range with regard to physical map.
- */
-#define pmap_assert_range(pmap, start, end)                 \
-MACRO_BEGIN                                                 \
-    assert((start) < (end));                                \
-    assert(((end) <= PMAP_START_DIRECTMAP_ADDRESS)          \
-           || ((start) >= PMAP_END_DIRECTMAP_ADDRESS));     \
-                                                            \
-    if ((pmap) == pmap_get_kernel_pmap()) {                 \
-        assert(((start) >= PMAP_START_KMEM_ADDRESS)         \
-               && ((end) <= PMAP_END_KMEM_ADDRESS));        \
-    } else {                                                \
-        assert((end) <= PMAP_END_ADDRESS);                  \
-    }                                                       \
-MACRO_END
+static bool
+pmap_range_valid(const struct pmap *pmap, uintptr_t start, uintptr_t end)
+{
+    return (start < end)
+           && ((end <= PMAP_START_DIRECTMAP_ADDRESS)
+               || (start >= PMAP_END_DIRECTMAP_ADDRESS))
+           && ((pmap == pmap_get_kernel_pmap())
+               ? ((start >= PMAP_START_KMEM_ADDRESS)
+                  && (end <= PMAP_END_KMEM_ADDRESS))
+               : (end <= PMAP_END_ADDRESS));
+}
 
 static inline pmap_pte_t *
 pmap_ptp_from_pa(phys_addr_t pa)
@@ -1223,7 +1219,7 @@ pmap_enter(struct pmap *pmap, uintptr_t va, phys_addr_t pa,
 
     va = vm_page_trunc(va);
     pa = vm_page_trunc(pa);
-    pmap_assert_range(pmap, va, va + PAGE_SIZE);
+    assert(pmap_range_valid(pmap, va, va + PAGE_SIZE));
 
     oplist = pmap_update_oplist_get();
     error = pmap_update_oplist_prepare(oplist, pmap);
@@ -1296,7 +1292,7 @@ pmap_remove(struct pmap *pmap, uintptr_t va, const struct cpumap *cpumap)
     int error;
 
     va = vm_page_trunc(va);
-    pmap_assert_range(pmap, va, va + PAGE_SIZE);
+    assert(pmap_range_valid(pmap, va, va + PAGE_SIZE));
 
     oplist = pmap_update_oplist_get();
     error = pmap_update_oplist_prepare(oplist, pmap);
@@ -1347,7 +1343,7 @@ pmap_protect(struct pmap *pmap, uintptr_t va, int prot,
     int error;
 
     va = vm_page_trunc(va);
-    pmap_assert_range(pmap, va, va + PAGE_SIZE);
+    assert(pmap_range_valid(pmap, va, va + PAGE_SIZE));
 
     oplist = pmap_update_oplist_get();
     error = pmap_update_oplist_prepare(oplist, pmap);
diff --git a/kern/condition.c b/kern/condition.c
index e2c26b62..0a8c78d7 100644
--- a/kern/condition.c
+++ b/kern/condition.c
@@ -35,7 +35,7 @@ condition_wait_common(struct condition *condition, struct mutex *mutex,
     struct sleepq *sleepq;
     int error;
 
-    mutex_assert_locked(mutex);
+    assert(mutex_locked(mutex));
 
     sleepq = sleepq_lend(condition, true);
diff --git a/kern/mutex.h b/kern/mutex.h
index f446f5fe..0e6cd46c 100644
--- a/kern/mutex.h
+++ b/kern/mutex.h
@@ -23,6 +23,7 @@
 #ifndef KERN_MUTEX_H
 #define KERN_MUTEX_H
 
+#include <stdbool.h>
 #include <stdint.h>
 
 #if defined(CONFIG_MUTEX_ADAPTIVE)
@@ -47,7 +48,11 @@ mutex_init(struct mutex *mutex)
     mutex_impl_init(mutex);
 }
 
-#define mutex_assert_locked(mutex) mutex_impl_assert_locked(mutex)
+static inline bool
+mutex_locked(const struct mutex *mutex)
+{
+    return mutex_impl_locked(mutex);
+}
 
 /*
  * Attempt to lock the given mutex.
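The conversion pattern above repeats throughout the commit: each
assert-style macro becomes a side-effect-free boolean predicate, and
callers wrap it in assert() themselves, so the check still compiles
away under NDEBUG. A minimal, self-contained sketch of the idea, using
C11 stdatomic and a hypothetical toy_mutex type in place of the
kernel's own atomic wrappers and mutex implementations:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct toy_mutex {
        atomic_uint state;              /* 0 means unlocked, as in mutex_plain */
    };

    /* Before: #define toy_mutex_assert_locked(m) assert((m)->state != 0)
     * After: a predicate with no side effects that callers assert(). */
    static inline bool
    toy_mutex_locked(struct toy_mutex *mutex)
    {
        /* A relaxed load suffices: the caller only asks whether the
         * mutex is currently held, no ordering is required. */
        return atomic_load_explicit(&mutex->state, memory_order_relaxed) != 0;
    }

    int main(void)
    {
        struct toy_mutex mutex;

        atomic_init(&mutex.state, 1);   /* pretend the lock is held */
        assert(toy_mutex_locked(&mutex));
        return 0;
    }

Unlike the kernel's atomic_load() wrapper, C11 atomic_load_explicit()
is not const-clean, so the sketch drops the const qualifier that the
kernel predicates keep on their parameter.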
diff --git a/kern/mutex/mutex_adaptive_i.h b/kern/mutex/mutex_adaptive_i.h
index e29fdb43..e171c9f1 100644
--- a/kern/mutex/mutex_adaptive_i.h
+++ b/kern/mutex/mutex_adaptive_i.h
@@ -25,6 +25,7 @@
 
 #include <assert.h>
 #include <errno.h>
+#include <stdbool.h>
 #include <stdint.h>
 
 #include <kern/atomic.h>
@@ -48,7 +49,14 @@ mutex_adaptive_init(struct mutex *mutex)
     mutex->owner = 0;
 }
 
-#define mutex_adaptive_assert_locked(mutex) assert((mutex)->owner != 0)
+static inline bool
+mutex_adaptive_locked(const struct mutex *mutex)
+{
+    uintptr_t owner;
+
+    owner = atomic_load(&mutex->owner, ATOMIC_RELAXED);
+    return (owner != 0);
+}
 
 static inline int
 mutex_adaptive_lock_fast(struct mutex *mutex)
@@ -89,7 +97,7 @@ void mutex_adaptive_unlock_slow(struct mutex *mutex);
  */
 
 #define mutex_impl_init             mutex_adaptive_init
-#define mutex_impl_assert_locked    mutex_adaptive_assert_locked
+#define mutex_impl_locked           mutex_adaptive_locked
 
 static inline int
 mutex_impl_trylock(struct mutex *mutex)
diff --git a/kern/mutex/mutex_pi_i.h b/kern/mutex/mutex_pi_i.h
index f3bb28fa..1b79c202 100644
--- a/kern/mutex/mutex_pi_i.h
+++ b/kern/mutex/mutex_pi_i.h
@@ -23,6 +23,7 @@
        " use <kern/mutex.h> instead"
 #endif
 
+#include <stdbool.h>
 #include <stdint.h>
 
 #include <kern/mutex_types.h>
@@ -38,8 +39,11 @@ mutex_impl_init(struct mutex *mutex)
     rtmutex_init(&mutex->rtmutex);
 }
 
-#define mutex_impl_assert_locked(mutex) \
-    rtmutex_assert_locked(&(mutex)->rtmutex)
+static inline bool
+mutex_impl_locked(const struct mutex *mutex)
+{
+    return rtmutex_locked(&mutex->rtmutex);
+}
 
 static inline int
 mutex_impl_trylock(struct mutex *mutex)
diff --git a/kern/mutex/mutex_plain_i.h b/kern/mutex/mutex_plain_i.h
index a7928295..d3ce2d30 100644
--- a/kern/mutex/mutex_plain_i.h
+++ b/kern/mutex/mutex_plain_i.h
@@ -25,12 +25,14 @@
 
 #include <assert.h>
 #include <errno.h>
+#include <stdbool.h>
 #include <stdint.h>
 
 #include <kern/atomic.h>
 #include <kern/init.h>
 #include <kern/mutex_types.h>
 
+/* TODO Fix namespace */
 #define MUTEX_UNLOCKED      0
 #define MUTEX_LOCKED        1
 #define MUTEX_CONTENDED     2
@@ -41,8 +43,14 @@ mutex_plain_init(struct mutex *mutex)
     mutex->state = MUTEX_UNLOCKED;
 }
 
-#define mutex_plain_assert_locked(mutex) \
-    assert((mutex)->state != MUTEX_UNLOCKED)
+static inline bool
+mutex_plain_locked(const struct mutex *mutex)
+{
+    unsigned int state;
+
+    state = atomic_load(&mutex->state, ATOMIC_RELAXED);
+    return (state != MUTEX_UNLOCKED);
+}
 
 static inline int
 mutex_plain_lock_fast(struct mutex *mutex)
@@ -82,7 +90,7 @@ void mutex_plain_unlock_slow(struct mutex *mutex);
  */
 
 #define mutex_impl_init             mutex_plain_init
-#define mutex_impl_assert_locked    mutex_plain_assert_locked
+#define mutex_impl_locked           mutex_plain_locked
 
 static inline int
 mutex_impl_trylock(struct mutex *mutex)
diff --git a/kern/rdxtree.c b/kern/rdxtree.c
index e7abaf02..9072ea12 100644
--- a/kern/rdxtree.c
+++ b/kern/rdxtree.c
@@ -109,12 +109,13 @@ struct rdxtree_node {
 
 static struct kmem_cache rdxtree_node_cache;
 
-static inline void
-rdxtree_assert_alignment(const void *ptr)
+#ifndef NDEBUG
+static bool
+rdxtree_alignment_valid(const void *ptr)
 {
-    assert(((uintptr_t)ptr & ~RDXTREE_ENTRY_ADDR_MASK) == 0);
-    (void)ptr;
+    return (((uintptr_t)ptr & ~RDXTREE_ENTRY_ADDR_MASK) == 0);
 }
+#endif /* NDEBUG */
 
 static inline void *
 rdxtree_entry_addr(void *entry)
@@ -156,7 +157,7 @@ rdxtree_node_create(struct rdxtree_node **nodep, unsigned short height)
         return ENOMEM;
     }
 
-    rdxtree_assert_alignment(node);
+    assert(rdxtree_alignment_valid(node));
     node->parent = NULL;
     node->height = height;
     *nodep = node;
@@ -464,7 +465,7 @@ rdxtree_insert_common(struct rdxtree *tree, rdxtree_key_t key,
     int error;
 
     assert(ptr != NULL);
-    rdxtree_assert_alignment(ptr);
+    assert(rdxtree_alignment_valid(ptr));
 
     if (unlikely(key > rdxtree_max_key(tree->height))) {
         error = rdxtree_grow(tree, key);
@@ -552,7 +553,7 @@ rdxtree_insert_alloc_common(struct rdxtree *tree, void *ptr,
 
     assert(rdxtree_key_alloc_enabled(tree));
     assert(ptr != NULL);
-    rdxtree_assert_alignment(ptr);
+    assert(rdxtree_alignment_valid(ptr));
 
     height = tree->height;
 
@@ -743,11 +744,11 @@ rdxtree_replace_slot(void **slot, void *ptr)
     void *old;
 
     assert(ptr != NULL);
-    rdxtree_assert_alignment(ptr);
+    assert(rdxtree_alignment_valid(ptr));
 
     old = *slot;
     assert(old != NULL);
-    rdxtree_assert_alignment(old);
+    assert(rdxtree_alignment_valid(old));
     rcu_store_ptr(*slot, ptr);
     return old;
 }
diff --git a/kern/rtmutex.h b/kern/rtmutex.h
index 64c09241..99b8ca74 100644
--- a/kern/rtmutex.h
+++ b/kern/rtmutex.h
@@ -26,8 +26,10 @@
 
 #include <assert.h>
 #include <errno.h>
+#include <stdbool.h>
 #include <stdint.h>
 
+#include <kern/atomic.h>
 #include <kern/init.h>
 #include <kern/macros.h>
 #include <kern/rtmutex_i.h>
@@ -35,7 +37,14 @@
 
 struct rtmutex;
 
-#define rtmutex_assert_locked(rtmutex) assert((rtmutex)->owner != 0)
+static inline bool
+rtmutex_locked(const struct rtmutex *rtmutex)
+{
+    uintptr_t owner;
+
+    owner = atomic_load(&rtmutex->owner, ATOMIC_RELAXED);
+    return (owner != 0);
+}
 
 /*
  * Initialize a real-time mutex.
diff --git a/kern/rtmutex_i.h b/kern/rtmutex_i.h
index 373c180f..83d48a77 100644
--- a/kern/rtmutex_i.h
+++ b/kern/rtmutex_i.h
@@ -47,8 +47,11 @@
 #define RTMUTEX_OWNER_MASK      (~((uintptr_t)(RTMUTEX_FORCE_WAIT \
                                                | RTMUTEX_CONTENDED)))
 
-#define rtmutex_assert_owner_aligned(owner) \
-    assert(((owner) & ~RTMUTEX_OWNER_MASK) == 0)
+static inline bool
+rtmutex_owner_aligned(uintptr_t owner)
+{
+    return (((owner) & ~RTMUTEX_OWNER_MASK) == 0);
+}
 
 static inline uintptr_t
 rtmutex_lock_fast(struct rtmutex *rtmutex)
@@ -56,7 +59,7 @@ rtmutex_lock_fast(struct rtmutex *rtmutex)
     uintptr_t owner;
 
     owner = (uintptr_t)thread_self();
-    rtmutex_assert_owner_aligned(owner);
+    assert(rtmutex_owner_aligned(owner));
     return atomic_cas(&rtmutex->owner, 0, owner, ATOMIC_ACQUIRE);
 }
 
@@ -66,7 +69,7 @@ rtmutex_unlock_fast(struct rtmutex *rtmutex)
     uintptr_t owner, prev_owner;
 
     owner = (uintptr_t)thread_self();
-    rtmutex_assert_owner_aligned(owner);
+    assert(rtmutex_owner_aligned(owner));
     prev_owner = atomic_cas(&rtmutex->owner, owner, 0, ATOMIC_RELEASE);
     assert((prev_owner & RTMUTEX_OWNER_MASK) == owner);
     return prev_owner;
diff --git a/kern/sleepq.c b/kern/sleepq.c
index df37a49d..ca80e07b 100644
--- a/kern/sleepq.c
+++ b/kern/sleepq.c
@@ -111,14 +111,14 @@ sleepq_waiter_wakeup(struct sleepq_waiter *waiter)
     thread_wakeup(waiter->thread);
 }
 
-static void
-sleepq_assert_init_state(const struct sleepq *sleepq)
+static bool
+sleepq_init_state_valid(const struct sleepq *sleepq)
 {
-    assert(sleepq->bucket == NULL);
-    assert(sleepq->sync_obj == NULL);
-    assert(list_empty(&sleepq->waiters));
-    assert(sleepq->oldest_waiter == NULL);
-    assert(sleepq->next_free == NULL);
+    return (sleepq->bucket == NULL)
+           && (sleepq->sync_obj == NULL)
+           && (list_empty(&sleepq->waiters))
+           && (sleepq->oldest_waiter == NULL)
+           && (sleepq->next_free == NULL);
 }
 
 static void
@@ -254,14 +254,14 @@ sleepq_create(void)
         return NULL;
     }
 
-    sleepq_assert_init_state(sleepq);
+    assert(sleepq_init_state_valid(sleepq));
     return sleepq;
 }
 
 void
 sleepq_destroy(struct sleepq *sleepq)
 {
-    sleepq_assert_init_state(sleepq);
+    assert(sleepq_init_state_valid(sleepq));
     kmem_cache_free(&sleepq_cache, sleepq);
 }
 
@@ -404,7 +404,7 @@ sleepq_lend_common(const void *sync_obj, bool condition, unsigned long *flags)
     assert(sync_obj != NULL);
 
     sleepq = thread_sleepq_lend();
-    sleepq_assert_init_state(sleepq);
+    assert(sleepq_init_state_valid(sleepq));
 
     bucket = sleepq_bucket_get(sync_obj, condition);
 
@@ -450,7 +450,7 @@ sleepq_return_common(struct sleepq *sleepq, unsigned long *flags)
         spinlock_unlock(&bucket->lock);
     }
 
-    sleepq_assert_init_state(free_sleepq);
+    assert(sleepq_init_state_valid(free_sleepq));
     thread_sleepq_return(free_sleepq);
 }
diff --git a/kern/spinlock.h b/kern/spinlock.h
index f9e74f56..3060aaa6 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -39,9 +39,6 @@
 
 struct spinlock;
 
-/* TODO Remove, let users do it instead */
-#define spinlock_assert_locked(lock) assert(spinlock_locked(lock))
-
 static inline bool
 spinlock_locked(const struct spinlock *lock)
 {
diff --git a/kern/thread.c b/kern/thread.c
index 6625163a..b2e360b9 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -485,7 +485,7 @@ thread_runq_add(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    spinlock_assert_locked(&runq->lock);
+    assert(spinlock_locked(&runq->lock));
     assert(!thread->in_runq);
 
     ops = thread_get_real_sched_ops(thread);
@@ -512,7 +512,7 @@ thread_runq_remove(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    spinlock_assert_locked(&runq->lock);
+    assert(spinlock_locked(&runq->lock));
    assert(thread->in_runq);
 
     runq->nr_threads--;
@@ -533,7 +533,7 @@ thread_runq_put_prev(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    spinlock_assert_locked(&runq->lock);
+    assert(spinlock_locked(&runq->lock));
 
     ops = thread_get_real_sched_ops(thread);
@@ -549,7 +549,7 @@ thread_runq_get_next(struct thread_runq *runq)
     unsigned int i;
 
     assert(!cpu_intr_enabled());
-    spinlock_assert_locked(&runq->lock);
+    assert(spinlock_locked(&runq->lock));
 
     for (i = 0; i < ARRAY_SIZE(thread_sched_ops); i++) {
         thread = thread_sched_ops[i].get_next(runq);
@@ -582,7 +582,7 @@ static void
 thread_runq_wakeup(struct thread_runq *runq, struct thread *thread)
 {
     assert(!cpu_intr_enabled());
-    spinlock_assert_locked(&runq->lock);
+    assert(spinlock_locked(&runq->lock));
     assert(thread->state == THREAD_RUNNING);
 
     thread_runq_add(runq, thread);
@@ -636,7 +636,7 @@ thread_runq_schedule(struct thread_runq *runq)
            && (__builtin_frame_address(0) < (prev->stack + TCB_STACK_SIZE)));
     assert(prev->preempt_level == THREAD_SUSPEND_PREEMPT_LEVEL);
     assert(!cpu_intr_enabled());
-    spinlock_assert_locked(&runq->lock);
+    assert(spinlock_locked(&runq->lock));
 
     thread_clear_flag(prev, THREAD_YIELD);
     thread_runq_put_prev(runq, prev);
@@ -687,7 +687,7 @@ thread_runq_schedule(struct thread_runq *runq)
 
     assert(prev->preempt_level == THREAD_SUSPEND_PREEMPT_LEVEL);
     assert(!cpu_intr_enabled());
-    spinlock_assert_locked(&runq->lock);
+    assert(spinlock_locked(&runq->lock));
 
     return runq;
 }
@@ -2846,7 +2846,7 @@ thread_pi_setscheduler(struct thread *thread, unsigned char policy,
     bool requeue, current;
 
     td = thread_turnstile_td(thread);
-    turnstile_td_assert_lock(td);
+    assert(turnstile_td_locked(td));
 
     ops = thread_get_sched_ops(thread_policy_to_class(policy));
     global_priority = ops->get_global_priority(priority);
diff --git a/kern/turnstile.c b/kern/turnstile.c
index f4a1892d..9d781c40 100644
--- a/kern/turnstile.c
+++ b/kern/turnstile.c
@@ -402,14 +402,14 @@ turnstile_td_propagate_priority(struct turnstile_td *td)
     turnstile_td_propagate_priority_loop(td);
 }
 
-static void
-turnstile_assert_init_state(const struct turnstile *turnstile)
+static bool
+turnstile_init_state_valid(const struct turnstile *turnstile)
 {
-    assert(turnstile->bucket == NULL);
-    assert(turnstile->sync_obj == NULL);
-    assert(plist_empty(&turnstile->waiters));
-    assert(turnstile->next_free == NULL);
-    assert(turnstile->top_waiter == NULL);
+    return (turnstile->bucket == NULL)
+           && (turnstile->sync_obj == NULL)
+           && (plist_empty(&turnstile->waiters))
+           && (turnstile->next_free == NULL)
+           && (turnstile->top_waiter == NULL);
 }
 
 static void
@@ -530,14 +530,14 @@ turnstile_create(void)
         return NULL;
     }
 
-    turnstile_assert_init_state(turnstile);
+    assert(turnstile_init_state_valid(turnstile));
     return turnstile;
 }
 
 void
 turnstile_destroy(struct turnstile *turnstile)
 {
-    turnstile_assert_init_state(turnstile);
+    assert(turnstile_init_state_valid(turnstile));
     kmem_cache_free(&turnstile_cache, turnstile);
 }
 
@@ -604,7 +604,7 @@ turnstile_lend(const void *sync_obj)
     assert(sync_obj != NULL);
 
     turnstile = thread_turnstile_lend();
-    turnstile_assert_init_state(turnstile);
+    assert(turnstile_init_state_valid(turnstile));
 
     td = thread_turnstile_td(thread_self());
     bucket = turnstile_bucket_get(sync_obj);
@@ -654,7 +654,7 @@ turnstile_return(struct turnstile *turnstile)
 
     spinlock_unlock(&bucket->lock);
 
-    turnstile_assert_init_state(free_turnstile);
+    assert(turnstile_init_state_valid(free_turnstile));
     thread_turnstile_return(free_turnstile);
 }
diff --git a/kern/turnstile.h b/kern/turnstile.h
index 6b59dd59..76b80301 100644
--- a/kern/turnstile.h
+++ b/kern/turnstile.h
@@ -43,7 +43,11 @@ struct turnstile;
  */
 struct turnstile_td;
 
-#define turnstile_td_assert_lock(td) spinlock_assert_locked(&(td)->lock)
+static inline bool
+turnstile_td_locked(const struct turnstile_td *td)
+{
+    return spinlock_locked(&td->lock);
+}
 
 /*
  * Initialize turnstile thread data.
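One detail worth noting from the rdxtree.c hunks earlier: a predicate
defined as a plain static function in a .c file is referenced only from
assert() calls, so a release build compiled with -DNDEBUG would warn
about an unused static function; the #ifndef NDEBUG guard removes the
predicate together with the assertions that call it. The static inline
predicates in the headers need no such guard, since unused inline
functions draw no warning. A small sketch of the idiom, with a
hypothetical alignment mask standing in for RDXTREE_ENTRY_ADDR_MASK:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define TOY_ENTRY_ADDR_MASK (~(uintptr_t)0x3)   /* hypothetical mask */

    #ifndef NDEBUG
    /* Only referenced from assert(), so guard it to keep -DNDEBUG
     * builds free of unused-function warnings. */
    static bool
    toy_alignment_valid(const void *ptr)
    {
        return ((uintptr_t)ptr & ~TOY_ENTRY_ADDR_MASK) == 0;
    }
    #endif /* NDEBUG */

    static void
    toy_insert(void *ptr)
    {
        assert(toy_alignment_valid(ptr));   /* disappears under NDEBUG */
        (void)ptr;                          /* insertion itself elided here */
    }

    int main(void)
    {
        int x;                              /* int alignment satisfies the toy mask */

        toy_insert(&x);
        return 0;
    }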
diff --git a/vm/vm_map.c b/vm/vm_map.c
index dac3bc02..03bc7165 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -21,6 +21,7 @@
 
 #include <assert.h>
 #include <errno.h>
+#include <stdbool.h>
 #include <stddef.h>
 #include <stdint.h>
 #include <stdio.h>
@@ -127,28 +128,23 @@ vm_map_entry_cmp_insert(const struct rbtree_node *a,
     return vm_map_entry_cmp_lookup(entry->start, b);
 }
 
-#ifndef NDEBUG
-static void
-vm_map_request_assert_valid(const struct vm_map_request *request)
+static bool
+vm_map_request_valid(const struct vm_map_request *request)
 {
-    assert((request->object != NULL) || (request->offset == 0));
-    assert(vm_page_aligned(request->offset));
-    assert(vm_page_aligned(request->start));
-    assert(request->size > 0);
-    assert(vm_page_aligned(request->size));
-    assert((request->start + request->size) > request->start);
-    assert((request->align == 0) || (request->align >= PAGE_SIZE));
-    assert(ISP2(request->align));
-
-    assert((VM_MAP_PROT(request->flags) & VM_MAP_MAXPROT(request->flags))
-           == VM_MAP_PROT(request->flags));
-    assert(!(request->flags & VM_MAP_FIXED)
-           || (request->align == 0)
-           || P2ALIGNED(request->start, request->align));
+    return ((request->object != NULL) || (request->offset == 0))
+           && (vm_page_aligned(request->offset))
+           && (vm_page_aligned(request->start))
+           && (request->size > 0)
+           && (vm_page_aligned(request->size))
+           && ((request->start + request->size) > request->start)
+           && ((request->align == 0) || (request->align >= PAGE_SIZE))
+           && (ISP2(request->align))
+           && ((VM_MAP_PROT(request->flags) & VM_MAP_MAXPROT(request->flags))
+               == VM_MAP_PROT(request->flags))
+           && (!(request->flags & VM_MAP_FIXED)
+               || (request->align == 0)
+               || P2ALIGNED(request->start, request->align));
 }
-#else /* NDEBUG */
-#define vm_map_request_assert_valid(request)
-#endif /* NDEBUG */
 
 /*
  * Look up an entry in a map.
@@ -376,7 +372,7 @@ vm_map_prepare(struct vm_map *map, uintptr_t start,
     request->flags = flags;
     request->object = object;
     request->offset = offset;
-    vm_map_request_assert_valid(request);
+    assert(vm_map_request_valid(request));
 
     if (flags & VM_MAP_FIXED) {
         error = vm_map_find_fixed(map, request);
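A side benefit of the vm_map.c change, though this commit does not
exercise it: because vm_map_request_valid() now returns a value instead
of asserting clause by clause, the same predicate could back a runtime
error path as well as the debug assertion. A hedged sketch under that
assumption; toy_request, its fields, and both toy_map_* functions are
hypothetical stand-ins:

    #include <assert.h>
    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define TOY_PAGE_SIZE 4096u

    struct toy_request {
        uintptr_t start;
        size_t size;
    };

    static bool
    toy_request_valid(const struct toy_request *request)
    {
        return ((request->start % TOY_PAGE_SIZE) == 0)
               && (request->size > 0)
               && ((request->size % TOY_PAGE_SIZE) == 0)
               && ((request->start + request->size) > request->start);
    }

    /* Debug-only validation, as in vm_map_prepare() after this change. */
    static int
    toy_map_prepare(const struct toy_request *request)
    {
        assert(toy_request_valid(request));
        return 0;
    }

    /* The same predicate driving a runtime error return. */
    static int
    toy_map_enter_user(const struct toy_request *request)
    {
        if (!toy_request_valid(request)) {
            return EINVAL;
        }

        return 0;
    }

    int main(void)
    {
        struct toy_request req = { .start = TOY_PAGE_SIZE, .size = TOY_PAGE_SIZE };

        if (toy_map_prepare(&req) != 0 || toy_map_enter_user(&req) != 0) {
            return 1;
        }

        return 0;
    }

The tradeoff is diagnostic granularity: a failing assert now reports
the whole conjunction, where the old per-clause asserts pinpointed the
exact condition that broke.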