diff options
-rw-r--r--  kern/mutex.h      | 19
-rw-r--r--  kern/rtmutex.h    | 15
-rw-r--r--  kern/spinlock.h   |  5
-rw-r--r--  kern/spinlock_i.h |  9
4 files changed, 24 insertions, 24 deletions
diff --git a/kern/mutex.h b/kern/mutex.h index f9fcd35..1100b77 100644 --- a/kern/mutex.h +++ b/kern/mutex.h @@ -70,6 +70,7 @@ mutex_unlock(struct mutex *mutex) #include <kern/assert.h> #include <kern/error.h> +#include <kern/macros.h> #include <kern/mutex_i.h> #include <kern/thread.h> @@ -100,11 +101,12 @@ mutex_trylock(struct mutex *mutex) state = mutex_lock_fast(mutex); - if (state == MUTEX_UNLOCKED) { - return 0; + if (unlikely(state != MUTEX_UNLOCKED)) { + assert((state == MUTEX_LOCKED) || (state == MUTEX_CONTENDED)); + return ERROR_BUSY; } - return ERROR_BUSY; + return 0; } /* @@ -122,13 +124,10 @@ mutex_lock(struct mutex *mutex) state = mutex_lock_fast(mutex); - if (state == MUTEX_UNLOCKED) { - return; + if (unlikely(state != MUTEX_UNLOCKED)) { + assert((state == MUTEX_LOCKED) || (state == MUTEX_CONTENDED)); + mutex_lock_slow(mutex); } - - assert((state == MUTEX_LOCKED) || (state == MUTEX_CONTENDED)); - - mutex_lock_slow(mutex); } /* @@ -144,7 +143,7 @@ mutex_unlock(struct mutex *mutex) state = mutex_unlock_fast(mutex); - if (state != MUTEX_LOCKED) { + if (unlikely(state != MUTEX_LOCKED)) { assert(state == MUTEX_CONTENDED); mutex_unlock_slow(mutex); } diff --git a/kern/rtmutex.h b/kern/rtmutex.h index 86c3793..e5cb5e5 100644 --- a/kern/rtmutex.h +++ b/kern/rtmutex.h @@ -28,6 +28,7 @@ #include <kern/assert.h> #include <kern/error.h> +#include <kern/macros.h> #include <kern/rtmutex_i.h> #include <kern/rtmutex_types.h> @@ -58,11 +59,11 @@ rtmutex_trylock(struct rtmutex *rtmutex) prev_owner = rtmutex_lock_fast(rtmutex); - if (prev_owner == 0) { - return 0; + if (unlikely(prev_owner != 0)) { + return ERROR_BUSY; } - return ERROR_BUSY; + return 0; } /* @@ -81,11 +82,9 @@ rtmutex_lock(struct rtmutex *rtmutex) prev_owner = rtmutex_lock_fast(rtmutex); - if (prev_owner == 0) { - return; + if (unlikely(prev_owner != 0)) { + rtmutex_lock_slow(rtmutex); } - - rtmutex_lock_slow(rtmutex); } /* @@ -101,7 +100,7 @@ rtmutex_unlock(struct rtmutex *rtmutex) prev_owner = 
rtmutex_unlock_fast(rtmutex); - if (!(prev_owner & RTMUTEX_CONTENDED)) { + if (likely(!(prev_owner & RTMUTEX_CONTENDED))) { return; } diff --git a/kern/spinlock.h b/kern/spinlock.h index 6297492..4dc4c83 100644 --- a/kern/spinlock.h +++ b/kern/spinlock.h @@ -26,6 +26,7 @@ #ifndef _KERN_SPINLOCK_H #define _KERN_SPINLOCK_H +#include <kern/macros.h> #include <kern/spinlock_i.h> #include <kern/spinlock_types.h> #include <kern/thread.h> @@ -55,7 +56,7 @@ spinlock_trylock(struct spinlock *lock) thread_preempt_disable(); error = spinlock_lock_fast(lock); - if (error) { + if (unlikely(error)) { thread_preempt_enable(); } @@ -117,7 +118,7 @@ spinlock_trylock_intr_save(struct spinlock *lock, unsigned long *flags) cpu_intr_save(flags); error = spinlock_lock_fast(lock); - if (error) { + if (unlikely(error)) { cpu_intr_restore(*flags); thread_preempt_enable(); } diff --git a/kern/spinlock_i.h b/kern/spinlock_i.h index bf9bf2c..444ced9 100644 --- a/kern/spinlock_i.h +++ b/kern/spinlock_i.h @@ -24,6 +24,7 @@ #include <kern/assert.h> #include <kern/atomic.h> #include <kern/error.h> +#include <kern/macros.h> #include <kern/spinlock_types.h> #include <machine/cpu.h> @@ -42,7 +43,7 @@ spinlock_lock_fast(struct spinlock *lock) prev = atomic_cas_seq_cst(&lock->value, SPINLOCK_UNLOCKED, SPINLOCK_LOCKED); - if (prev != SPINLOCK_UNLOCKED) { + if (unlikely(prev != SPINLOCK_UNLOCKED)) { return ERROR_BUSY; } @@ -56,7 +57,7 @@ spinlock_unlock_fast(struct spinlock *lock) prev = atomic_cas_seq_cst(&lock->value, SPINLOCK_LOCKED, SPINLOCK_UNLOCKED); - if (prev != SPINLOCK_LOCKED) { + if (unlikely(prev != SPINLOCK_LOCKED)) { return ERROR_BUSY; } @@ -74,7 +75,7 @@ spinlock_lock_common(struct spinlock *lock) error = spinlock_lock_fast(lock); - if (error) { + if (unlikely(error)) { spinlock_lock_slow(lock); } } @@ -86,7 +87,7 @@ spinlock_unlock_common(struct spinlock *lock) error = spinlock_unlock_fast(lock); - if (error) { + if (unlikely(error)) { spinlock_unlock_slow(lock); } }