summary | refs | log | tree | commit | diff
path: root/kern/spinlock_i.h
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2017-04-09 16:02:36 +0200
committerRichard Braun <rbraun@sceen.net>2017-04-09 16:03:29 +0200
commit42b089048fc0d3e67fa10cb411767afa161c7222 (patch)
tree56338238b840f8dcdec3227f01aba62b974cf87d /kern/spinlock_i.h
parent239d8d2d4c14f917767c8c6d177aeb934df2c53a (diff)
kern/{mutex,rtmutex,spinlock}: optimize fast paths
Rework so that fast paths occupy the first indentation level, and use the unlikely macro on the relevant conditions.
Diffstat (limited to 'kern/spinlock_i.h')
-rw-r--r--kern/spinlock_i.h9
1 file changed, 5 insertions, 4 deletions
diff --git a/kern/spinlock_i.h b/kern/spinlock_i.h
index bf9bf2c..444ced9 100644
--- a/kern/spinlock_i.h
+++ b/kern/spinlock_i.h
@@ -24,6 +24,7 @@
#include <kern/assert.h>
#include <kern/atomic.h>
#include <kern/error.h>
+#include <kern/macros.h>
#include <kern/spinlock_types.h>
#include <machine/cpu.h>
@@ -42,7 +43,7 @@ spinlock_lock_fast(struct spinlock *lock)
prev = atomic_cas_seq_cst(&lock->value, SPINLOCK_UNLOCKED, SPINLOCK_LOCKED);
- if (prev != SPINLOCK_UNLOCKED) {
+ if (unlikely(prev != SPINLOCK_UNLOCKED)) {
return ERROR_BUSY;
}
@@ -56,7 +57,7 @@ spinlock_unlock_fast(struct spinlock *lock)
prev = atomic_cas_seq_cst(&lock->value, SPINLOCK_LOCKED, SPINLOCK_UNLOCKED);
- if (prev != SPINLOCK_LOCKED) {
+ if (unlikely(prev != SPINLOCK_LOCKED)) {
return ERROR_BUSY;
}
@@ -74,7 +75,7 @@ spinlock_lock_common(struct spinlock *lock)
error = spinlock_lock_fast(lock);
- if (error) {
+ if (unlikely(error)) {
spinlock_lock_slow(lock);
}
}
@@ -86,7 +87,7 @@ spinlock_unlock_common(struct spinlock *lock)
error = spinlock_unlock_fast(lock);
- if (error) {
+ if (unlikely(error)) {
spinlock_unlock_slow(lock);
}
}