path: root/kern/spinlock_i.h
author    Richard Braun <rbraun@sceen.net>    2018-03-02 23:23:30 +0100
committer Richard Braun <rbraun@sceen.net>    2018-03-02 23:29:28 +0100
commit    07c7d5d45c9bf7f95a027ed47a453cc6cb16b304 (patch)
tree      46b3d76b3ecff1d99890f6ae352ffba240c5bc22 /kern/spinlock_i.h
parent    43e07ea6df7f09b0a0853e3b9c55780aecaea393 (diff)
kern/spinlock: fix and optimize
Making the unlock operation block allows tricky deadlocks to occur when a thread is interrupted right before announcing itself as the first waiter in the queue. Since locking, unlike unlocking, is expected to block, the spinlock implementation is reworked to move the hand-off performed by the unlock operation into the lock operation. As a side effect, the common case of a single waiter is also optimized.
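To illustrate the shape of the reworked fast paths, here is a minimal standalone sketch using C11 atomics instead of the kernel's own atomic wrappers (atomic_cas_acquire, atomic_and); the wait queue and the lock-side hand-off performed by spinlock_lock_slow() are elided, so the names below only mirror the diff and are not the actual implementation.

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

#define SPINLOCK_UNLOCKED 0x0
#define SPINLOCK_LOCKED   0x1

struct spinlock {
    _Atomic uint32_t value;
};

/*
 * Fast path: a single CAS from UNLOCKED to LOCKED with acquire
 * semantics. Any other observed value means the lock is contended
 * and the caller must fall back to the (elided) queueing slow path.
 */
static inline int
spinlock_lock_fast(struct spinlock *lock)
{
    uint32_t prev = SPINLOCK_UNLOCKED;

    if (atomic_compare_exchange_strong_explicit(&lock->value, &prev,
            SPINLOCK_LOCKED, memory_order_acquire, memory_order_relaxed)) {
        return 0;
    }

    return EBUSY;
}

/*
 * Unlocking never blocks: clearing the locked bit with release
 * semantics is enough, since the hand-off to the next waiter now
 * happens on the lock side, where waiters are already spinning.
 */
static inline void
spinlock_unlock(struct spinlock *lock)
{
    atomic_fetch_and_explicit(&lock->value, ~(uint32_t)SPINLOCK_LOCKED,
                              memory_order_release);
}

Because the unlock path never waits on another thread, the deadlock window described above, where an interrupt lands before a waiter announces itself, can no longer stall the owner.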
Diffstat (limited to 'kern/spinlock_i.h')
-rw-r--r--  kern/spinlock_i.h  38
1 file changed, 8 insertions(+), 30 deletions(-)
diff --git a/kern/spinlock_i.h b/kern/spinlock_i.h
index cd1bd368..da1233b4 100644
--- a/kern/spinlock_i.h
+++ b/kern/spinlock_i.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 Richard Braun.
+ * Copyright (c) 2012-2018 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -30,12 +30,12 @@
#include <machine/cpu.h>
/*
- * Non-contended lock values.
+ * Uncontended lock values.
 *
- * Any other lock value implies a contended lock.
+ * Any other value implies a contended lock.
 */
-#define SPINLOCK_UNLOCKED   0
-#define SPINLOCK_LOCKED     1
+#define SPINLOCK_UNLOCKED   0x0
+#define SPINLOCK_LOCKED     0x1
#ifdef SPINLOCK_TRACK_OWNER
@@ -63,7 +63,7 @@ spinlock_disown(struct spinlock *lock)
static inline int
spinlock_lock_fast(struct spinlock *lock)
{
-    unsigned int prev;
+    uint32_t prev;

    prev = atomic_cas_acquire(&lock->value, SPINLOCK_UNLOCKED, SPINLOCK_LOCKED);
@@ -75,25 +75,8 @@ spinlock_lock_fast(struct spinlock *lock)
    return 0;
}
-static inline int
-spinlock_unlock_fast(struct spinlock *lock)
-{
-    unsigned int prev;
-
-    spinlock_disown(lock);
-    prev = atomic_cas_release(&lock->value, SPINLOCK_LOCKED, SPINLOCK_UNLOCKED);
-
-    if (unlikely(prev != SPINLOCK_LOCKED)) {
-        return EBUSY;
-    }
-
-    return 0;
-}
-
void spinlock_lock_slow(struct spinlock *lock);
-void spinlock_unlock_slow(struct spinlock *lock);
-
static inline void
spinlock_lock_common(struct spinlock *lock)
{
@@ -109,13 +92,8 @@ spinlock_lock_common(struct spinlock *lock)
static inline void
spinlock_unlock_common(struct spinlock *lock)
{
-    int error;
-
-    error = spinlock_unlock_fast(lock);
-
-    if (unlikely(error)) {
-        spinlock_unlock_slow(lock);
-    }
+    spinlock_disown(lock);
+    atomic_and(&lock->value, ~SPINLOCK_LOCKED, ATOMIC_RELEASE);
}
#endif /* KERN_SPINLOCK_I_H */