authorneal <neal>2008-01-22 22:08:45 +0000
committerneal <neal>2008-01-22 22:08:45 +0000
commita2c37e56a90570a5c28b694ffa358e040537764f (patch)
treef8a920072aab0b9fe6458e601c5f082d472ecd13 /hurd/mutex.h
parente0e1046a2a54fab8626b7721483899b30fa34929 (diff)
2008-01-22 Neal H. Walfield <neal@gnu.org>
	* mutex.h [RM_INTERN]: Raise an error.
	[! __hurd_mutex_have_type]: Only define ss_mutex_t in this case.
	Define __hurd_mutex_have_type.  Change ss_mutex_t to an int.
	[__need_ss_mutex_t]: Undefine __need_ss_mutex_t.  Don't make the
	rest of the file available.
	[! __need_ss_mutex_t]: Include <hurd/futex.h>.
	(_MUTEX_UNLOCKED): New define.
	(_MUTEX_LOCKED): Likewise.
	(_MUTEX_WAITERS): Likewise.
	(ss_mutex_lock): Implement in terms of futexes.
	(ss_mutex_unlock): Likewise.
	(ss_mutex_trylock): Likewise.
	* lock.h (SS_RMUTEX_LOCK): Define.
	(SS_RMUTEX_LOCK_INC): Likewise.
	(SS_RMUTEX_LOCK_WAIT): Likewise.
	(SS_RMUTEX_UNLOCK): Likewise.
	(SS_RMUTEX_UNLOCK_DEC): Likewise.
	(SS_RMUTEX_TRYLOCK): Likewise.
	(SS_RMUTEX_TRYLOCK_INC): Likewise.
	(SS_RMUTEX_TRYLOCK_BLOCKED): Likewise.
	(ss_lock_trace_dump): Handle the above new cases.
	* rmutex.h: New file.
	* Makefile.am (includehurd_HEADERS): Add rmutex.h.
	* headers.m4: Link $(BUILDIR)/include/hurd/rmutex.h to rmutex.h.
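The mutex.h change replaces the L4 thread-id based lock with the classic three-state futex mutex: 0 means unlocked, 1 means locked with no waiters, 2 means locked with possible waiters. Below is a rough standalone sketch of that protocol for reference only. It uses GCC __atomic builtins and the raw Linux futex syscall as stand-ins for the Hurd's <atomic.h> and <hurd/futex.h> wrappers, and the toy_* names are illustrative, not part of the patch.

/* Standalone sketch of the 0/1/2 futex mutex protocol; the real code
   uses the Hurd's <atomic.h> and <hurd/futex.h> instead of these.  */
#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdbool.h>

#define UNLOCKED 0   /* Nobody holds the lock.  */
#define LOCKED   1   /* Held, no waiters.  */
#define WAITERS  2   /* Held, and there may be sleepers.  */

typedef int toy_mutex_t;

static long
toy_futex (int *uaddr, int op, int val)
{
  return syscall (SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void
toy_mutex_lock (toy_mutex_t *m)
{
  int c = UNLOCKED;
  /* Fast path: 0 -> 1.  */
  if (__atomic_compare_exchange_n (m, &c, LOCKED, false,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return;
  /* Slow path: mark the lock contended and sleep until we get it.  */
  do
    {
      /* Announce that there are waiters; if the lock happened to be
         free, we just took it (as WAITERS, conservative but correct).  */
      c = __atomic_exchange_n (m, WAITERS, __ATOMIC_ACQUIRE);
      if (c != UNLOCKED)
        toy_futex (m, FUTEX_WAIT, WAITERS);
    }
  while (c != UNLOCKED);
}

static void
toy_mutex_unlock (toy_mutex_t *m)
{
  /* If the old value was 1, nobody was waiting; otherwise it was 2
     and someone may be asleep on the futex, so wake one waiter.  */
  if (__atomic_fetch_sub (m, 1, __ATOMIC_RELEASE) != LOCKED)
    {
      __atomic_store_n (m, UNLOCKED, __ATOMIC_RELEASE);
      toy_futex (m, FUTEX_WAKE, 1);
    }
}

The unlock path relies on the same trick the patch documents: decrementing a lock that held 1 yields 0 and proves there were no waiters, while any other old value means a waiter may be blocked on the futex and must be woken.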
Diffstat (limited to 'hurd/mutex.h')
-rw-r--r--  hurd/mutex.h | 102
1 file changed, 63 insertions(+), 39 deletions(-)
diff --git a/hurd/mutex.h b/hurd/mutex.h
index ce41e01..3e21eee 100644
--- a/hurd/mutex.h
+++ b/hurd/mutex.h
@@ -18,6 +18,21 @@
License along with GNU Hurd. If not, see
<http://www.gnu.org/licenses/>. */
+#ifdef RM_INTERN
+# error "This implementation is not appropriate for the kernel."
+#endif
+
+#ifndef __hurd_mutex_have_type
+#define __hurd_mutex_have_type
+typedef int ss_mutex_t;
+#endif
+
+/* If __need_ss_mutex_t is defined, then we only export the type
+ definition. */
+#ifdef __need_ss_mutex_t
+# undef __need_ss_mutex_t
+#else
+
#ifndef _HURD_MUTEX_H
#define _HURD_MUTEX_H
@@ -25,31 +40,40 @@
#include <atomic.h>
#include <assert.h>
#include <hurd/lock.h>
+#include <hurd/futex.h>
+
+/* Unlocked. */
+#define _MUTEX_UNLOCKED 0
+/* The lock is held. */
+#define _MUTEX_LOCKED 1
+/* The lock is held and there are waiters. */
+#define _MUTEX_WAITERS 2
-typedef l4_thread_id_t ss_mutex_t;
static inline void
-ss_mutex_lock (__const char *caller, int line, ss_mutex_t *lock)
+ss_mutex_lock (__const char *caller, int line, ss_mutex_t *lockp)
{
- l4_thread_id_t owner;
-
- for (;;)
+ int c;
+ c = atomic_compare_and_exchange_val_acq (lockp, _MUTEX_LOCKED,
+ _MUTEX_UNLOCKED);
+ if (c != _MUTEX_UNLOCKED)
+ /* Someone else owns the lock. */
{
- owner = atomic_exchange_acq (lock, l4_myself ());
- if (owner == l4_nilthread)
- {
- ss_mutex_trace_add (SS_MUTEX_LOCK, caller, line, lock);
- return;
- }
-
- ss_mutex_trace_add (SS_MUTEX_LOCK_WAIT, caller, line, lock);
+ ss_mutex_trace_add (SS_MUTEX_LOCK_WAIT, caller, line, lockp);
- if (owner == l4_myself ())
- ss_lock_trace_dump (lock);
- assert (owner != l4_myself ());
+ if (c != _MUTEX_WAITERS)
+ /* Note that there are waiters. */
+ c = atomic_exchange_acq (lockp, _MUTEX_WAITERS);
- __ss_lock_wait (owner);
+ /* Try to sleep but only if LOCKP is _MUTEX_WAITERS. */
+ while (c != _MUTEX_UNLOCKED)
+ {
+ futex_wait (lockp, _MUTEX_WAITERS);
+ c = atomic_exchange_acq (lockp, _MUTEX_WAITERS);
+ }
}
+
+ ss_mutex_trace_add (SS_MUTEX_LOCK, caller, line, lockp);
}
#define ss_mutex_lock(__sml_lockp) \
@@ -61,22 +85,19 @@ ss_mutex_lock (__const char *caller, int line, ss_mutex_t *lock)
while (0)
static inline void
-ss_mutex_unlock (__const char *caller, int line, ss_mutex_t *lock)
+ss_mutex_unlock (__const char *caller, int line, ss_mutex_t *lockp)
{
- l4_thread_id_t waiter;
-
- waiter = atomic_exchange_acq (lock, l4_nilthread);
- ss_mutex_trace_add (SS_MUTEX_UNLOCK, caller, line, lock);
- if (waiter == l4_myself ())
- /* No waiter. */
- return;
-
- if (waiter == l4_nilthread)
- ss_lock_trace_dump (lock);
- assert (waiter != l4_nilthread);
+ /* We rely on the knowledge that unlocked is 0, locked and no
+ waiters is 1 and locked with waiters is 2. Thus if *lockp is 1,
+ an atomic dec yields 0 and we know that there are no waiters. */
+ if (! atomic_decrement_and_test (lockp))
+ /* There are waiters. */
+ {
+ *lockp = 0;
+ futex_wake (lockp, 1);
+ }
- /* Signal the waiter. */
- __ss_lock_wakeup (waiter);
+ ss_mutex_trace_add (SS_MUTEX_UNLOCK, caller, line, lockp);
}
#define ss_mutex_unlock(__smu_lockp) \
@@ -88,19 +109,19 @@ ss_mutex_unlock (__const char *caller, int line, ss_mutex_t *lock)
while (0)
static inline bool
-ss_mutex_trylock (__const char *caller, int line, ss_mutex_t *lock)
+ss_mutex_trylock (__const char *caller, int line, ss_mutex_t *lockp)
{
- l4_thread_id_t owner;
-
- owner = atomic_compare_and_exchange_val_acq (lock, l4_myself (),
- l4_nilthread);
- if (owner == l4_nilthread)
+ int c;
+ c = atomic_compare_and_exchange_val_acq (lockp, _MUTEX_LOCKED,
+ _MUTEX_UNLOCKED);
+ if (c == _MUTEX_UNLOCKED)
+ /* Got the lock. */
{
- ss_mutex_trace_add (SS_MUTEX_TRYLOCK, caller, line, lock);
+ ss_mutex_trace_add (SS_MUTEX_TRYLOCK, caller, line, lockp);
return true;
}
- ss_mutex_trace_add (SS_MUTEX_TRYLOCK_BLOCKED, caller, line, lock);
+ // ss_mutex_trace_add (SS_MUTEX_TRYLOCK_BLOCKED, caller, line, lockp);
return false;
}
@@ -114,3 +135,6 @@ ss_mutex_trylock (__const char *caller, int line, ss_mutex_t *lock)
})
#endif
+
+#endif /* __need_ss_mutex_t */
+
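For completeness, here is a hypothetical caller of the new interface; it is not part of the patch. A statically allocated ss_mutex_t is zero-initialized, i.e. _MUTEX_UNLOCKED, and the ss_mutex_lock, ss_mutex_unlock, and ss_mutex_trylock convenience macros defined in this header supply the caller and line arguments used for lock tracing.

/* Hypothetical user of the futex-based ss_mutex_t (illustrative only).  */
#include <hurd/mutex.h>

static ss_mutex_t counter_lock;   /* Zero-initialized: _MUTEX_UNLOCKED.  */
static int counter;

void
counter_increment (void)
{
  ss_mutex_lock (&counter_lock);
  counter++;
  ss_mutex_unlock (&counter_lock);
}

int
counter_try_increment (void)
{
  /* ss_mutex_trylock returns true if the lock was acquired.  */
  if (! ss_mutex_trylock (&counter_lock))
    return 0;
  counter++;
  ss_mutex_unlock (&counter_lock);
  return 1;
}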