author     Neal H. Walfield <neal@gnu.org>    2008-12-04 21:15:36 +0100
committer  Neal H. Walfield <neal@gnu.org>    2008-12-04 21:15:36 +0100
commit     5a55be63d459f1520f8013c826f5987f55acad4e (patch)
tree       526f67669297f58013861ce77f59b10982bf1442 /hurd
parent     737a7da61c15cd3f532c73cc728b4f0a1d823a77 (diff)
Change <hurd/mutex.h> to use gcc's __sync functions.
2008-12-04  Neal H. Walfield  <neal@gnu.org>

        * mutex.h: Don't include <atomic.h>.
        (ss_mutex_lock): Rewrite to use gcc's __sync functions.  Add
        asserts.
        (ss_mutex_unlock): Likewise.
        (ss_mutex_trylock): Likewise.
Diffstat (limited to 'hurd')
-rw-r--r--   hurd/ChangeLog |  8
-rw-r--r--   hurd/mutex.h   | 77
2 files changed, 56 insertions(+), 29 deletions(-)
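Before the diff, a short orientation note: the patch drops the libc-internal atomic_* calls in favour of gcc's __sync builtins, which the compiler provides directly.  __sync_val_compare_and_swap (p, oldval, newval) atomically stores newval into *p if *p currently equals oldval, and in all cases returns the value *p held beforehand.  The sketch below illustrates how the patched ss_mutex_lock uses this for a three-state futex mutex (0 = unlocked, 1 = locked, 2 = locked with waiters, the encoding described by the comment in mutex.h).  The sketch_* and SKETCH_* names and the extern futex declarations are placeholders for this illustration only, not the real <hurd/mutex.h> or <hurd/futex.h> interfaces, and the tracing and deadlock diagnostics are omitted.

/* A minimal sketch, not the patched header itself.  */
#define SKETCH_UNLOCKED 0   /* Free.  */
#define SKETCH_LOCKED   1   /* Held, no waiters.  */
#define SKETCH_WAITERS  2   /* Held, someone may be asleep.  */

typedef int sketch_mutex_t;

/* Placeholders standing in for <hurd/futex.h>: futex_wait blocks the
   caller as long as *ADDR == VAL; futex_wake wakes up to COUNT
   threads sleeping on ADDR.  */
extern long futex_wait (int *addr, int val);
extern long futex_wake (int *addr, int count);

static inline void
sketch_mutex_lock (sketch_mutex_t *lockp)
{
  for (;;)
    {
      int c = *lockp;
      if (c == SKETCH_UNLOCKED)
        /* Looks free: try to take it.  __sync_val_compare_and_swap
           returns the value *LOCKP held before the operation.  */
        c = __sync_val_compare_and_swap (lockp, SKETCH_UNLOCKED,
                                         SKETCH_LOCKED);

      if (c == SKETCH_UNLOCKED)
        /* The swap succeeded; we own the lock.  */
        return;

      if (c == SKETCH_LOCKED)
        /* Advertise that there is now a waiter.  If the holder
           released the lock in the meantime, retry from the top.  */
        {
          c = __sync_val_compare_and_swap (lockp, SKETCH_LOCKED,
                                           SKETCH_WAITERS);
          if (c == SKETCH_UNLOCKED)
            continue;
        }

      /* Sleep, but only if the lock is still in the WAITERS state;
         otherwise futex_wait returns immediately and we loop.  */
      futex_wait (lockp, SKETCH_WAITERS);
    }
}

The __sync builtins act as full memory barriers, which is at least as strong as the acquire semantics of the atomic_*_acq calls they replace.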
diff --git a/hurd/ChangeLog b/hurd/ChangeLog
index 312aac4..5af636a 100644
--- a/hurd/ChangeLog
+++ b/hurd/ChangeLog
@@ -1,3 +1,11 @@
+2008-12-04  Neal H. Walfield  <neal@gnu.org>
+
+        * mutex.h: Don't include <atomic.h>.
+        (ss_mutex_lock): Rewrite to use gcc's __sync functions.  Add
+        asserts.
+        (ss_mutex_unlock): Likewise.
+        (ss_mutex_trylock): Likewise.
+
 2008-11-18  Neal H. Walfield  <neal@gnu.org>
 
         * cap.h (cap_copy_x): Correctly detect type mismatches.
diff --git a/hurd/mutex.h b/hurd/mutex.h
index d86e04e..cded557 100644
--- a/hurd/mutex.h
+++ b/hurd/mutex.h
@@ -37,7 +37,6 @@ typedef int ss_mutex_t;
 #define _HURD_MUTEX_H
 
 #include <l4/thread.h>
-#include <atomic.h>
 #include <assert.h>
 #include <hurd/lock.h>
 #include <hurd/futex.h>
@@ -53,37 +52,49 @@ typedef int ss_mutex_t;
 static inline void
 ss_mutex_lock (__const char *caller, int line, ss_mutex_t *lockp)
 {
-  int c;
-  c = atomic_compare_and_exchange_val_acq (lockp, _MUTEX_LOCKED,
-                                           _MUTEX_UNLOCKED);
-  if (c != _MUTEX_UNLOCKED)
-    /* Someone else owns the lock.  */
+  for (;;)
     {
+      assert (*lockp == _MUTEX_UNLOCKED || *lockp == _MUTEX_LOCKED
+              || *lockp == _MUTEX_WAITERS);
+
+      int c = *lockp;
+      if (c == _MUTEX_UNLOCKED)
+        /* It looks free.  Try to acquire it.  */
+        c = __sync_val_compare_and_swap (lockp, _MUTEX_UNLOCKED, _MUTEX_LOCKED);
+
+      if (c == _MUTEX_UNLOCKED)
+        /* We got the lock!  */
+        {
+          ss_mutex_trace_add (SS_MUTEX_LOCK, caller, line, lockp);
+          return;
+        }
+
+      /* Someone else owns the lock.  */
       ss_mutex_trace_add (SS_MUTEX_LOCK_WAIT, caller, line, lockp);
 
-      if (c != _MUTEX_WAITERS)
+      if (c == _MUTEX_LOCKED)
         /* Note that there are waiters.  */
-        c = atomic_exchange_acq (lockp, _MUTEX_WAITERS);
+        {
+          c = __sync_val_compare_and_swap (lockp,
+                                           _MUTEX_LOCKED, _MUTEX_WAITERS);
+          if (c == _MUTEX_UNLOCKED)
+            /* It's suddenly free.  */
+            continue;
+        }
 
-      /* Try to sleep but only if LOCKP is _MUTEX_WAITERS.  */
-      while (c != _MUTEX_UNLOCKED)
+      /* Try to sleep--but only if LOCKP is _MUTEX_WAITERS.  */
+      if (futex_wait (lockp, _MUTEX_WAITERS) == -1 && errno == EDEADLK)
         {
-          if (futex_wait (lockp, _MUTEX_WAITERS) == -1 && errno == EDEADLK)
-            {
-              debug (0, "Possible deadlock: %p!", lockp);
-              extern int backtrace (void **array, int size);
-              void *a[20];
-              int c = backtrace (a, sizeof (a) / sizeof (a[0]));
-              int i;
-              for (i = 0; i < c; i ++)
-                debug (0, "%p", a[i]);
-              ss_lock_trace_dump (lockp);
-            }
-          c = atomic_exchange_acq (lockp, _MUTEX_WAITERS);
+          debug (0, "Possible deadlock: %p!", lockp);
+          extern int backtrace (void **array, int size);
+          void *a[20];
+          int c = backtrace (a, sizeof (a) / sizeof (a[0]));
+          int i;
+          for (i = 0; i < c; i ++)
+            debug (0, "%p", a[i]);
+          ss_lock_trace_dump (lockp);
         }
     }
-
-  ss_mutex_trace_add (SS_MUTEX_LOCK, caller, line, lockp);
 }
 
 #define ss_mutex_lock(__sml_lockp)                                     \
@@ -97,12 +108,15 @@ ss_mutex_lock (__const char *caller, int line, ss_mutex_t *lockp)
 static inline void
 ss_mutex_unlock (__const char *caller, int line, ss_mutex_t *lockp)
 {
+  assertx (*lockp == _MUTEX_LOCKED || *lockp == _MUTEX_WAITERS,
+           "%p: %d", lockp, *lockp);
+
   /* We rely on the knowledge that unlocked is 0, locked and no
      waiters is 1 and locked with waiters is 2.  Thus if *lockp is 1,
      an atomic dec yields 1 (the old value) and we know that there are
      no waiters.  */
-  if (atomic_decrement_and_test (lockp) != _MUTEX_LOCKED)
-    /* There are waiters.  */
+  if (__sync_fetch_and_add (lockp, -1) == _MUTEX_WAITERS)
+    /* There are waiters.  Wake them.  */
     {
       *lockp = 0;
       futex_wake (lockp, 1);
@@ -122,9 +136,14 @@ ss_mutex_unlock (__const char *caller, int line, ss_mutex_t *lockp)
 static inline bool
 ss_mutex_trylock (__const char *caller, int line, ss_mutex_t *lockp)
 {
-  int c;
-  c = atomic_compare_and_exchange_val_acq (lockp, _MUTEX_LOCKED,
-                                           _MUTEX_UNLOCKED);
+  assert (*lockp == _MUTEX_UNLOCKED || *lockp == _MUTEX_LOCKED
+          || *lockp == _MUTEX_WAITERS);
+
+  int c = *lockp;
+  if (*lockp == _MUTEX_UNLOCKED)
+    /* It looks as if it is free.  Try to acquire it.  */
+    c = __sync_val_compare_and_swap (lockp, _MUTEX_UNLOCKED, _MUTEX_LOCKED);
+
   if (c == _MUTEX_UNLOCKED)
     /* Got the lock.  */
     {
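Continuing the sketch from before the diff (same placeholder names; the 0/1/2 encoding is the one described by the comment in mutex.h): unlock relies on an atomic decrement whose old value tells it whether anyone is waiting, and trylock is just the fast path of lock, reporting failure instead of sleeping.  Again, these are illustrative stand-ins, not the code in <hurd/mutex.h>.

/* Sketch continued; SKETCH_UNLOCKED (0), SKETCH_LOCKED (1),
   SKETCH_WAITERS (2), sketch_mutex_t and the futex_wake () stand-in
   are defined with the lock sketch above.  */

static inline void
sketch_mutex_unlock (sketch_mutex_t *lockp)
{
  /* __sync_fetch_and_add returns the value before the decrement:
     1 -> 0 means nobody is waiting and we are done; 2 -> 1 means
     someone may be asleep, so clear the lock entirely and wake one
     waiter.  */
  if (__sync_fetch_and_add (lockp, -1) == SKETCH_WAITERS)
    {
      *lockp = SKETCH_UNLOCKED;
      futex_wake (lockp, 1);
    }
}

static inline int
sketch_mutex_trylock (sketch_mutex_t *lockp)
{
  /* Same fast path as the lock sketch: one compare-and-swap from 0 to
     1.  If the lock is held (1 or 2), report failure instead of
     sleeping.  */
  if (*lockp == SKETCH_UNLOCKED
      && __sync_val_compare_and_swap (lockp, SKETCH_UNLOCKED,
                                      SKETCH_LOCKED) == SKETCH_UNLOCKED)
    return 1;                   /* Got the lock.  */
  return 0;                     /* Busy.  */
}

A real caller goes through the ss_mutex_* wrappers in <hurd/mutex.h>, which additionally pass the calling function and line for the lock tracing visible in the diff.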