author    Jakub Jelinek <jakub@redhat.com>    2006-03-28 09:36:28 +0000
committer Jakub Jelinek <jakub@redhat.com>    2006-03-28 09:36:28 +0000
commit    5a6a773f6c388e7740afb495fcd03d3b500f30cb (patch)
tree      ad7b4e3dd8bb8a750bf6b3a016c591e5d98026dc /nptl
parent    0c20be13c576b849ab201bd887a6585973a49d0e (diff)
Updated to fedora-glibc-20060328T0900
Diffstat (limited to 'nptl')
-rw-r--r--  nptl/ChangeLog                                  |  32
-rw-r--r--  nptl/Makefile                                   |   2
-rw-r--r--  nptl/allocatestack.c                            |  24
-rw-r--r--  nptl/descr.h                                    |  45
-rw-r--r--  nptl/init.c                                     |  28
-rw-r--r--  nptl/pthreadP.h                                 |  31
-rw-r--r--  nptl/pthread_create.c                           |  46
-rw-r--r--  nptl/pthread_mutex_consistent.c                 |   2
-rw-r--r--  nptl/pthread_mutex_destroy.c                    |  12
-rw-r--r--  nptl/pthread_mutex_init.c                       |  17
-rw-r--r--  nptl/pthread_mutex_lock.c                       |  50
-rw-r--r--  nptl/pthread_mutex_timedlock.c                  |  44
-rw-r--r--  nptl/pthread_mutex_trylock.c                    |  51
-rw-r--r--  nptl/pthread_mutex_unlock.c                     |  16
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/ia64/dl-sysdep.h   |   4
-rw-r--r--  nptl/tst-robust8.c                              | 264
16 files changed, 543 insertions, 125 deletions
diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index 678a419aca..64cea204e3 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,35 @@
+2006-03-27 Ulrich Drepper <drepper@redhat.com>
+
+ * allocatestack.c (allocate_stack): Always initialize robust_head.
+ * descr.h: Define struct robust_list_head.
+ (struct pthread): Use robust_list_head in robust mutex list definition.
+ Adjust ENQUEUE_MUTEX and DEQUEUE_MUTEX.
+ * init.c [!__ASSUME_SET_ROBUST_LIST] (__set_robust_list_avail): Define.
+ (__pthread_initialize_minimal_internal): Register robust_list with
+ the kernel.
+ * pthreadP.h: Remove PRIVATE_ from PTHREAD_MUTEX_ROBUST_* names.
+ Declare __set_robust_list_avail.
+ * pthread_create.c (start_thread): Register robust_list of new thread.
+ [!__ASSUME_SET_ROBUST_LIST]: If robust_list is not empty, wake up
+ waiters.
+ * pthread_mutex_destroy.c: For robust mutexes, don't look at the
+ number of users; it's unreliable.
+ * pthread_mutex_init.c: Allow use of pshared robust mutexes if
+ set_robust_list syscall is available.
+ * pthread_mutex_consistent.c: Adjust for PTHREAD_MUTEX_ROBUST_* rename.
+ * pthread_mutex_lock.c: Simplify robust mutex code a bit.
+ Set robust_head.list_op_pending before trying to lock a robust mutex.
+ * pthread_mutex_timedlock.c: Likewise.
+ * pthread_mutex_trylock.c: Likewise.
+ * pthread_mutex_unlock.c: Likewise for unlocking.
+ * Makefile (tests): Add tst-robust8.
+ * tst-robust8.c: New file.
+
+2006-03-08 Andreas Schwab <schwab@suse.de>
+
+ * sysdeps/unix/sysv/linux/ia64/dl-sysdep.h
+ (DL_SYSINFO_IMPLEMENTATION): Add missing newline.
+
2006-03-05 Roland McGrath <roland@redhat.com>
* configure (libc_add_on): Disable add-on when $add_ons_automatic = yes
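
The heart of this update is the kernel-assisted robust-mutex protocol: every thread registers a struct robust_list_head with the kernel through the set_robust_list system call, and when the thread dies the kernel walks the list, sets FUTEX_OWNER_DIED on each held lock word, and wakes waiters. A minimal standalone sketch of the registration step follows; it is illustrative only (it assumes <linux/futex.h> provides struct robust_list_head and that the running kernel implements the call), not glibc code.

#include <linux/futex.h>      /* struct robust_list_head */
#include <stddef.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int
main (void)
{
  /* One head per thread; it must outlive the thread's lock usage.  */
  static struct robust_list_head head;
  head.list.next = &head.list;   /* empty circular list */
  head.futex_offset = 0;         /* illustrative; glibc derives this from
                                    the pthread_mutex_t layout */
  head.list_op_pending = NULL;

  if (syscall (SYS_set_robust_list, &head, sizeof head) != 0)
    perror ("set_robust_list");  /* e.g. ENOSYS on kernels without it */
  return 0;
}
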
diff --git a/nptl/Makefile b/nptl/Makefile
index 31b5ace92e..e430b8d6cb 100644
--- a/nptl/Makefile
+++ b/nptl/Makefile
@@ -206,7 +206,7 @@ tests = tst-typesizes \
tst-cond14 tst-cond15 tst-cond16 tst-cond17 tst-cond18 tst-cond19 \
tst-cond20 tst-cond21 \
tst-robust1 tst-robust2 tst-robust3 tst-robust4 tst-robust5 \
- tst-robust6 tst-robust7 \
+ tst-robust6 tst-robust7 tst-robust8 \
tst-rwlock1 tst-rwlock2 tst-rwlock3 tst-rwlock4 tst-rwlock5 \
tst-rwlock6 tst-rwlock7 tst-rwlock8 tst-rwlock9 tst-rwlock10 \
tst-rwlock11 tst-rwlock12 tst-rwlock13 tst-rwlock14 \
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 046a2470fc..a3ed1a33d3 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -365,12 +365,6 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
/* The process ID is also the same as that of the caller. */
pd->pid = THREAD_GETMEM (THREAD_SELF, pid);
- /* List of robust mutexes. */
-#ifdef __PTHREAD_MUTEX_HAVE_PREV
- pd->robust_list.__prev = &pd->robust_list;
-#endif
- pd->robust_list.__next = &pd->robust_list;
-
/* Allocate the DTV for this thread. */
if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
{
@@ -505,12 +499,6 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
/* The process ID is also the same as that of the caller. */
pd->pid = THREAD_GETMEM (THREAD_SELF, pid);
- /* List of robust mutexes. */
-#ifdef __PTHREAD_MUTEX_HAVE_PREV
- pd->robust_list.__prev = &pd->robust_list;
-#endif
- pd->robust_list.__next = &pd->robust_list;
-
/* Allocate the DTV for this thread. */
if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
{
@@ -634,6 +622,18 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
stillborn thread could be canceled while the lock is taken. */
pd->lock = LLL_LOCK_INITIALIZER;
+ /* The robust mutex lists also need to be initialized
+ unconditionally because the cleanup for the previous stack owner
+ might have happened in the kernel. */
+ pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
+ - offsetof (pthread_mutex_t,
+ __data.__list.__next));
+ pd->robust_head.list_op_pending = NULL;
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+ pd->robust_prev = &pd->robust_head;
+#endif
+ pd->robust_head.list = &pd->robust_head;
+
/* We place the thread descriptor at the end of the stack. */
*pdp = pd;
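
The futex_offset stored above is what lets the kernel find the lock word: the list pointers address the __next field embedded inside each mutex, so the kernel adds futex_offset to every entry it visits, and the value can be negative. A hedged illustration of the arithmetic; __data.__lock and __data.__list.__next are glibc-internal field names and may not exist in this form on every platform or version.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

int
main (void)
{
  /* Byte distance from the embedded list link to the futex word.  */
  long off = (long) offsetof (pthread_mutex_t, __data.__lock)
             - (long) offsetof (pthread_mutex_t, __data.__list.__next);
  printf ("futex_offset = %ld\n", off);
  return 0;
}
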
diff --git a/nptl/descr.h b/nptl/descr.h
index 80251b920b..f89d3240da 100644
--- a/nptl/descr.h
+++ b/nptl/descr.h
@@ -102,6 +102,15 @@ struct xid_command
};
+/* Data structure used by the kernel to find robust futexes. */
+struct robust_list_head
+{
+ void *list;
+ long int futex_offset;
+ void *list_op_pending;
+};
+
+
/* Thread descriptor data structure. */
struct pthread
{
@@ -136,25 +145,43 @@ struct pthread
/* List of robust mutexes the thread is holding. */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
- __pthread_list_t robust_list;
+ void *robust_prev;
+ struct robust_list_head robust_head;
+
+ /* The list above is strange. It is basically a doubly linked
+ list, but each pointer to the next/previous element points
+ into the middle of the object, at the __next element.
+ Whenever casting to __pthread_list_t we first need to adjust
+ the pointer. */
+# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))
# define ENQUEUE_MUTEX(mutex) \
do { \
- __pthread_list_t *next = THREAD_GETMEM (THREAD_SELF, robust_list.__next); \
- next->__prev = &mutex->__data.__list; \
- mutex->__data.__list.__next = next; \
- mutex->__data.__list.__prev = &THREAD_SELF->robust_list; \
- THREAD_SETMEM (THREAD_SELF, robust_list.__next, &mutex->__data.__list); \
+ __pthread_list_t *next = (THREAD_GETMEM (THREAD_SELF, robust_head.list) \
+ - QUEUE_PTR_ADJUST); \
+ next->__prev = (void *) &mutex->__data.__list.__next; \
+ mutex->__data.__list.__next = (void *) &next->__next; \
+ mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head; \
+ THREAD_SETMEM (THREAD_SELF, robust_head.list, \
+ &mutex->__data.__list.__next); \
} while (0)
# define DEQUEUE_MUTEX(mutex) \
do { \
- mutex->__data.__list.__next->__prev = mutex->__data.__list.__prev; \
- mutex->__data.__list.__prev->__next = mutex->__data.__list.__next; \
+ __pthread_list_t *next = (__pthread_list_t *) \
+ ((char *) mutex->__data.__list.__next - QUEUE_PTR_ADJUST); \
+ next->__prev = mutex->__data.__list.__prev; \
+ __pthread_list_t *prev = (__pthread_list_t *) \
+ ((char *) mutex->__data.__list.__prev - QUEUE_PTR_ADJUST); \
+ prev->__next = mutex->__data.__list.__next; \
mutex->__data.__list.__prev = NULL; \
mutex->__data.__list.__next = NULL; \
} while (0)
#else
- __pthread_slist_t robust_list;
+ union
+ {
+ __pthread_slist_t robust_list;
+ struct robust_list_head robust_head;
+ };
# define ENQUEUE_MUTEX(mutex) \
do { \
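
The comment in this hunk is the key to the reworked ENQUEUE_MUTEX and DEQUEUE_MUTEX: the kernel expects every pointer on the list to address the __next field inside a mutex, so user space must subtract QUEUE_PTR_ADJUST before using a pointer as a __pthread_list_t. The same interior-pointer technique in a self-contained sketch, with generic names that are not glibc API:

#include <stddef.h>
#include <stdio.h>

struct link { void *prev; void *next; };   /* mirrors __pthread_list_t */
struct node { int payload; struct link l; };

/* Pointers on the list address the next field of a link, so subtract
   its offset to recover the enclosing struct link (QUEUE_PTR_ADJUST). */
#define ADJ offsetof (struct link, next)
#define LINK_OF(p) ((struct link *) ((char *) (p) - ADJ))

int
main (void)
{
  struct link head = { &head.next, &head.next };  /* empty circular list */
  struct node n = { 42, { NULL, NULL } };

  /* Enqueue at the front, mirroring ENQUEUE_MUTEX.  */
  struct link *first = LINK_OF (head.next);
  first->prev = &n.l.next;
  n.l.next = &first->next;
  n.l.prev = &head.next;
  head.next = &n.l.next;

  /* Walk one entry and recover the node, mirroring DEQUEUE_MUTEX's
     pointer adjustment.  */
  struct node *found = (struct node *)
    ((char *) LINK_OF (head.next) - offsetof (struct node, l));
  printf ("payload via list: %d\n", found->payload);
  return 0;
}
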
diff --git a/nptl/init.c b/nptl/init.c
index cb63ff7a6d..4db3e0c828 100644
--- a/nptl/init.c
+++ b/nptl/init.c
@@ -60,6 +60,15 @@
size_t __static_tls_size;
size_t __static_tls_align_m1;
+#ifndef __ASSUME_SET_ROBUST_LIST
+/* Negative if we do not have the system call and cannot use it. */
+int __set_robust_list_avail;
+# define set_robust_list_not_avail() \
+ __set_robust_list_avail = -1
+#else
+# define set_robust_list_not_avail() do { } while (0)
+#endif
+
/* Version of the library, used in libthread_db to detect mismatches. */
static const char nptl_version[] __attribute_used__ = VERSION;
@@ -247,10 +256,6 @@ __pthread_initialize_minimal_internal (void)
struct pthread *pd = THREAD_SELF;
INTERNAL_SYSCALL_DECL (err);
pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
-#ifdef __PTHREAD_MUTEX_HAVE_PREV
- pd->robust_list.__prev = &pd->robust_list;
-#endif
- pd->robust_list.__next = &pd->robust_list;
THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
THREAD_SETMEM (pd, user_stack, true);
if (LLL_LOCK_INITIALIZER != 0)
@@ -259,6 +264,21 @@ __pthread_initialize_minimal_internal (void)
THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif
+ /* Initialize the robust mutex data. */
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+ pd->robust_prev = &pd->robust_head;
+#endif
+ pd->robust_head.list = &pd->robust_head;
+#ifdef __NR_set_robust_list
+ pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
+ - offsetof (pthread_mutex_t,
+ __data.__list.__next));
+ int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
+ sizeof (struct robust_list_head));
+ if (INTERNAL_SYSCALL_ERROR_P (res, err))
+#endif
+ set_robust_list_not_avail ();
+
/* Set initial thread's stack block from 0 up to __libc_stack_end.
It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
purposes this is good enough. */
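
When the kernel lacks the call, the INTERNAL_SYSCALL above fails with ENOSYS and set_robust_list_not_avail() records that in __set_robust_list_avail, steering NPTL toward the user-space cleanup at thread exit instead. The same probe can be sketched outside glibc as follows; robust_list_supported is a hypothetical helper, and a real probe would restore the thread's previous registration afterwards.

#include <errno.h>
#include <linux/futex.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Return 1 if this kernel implements set_robust_list, else 0.
   Caveat: a successful probe replaces the calling thread's current
   robust-list registration with this dummy head.  */
static int
robust_list_supported (void)
{
  static struct robust_list_head head = { { &head.list }, 0, NULL };
  if (syscall (SYS_set_robust_list, &head, sizeof head) == 0)
    return 1;
  return errno != ENOSYS;   /* other errors still mean the call exists */
}
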
diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h
index a4d6d1a1ae..7b3da83786 100644
--- a/nptl/pthreadP.h
+++ b/nptl/pthreadP.h
@@ -31,6 +31,7 @@
#include <internaltypes.h>
#include <pthread-functions.h>
#include <atomic.h>
+#include <kernel-features.h>
/* Atomic operations on TLS memory. */
@@ -60,13 +61,13 @@
/* Internal mutex type value. */
enum
{
- PTHREAD_MUTEX_ROBUST_PRIVATE_NP = 16,
- PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP
- = PTHREAD_MUTEX_ROBUST_PRIVATE_NP | PTHREAD_MUTEX_RECURSIVE_NP,
- PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP
- = PTHREAD_MUTEX_ROBUST_PRIVATE_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
- PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP
- = PTHREAD_MUTEX_ROBUST_PRIVATE_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
+ PTHREAD_MUTEX_ROBUST_NORMAL_NP = 16,
+ PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
+ = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_RECURSIVE_NP,
+ PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
+ = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
+ PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP
+ = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
PTHREAD_MUTEX_PRIO_INHERIT_PRIVATE_NP = 32,
PTHREAD_MUTEX_PRIO_PROTECT_PRIVATE_NP = 64
};
@@ -128,6 +129,11 @@ hidden_proto (__pthread_keys)
/* Number of threads running. */
extern unsigned int __nptl_nthreads attribute_hidden;
+#ifndef __ASSUME_SET_ROBUST_LIST
+/* Negative if we do not have the system call and cannot use it. */
+extern int __set_robust_list_avail attribute_hidden;
+#endif
+
/* The library can run in debugging mode where it performs a lot more
tests. */
extern int __pthread_debug attribute_hidden;
@@ -504,4 +510,15 @@ extern int __nptl_setxid (struct xid_command *cmdp) attribute_hidden;
# define PTHREAD_STATIC_FN_REQUIRE(name) __asm (".globl " #name);
#endif
+
+#ifndef __NR_set_robust_list
+/* XXX For the time being... Once we can rely on the kernel headers
+ having the definition, remove these lines. */
+# if defined __i386__
+# define __NR_set_robust_list 311
+# elif defined __x86_64__
+# define __NR_set_robust_list 273
+# endif
+#endif
+
#endif /* pthreadP.h */
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index f3d90ecebf..71365a17e8 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -229,6 +229,19 @@ start_thread (void *arg)
/* Initialize resolver state pointer. */
__resp = &pd->res;
+#ifdef __NR_set_robust_list
+# ifndef __ASSUME_SET_ROBUST_LIST
+ if (__set_robust_list_avail >= 0)
+# endif
+ {
+ INTERNAL_SYSCALL_DECL (err);
+ /* This call should never fail because the initial call in init.c
+ succeeded. */
+ INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
+ sizeof (struct robust_list_head));
+ }
+#endif
+
/* This is where the try/finally block should be created. For
compilers without that support we do use setjmp. */
struct pthread_unwind_buf unwind_buf;
@@ -310,35 +323,34 @@ start_thread (void *arg)
the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE. */
atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
+#ifndef __ASSUME_SET_ROBUST_LIST
/* If this thread has any robust mutexes locked, handle them now. */
-#if __WORDSIZE == 64
- __pthread_list_t *robust = pd->robust_list.__next;
-#else
+# if __WORDSIZE == 64
+ void *robust = pd->robust_head.list;
+# else
__pthread_slist_t *robust = pd->robust_list.__next;
-#endif
- if (__builtin_expect (robust != &pd->robust_list, 0))
+# endif
+ /* We let the kernel do the notification if it is able to do so. */
+ if (__set_robust_list_avail < 0
+ && __builtin_expect (robust != &pd->robust_head, 0))
{
do
{
struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
- ((char *) robust - offsetof (struct __pthread_mutex_s, __list));
- robust = robust->__next;
+ ((char *) robust - offsetof (struct __pthread_mutex_s,
+ __list.__next));
+ robust = *((void **) robust);
- this->__list.__next = NULL;
-#ifdef __PTHREAD_MUTEX_HAVE_PREV
+# ifdef __PTHREAD_MUTEX_HAVE_PREV
this->__list.__prev = NULL;
-#endif
+# endif
+ this->__list.__next = NULL;
lll_robust_mutex_dead (this->__lock);
}
- while (robust != &pd->robust_list);
-
- /* Clean up so that the thread descriptor can be reused. */
- pd->robust_list.__next = &pd->robust_list;
-#ifdef __PTHREAD_MUTEX_HAVE_PREV
- pd->robust_list.__prev = &pd->robust_list;
-#endif
+ while (robust != &pd->robust_head);
}
+#endif
/* If the thread is detached free the TCB. */
if (IS_DETACHED (pd))
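
The fallback loop above ends each iteration in lll_robust_mutex_dead, which does for one futex what the kernel does for the whole list when set_robust_list is available: mark the lock word with FUTEX_OWNER_DIED and wake a waiter so it can take the mutex and observe EOWNERDEAD. Conceptually, using C11 atomics and a raw futex call (an illustration, not the lowlevellock implementation):

#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Flag a robust lock word as abandoned and wake one waiter;
   schematically what lll_robust_mutex_dead does per held mutex.  */
static void
robust_mutex_dead (atomic_int *lock_word)
{
  atomic_fetch_or (lock_word, FUTEX_OWNER_DIED);
  syscall (SYS_futex, lock_word, FUTEX_WAKE, 1, NULL, NULL, 0);
}
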
diff --git a/nptl/pthread_mutex_consistent.c b/nptl/pthread_mutex_consistent.c
index 0cfe972da0..d4f287b755 100644
--- a/nptl/pthread_mutex_consistent.c
+++ b/nptl/pthread_mutex_consistent.c
@@ -26,7 +26,7 @@ pthread_mutex_consistent_np (mutex)
pthread_mutex_t *mutex;
{
/* Test whether this is a robust mutex with a dead owner. */
- if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_PRIVATE_NP) == 0
+ if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
|| mutex->__data.__owner != PTHREAD_MUTEX_INCONSISTENT)
return EINVAL;
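
For applications the PTHREAD_MUTEX_ROBUST_* rename changes nothing; the visible contract is the recovery protocol: a lock attempt on a robust mutex whose owner died returns EOWNERDEAD, and the new holder must repair the protected state and call pthread_mutex_consistent_np before unlocking, or the mutex becomes unrecoverable. A minimal usage sketch against the _np interface of this era (POSIX later standardized it as pthread_mutex_consistent):

#define _GNU_SOURCE
#include <errno.h>
#include <pthread.h>

/* Lock a robust mutex, recovering it if the previous owner died.  */
static int
lock_robust (pthread_mutex_t *m)
{
  int e = pthread_mutex_lock (m);
  if (e == EOWNERDEAD)
    {
      /* ... repair whatever state the mutex protects here ...  */
      return pthread_mutex_consistent_np (m);
    }
  return e;   /* 0, or e.g. ENOTRECOVERABLE */
}
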
diff --git a/nptl/pthread_mutex_destroy.c b/nptl/pthread_mutex_destroy.c
index 19a647a846..7829979f35 100644
--- a/nptl/pthread_mutex_destroy.c
+++ b/nptl/pthread_mutex_destroy.c
@@ -25,15 +25,9 @@ int
__pthread_mutex_destroy (mutex)
pthread_mutex_t *mutex;
{
- if (mutex->__data.__nusers != 0)
- {
- if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_PRIVATE_NP) != 0
- && (mutex->__data.__lock & FUTEX_OWNER_DIED) != 0
- && mutex->__data.__nusers == 1)
- goto dead_robust_mutex;
-
- return EBUSY;
- }
+ if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
+ && mutex->__data.__nusers != 0)
+ return EBUSY;
/* Set to an invalid value. */
dead_robust_mutex:
diff --git a/nptl/pthread_mutex_init.c b/nptl/pthread_mutex_init.c
index f984d90ae4..c25e4035e5 100644
--- a/nptl/pthread_mutex_init.c
+++ b/nptl/pthread_mutex_init.c
@@ -22,7 +22,6 @@
#include <string.h>
#include "pthreadP.h"
-
static const struct pthread_mutexattr default_attr =
{
/* Default is a normal mutex, not shared between processes. */
@@ -42,10 +41,6 @@ __pthread_mutex_init (mutex, mutexattr)
imutexattr = (const struct pthread_mutexattr *) mutexattr ?: &default_attr;
/* Sanity checks. */
- // XXX For now we cannot implement robust mutexes if they are shared.
- if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0
- && (imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0)
- return ENOTSUP;
// XXX For now we don't support priority inherited or priority protected
// XXX mutexes.
if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
@@ -57,8 +52,18 @@ __pthread_mutex_init (mutex, mutexattr)
/* Copy the values from the attribute. */
mutex->__data.__kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;
+
if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
- mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_PRIVATE_NP;
+ {
+#ifndef __ASSUME_SET_ROBUST_LIST
+ if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
+ && __set_robust_list_avail < 0)
+ return ENOTSUP;
+#endif
+
+ mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+ }
+
switch ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
>> PTHREAD_MUTEXATTR_PROTOCOL_SHIFT)
{
diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index dd22567c71..06eef49c71 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -108,25 +108,33 @@ __pthread_mutex_lock (mutex)
assert (mutex->__data.__owner == 0);
break;
- case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
- case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
- case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
- case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
+ case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
+ case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+ case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ &mutex->__data.__list.__next);
+
oldval = mutex->__data.__lock;
do
{
+ again:
if ((oldval & FUTEX_OWNER_DIED) != 0)
{
/* The previous owner died. Try locking the mutex. */
- int newval;
- while ((newval
- = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
- id, oldval))
- != oldval)
+ int newval = id;
+#ifdef NO_INCR
+ newval |= FUTEX_WAITERS;
+#endif
+
+ newval
+ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+ newval, oldval);
+
+ if (newval != oldval)
{
- if ((newval & FUTEX_OWNER_DIED) == 0)
- goto normal;
oldval = newval;
+ goto again;
}
/* We got the mutex. */
@@ -135,6 +143,7 @@ __pthread_mutex_lock (mutex)
mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
ENQUEUE_MUTEX (mutex);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
/* Note that we deliberately exit here. If we fall
through to the end of the function __nusers would be
@@ -149,18 +158,23 @@ __pthread_mutex_lock (mutex)
return EOWNERDEAD;
}
- normal:
/* Check whether we already hold the mutex. */
- if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
- == id, 0))
+ if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
{
if (mutex->__data.__kind
- == PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
- return EDEADLK;
+ == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+ {
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ NULL);
+ return EDEADLK;
+ }
if (mutex->__data.__kind
- == PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
+ == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
{
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ NULL);
+
/* Just bump the counter. */
if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
/* Overflow of the counter. */
@@ -180,6 +194,7 @@ __pthread_mutex_lock (mutex)
/* This mutex is now not recoverable. */
mutex->__data.__count = 0;
lll_mutex_unlock (mutex->__data.__lock);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
}
}
@@ -187,6 +202,7 @@ __pthread_mutex_lock (mutex)
mutex->__data.__count = 1;
ENQUEUE_MUTEX (mutex);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
break;
default:
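
The robust_head.list_op_pending stores threaded through this function implement a small commit protocol: announce the mutex about to be operated on, perform the atomic lock operation, fix up the list, and only then clear the pending slot. A crash at any instant therefore leaves at most one mutex off the list for the kernel (or the exit path) to reconcile. In outline, with cas_acquire and lock_acquired as stand-in names rather than real glibc helpers:

/* Locking a robust mutex, schematically: */
self->robust_head.list_op_pending = &mutex->__data.__list.__next;
oldval = cas_acquire (&mutex->__data.__lock, 0, tid);  /* try to take it */
if (lock_acquired (oldval))
  ENQUEUE_MUTEX (mutex);           /* now visible on the robust list */
self->robust_head.list_op_pending = NULL;              /* commit point */
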
diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
index b69caedc7c..7c48c7ce6b 100644
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -103,25 +103,27 @@ pthread_mutex_timedlock (mutex, abstime)
}
break;
- case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
- case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
- case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
- case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
+ case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
+ case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+ case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ &mutex->__data.__list.__next);
+
oldval = mutex->__data.__lock;
do
{
+ again:
if ((oldval & FUTEX_OWNER_DIED) != 0)
{
/* The previous owner died. Try locking the mutex. */
- int newval;
- while ((newval
- = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
- id, oldval))
- != oldval)
+ int newval
+ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+ id, oldval);
+ if (newval != oldval)
{
- if ((newval & FUTEX_OWNER_DIED) == 0)
- goto normal;
oldval = newval;
+ goto again;
}
/* We got the mutex. */
@@ -130,6 +132,7 @@ pthread_mutex_timedlock (mutex, abstime)
mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
ENQUEUE_MUTEX (mutex);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
/* Note that we deliberately exit here. If we fall
through to the end of the function __nusers would be
@@ -138,18 +141,23 @@ pthread_mutex_timedlock (mutex, abstime)
return EOWNERDEAD;
}
- normal:
/* Check whether we already hold the mutex. */
- if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
- == id, 0))
+ if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
{
if (mutex->__data.__kind
- == PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
- return EDEADLK;
+ == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+ {
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ NULL);
+ return EDEADLK;
+ }
if (mutex->__data.__kind
- == PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
+ == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
{
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ NULL);
+
/* Just bump the counter. */
if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
/* Overflow of the counter. */
@@ -170,6 +178,7 @@ pthread_mutex_timedlock (mutex, abstime)
/* This mutex is now not recoverable. */
mutex->__data.__count = 0;
lll_mutex_unlock (mutex->__data.__lock);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
}
@@ -182,6 +191,7 @@ pthread_mutex_timedlock (mutex, abstime)
mutex->__data.__count = 1;
ENQUEUE_MUTEX (mutex);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
break;
default:
diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
index 5a13ea6925..148a6e919f 100644
--- a/nptl/pthread_mutex_trylock.c
+++ b/nptl/pthread_mutex_trylock.c
@@ -77,25 +77,28 @@ __pthread_mutex_trylock (mutex)
return 0;
- case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
- case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
- case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
- case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
+ case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
+ case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+ case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ &mutex->__data.__list.__next);
+
oldval = mutex->__data.__lock;
do
{
+ again:
if ((oldval & FUTEX_OWNER_DIED) != 0)
{
/* The previous owner died. Try locking the mutex. */
- int newval;
- while ((newval
- = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
- id, oldval))
- != oldval)
+ int newval
+ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+ id, oldval);
+
+ if (newval != oldval)
{
- if ((newval & FUTEX_OWNER_DIED) == 0)
- goto normal;
oldval = newval;
+ goto again;
}
/* We got the mutex. */
@@ -104,6 +107,7 @@ __pthread_mutex_trylock (mutex)
mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
ENQUEUE_MUTEX (mutex);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
/* Note that we deliberately exit here. If we fall
through to the end of the function __nusers would be
@@ -112,18 +116,23 @@ __pthread_mutex_trylock (mutex)
return EOWNERDEAD;
}
- normal:
/* Check whether we already hold the mutex. */
- if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
- == id, 0))
+ if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
{
if (mutex->__data.__kind
- == PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
- return EDEADLK;
+ == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+ {
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ NULL);
+ return EDEADLK;
+ }
if (mutex->__data.__kind
- == PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
+ == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
{
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ NULL);
+
/* Just bump the counter. */
if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
/* Overflow of the counter. */
@@ -137,7 +146,11 @@ __pthread_mutex_trylock (mutex)
oldval = lll_robust_mutex_trylock (mutex->__data.__lock, id);
if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
- return EBUSY;
+ {
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+ return EBUSY;
+ }
robust:
if (__builtin_expect (mutex->__data.__owner
@@ -147,12 +160,14 @@ __pthread_mutex_trylock (mutex)
mutex->__data.__count = 0;
if (oldval == id)
lll_mutex_unlock (mutex->__data.__lock);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
}
}
while ((oldval & FUTEX_OWNER_DIED) != 0);
ENQUEUE_MUTEX (mutex);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
mutex->__data.__owner = id;
++mutex->__data.__nusers;
diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c
index d41eefe34c..bf9aa7625f 100644
--- a/nptl/pthread_mutex_unlock.c
+++ b/nptl/pthread_mutex_unlock.c
@@ -63,10 +63,12 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
lll_mutex_unlock (mutex->__data.__lock);
break;
- case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
+ case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
/* Recursive mutex. */
if ((mutex->__data.__lock & FUTEX_TID_MASK)
- == THREAD_GETMEM (THREAD_SELF, tid))
+ == THREAD_GETMEM (THREAD_SELF, tid)
+ && __builtin_expect (mutex->__data.__owner
+ == PTHREAD_MUTEX_INCONSISTENT, 0))
{
if (--mutex->__data.__count != 0)
/* We still hold the mutex. */
@@ -84,9 +86,9 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
goto robust;
- case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
- case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
- case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
+ case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+ case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
if ((mutex->__data.__lock & FUTEX_TID_MASK)
!= THREAD_GETMEM (THREAD_SELF, tid)
|| ! lll_mutex_islocked (mutex->__data.__lock))
@@ -102,6 +104,8 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
robust:
/* Remove mutex from the list. */
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ &mutex->__data.__list.__next);
DEQUEUE_MUTEX (mutex);
mutex->__data.__owner = newowner;
@@ -111,6 +115,8 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
/* Unlock. */
lll_robust_mutex_unlock (mutex->__data.__lock);
+
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
break;
default:
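
Unlocking uses the same pending-slot protocol in the opposite order: announce the mutex, take it off the list, release the lock word, then clear list_op_pending, so no window exists in which the mutex is released yet still appears held on the robust list. Schematically, under the same stand-in conventions as the locking sketch above:

/* Unlocking a robust mutex, schematically: */
self->robust_head.list_op_pending = &mutex->__data.__list.__next;
DEQUEUE_MUTEX (mutex);                           /* off the list first */
mutex->__data.__owner = newowner;
lll_robust_mutex_unlock (mutex->__data.__lock);  /* wake a waiter */
self->robust_head.list_op_pending = NULL;
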
diff --git a/nptl/sysdeps/unix/sysv/linux/ia64/dl-sysdep.h b/nptl/sysdeps/unix/sysv/linux/ia64/dl-sysdep.h
index af835c44b1..525b622a68 100644
--- a/nptl/sysdeps/unix/sysv/linux/ia64/dl-sysdep.h
+++ b/nptl/sysdeps/unix/sysv/linux/ia64/dl-sysdep.h
@@ -1,5 +1,5 @@
/* System-specific settings for dynamic linker code. IA-64 version.
- Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -57,7 +57,7 @@ extern int _dl_sysinfo_break attribute_hidden;
".body\n\t" \
"break 0x100000;\n\t" \
"br.ret.sptk.many b6;\n\t" \
- ".endp _dl_sysinfo_break" \
+ ".endp _dl_sysinfo_break\n\t" \
".previous");
#endif
diff --git a/nptl/tst-robust8.c b/nptl/tst-robust8.c
new file mode 100644
index 0000000000..19682e594f
--- /dev/null
+++ b/nptl/tst-robust8.c
@@ -0,0 +1,264 @@
+#include <pthread.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+
+
+
+
+static void prepare (void);
+#define PREPARE(argc, argv) prepare ()
+static int do_test (void);
+#define TEST_FUNCTION do_test ()
+#define TIMEOUT 3
+#include "../test-skeleton.c"
+
+
+static int fd;
+#define N 100
+
+static void
+prepare (void)
+{
+ fd = create_temp_file ("tst-robust8", NULL);
+ if (fd == -1)
+ exit (1);
+}
+
+
+#define THESIGNAL SIGKILL
+#define ROUNDS 5
+#define THREADS 9
+
+
+static const struct timespec before = { 0, 0 };
+
+
+static pthread_mutex_t *map;
+
+
+static void *
+tf (void *arg)
+{
+ long int nr = (long int) arg;
+ int fct = nr % 3;
+
+ uint8_t state[N];
+ memset (state, '\0', sizeof (state));
+
+ while (1)
+ {
+ int r = random () % N;
+ if (state[r] == 0)
+ {
+ int e;
+
+ switch (fct)
+ {
+ case 0:
+ e = pthread_mutex_lock (&map[r]);
+ if (e != 0)
+ {
+ printf ("mutex_lock of %d in thread %ld failed with %d\n",
+ r, nr, e);
+ exit (1);
+ }
+ state[r] = 1;
+ break;
+ case 1:
+ e = pthread_mutex_timedlock (&map[r], &before);
+ if (e != 0 && e != ETIMEDOUT)
+ {
+ printf ("\
+mutex_timedlock of %d in thread %ld failed with %d\n",
+ r, nr, e);
+ exit (1);
+ }
+ break;
+ default:
+ e = pthread_mutex_trylock (&map[r]);
+ if (e != 0 && e != EBUSY)
+ {
+ printf ("mutex_trylock of %d in thread %ld failed with %d\n",
+ r, nr, e);
+ exit (1);
+ }
+ break;
+ }
+
+ if (e == EOWNERDEAD)
+ pthread_mutex_consistent_np (&map[r]);
+
+ if (e == 0 || e == EOWNERDEAD)
+ state[r] = 1;
+ }
+ else
+ {
+ int e = pthread_mutex_unlock (&map[r]);
+ if (e != 0)
+ {
+ printf ("mutex_unlock of %d in thread %ld failed with %d\n",
+ r, nr, e);
+ exit (1);
+ }
+
+ state[r] = 0;
+ }
+ }
+}
+
+
+static void
+child (int round)
+{
+ for (int thread = 1; thread <= THREADS; ++thread)
+ {
+ pthread_t th;
+ if (pthread_create (&th, NULL, tf, (void *) (long int) thread) != 0)
+ {
+ printf ("cannot create thread %d in round %d\n", thread, round);
+ exit (1);
+ }
+ }
+
+ struct timespec ts;
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1000000000 / ROUNDS;
+ while (nanosleep (&ts, &ts) != 0)
+ /* nothing */;
+
+ /* Time to die. */
+ kill (getpid (), THESIGNAL);
+
+ /* We better never get here. */
+ abort ();
+}
+
+
+static int
+do_test (void)
+{
+ if (ftruncate (fd, N * sizeof (pthread_mutex_t)) != 0)
+ {
+ puts ("cannot size new file");
+ return 1;
+ }
+
+ map = mmap (NULL, N * sizeof (pthread_mutex_t), PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ if (map == MAP_FAILED)
+ {
+ puts ("mapping failed");
+ return 1;
+ }
+
+ pthread_mutexattr_t ma;
+ if (pthread_mutexattr_init (&ma) != 0)
+ {
+ puts ("mutexattr_init failed");
+ return 0;
+ }
+ if (pthread_mutexattr_setrobust_np (&ma, PTHREAD_MUTEX_ROBUST_NP) != 0)
+ {
+ puts ("mutexattr_setrobust failed");
+ return 1;
+ }
+ if (pthread_mutexattr_setpshared (&ma, PTHREAD_PROCESS_SHARED) != 0)
+ {
+ puts ("mutexattr_setpshared failed");
+ return 1;
+ }
+
+ for (int round = 1; round <= ROUNDS; ++round)
+ {
+ for (int n = 0; n < N; ++n)
+ {
+ int e = pthread_mutex_init (&map[n], &ma);
+ if (e == ENOTSUP)
+ {
+ puts ("cannot support pshared robust mutexes");
+ return 0;
+ }
+ if (e != 0)
+ {
+ printf ("mutex_init %d in round %d failed\n", n + 1, round);
+ return 1;
+ }
+ }
+
+ pid_t p = fork ();
+ if (p == -1)
+ {
+ printf ("fork in round %d failed\n", round);
+ return 1;
+ }
+ if (p == 0)
+ child (round);
+
+ int status;
+ if (TEMP_FAILURE_RETRY (waitpid (p, &status, 0)) != p)
+ {
+ printf ("waitpid in round %d failed\n", round);
+ return 1;
+ }
+ if (!WIFSIGNALED (status))
+ {
+ printf ("child did not die of a signal in round %d\n", round);
+ return 1;
+ }
+ if (WTERMSIG (status) != THESIGNAL)
+ {
+ printf ("child did not die of signal %d in round %d\n",
+ THESIGNAL, round);
+ return 1;
+ }
+
+ for (int n = 0; n < N; ++n)
+ {
+ int e = pthread_mutex_lock (&map[n]);
+ if (e != 0 && e != EOWNERDEAD)
+ {
+ printf ("mutex_lock %d failed in round %d\n", n + 1, round);
+ return 1;
+ }
+ }
+
+ for (int n = 0; n < N; ++n)
+ if (pthread_mutex_unlock (&map[n]) != 0)
+ {
+ printf ("mutex_unlock %d failed in round %d\n", n + 1, round);
+ return 1;
+ }
+
+ for (int n = 0; n < N; ++n)
+ {
+ int e = pthread_mutex_destroy (&map[n]);
+ if (e != 0)
+ {
+ printf ("mutex_destroy %d in round %d failed with %d\n",
+ n + 1, round, e);
+ printf("nusers = %d\n", (int) map[n].__data.__nusers);
+ return 1;
+ }
+ }
+ }
+
+ if (pthread_mutexattr_destroy (&ma) != 0)
+ {
+ puts ("mutexattr_destroy failed");
+ return 1;
+ }
+
+ if (munmap (map, N * sizeof (pthread_mutex_t)) != 0)
+ {
+ puts ("munmap failed");
+ return 1;
+ }
+
+ return 0;
+}