Diffstat (limited to 'nptl/pthread_mutex_trylock.c')
-rw-r--r--  nptl/pthread_mutex_trylock.c  124
1 file changed, 123 insertions(+), 1 deletion(-)
diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
index 148a6e919f..f3a18569a1 100644
--- a/nptl/pthread_mutex_trylock.c
+++ b/nptl/pthread_mutex_trylock.c
@@ -152,7 +152,6 @@ __pthread_mutex_trylock (mutex)
               return EBUSY;
             }
-    robust:
       if (__builtin_expect (mutex->__data.__owner
                             == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
         {
@@ -175,6 +174,129 @@ __pthread_mutex_trylock (mutex)
           return 0;
+    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_NORMAL_NP:
+    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+      {
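+        /* KIND keeps only the base mutex kind: PTHREAD_MUTEX_KIND_MASK_NP
+           strips the PI and robust bits, so the comparisons below can use
+           the plain ERRORCHECK/RECURSIVE constants.  */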
+        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+
+        if (robust)
+          /* Note: robust PI futexes are signaled by setting bit 0.  */
+          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
+                                   | 1));
+
+        oldval = mutex->__data.__lock;
+
+        /* Check whether we already hold the mutex.  */
+        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+          {
+            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+              {
+                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+                return EDEADLK;
+              }
+
+            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+              {
+                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+                /* Just bump the counter.  */
+                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                  /* Overflow of the counter.  */
+                  return EAGAIN;
+
+                ++mutex->__data.__count;
+
+                return 0;
+              }
+          }
+
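+        /* The mutex is not ours; try to acquire it by atomically
+           replacing an unlocked state (0) with our TID.  */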
+        oldval
+          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                 id, 0);
+
+        if (oldval != 0)
+          {
+            if ((oldval & FUTEX_OWNER_DIED) == 0)
+              {
+                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+                return EBUSY;
+              }
+
+            assert (robust);
+
+            /* The mutex owner died.  The kernel will now take care of
+               everything.  */
+            INTERNAL_SYSCALL_DECL (__err);
+            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+                                      FUTEX_TRYLOCK_PI, 0, 0);
+
+            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
+                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
+              {
+                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+                return EBUSY;
+              }
+
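+            /* The kernel acquired the lock on our behalf; re-read the
+               futex word to see whether FUTEX_OWNER_DIED is set.  */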
+            oldval = mutex->__data.__lock;
+          }
+
+        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+          {
+            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+
+            /* We got the mutex.  */
+            mutex->__data.__count = 1;
+            /* But it is inconsistent unless marked otherwise.  */
+            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+            ENQUEUE_MUTEX_PI (mutex);
+            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+            /* Note that we deliberately exit here.  If we fall
+               through to the end of the function __nusers would be
+               incremented which is not correct because the old owner
+               has to be discounted.  */
+            return EOWNERDEAD;
+          }
+
+        if (robust
+            && __builtin_expect (mutex->__data.__owner
+                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+          {
+            /* This mutex is now not recoverable.  */
+            mutex->__data.__count = 0;
+
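+            /* Hand the lock straight back: the mutex is permanently
+               unusable, so we must not keep ownership.  */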
+            INTERNAL_SYSCALL_DECL (__err);
+            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+                              FUTEX_UNLOCK_PI, 0, 0);
+
+            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+            return ENOTRECOVERABLE;
+          }
+
+        if (robust)
+          {
+            ENQUEUE_MUTEX_PI (mutex);
+            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+          }
+
+        mutex->__data.__owner = id;
+        ++mutex->__data.__nusers;
+        mutex->__data.__count = 1;
+
+        return 0;
+      }
+
     default:
       /* Correct code cannot set any other type.  */
       return EINVAL;
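
For context, here is a minimal caller-side sketch of the behavior this change enables: a thread trylocks a robust, priority-inheritance mutex and recovers from a dead owner. This is an illustrative example, not part of the commit; the names mtx, init_robust_pi_mutex, try_lock_with_recovery, and repair_shared_state are hypothetical, and pthread_mutex_consistent is the POSIX name (older glibc spells it pthread_mutex_consistent_np).

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t mtx;

/* Initialize MTX as a robust, priority-inheritance mutex.  */
static int
init_robust_pi_mutex (void)
{
  pthread_mutexattr_t attr;
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_INHERIT);
  pthread_mutexattr_setrobust (&attr, PTHREAD_MUTEX_ROBUST);
  int err = pthread_mutex_init (&mtx, &attr);
  pthread_mutexattr_destroy (&attr);
  return err;
}

static void
repair_shared_state (void)
{
  /* Application-specific: rebuild whatever invariants the dead owner
     may have left half-updated.  */
}

static int
try_lock_with_recovery (void)
{
  int err = pthread_mutex_trylock (&mtx);
  switch (err)
    {
    case 0:
      return 0;                /* Acquired normally.  */

    case EOWNERDEAD:
      /* The previous owner died holding the lock.  We own the mutex
         now, but the protected state may be inconsistent.  */
      repair_shared_state ();
      pthread_mutex_consistent (&mtx);
      return 0;

    case ENOTRECOVERABLE:
      /* An earlier owner died and nobody marked the mutex consistent;
         it can only be destroyed and reinitialized.  */
      return err;

    default:
      return err;              /* EBUSY, EDEADLK, ...  */
    }
}

Note that EOWNERDEAD is a success case: the caller owns the lock and must either repair the state and mark the mutex consistent, or unlock it and let it become unrecoverable. This is why the code above returns EOWNERDEAD without incrementing __nusers for the dead owner.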