author     Andi Kleen <ak@linux.intel.com>  2012-12-22 01:03:04 -0800
committer  Andi Kleen <ak@linux.intel.com>  2013-07-02 08:46:55 -0700
commit     e8c659d74e011346785355eeef03b7fb6f533c61 (patch)
tree       7791d2e0769dc19ff7b549be745e4ddc251c3b7a /nptl/pthread_mutex_lock.c
parent     68cc29355f3334c7ad18f648ff9a6383a0916d23 (diff)
Add elision to pthread_mutex_{try,timed,un}lock
Add elision paths to the basic mutex locks.

The normal path has a check for RTM and upgrades the lock to RTM when available. Trylocks cannot automatically upgrade, so they check for elision every time.

We use a 4-byte value in the mutex to store the lock elision adaptation state. This is separate from the adaptive spin state and uses a separate field.

Condition variables currently do not support elision. Recursive mutexes and condition variables may be supported at some point, but are not in the current implementation. Also, "trylock" will not automatically enable elision unless some other lock call has already been made on the lock.

This version does not use IFUNC, so every lock has one additional check for elision. Benchmarking showed the overhead to be negligible.
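For orientation, here is a minimal sketch of the idea behind lll_lock_elision and the 4-byte adaptation state, using the Intel RTM intrinsics from <immintrin.h> (compile with -mrtm). It is not the glibc implementation: the struct layout, the ELIDE_SKIP back-off constant, and the spinlock fallback are illustrative stand-ins for glibc's __elision field and the futex-based lll_lock.

/* Sketch only: elide the lock with RTM when the adaptation state
   allows it; fall back to the real lock after an abort.  */
#include <immintrin.h>

#define ELIDE_SKIP 3    /* locks to skip elision for after an abort */

struct elision_lock
{
  int lock;             /* 0 = free, 1 = taken (the normal lock word) */
  int adapt_count;      /* elision adaptation state (the 4-byte field) */
};

static void
real_lock (struct elision_lock *m)
{
  /* Plain spinlock standing in for the futex-based lll_lock.  */
  while (__atomic_exchange_n (&m->lock, 1, __ATOMIC_ACQUIRE) != 0)
    ;
}

static void
elision_lock (struct elision_lock *m)
{
  if (m->adapt_count <= 0)
    {
      unsigned int status = _xbegin ();
      if (status == _XBEGIN_STARTED)
        {
          if (m->lock == 0)
            return;         /* Running transactionally; lock word untouched.  */
          _xabort (0xff);   /* Someone holds the real lock: abort.  */
        }
      /* The transaction aborted; avoid elision for the next few locks.  */
      m->adapt_count = ELIDE_SKIP;
    }
  else
    m->adapt_count--;
  real_lock (m);
}

static void
elision_unlock (struct elision_lock *m)
{
  if (m->lock == 0)
    _xend ();               /* Lock was elided: commit the transaction.  */
  else
    __atomic_store_n (&m->lock, 0, __ATOMIC_RELEASE);
}

Reading the lock word inside the transaction puts it in the read set, so a thread that takes the real lock aborts every concurrent elided section; that is what makes the fallback safe.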
Diffstat (limited to 'nptl/pthread_mutex_lock.c')
-rw-r--r--    nptl/pthread_mutex_lock.c    53
1 file changed, 43 insertions(+), 10 deletions(-)
diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index fbedfd7d3a..b37f39a0a9 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -25,6 +25,14 @@
#include <lowlevellock.h>
#include <stap-probe.h>
+#ifndef lll_lock_elision
+#define lll_lock_elision(lock, try_lock, private) ({ \
+ lll_lock (lock, private); 0; })
+#endif
+
+#ifndef lll_trylock_elision
+#define lll_trylock_elision(a,t) lll_trylock(a)
+#endif
#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
@@ -34,39 +42,60 @@
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
lll_robust_lock ((mutex)->__data.__lock, id, \
PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
+# define LLL_MUTEX_LOCK_ELISION(mutex) \
+ lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
+ PTHREAD_MUTEX_PSHARED (mutex))
+# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
+ lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision, \
+ PTHREAD_MUTEX_PSHARED (mutex))
#endif
+#ifndef FORCE_ELISION
+#define FORCE_ELISION(m, s)
+#endif
static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
__attribute_noinline__;
-
int
__pthread_mutex_lock (mutex)
pthread_mutex_t *mutex;
{
assert (sizeof (mutex->__size) >= sizeof (mutex->__data));
- unsigned int type = PTHREAD_MUTEX_TYPE (mutex);
+ unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
LIBC_PROBE (mutex_entry, 1, mutex);
- if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
+ if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
+ | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
return __pthread_mutex_lock_full (mutex);
- pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
-
- if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
- == PTHREAD_MUTEX_TIMED_NP)
+ if (__builtin_expect (type == PTHREAD_MUTEX_TIMED_NP, 1))
{
+ FORCE_ELISION (mutex, goto elision);
simple:
/* Normal mutex. */
LLL_MUTEX_LOCK (mutex);
assert (mutex->__data.__owner == 0);
}
- else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
+#ifdef HAVE_ELISION
+ else if (__builtin_expect (type == PTHREAD_MUTEX_TIMED_ELISION_NP, 1))
+ {
+ elision: __attribute__((unused))
+ /* This case can never happen on a system without elision,
+ as the mutex type initialization functions will not
+ allow to set the elision flags. */
+ /* Don't record owner or users for elision case. This is a
+ tail call. */
+ return LLL_MUTEX_LOCK_ELISION (mutex);
+ }
+#endif
+ else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
+ == PTHREAD_MUTEX_RECURSIVE_NP, 1))
{
/* Recursive mutex. */
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
/* Check whether we already hold the mutex. */
if (mutex->__data.__owner == id)
@@ -87,7 +116,8 @@ __pthread_mutex_lock (mutex)
assert (mutex->__data.__owner == 0);
mutex->__data.__count = 1;
}
- else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
+ else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
+ == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
{
if (! __is_smp)
goto simple;
@@ -117,13 +147,16 @@ __pthread_mutex_lock (mutex)
}
else
{
- assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+ assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
/* Check whether we already hold the mutex. */
if (__builtin_expect (mutex->__data.__owner == id, 0))
return EDEADLK;
goto simple;
}
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
/* Record the ownership. */
mutex->__data.__owner = id;
#ifndef NO_INCR
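The FORCE_ELISION hook is what lets the normal timed-mutex path upgrade a lock to the elision type on first use; since only the lock path runs it, a bare trylock never enables elision by itself, matching the note in the commit message. A sketch of what an elision-capable architecture might supply, modeled on glibc's x86 definition (__pthread_force_elision and the exact flag handling are assumptions borrowed from that code, not part of this patch):

/* Hypothetical FORCE_ELISION for an elision-capable architecture:
   on the first lock of a plain timed mutex, mark the mutex as the
   elision variant and execute S (`goto elision` in the code above).
   __pthread_force_elision is assumed to be the global enable flag.  */
#define FORCE_ELISION(m, s)                                           \
  if (__pthread_force_elision                                         \
      && ((m)->__data.__kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)  \
    {                                                                 \
      (m)->__data.__kind |= PTHREAD_MUTEX_ELISION_NP;                 \
      s;                                                              \
    }

This also shows why the elision branch returns with a tail call instead of falling through: the code at the end of the function records __owner and __nusers, and an elided lock must leave the mutex words untouched so that concurrent elided sections do not conflict on them.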