Diffstat (limited to 'sysdeps/unix/sysv/linux/powerpc/elision-lock.c')
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/elision-lock.c  14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
index dd1e4c3b17..98a23f0dd2 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
@@ -1,5 +1,5 @@
/* elision-lock.c: Elided pthread mutex lock.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or
@@ -45,7 +45,10 @@
int
__lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
{
- if (*adapt_count > 0)
+#ifndef __SPE__
+ /* adapt_count is accessed concurrently but is just a hint. Thus,
+ use atomic accesses but relaxed MO is sufficient. */
+ if (atomic_load_relaxed (adapt_count) > 0)
{
goto use_lock;
}
@@ -67,7 +70,8 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
{
if (aconf.skip_lock_internal_abort > 0)
- *adapt_count = aconf.skip_lock_internal_abort;
+ atomic_store_relaxed (adapt_count,
+ aconf.skip_lock_internal_abort);
goto use_lock;
}
}
@@ -75,8 +79,10 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)

/* Fall back to locks for a bit if retries have been exhausted */
if (aconf.try_tbegin > 0 && aconf.skip_lock_out_of_tbegin_retries > 0)
- *adapt_count = aconf.skip_lock_out_of_tbegin_retries;
+ atomic_store_relaxed (adapt_count,
+ aconf.skip_lock_out_of_tbegin_retries);

use_lock:
+#endif
return LLL_LOCK ((*lock), pshared);
}
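
Commentary on the change: plain loads and stores of adapt_count are replaced with atomic_load_relaxed / atomic_store_relaxed because the counter is only a heuristic hint deciding whether to skip elision, so no ordering with other memory accesses is required; the elision path is additionally guarded by #ifndef __SPE__ because SPE (e500) targets lack the hardware transactional memory this code relies on. The standalone sketch below, written against C11 <stdatomic.h> with hypothetical names (adapt_hint, should_try_elision, note_elision_failure), illustrates the same relaxed-memory-order hint pattern under those assumptions; it is an illustration, not the glibc code.

/* Standalone sketch (C11 <stdatomic.h>), not glibc code: the names
   adapt_hint, should_try_elision and note_elision_failure are
   hypothetical.  It mirrors the pattern above: the counter is only a
   hint, so relaxed memory order is sufficient.  */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic short adapt_hint;   /* > 0 means: skip elision for now.  */

/* Decide whether attempting the transactional (elided) path is worth it.
   A relaxed load may observe a slightly stale value; that only costs a
   missed optimization, never correctness.  */
static bool
should_try_elision (void)
{
  return atomic_load_explicit (&adapt_hint, memory_order_relaxed) <= 0;
}

/* Record that elision failed and should be skipped for a while.
   Concurrent writers may overwrite each other's value, which is
   acceptable for a heuristic.  */
static void
note_elision_failure (short skip_count)
{
  atomic_store_explicit (&adapt_hint, skip_count, memory_order_relaxed);
}

int
main (void)
{
  note_elision_failure (3);
  printf ("try elision now: %d\n", should_try_elision ());
  return 0;
}

Note that only the adaptation counter can afford relaxed accesses; the lock word itself still needs real synchronization, which in the patched function is provided by the transaction and by LLL_LOCK on the fallback path.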