author     Ulrich Drepper <drepper@redhat.com>  2003-09-21 07:40:24 +0000
committer  Ulrich Drepper <drepper@redhat.com>  2003-09-21 07:40:24 +0000
commit     71451de2f1245b21ce3ba407068c453a866c03d6 (patch)
tree       6646149e32cd4e762f758fa4796a62aab0ea8adf /nptl
parent     56a4aa9886dc1145f8feac66b66216a44cb092c1 (diff)
Update.
2003-09-21  Ulrich Drepper  <drepper@redhat.com>

	* sysdeps/unix/sysv/linux/i386/lowlevellock.h: Completely revamp the
	locking macros.  No distinction between normal and mutex locking
	anymore.
	* sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Rewrite mutex
	locking.  Merge bits from lowlevelmutex.S we still need.
	* sysdeps/unix/sysv/linux/i386/i486/lowlevelmutex.S: Removed.
	* Makefile (routines): Remove libc-lowlevelmutex.
	(libpthread-rountines): Remove lowlevelmutex.
	* pthread_barrier_wait.S: Adjust for new mutex implementation.
	* pthread_cond_broadcast.S: Likewise.
	* pthread_cond_timedwait.S: Likewise.
	* pthread_cond_wait.S: Likewise.
	* pthread_rwlock_rdlock.S: Likewise.
	* pthread_rwlock_timedrdlock.S: Likewise.
	* pthread_rwlock_timedwrlock.S: Likewise.
	* pthread_rwlock_unlock.S: Likewise.
	* pthread_rwlock_wrlock.S: Likewise.
	* pthread_cond_signal.S: Likewise.  Don't use requeue.
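
The new scheme uses a three-state futex word: 0 = unlocked, 1 = locked with no
waiters, 2 = locked with at least one waiter.  Roughly, the rewritten inline
fast path plus __lll_mutex_lock_wait behave like the following C sketch; this
is illustrative only (GCC atomic builtins and the syscall() wrapper stand in
for the hand-written i386 assembly, and mutex_lock is an invented name):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative sketch; the real code is the assembly in the diff below.
   Futex states: 0 = unlocked, 1 = locked, 2 = locked with waiters.  */
static void
mutex_lock (int *futex)
{
  int c = 0;

  /* Fast path (the inline cmpxchgl): 0 -> 1 acquires an uncontended lock.  */
  if (__atomic_compare_exchange_n (futex, &c, 1, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return;

  /* Slow path (__lll_mutex_lock_wait): mark the lock as contended and
     sleep; the lock is finally taken as 2 so the unlocker knows a
     waiter may need waking.  */
  do
    {
      c = 1;
      if (__atomic_compare_exchange_n (futex, &c, 2, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
          || c == 2)
        syscall (SYS_futex, futex, FUTEX_WAIT, 2, NULL, NULL, 0);
      c = 0;
    }
  while (!__atomic_compare_exchange_n (futex, &c, 2, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}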
Diffstat (limited to 'nptl')
-rw-r--r--  nptl/ChangeLog                                                        21
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S                149
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelmutex.S               180
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S         20
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S        7
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S          34
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S       21
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S            25
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S        14
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S   20
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S   20
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S         7
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S        14
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h                      128
14 files changed, 287 insertions, 373 deletions
diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index dd83161bbb..0562ae947a 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,24 @@
+2003-09-21 Ulrich Drepper <drepper@redhat.com>
+
+ * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Completely revamp the
+ locking macros. No distinction between normal and mutex locking
+ anymore.
+ * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Rewrite mutex
+ locking. Merge bits from lowlevelmutex.S we still need.
+ * sysdeps/unix/sysv/linux/i386/i486/lowlevelmutex.S: Removed.
+ * Makefile (routines): Remove libc-lowlevelmutex.
+ (libpthread-rountines): Remove lowlevelmutex.
+ * pthread_barrier_wait.S: Adjust for new mutex implementation.
+ * pthread_cond_broadcast.S: Likewise.
+ * pthread_cond_timedwait.S: Likewise.
+ * pthread_cond_wait.S: Likewise.
+ * pthread_rwlock_rdlock.S: Likewise.
+ * pthread_rwlock_timedrdlock.S: Likewise.
+ * pthread_rwlock_timedwrlock.S: Likewise.
+ * pthread_rwlock_unlock.S: Likewise.
+ * pthread_rwlock_wrlock.S: Likewise.
+ * pthread_cond_signal.S: Likewise. Don't use requeue.
+
2003-09-20 Ulrich Drepper <drepper@redhat.com>
* sysdeps/unix/sysv/linux/i386/lowlevellock.h: Don't match memory
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
index f27fe2bc1f..6e4b077295 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
@@ -36,11 +36,11 @@
#define FUTEX_WAKE 1
- .globl __lll_lock_wait
- .type __lll_lock_wait,@function
- .hidden __lll_lock_wait
+ .globl __lll_mutex_lock_wait
+ .type __lll_mutex_lock_wait,@function
+ .hidden __lll_mutex_lock_wait
.align 16
-__lll_lock_wait:
+__lll_mutex_lock_wait:
pushl %esi
pushl %ebx
pushl %edx
@@ -48,23 +48,124 @@ __lll_lock_wait:
movl %ecx, %ebx
xorl %esi, %esi /* No timeout. */
xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+ movl $2, %edx
+
1:
- leal -1(%eax), %edx /* account for the preceeded xadd. */
+ movl $1, %eax
+ LOCK
+ cmpxchgl %edx, (%ebx)
+
+ testl %eax, %eax
+ je 2f
+
movl $SYS_futex, %eax
ENTER_KERNEL
- orl $-1, %eax /* Load -1. */
- LOCK
- xaddl %eax, (%ebx)
- jne,pn 1b
+ xorl %eax, %eax
+2: LOCK
+ cmpxchgl %edx, (%ebx)
- movl $-1, (%ebx)
+ testl %eax, %eax
+ jne,pn 1b
popl %edx
popl %ebx
popl %esi
ret
- .size __lll_lock_wait,.-__lll_lock_wait
+ .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
+
+
+#ifdef NOT_IN_libc
+ .globl __lll_mutex_timedlock_wait
+ .type __lll_mutex_timedlock_wait,@function
+ .hidden __lll_mutex_timedlock_wait
+ .align 16
+__lll_mutex_timedlock_wait:
+ /* Check for a valid timeout value. */
+ cmpl $1000000000, 4(%edx)
+ jae 3f
+
+ pushl %edi
+ pushl %esi
+ pushl %ebx
+ pushl %ebp
+
+ /* Stack frame for the timespec and timeval structs. */
+ subl $8, %esp
+
+ movl %ecx, %ebp
+ movl %edx, %edi
+
+1:
+ /* Get current time. */
+ movl %esp, %ebx
+ xorl %ecx, %ecx
+ movl $SYS_gettimeofday, %eax
+ ENTER_KERNEL
+
+ /* Compute relative timeout. */
+ movl 4(%esp), %eax
+ movl $1000, %edx
+ mul %edx /* Milli seconds to nano seconds. */
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ subl (%esp), %ecx
+ subl %eax, %edx
+ jns 4f
+ addl $1000000000, %edx
+ subl $1, %ecx
+4: testl %ecx, %ecx
+ js 5f /* Time is already up. */
+
+ /* Store relative timeout. */
+ movl %ecx, (%esp)
+ movl %edx, 4(%esp)
+
+ movl %ebp, %ebx
+
+ movl $1, %eax
+ movl $2, %edx
+ LOCK
+ cmpxchgl %edx, (%ebx)
+
+ testl %eax, %eax
+ je 8f
+
+ /* Futex call. */
+ movl %esp, %esi
+ xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+ movl %eax, %ecx
+
+8:
+ xorl %eax, %eax
+ movl $2, %edx
+ LOCK
+ cmpxchgl %edx, (%ebx)
+
+ testl %eax, %eax
+ jne 7f
+
+6: addl $8, %esp
+ popl %ebp
+ popl %ebx
+ popl %esi
+ popl %edi
+ ret
+
+ /* Check whether the time expired. */
+7: cmpl $-ETIMEDOUT, %ecx
+ je 5f
+ jmp 1b
+
+3: movl $EINVAL, %eax
+ ret
+
+5: movl $ETIMEDOUT, %eax
+ jmp 6b
+ .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+#endif
#ifdef NOT_IN_libc
@@ -79,10 +180,16 @@ lll_unlock_wake_cb:
movl 20(%esp), %ebx
LOCK
- addl $1, (%ebx)
- jng 1f
+ subl $1, (%ebx)
+ je 1f
- popl %edx
+ movl $FUTEX_WAKE, %ecx
+ movl $1, %edx /* Wake one thread. */
+ movl $SYS_futex, %eax
+ movl $0, (%ebx)
+ ENTER_KERNEL
+
+1: popl %edx
popl %ecx
popl %ebx
ret
@@ -90,27 +197,27 @@ lll_unlock_wake_cb:
#endif
- .globl __lll_unlock_wake
- .type __lll_unlock_wake,@function
- .hidden __lll_unlock_wake
+ .globl __lll_mutex_unlock_wake
+ .type __lll_mutex_unlock_wake,@function
+ .hidden __lll_mutex_unlock_wake
.align 16
-__lll_unlock_wake:
+__lll_mutex_unlock_wake:
pushl %ebx
pushl %ecx
pushl %edx
movl %eax, %ebx
-1: movl $FUTEX_WAKE, %ecx
+ movl $0, (%eax)
+ movl $FUTEX_WAKE, %ecx
movl $1, %edx /* Wake one thread. */
movl $SYS_futex, %eax
- movl %edx, (%ebx) /* Stores '$1'. */
ENTER_KERNEL
popl %edx
popl %ecx
popl %ebx
ret
- .size __lll_unlock_wake,.-__lll_unlock_wake
+ .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
#ifdef NOT_IN_libc
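
The timed variant above mostly repeats the same acquisition loop; the new piece
is turning the absolute deadline into a timeout relative to the current time
before each FUTEX_WAIT.  A rough C rendering of one iteration, assuming the
usual syscall()/errno conventions instead of the raw -ETIMEDOUT value the
assembly checks (the helper name is invented for the example):

#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

/* One wait step of a timed lock: returns 0 to retry the acquisition,
   or EINVAL/ETIMEDOUT to give up.  Illustrative only.  */
static int
timed_wait_step (int *futex, const struct timespec *abstime)
{
  struct timeval tv;
  struct timespec rt;

  if (abstime->tv_nsec >= 1000000000)
    return EINVAL;                      /* Not a valid timespec.  */

  /* Compute the timeout relative to the current time.  */
  gettimeofday (&tv, NULL);
  rt.tv_sec = abstime->tv_sec - tv.tv_sec;
  rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
  if (rt.tv_nsec < 0)
    {
      rt.tv_nsec += 1000000000;
      --rt.tv_sec;
    }
  if (rt.tv_sec < 0)
    return ETIMEDOUT;                   /* Deadline already passed.  */

  /* Sleep only while the futex still holds the contended value 2; the
     caller retries the cmpxchgl acquisition afterwards.  */
  if (syscall (SYS_futex, futex, FUTEX_WAIT, 2, &rt, NULL, 0) != 0
      && errno == ETIMEDOUT)
    return ETIMEDOUT;
  return 0;
}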
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelmutex.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelmutex.S
deleted file mode 100644
index a54e48c9e6..0000000000
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelmutex.S
+++ /dev/null
@@ -1,180 +0,0 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#include <sysdep.h>
-#include <pthread-errnos.h>
-
- .text
-
-#ifndef LOCK
-# ifdef UP
-# define LOCK
-# else
-# define LOCK lock
-# endif
-#endif
-
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-
- .globl __lll_mutex_lock_wait
- .type __lll_mutex_lock_wait,@function
- .hidden __lll_mutex_lock_wait
- .align 16
-__lll_mutex_lock_wait:
- pushl %esi
- pushl %ebx
- pushl %edx
-
- /* In the loop we are going to add 2 instead of 1 which is what
- the caller did. Account for that. */
- subl $1, %eax
-
- movl %ecx, %ebx
- xorl %esi, %esi /* No timeout. */
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
-1:
- leal 2(%eax), %edx /* account for the preceeded xadd. */
- movl $SYS_futex, %eax
- ENTER_KERNEL
-
- movl $2, %eax
- LOCK
- xaddl %eax, (%ebx)
- testl %eax, %eax
- jne,pn 1b
-
- popl %edx
- popl %ebx
- popl %esi
- ret
- .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
-
-#ifdef NOT_IN_libc
- .globl __lll_mutex_timedlock_wait
- .type __lll_mutex_timedlock_wait,@function
- .hidden __lll_mutex_timedlock_wait
- .align 16
-__lll_mutex_timedlock_wait:
- /* Check for a valid timeout value. */
- cmpl $1000000000, 4(%edx)
- jae 3f
-
- pushl %edi
- pushl %esi
- pushl %ebx
- pushl %ebp
-
- /* In the loop we are going to add 2 instead of 1 which is what
- the caller did. Account for that. */
- subl $1, %eax
-
- /* Stack frame for the timespec and timeval structs. */
- subl $8, %esp
-
- movl %ecx, %ebp
- movl %edx, %edi
-
-1: leal 2(%eax), %esi
-
- /* Get current time. */
- movl %esp, %ebx
- xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
- ENTER_KERNEL
-
- /* Compute relative timeout. */
- movl 4(%esp), %eax
- movl $1000, %edx
- mul %edx /* Milli seconds to nano seconds. */
- movl (%edi), %ecx
- movl 4(%edi), %edx
- subl (%esp), %ecx
- subl %eax, %edx
- jns 4f
- addl $1000000000, %edx
- subl $1, %ecx
-4: testl %ecx, %ecx
- js 5f /* Time is already up. */
-
- /* Futex call. */
- movl %ecx, (%esp) /* Store relative timeout. */
- movl %edx, 4(%esp)
- movl %esi, %edx
- movl %esp, %esi
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
- movl %ebp, %ebx
- movl $SYS_futex, %eax
- ENTER_KERNEL
- movl %eax, %ecx
-
- movl $2, %eax
- LOCK
- xaddl %eax, (%ebx)
- testl %eax, %eax
- jne 7f
-
- xorl %eax, %eax
-
-6: addl $8, %esp
- popl %ebp
- popl %ebx
- popl %esi
- popl %edi
- ret
-
- /* Check whether the time expired. */
-7: cmpl $-ETIMEDOUT, %ecx
- je 5f
- jmp 1b
-
-3: movl $EINVAL, %eax
- ret
-
-5: movl $ETIMEDOUT, %eax
- jmp 6b
- .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
-#endif
-
-
- .globl __lll_mutex_unlock_wake
- .type __lll_mutex_unlock_wake,@function
- .hidden __lll_mutex_unlock_wake
- .align 16
-__lll_mutex_unlock_wake:
- pushl %ebx
- pushl %ecx
- pushl %edx
-
- movl %eax, %ebx
- movl $0, (%eax)
- movl $FUTEX_WAKE, %ecx
- movl $1, %edx /* Wake one thread. */
- movl $SYS_futex, %eax
- ENTER_KERNEL
-
- popl %edx
- popl %ecx
- popl %ebx
- ret
- .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
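
The unlock side keeps the helper that used to live in this deleted file:
__lll_mutex_unlock_wake stores 0 into the futex and wakes one waiter, and the
new inline fast path only calls it when the decrement shows the lock was
contended.  Roughly, in C (illustrative only, same caveats as above):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative sketch of the unlock sequence.  */
static void
mutex_unlock (int *futex)
{
  /* Fast path: dropping from 1 to 0 means nobody was waiting.  */
  if (__atomic_fetch_sub (futex, 1, __ATOMIC_RELEASE) != 1)
    {
      /* The lock was held as 2: clear it completely and wake one
         waiter, which is what __lll_mutex_unlock_wake does.  */
      __atomic_store_n (futex, 0, __ATOMIC_RELEASE);
      syscall (SYS_futex, futex, FUTEX_WAKE, 1, NULL, NULL, 0);
    }
}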
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
index c9b3d8b7bc..08c6e915b9 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
@@ -42,9 +42,11 @@ pthread_barrier_wait:
movl 8(%esp), %ebx
/* Get the mutex. */
- orl $-1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
- xaddl %eax, MUTEX(%ebx)
+ cmpxchgl %edx, MUTEX(%ebx)
+ testl %eax, %eax
jne 1f
/* One less waiter. If this was the last one needed wake
@@ -63,8 +65,8 @@ pthread_barrier_wait:
/* Release the mutex. */
LOCK
- addl $1, MUTEX(%ebx)
- jng 6f
+ subl $1, MUTEX(%ebx)
+ jne 6f
/* Wait for the remaining threads. The call will return immediately
if the CURR_EVENT memory has meanwhile been changed. */
@@ -110,8 +112,8 @@ pthread_barrier_wait:
waking the waiting threads since otherwise a new thread might
arrive and gets waken up, too. */
LOCK
- addl $1, MUTEX(%ebx)
- jng 4f
+ subl $1, MUTEX(%ebx)
+ jne 4f
5: orl $-1, %eax /* == PTHREAD_BARRIER_SERIAL_THREAD */
@@ -119,14 +121,14 @@ pthread_barrier_wait:
ret
1: leal MUTEX(%ebx), %ecx
- call __lll_lock_wait
+ call __lll_mutex_lock_wait
jmp 2b
4: leal MUTEX(%ebx), %eax
- call __lll_unlock_wake
+ call __lll_mutex_unlock_wake
jmp 5b
6: leal MUTEX(%ebx), %eax
- call __lll_unlock_wake
+ call __lll_mutex_unlock_wake
jmp 7b
.size pthread_barrier_wait,.-pthread_barrier_wait
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
index 06821ad376..1049e0f4dd 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
@@ -51,12 +51,13 @@ __pthread_cond_broadcast:
movl 16(%esp), %ebx
/* Get internal lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, cond_lock(%ebx)
+ cmpxchgl %edx, cond_lock(%ebx)
#endif
testl %eax, %eax
jne 1f
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
index 95f3aad1d8..d86ee054a9 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
@@ -45,18 +45,18 @@
__pthread_cond_signal:
pushl %ebx
- pushl %esi
pushl %edi
- movl 16(%esp), %edi
+ movl 12(%esp), %edi
/* Get internal lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%edi)
+ cmpxchgl %edx, (%edi)
#else
- xaddl %eax, cond_lock(%edi)
+ cmpxchgl %edx, cond_lock(%edi)
#endif
testl %eax, %eax
jne 1f
@@ -78,21 +78,11 @@ __pthread_cond_signal:
adcl $0, 4(%ebx)
/* Wake up one thread by moving it to the internal lock futex. */
- movl $FUTEX_REQUEUE, %ecx
+ movl $FUTEX_WAKE, %ecx
movl $SYS_futex, %eax
- xorl %edx, %edx
- movl $1, %esi
+ movl $1, %edx
ENTER_KERNEL
-#ifndef __ASSUME_FUTEX_REQUEUE
- cmpl $-EINVAL, %eax
- je 7f
-#endif
-
- /* If we moved a thread we in any case have to make the syscall. */
- testl %eax, %eax
- jne 5f
-
/* Unlock. Note that at this point %edi always points to
cond_lock. */
4: LOCK
@@ -101,7 +91,6 @@ __pthread_cond_signal:
6: xorl %eax, %eax
popl %edi
- popl %esi
popl %ebx
ret
@@ -119,15 +108,6 @@ __pthread_cond_signal:
5: movl %edi, %eax
call __lll_mutex_unlock_wake
jmp 6b
-
-#ifndef __ASSUME_FUTEX_REQUEUE
-7: /* The futex requeue functionality is not available. */
- movl $1, %edx
- movl $FUTEX_WAKE, %ecx
- movl $SYS_futex, %eax
- ENTER_KERNEL
- jmp 4b
-#endif
.size __pthread_cond_signal, .-__pthread_cond_signal
versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
GLIBC_2_3_2)
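
With requeueing gone, pthread_cond_signal just bumps the wakeup sequence
counter under the internal lock and wakes one waiter on the condvar futex.
The call it now makes is plain FUTEX_WAKE with a count of one, roughly as
below (illustrative; the surrounding bookkeeping and the condvar layout are
omitted):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Wake at most one thread blocked in FUTEX_WAIT on *futex_word.  This
   replaces the old FUTEX_REQUEUE path, which moved the waiter onto the
   internal lock instead of waking it outright.  */
static long
wake_one (int *futex_word)
{
  return syscall (SYS_futex, futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
}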
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
index f88d2d33bb..5799d8f862 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
@@ -56,12 +56,13 @@ __pthread_cond_timedwait:
movl 28(%esp), %ebp
/* Get internal lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, cond_lock(%ebx)
+ cmpxchgl %edx, cond_lock(%ebx)
#endif
testl %eax, %eax
jne 1f
@@ -170,12 +171,13 @@ __pthread_cond_timedwait:
.LcleanupEND:
/* Lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, cond_lock(%ebx)
+ cmpxchgl %edx, cond_lock(%ebx)
#endif
testl %eax, %eax
jne 5f
@@ -353,12 +355,13 @@ __condvar_tw_cleanup:
movl %eax, %esi
/* Get internal lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, cond_lock(%ebx)
+ cmpxchgl %edx, cond_lock(%ebx)
#endif
testl %eax, %eax
je 1f
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
index 1680b3d859..db0428f527 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
@@ -53,12 +53,13 @@ __pthread_cond_wait:
movl 16(%esp), %ebx
/* Get internal lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, cond_lock(%ebx)
+ cmpxchgl %edx, cond_lock(%ebx)
#endif
testl %eax, %eax
jne 1f
@@ -114,12 +115,13 @@ __pthread_cond_wait:
.LcleanupEND:
/* Lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, cond_lock(%ebx)
+ cmpxchgl %edx, cond_lock(%ebx)
#endif
testl %eax, %eax
jne 5f
@@ -246,12 +248,13 @@ __condvar_w_cleanup:
movl %eax, %esi
/* Get internal lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if cond_lock == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, cond_lock(%ebx)
+ cmpxchgl %edx, cond_lock(%ebx)
#endif
testl %eax, %eax
je 1f
@@ -270,7 +273,11 @@ __condvar_w_cleanup:
adcl $0, woken_seq+4(%ebx)
LOCK
+#if cond_lock == 0
+ subl $1, (%ebx)
+#else
subl $1, cond_lock(%ebx)
+#endif
je 2f
#if cond_lock == 0
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
index 81d9766056..1c149433f0 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
@@ -46,12 +46,13 @@ __pthread_rwlock_rdlock:
movl 12(%esp), %ebx
/* Get the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, MUTEX(%ebx)
+ cmpxchgl %edx, MUTEX(%ebx)
#endif
testl %eax, %eax
jne 1f
@@ -85,12 +86,13 @@ __pthread_rwlock_rdlock:
subl $READERS_WAKEUP, %ebx
/* Reget the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, MUTEX(%ebx)
+ cmpxchgl %edx, MUTEX(%ebx)
#endif
testl %eax, %eax
jne 12f
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
index 7d9aa93ebb..409c046b7b 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
@@ -50,12 +50,13 @@ pthread_rwlock_timedrdlock:
movl 32(%esp), %edi
/* Get the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebp)
+ cmpxchgl %edx, (%ebp)
#else
- xaddl %eax, MUTEX(%ebp)
+ cmpxchgl %edx, MUTEX(%ebp)
#endif
testl %eax, %eax
jne 1f
@@ -114,22 +115,23 @@ pthread_rwlock_timedrdlock:
leal READERS_WAKEUP(%ebp), %ebx
movl $SYS_futex, %eax
ENTER_KERNEL
- movl %eax, %edx
+ movl %eax, %ecx
17:
/* Reget the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebp)
+ cmpxchgl %edx, (%ebp)
#else
- xaddl %eax, MUTEX(%ebp)
+ cmpxchgl %edx, MUTEX(%ebp)
#endif
testl %eax, %eax
jne 12f
13: subl $1, READERS_QUEUED(%ebp)
- cmpl $-ETIMEDOUT, %edx
+ cmpl $-ETIMEDOUT, %ecx
jne 2b
18: movl $ETIMEDOUT, %ecx
@@ -207,7 +209,7 @@ pthread_rwlock_timedrdlock:
call __lll_mutex_lock_wait
jmp 13b
-16: movl $-ETIMEDOUT, %edx
+16: movl $-ETIMEDOUT, %ecx
jmp 17b
19: movl $EINVAL, %ecx
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
index 4a144453fa..00d9906e59 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
@@ -50,12 +50,13 @@ pthread_rwlock_timedwrlock:
movl 32(%esp), %edi
/* Get the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebp)
+ cmpxchgl %edx, (%ebp)
#else
- xaddl %eax, MUTEX(%ebp)
+ cmpxchgl %edx, MUTEX(%ebp)
#endif
testl %eax, %eax
jne 1f
@@ -112,22 +113,23 @@ pthread_rwlock_timedwrlock:
leal WRITERS_WAKEUP(%ebp), %ebx
movl $SYS_futex, %eax
ENTER_KERNEL
- movl %eax, %edx
+ movl %eax, %ecx
17:
/* Reget the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebp)
+ cmpxchgl %edx, (%ebp)
#else
- xaddl %eax, MUTEX(%ebp)
+ cmpxchgl %edx, MUTEX(%ebp)
#endif
testl %eax, %eax
jne 12f
13: subl $1, WRITERS_QUEUED(%ebp)
- cmpl $-ETIMEDOUT, %edx
+ cmpl $-ETIMEDOUT, %ecx
jne 2b
18: movl $ETIMEDOUT, %ecx
@@ -200,7 +202,7 @@ pthread_rwlock_timedwrlock:
call __lll_mutex_lock_wait
jmp 13b
-16: movl $-ETIMEDOUT, %edx
+16: movl $-ETIMEDOUT, %ecx
jmp 17b
19: movl $EINVAL, %ecx
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
index 35f61bf0f9..839a5eb336 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
@@ -44,12 +44,13 @@ __pthread_rwlock_unlock:
movl 12(%esp), %edi
/* Get the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%edi)
+ cmpxchgl %edx, (%edi)
#else
- xaddl %eax, MUTEX(%edi)
+ cmpxchgl %edx, MUTEX(%edi)
#endif
testl %eax, %eax
jne 1f
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
index 28c24bad65..89ad6a3774 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
@@ -46,12 +46,13 @@ __pthread_rwlock_wrlock:
movl 12(%esp), %ebx
/* Get the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, MUTEX(%ebx)
+ cmpxchgl %edx, MUTEX(%ebx)
#endif
testl %eax, %eax
jne 1f
@@ -83,12 +84,13 @@ __pthread_rwlock_wrlock:
subl $WRITERS_WAKEUP, %ebx
/* Reget the lock. */
- movl $1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
#if MUTEX == 0
- xaddl %eax, (%ebx)
+ cmpxchgl %edx, (%ebx)
#else
- xaddl %eax, MUTEX(%ebx)
+ cmpxchgl %edx, MUTEX(%ebx)
#endif
testl %eax, %eax
jne 12f
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
index ebda2f8420..ba32591623 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
@@ -38,7 +38,8 @@
/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
+#define LLL_MUTEX_LOCK_INITIALIZER (0)
+#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
#ifdef PIC
@@ -97,7 +98,7 @@ extern int __lll_mutex_timedlock_wait (int val, int *__futex,
const struct timespec *abstime)
__attribute ((regparm (3))) attribute_hidden;
/* Preserves all registers but %eax. */
-extern int __lll_mutex_unlock_wait (int *__futex)
+extern int __lll_mutex_unlock_wake (int *__futex)
__attribute ((regparm (1))) attribute_hidden;
@@ -105,14 +106,15 @@ extern int __lll_mutex_unlock_wait (int *__futex)
({ unsigned char ret; \
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1; setne %0" \
: "=a" (ret), "=m" (futex) \
- : "r" (1), "m" (futex), "0" (0) \
+ : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
+ "0" (LLL_MUTEX_LOCK_INITIALIZER) \
: "memory"); \
ret; })
#define lll_mutex_lock(futex) \
(void) ({ int ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "xaddl %0, %2\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
"testl %0, %0\n\t" \
"jne _L_mutex_lock_%=\n\t" \
".subsection 1\n\t" \
@@ -124,8 +126,8 @@ extern int __lll_mutex_unlock_wait (int *__futex)
".size _L_mutex_lock_%=,.-_L_mutex_lock_%=\n" \
".previous\n" \
"1:" \
- : "=a" (ignore1), "=&c" (ignore2), "=m" (futex) \
- : "0" (1), "m" (futex) \
+ : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
+ : "0" (0), "1" (1), "m" (futex) \
: "memory"); })
@@ -133,7 +135,7 @@ extern int __lll_mutex_unlock_wait (int *__futex)
always wakeup waiters. */
#define lll_mutex_cond_lock(futex) \
(void) ({ int ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "xaddl %0, %2\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
"testl %0, %0\n\t" \
"jne _L_mutex_cond_lock_%=\n\t" \
".subsection 1\n\t" \
@@ -145,29 +147,29 @@ extern int __lll_mutex_unlock_wait (int *__futex)
".size _L_mutex_cond_lock_%=,.-_L_mutex_cond_lock_%=\n" \
".previous\n" \
"1:" \
- : "=a" (ignore1), "=&c" (ignore2), "=m" (futex) \
- : "0" (2), "m" (futex) \
+ : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
+ : "0" (0), "1" (2), "m" (futex) \
: "memory"); })
#define lll_mutex_timedlock(futex, timeout) \
({ int result, ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "xaddl %0, %3\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
"testl %0, %0\n\t" \
"jne _L_mutex_timedlock_%=\n\t" \
".subsection 1\n\t" \
".type _L_mutex_timedlock_%=,@function\n" \
"_L_mutex_timedlock_%=:\n\t" \
"leal %3, %%ecx\n\t" \
- "movl %6, %%edx\n\t" \
+ "movl %7, %%edx\n\t" \
"call __lll_mutex_timedlock_wait\n\t" \
"jmp 1f\n\t" \
".size _L_mutex_timedlock_%=,.-_L_mutex_timedlock_%=\n"\
".previous\n" \
"1:" \
- : "=a" (result), "=&c" (ignore1), "=&d" (ignore2), \
+ : "=a" (result), "=c" (ignore1), "=&d" (ignore2), \
"=m" (futex) \
- : "0" (1), "m" (futex), "m" (timeout) \
+ : "0" (0), "1" (1), "m" (futex), "m" (timeout) \
: "memory"); \
result; })
@@ -201,8 +203,8 @@ extern int __lll_mutex_unlock_wait (int *__futex)
typedef int lll_lock_t;
/* Initializers for lock. */
-#define LLL_LOCK_INITIALIZER (1)
-#define LLL_LOCK_INITIALIZER_LOCKED (0)
+#define LLL_LOCK_INITIALIZER (0)
+#define LLL_LOCK_INITIALIZER_LOCKED (1)
extern int __lll_lock_wait (int val, int *__futex)
@@ -213,55 +215,15 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
/* The states of a lock are:
- 1 - untaken
- 0 - taken by one user
- <0 - taken by more users */
+ 0 - untaken
+ 1 - taken by one user
+ 2 - taken by more users */
#if defined NOT_IN_libc || defined UP
-# define lll_trylock(futex) \
- ({ unsigned char ret; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1; setne %0" \
- : "=a" (ret), "=m" (futex) \
- : "r" (0), "m" (futex), "0" (1) \
- : "memory"); \
- ret; })
-
-
-# define lll_lock(futex) \
- (void) ({ int ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "xaddl %0, %2\n\t" \
- "jne _L_lock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_lock_%=,@function\n" \
- "_L_lock_%=:\n\t" \
- "leal %2, %%ecx\n\t" \
- "call __lll_lock_wait\n\t" \
- "jmp 1f\n\t" \
- ".size _L_lock_%=,.-_L_lock_%=\n" \
- ".previous\n" \
- "1:" \
- : "=a" (ignore1), "=&c" (ignore2), "=m" (futex) \
- : "0" (-1), "m" (futex) \
- : "memory"); })
-
-
-# define lll_unlock(futex) \
- (void) ({ int ignore; \
- __asm __volatile (LOCK_INSTR "addl $1,%0\n\t" \
- "jng _L_unlock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_unlock_%=,@function\n" \
- "_L_unlock_%=:\n\t" \
- "leal %0, %%eax\n\t" \
- "call __lll_unlock_wake\n\t" \
- "jmp 1f\n\t" \
- ".size _L_unlock_%=,.-_L_unlock_%=\n" \
- ".previous\n" \
- "1:" \
- : "=m" (futex), "=&a" (ignore) \
- : "m" (futex) \
- : "memory"); })
+# define lll_trylock(futex) lll_mutex_trylock (futex)
+# define lll_lock(futex) lll_mutex_lock (futex)
+# define lll_unlock(futex) lll_mutex_unlock (futex)
#else
/* Special versions of the macros for use in libc itself. They avoid
the lock prefix when the thread library is not used.
@@ -276,7 +238,8 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
"lock\n" \
"0:\tcmpxchgl %2, %1; setne %0" \
: "=a" (ret), "=m" (futex) \
- : "r" (0), "m" (futex), "0" (1), \
+ : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
+ "0" (LLL_MUTEX_LOCK_INITIALIZER), \
"i" (offsetof (tcbhead_t, multiple_threads)) \
: "memory"); \
ret; })
@@ -284,22 +247,23 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
# define lll_lock(futex) \
(void) ({ int ignore1, ignore2; \
- __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
+ __asm __volatile ("cmpl $0, %%gs:%P6\n\t" \
"je,pt 0f\n\t" \
"lock\n" \
- "0:\txaddl %0, %2\n\t" \
- "jne _L_lock_%=\n\t" \
+ "0:\tcmpxchgl %1, %2\n\t" \
+ "testl %0, %0\n\t" \
+ "jne _L_mutex_lock_%=\n\t" \
".subsection 1\n\t" \
- ".type _L_lock_%=,@function\n" \
- "_L_lock_%=:\n\t" \
+ ".type _L_mutex_lock_%=,@function\n" \
+ "_L_mutex_lock_%=:\n\t" \
"leal %2, %%ecx\n\t" \
- "call __lll_lock_wait\n\t" \
- "jmp 2f\n\t" \
- ".size _L_lock_%=,.-_L_lock_%=\n" \
+ "call __lll_mutex_lock_wait\n\t" \
+ "jmp 1f\n\t" \
+ ".size _L_mutex_lock_%=,.-_L_mutex_lock_%=\n" \
".previous\n" \
- "2:" \
- : "=a" (ignore1), "=&c" (ignore2), "=m" (futex) \
- : "0" (-1), "m" (futex), \
+ "1:" \
+ : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
+ : "0" (0), "1" (1), "m" (futex), \
"i" (offsetof (tcbhead_t, multiple_threads)) \
: "memory"); })
@@ -309,17 +273,17 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
__asm __volatile ("cmpl $0, %%gs:%P3\n\t" \
"je,pt 0f\n\t" \
"lock\n" \
- "0:\taddl $1,%0\n\t" \
- "jng _L_unlock_%=\n\t" \
+ "0:\tsubl $1,%0\n\t" \
+ "jne _L_mutex_unlock_%=\n\t" \
".subsection 1\n\t" \
- ".type _L_unlock_%=,@function\n" \
- "_L_unlock_%=:\n\t" \
+ ".type _L_mutex_unlock_%=,@function\n" \
+ "_L_mutex_unlock_%=:\n\t" \
"leal %0, %%eax\n\t" \
- "call __lll_unlock_wake\n\t" \
- "jmp 2f\n\t" \
- ".size _L_unlock_%=,.-_L_unlock_%=\n" \
+ "call __lll_mutex_unlock_wake\n\t" \
+ "jmp 1f\n\t" \
+ ".size _L_mutex_unlock_%=,.-_L_mutex_unlock_%=\n" \
".previous\n" \
- "2:" \
+ "1:" \
: "=m" (futex), "=&a" (ignore) \
: "m" (futex), \
"i" (offsetof (tcbhead_t, multiple_threads)) \
@@ -328,7 +292,7 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
#define lll_islocked(futex) \
- (futex != 0)
+ (futex != LLL_LOCK_INITIALIZER)
/* The kernel notifies a process with uses CLONE_CLEARTID via futex
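
Under the new encoding (0 untaken, 1 taken, 2 taken with waiters) the generic
lll_trylock/lll_lock/lll_unlock macros collapse onto the mutex variants, and
lll_islocked becomes a comparison against the untaken state.  In C terms
(illustrative, using a builtin in place of the inline assembly and without the
single-thread LOCK-prefix optimization):

#include <stdbool.h>

#define LLL_LOCK_INITIALIZER          0   /* untaken */
#define LLL_LOCK_INITIALIZER_LOCKED   1   /* taken by one user */

/* Nonzero return means the lock was already taken.  */
static int
lll_trylock_sketch (int *futex)
{
  int expected = LLL_LOCK_INITIALIZER;
  return !__atomic_compare_exchange_n (futex, &expected,
                                       LLL_LOCK_INITIALIZER_LOCKED, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static bool
lll_islocked_sketch (int futex)
{
  return futex != LLL_LOCK_INITIALIZER;
}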