Diffstat (limited to 'nptl')
-rw-r--r--   nptl/ChangeLog                                             29
-rw-r--r--   nptl/allocatestack.c                                       97
-rw-r--r--   nptl/init.c                                                34
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S      98
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h            5
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h            5
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h         5
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h            5
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S         58
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h          7
10 files changed, 305 insertions, 38 deletions
diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index bc37402583..e3c752ee63 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,32 @@
+2009-01-04 Ulrich Drepper <drepper@redhat.com>
+
+ * init.c (__pthread_initialize_minimal_internal): Optimize test
+ FUTEX_CLOCK_REALTIME a bit.
+
+2009-01-03 Ulrich Drepper <drepper@redhat.com>
+
+ * init.c (__pthread_initialize_minimal_internal): Cheat a bit by
+ only passing five parameters to FUTEX_WAIT_BITSET call.
+
+ * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
+ (__lll_timedlock_wait): Use FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME
+ instead of computing relative timeout.
+
+2009-01-02 Ulrich Drepper <drepper@redhat.com>
+
+ * init.c (__pthread_initialize_minimal_internal): Check for
+ FUTEX_CLOCK_REALTIME flag.
+ * sysdeps/unix/sysv/linux/x86_64/lowlevellock.S (__lll_timedlock_wait):
+ Use FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME instead of computing
+ relative timeout.
+
+ * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Define
+ FUTEX_CLOCK_REALTIME and FUTEX_BITSET_MATCH_ANY.
+ * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Likewise.
+ * sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
+ * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
+ * sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
+
2008-12-09 Ulrich Drepper <drepper@redhat.com>
* sysdeps/pthread/pthread.h (pthread_cleanup_pop): Use { } as empty
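
The ChangeLog entries above describe the core of the patch: __lll_timedlock_wait used to turn the caller's absolute deadline into a relative timeout (a gettimeofday call plus a subtraction before every wait) and then issued FUTEX_WAIT, whereas it now hands the deadline straight to the kernel with FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME. The following is only a minimal user-space sketch of the two approaches, assuming a plain Linux futex syscall and leaving out the private-futex flag and most error handling; it is not the library's internal code.

    /* Sketch: old relative-timeout wait vs. new absolute-deadline wait.
       The FUTEX_* constants mirror the values added in lowlevellock.h below.  */
    #include <errno.h>
    #include <time.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define FUTEX_WAIT               0
    #define FUTEX_WAIT_BITSET        9
    #define FUTEX_CLOCK_REALTIME   256
    #define FUTEX_BITSET_MATCH_ANY 0xffffffff

    /* Old style: compute a relative timeout from the absolute deadline.  */
    static int
    wait_relative (int *futex, int val, const struct timespec *abstime)
    {
      struct timespec now, rel;
      clock_gettime (CLOCK_REALTIME, &now);
      rel.tv_sec = abstime->tv_sec - now.tv_sec;
      rel.tv_nsec = abstime->tv_nsec - now.tv_nsec;
      if (rel.tv_nsec < 0)
        {
          rel.tv_nsec += 1000000000;
          --rel.tv_sec;
        }
      if (rel.tv_sec < 0)
        return ETIMEDOUT;
      return syscall (SYS_futex, futex, FUTEX_WAIT, val, &rel) == 0 ? 0 : errno;
    }

    /* New style: the kernel interprets the timeout as an absolute
       CLOCK_REALTIME deadline, so no conversion is needed.  */
    static int
    wait_absolute (int *futex, int val, const struct timespec *abstime)
    {
      return syscall (SYS_futex, futex,
                      FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, val,
                      abstime, NULL, FUTEX_BITSET_MATCH_ANY) == 0 ? 0 : errno;
    }

Besides saving a clock read per wait, the absolute form cannot be skewed by the clock advancing between the conversion and the actual FUTEX_WAIT.
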
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 9ab4d6281c..ce05770e56 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -112,6 +112,11 @@ static LIST_HEAD (stack_cache);
/* List of the stacks in use. */
static LIST_HEAD (stack_used);
+/* We need to record what list operations we are going to do so that,
+ in case of an asynchronous interruption due to a fork() call, we
+ can correct for the work. */
+static uintptr_t in_flight_stack;
+
/* List of the threads with user provided stacks in use. No need to
initialize this, since it's done in __pthread_initialize_minimal. */
list_t __stack_user __attribute__ ((nocommon));
@@ -127,6 +132,36 @@ static unsigned int nptl_ncreated;
#define FREE_P(descr) ((descr)->tid <= 0)
+static void
+stack_list_del (list_t *elem)
+{
+ in_flight_stack = (uintptr_t) elem;
+
+ atomic_write_barrier ();
+
+ list_del (elem);
+
+ atomic_write_barrier ();
+
+ in_flight_stack = 0;
+}
+
+
+static void
+stack_list_add (list_t *elem, list_t *list)
+{
+ in_flight_stack = (uintptr_t) elem | 1;
+
+ atomic_write_barrier ();
+
+ list_add (elem, list);
+
+ atomic_write_barrier ();
+
+ in_flight_stack = 0;
+}
+
+
/* We create a double linked list of all cache entries. Double linked
because this allows removing entries from the end. */
@@ -179,10 +214,10 @@ get_cached_stack (size_t *sizep, void **memp)
}
/* Dequeue the entry. */
- list_del (&result->list);
+ stack_list_del (&result->list);
/* And add to the list of stacks in use. */
- list_add (&result->list, &stack_used);
+ stack_list_add (&result->list, &stack_used);
/* And decrease the cache size. */
stack_cache_actsize -= result->stackblock_size;
@@ -230,7 +265,7 @@ free_stacks (size_t limit)
if (FREE_P (curr))
{
/* Unlink the block. */
- list_del (entry);
+ stack_list_del (entry);
/* Account for the freed memory. */
stack_cache_actsize -= curr->stackblock_size;
@@ -260,7 +295,7 @@ queue_stack (struct pthread *stack)
/* We unconditionally add the stack to the list. The memory may
still be in use but it will not be reused until the kernel marks
the stack as not used anymore. */
- list_add (&stack->list, &stack_cache);
+ stack_list_add (&stack->list, &stack_cache);
stack_cache_actsize += stack->stackblock_size;
if (__builtin_expect (stack_cache_actsize > stack_cache_maxsize, 0))
@@ -547,7 +582,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
lll_lock (stack_cache_lock, LLL_PRIVATE);
/* And add to the list of stacks in use. */
- list_add (&pd->list, &stack_used);
+ stack_list_add (&pd->list, &stack_used);
lll_unlock (stack_cache_lock, LLL_PRIVATE);
@@ -601,7 +636,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
lll_lock (stack_cache_lock, LLL_PRIVATE);
/* Remove the thread from the list. */
- list_del (&pd->list);
+ stack_list_del (&pd->list);
lll_unlock (stack_cache_lock, LLL_PRIVATE);
@@ -703,7 +738,7 @@ __deallocate_stack (struct pthread *pd)
/* Remove the thread from the list of threads with user defined
stacks. */
- list_del (&pd->list);
+ stack_list_del (&pd->list);
/* Not much to do. Just free the mmap()ed memory. Note that we do
not reset the 'used' flag in the 'tid' field. This is done by
@@ -776,7 +811,47 @@ __reclaim_stacks (void)
{
struct pthread *self = (struct pthread *) THREAD_SELF;
- /* No locking necessary. The caller is the only stack in use. */
+ /* No locking necessary. The caller is the only stack in use. But
+ we have to be aware that we might have interrupted a list
+ operation. */
+
+ if (in_flight_stack != 0)
+ {
+ bool add_p = in_flight_stack & 1;
+ list_t *elem = (list_t *) (in_flight_stack & ~UINTMAX_C (1));
+
+ if (add_p)
+ {
+ /* We always add at the beginning of the list. So in this
+ case we only need to check the beginning of these lists. */
+ int check_list (list_t *l)
+ {
+ if (l->next->prev != l)
+ {
+ assert (l->next->prev == elem);
+
+ elem->next = l->next;
+ elem->prev = l;
+ l->next = elem;
+
+ return 1;
+ }
+
+ return 0;
+ }
+
+ if (check_list (&stack_used) == 0)
+ (void) check_list (&stack_cache);
+ }
+ else
+ {
+ /* We can simply always replay the delete operation. */
+ elem->next->prev = elem->prev;
+ elem->prev->next = elem->next;
+ }
+
+ in_flight_stack = 0;
+ }
/* Mark all stacks except the still running one as free. */
list_t *runp;
@@ -829,7 +904,7 @@ __reclaim_stacks (void)
/* Remove the entry for the current thread to from the cache list
and add it to the list of running threads. Which of the two
lists is decided by the user_stack flag. */
- list_del (&self->list);
+ stack_list_del (&self->list);
/* Re-initialize the lists for all the threads. */
INIT_LIST_HEAD (&stack_used);
@@ -838,7 +913,7 @@ __reclaim_stacks (void)
if (__builtin_expect (THREAD_GETMEM (self, user_stack), 0))
list_add (&self->list, &__stack_user);
else
- list_add (&self->list, &stack_used);
+ stack_list_add (&self->list, &stack_used);
/* There is one thread running. */
__nptl_nthreads = 1;
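
The in_flight_stack bookkeeping added above exists because another thread may call fork() while a stack-list operation is in progress; in the child only the forking thread survives, so the half-finished operation is frozen in the copied memory and __reclaim_stacks has to repair the lists. The element pointer is recorded with its low bit as a tag (1 for an add, 0 for a delete), bracketed by write barriers so the record is visible before the list is touched. Below is a compressed sketch of just the tag encoding and the recovery decision, with illustrative names; the authoritative logic is the code in the diff above.

    /* Sketch of the in_flight_stack tag encoding: the low bit of the
       recorded pointer distinguishes an interrupted add from an
       interrupted delete.  Names here are illustrative only.  */
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct list_head { struct list_head *next, *prev; } list_t;

    static uintptr_t in_flight;                 /* 0: nothing pending */

    static void
    record (list_t *elem, bool add_p)           /* called before the list op */
    {
      in_flight = (uintptr_t) elem | (add_p ? 1 : 0);
    }

    static void
    repair_after_fork (void)                    /* cf. __reclaim_stacks */
    {
      if (in_flight == 0)
        return;
      list_t *elem = (list_t *) (in_flight & ~(uintptr_t) 1);
      if ((in_flight & 1) == 0)
        {
          /* A delete can always be replayed: unlink the element again.  */
          elem->next->prev = elem->prev;
          elem->prev->next = elem->next;
        }
      /* For an add, the list heads are inspected and the element is
         relinked only if the add was left half finished (see check_list).  */
      in_flight = 0;
    }
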
diff --git a/nptl/init.c b/nptl/init.c
index d445de0795..7a6dec5935 100644
--- a/nptl/init.c
+++ b/nptl/init.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,6 +18,7 @@
02111-1307 USA. */
#include <assert.h>
+#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
@@ -49,6 +50,15 @@ int __set_robust_list_avail;
# define set_robust_list_not_avail() do { } while (0)
#endif
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+/* Nonzero if we do not have FUTEX_CLOCK_REALTIME. */
+int __have_futex_clock_realtime;
+# define __set_futex_clock_realtime() \
+ __have_futex_clock_realtime = 1
+#else
+#define __set_futex_clock_realtime() do { } while (0)
+#endif
+
/* Version of the library, used in libthread_db to detect mismatches. */
static const char nptl_version[] __attribute_used__ = VERSION;
@@ -290,6 +300,28 @@ __pthread_initialize_minimal_internal (void)
if (!INTERNAL_SYSCALL_ERROR_P (word, err))
THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
}
+
+ /* Private futexes have been introduced earlier than the
+ FUTEX_CLOCK_REALTIME flag. We don't have to run the test if we
+ know the former are not supported. This also means we know the
+ kernel will return ENOSYS for unknown operations. */
+ if (THREAD_GETMEM (pd, header.private_futex) != 0)
+#endif
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ {
+ int word = 0;
+ /* NB: the syscall actually takes six parameters. The last is the
+ bit mask. But since we will not actually wait at all the value
+ is irrelevant. Given that passing six parameters is difficult
+ on some architectures we just pass whatever random value the
+ calling convention calls for to the kernel. It causes no harm. */
+ word = INTERNAL_SYSCALL (futex, err, 5, &word,
+ FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
+ | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
+ assert (INTERNAL_SYSCALL_ERROR_P (word, err));
+ if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
+ __set_futex_clock_realtime ();
+ }
#endif
/* Set initial thread's stack block from 0 up to __libc_stack_end.
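
The probe added to __pthread_initialize_minimal_internal works because the futex word holds 0 while the expected value passed to the kernel is 1, so a kernel that supports the operation fails immediately with EWOULDBLOCK instead of sleeping, while a kernel that does not know FUTEX_WAIT_BITSET (and therefore FUTEX_CLOCK_REALTIME) fails with ENOSYS. The following standalone sketch performs the same check but passes all six syscall arguments explicitly rather than using the five-argument shortcut the ChangeLog mentions; it is illustrative code, not the library's.

    #include <errno.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define FUTEX_WAIT_BITSET        9
    #define FUTEX_PRIVATE_FLAG     128
    #define FUTEX_CLOCK_REALTIME   256

    /* Returns nonzero if the kernel accepts FUTEX_WAIT_BITSET together
       with FUTEX_CLOCK_REALTIME.  The call can never block: the futex
       word holds 0 but we claim to expect 1, so it fails right away.  */
    static int
    have_futex_clock_realtime (void)
    {
      int word = 0;
      long rc = syscall (SYS_futex, &word,
                         FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
                         | FUTEX_PRIVATE_FLAG,
                         1, NULL, NULL, 0);
      return rc == -1 && errno != ENOSYS;
    }
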
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
index 9c8a68f5b2..056b72900a 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004, 2006, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -31,6 +31,8 @@
movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
# define LOAD_FUTEX_WAIT(reg) \
xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
# define LOAD_FUTEX_WAKE(reg) \
xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
#else
@@ -55,6 +57,10 @@
andl %gs:PRIVATE_FUTEX, reg ; \
orl $FUTEX_WAIT, reg
# endif
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT | FUTEX_CLOCK_REALTIME, reg
# define LOAD_FUTEX_WAKE(reg) \
xorl $FUTEX_PRIVATE_FLAG, reg ; \
andl %gs:PRIVATE_FUTEX, reg ; \
@@ -154,28 +160,81 @@ __lll_lock_wait:
cfi_endproc
.size __lll_lock_wait,.-__lll_lock_wait
+ /* %ecx: futex
+ %esi: flags
+ %edx: timeout
+ %eax: futex value
+ */
.globl __lll_timedlock_wait
.type __lll_timedlock_wait,@function
.hidden __lll_timedlock_wait
.align 16
__lll_timedlock_wait:
cfi_startproc
+ pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebp, 0)
+ pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebx, 0)
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef PIC
+ LOAD_PIC_REG (bx)
+ cmpl $0, __have_futex_clock_realtime@GOTOFF(%ebx)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+# endif
+
+ movl %ecx, %ebx
+ movl %esi, %ecx
+ movl %edx, %esi
+ movl $0xffffffff, %ebp
+ LOAD_FUTEX_WAIT_ABS (%ecx)
+
+ movl $2, %edx
+ cmpl %edx, %eax
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ movl $2, %edx
+ ENTER_KERNEL
+
+2: xchgl %edx, (%ebx) /* NB: lock is implied */
+
+ testl %edx, %edx
+ jz 3f
+
+ cmpl $-ETIMEDOUT, %eax
+ je 4f
+ cmpl $-EINVAL, %eax
+ jne 1b
+4: movl %eax, %edx
+ negl %edx
+
+3: movl %edx, %eax
+7: popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
+ ret
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
/* Check for a valid timeout value. */
cmpl $1000000000, 4(%edx)
jae 3f
- pushl %edi
- cfi_adjust_cfa_offset(4)
pushl %esi
cfi_adjust_cfa_offset(4)
- pushl %ebx
- cfi_adjust_cfa_offset(4)
- pushl %ebp
+ cfi_rel_offset(%esi, 0)
+ pushl %edi
cfi_adjust_cfa_offset(4)
- cfi_offset(%edi, -8)
- cfi_offset(%esi, -12)
- cfi_offset(%ebx, -16)
- cfi_offset(%ebp, -20)
+ cfi_rel_offset(%edi, 0)
/* Stack frame for the timespec and timeval structs. */
subl $8, %esp
@@ -236,23 +295,24 @@ __lll_timedlock_wait:
6: addl $8, %esp
cfi_adjust_cfa_offset(-8)
- popl %ebp
- cfi_adjust_cfa_offset(-4)
- cfi_restore(%ebp)
- popl %ebx
+ popl %edi
cfi_adjust_cfa_offset(-4)
- cfi_restore(%ebx)
+ cfi_restore(%edi)
popl %esi
cfi_adjust_cfa_offset(-4)
cfi_restore(%esi)
- popl %edi
+7: popl %ebx
cfi_adjust_cfa_offset(-4)
- cfi_restore(%edi)
+ cfi_restore(%ebx)
+ popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
movl %edx, %eax
ret
-3: movl $EINVAL, %eax
- ret
+3: movl $EINVAL, %edx
+ jmp 7b
+# endif
cfi_endproc
.size __lll_timedlock_wait,.-__lll_timedlock_wait
#endif
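
The fast path added above (and its x86_64 counterpart later in this patch) follows the usual NPTL low-level lock protocol: the lock word is 0 when free, 1 when held with no waiters, and 2 when held with waiters. The loop exchanges 2 into the word; a result of 0 means the lock was just acquired, anything else means the thread sleeps on the futex with the absolute deadline and retries, giving up only when the kernel reports ETIMEDOUT or EINVAL. A rough C rendering of that loop follows, assuming a plain Linux futex syscall; the authoritative version is the assembly above.

    #include <errno.h>
    #include <time.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define FUTEX_WAIT_BITSET        9
    #define FUTEX_CLOCK_REALTIME   256
    #define FUTEX_BITSET_MATCH_ANY 0xffffffff

    /* Illustrative only: the 0/1/2 lock-word loop behind the assembly.  */
    static int
    timedlock_wait_sketch (int *futex, const struct timespec *abstime)
    {
      for (;;)
        {
          /* Mark the lock contended and try to take it in one step.  */
          if (__atomic_exchange_n (futex, 2, __ATOMIC_ACQUIRE) == 0)
            return 0;                         /* lock acquired */

          /* Sleep until the word changes or the absolute deadline passes.  */
          if (syscall (SYS_futex, futex,
                       FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, 2,
                       abstime, NULL, FUTEX_BITSET_MATCH_ANY) == -1
              && (errno == ETIMEDOUT || errno == EINVAL))
            return errno;
        }
    }

In the assembly the kernel's result arrives as a negative errno in %eax, which is why the error path negates it (movl %eax, %edx; negl %edx) before returning.
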
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
index 993dd4be26..66e0e628fd 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002-2004, 2006, 2007, 2008 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004, 2006-2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -55,6 +55,9 @@
#define FUTEX_WAIT_BITSET 9
#define FUTEX_WAKE_BITSET 10
#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
diff --git a/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h
index 9c4d16f453..b419c9a5e1 100644
--- a/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004, 2006, 2007, 2008 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006-2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
@@ -40,6 +40,9 @@
#define FUTEX_WAIT_BITSET 9
#define FUTEX_WAKE_BITSET 10
#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
/* Values for 'private' parameter of locking macros. Yes, the
definition seems to be backwards. But it is not. The bit will be
diff --git a/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
index 76a41bc191..66c02cbbdd 100644
--- a/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004, 2006, 2007, 2008 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006-2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -41,6 +41,9 @@
#define FUTEX_WAIT_BITSET 9
#define FUTEX_WAKE_BITSET 10
#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
/* Values for 'private' parameter of locking macros. Yes, the
definition seems to be backwards. But it is not. The bit will be
diff --git a/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h
index 558d87d67c..c27332e1c7 100644
--- a/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004, 2006, 2007, 2008 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006-2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
@@ -39,6 +39,9 @@
#define FUTEX_WAIT_BITSET 9
#define FUTEX_WAKE_BITSET 10
#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
/* Values for 'private' parameter of locking macros. Yes, the
definition seems to be backwards. But it is not. The bit will be
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S b/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
index 4505e2cec6..8de9cf461a 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002-2006, 2007 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2006, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -31,6 +31,8 @@
movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
# define LOAD_FUTEX_WAIT(reg) \
xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
# define LOAD_FUTEX_WAKE(reg) \
xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
#else
@@ -55,6 +57,10 @@
andl %fs:PRIVATE_FUTEX, reg ; \
orl $FUTEX_WAIT, reg
# endif
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
# define LOAD_FUTEX_WAKE(reg) \
xorl $FUTEX_PRIVATE_FLAG, reg ; \
andl %fs:PRIVATE_FUTEX, reg ; \
@@ -143,12 +149,61 @@ __lll_lock_wait:
cfi_endproc
.size __lll_lock_wait,.-__lll_lock_wait
+ /* %rdi: futex
+ %rsi: flags
+ %rdx: timeout
+ %eax: futex value
+ */
.globl __lll_timedlock_wait
.type __lll_timedlock_wait,@function
.hidden __lll_timedlock_wait
.align 16
__lll_timedlock_wait:
cfi_startproc
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef PIC
+ cmpl $0, __have_futex_clock_realtime(%rip)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+# endif
+
+ pushq %r9
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r9, 0)
+ movq %rdx, %r10
+ movl $0xffffffff, %r9d
+ LOAD_FUTEX_WAIT_ABS (%esi)
+
+ movl $2, %edx
+ cmpl %edx, %eax
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ movl $2, %edx
+ syscall
+
+2: xchgl %edx, (%rdi) /* NB: lock is implied */
+
+ testl %edx, %edx
+ jz 3f
+
+ cmpl $-ETIMEDOUT, %eax
+ je 4f
+ cmpl $-EINVAL, %eax
+ jne 1b
+4: movl %eax, %edx
+ negl %edx
+
+3: movl %edx, %eax
+ popq %r9
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r9)
+ retq
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
/* Check for a valid timeout value. */
cmpq $1000000000, 8(%rdx)
jae 3f
@@ -253,6 +308,7 @@ __lll_timedlock_wait:
3: movl $EINVAL, %eax
retq
+# endif
cfi_endproc
.size __lll_timedlock_wait,.-__lll_timedlock_wait
#endif
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
index f76d599c48..0b7e3bbaba 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002-2004, 2006, 2007, 2008 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004, 2006-2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -48,13 +48,16 @@
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_CMP_REQUEUE 4
-#define FUTEX_WAKE_OP 5
+#define FUTEX_WAKE_OP 5
#define FUTEX_LOCK_PI 6
#define FUTEX_UNLOCK_PI 7
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_WAIT_BITSET 9
#define FUTEX_WAKE_BITSET 10
#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)