Diffstat (limited to 'sysdeps/i386/i486/bits/atomic.h'):
 -rw-r--r--  sysdeps/i386/i486/bits/atomic.h | 370
 1 file changed, 100 insertions(+), 270 deletions(-)
diff --git a/sysdeps/i386/i486/bits/atomic.h b/sysdeps/i386/i486/bits/atomic.h
index a27734c37e..c748761758 100644
--- a/sysdeps/i386/i486/bits/atomic.h
+++ b/sysdeps/i386/i486/bits/atomic.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,7 +18,6 @@
02111-1307 USA. */
#include <stdint.h>
-#include <tls.h> /* For tcbhead_t. */
typedef int8_t atomic8_t;
@@ -77,40 +76,6 @@ typedef uintmax_t uatomic_max_t;
: "r" (newval), "m" (*mem), "0" (oldval)); \
ret; })
-
-#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
- ({ __typeof (*mem) ret; \
- __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
- "je 0f\n\t" \
- "lock\n" \
- "0:\tcmpxchgb %b2, %1" \
- : "=a" (ret), "=m" (*mem) \
- : "q" (newval), "m" (*mem), "0" (oldval), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- ret; })
-
-#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
- ({ __typeof (*mem) ret; \
- __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
- "je 0f\n\t" \
- "lock\n" \
- "0:\tcmpxchgw %w2, %1" \
- : "=a" (ret), "=m" (*mem) \
- : "r" (newval), "m" (*mem), "0" (oldval), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- ret; })
-
-#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
- ({ __typeof (*mem) ret; \
- __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
- "je 0f\n\t" \
- "lock\n" \
- "0:\tcmpxchgl %2, %1" \
- : "=a" (ret), "=m" (*mem) \
- : "r" (newval), "m" (*mem), "0" (oldval), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- ret; })
-
/* XXX We do not really need 64-bit compare-and-exchange. At least
not at the moment.  Using it would mean causing portability
problems since not many other 32-bit architectures have support for
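
The removed __arch_c_* ("conditional") variants are glibc's single-thread
fast path: they test the multiple_threads flag in the thread control block
(reached through the %gs segment register) and, when it is zero, branch
over the lock prefix.  The trick works because "lock" is a one-byte
instruction prefix, so "je 0f" lands on the very same cmpxchg either way,
just without the bus lock.  A function-form sketch of what the removed
32-bit macro expands to (illustrative only; it needs the glibc-internal
<tls.h> for tcbhead_t, which is exactly why that #include is dropped in
the hunk above):

  #include <stddef.h>   /* offsetof */
  #include <tls.h>      /* tcbhead_t -- glibc-internal */

  static inline int
  catomic_cas_32 (volatile int *mem, int newval, int oldval)
  {
    int ret;
    __asm __volatile ("cmpl $0, %%gs:%P5\n\t"   /* multiple_threads == 0? */
                      "je 0f\n\t"               /* yes: skip the lock     */
                      "lock\n"
                      "0:\tcmpxchgl %2, %1"     /* CAS, locked or not     */
                      : "=a" (ret), "=m" (*mem)
                      : "r" (newval), "m" (*mem), "0" (oldval),
                        "i" (offsetof (tcbhead_t, multiple_threads)));
    return ret;   /* old value of *mem; equals oldval on success */
  }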
@@ -120,8 +85,6 @@ typedef uintmax_t uatomic_max_t;
#if 1
# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
({ __typeof (*mem) ret = *(mem); abort (); ret = (newval); ret = (oldval); })
-# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- ({ __typeof (*mem) ret = *(mem); abort (); ret = (newval); ret = (oldval); })
#else
# ifdef __PIC__
# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
@@ -137,24 +100,6 @@ typedef uintmax_t uatomic_max_t;
& 0xffffffff), \
"d" (((unsigned long long int) (oldval)) >> 32)); \
ret; })
-
-# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- ({ __typeof (*mem) ret; \
- __asm __volatile ("xchgl %2, %%ebx\n\t" \
- "cmpl $0, %%gs:%P7\n\t" \
- "je 0f\n\t" \
- "lock\n" \
- "0:\tcmpxchg8b %1\n\t" \
- "xchgl %2, %%ebx" \
- : "=A" (ret), "=m" (*mem) \
- : "DS" (((unsigned long long int) (newval)) \
- & 0xffffffff), \
- "c" (((unsigned long long int) (newval)) >> 32), \
- "m" (*mem), "a" (((unsigned long long int) (oldval)) \
- & 0xffffffff), \
- "d" (((unsigned long long int) (oldval)) >> 32), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- ret; })
# else
# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
({ __typeof (*mem) ret; \
@@ -167,22 +112,6 @@ typedef uintmax_t uatomic_max_t;
& 0xffffffff), \
"d" (((unsigned long long int) (oldval)) >> 32)); \
ret; })
-
-# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- ({ __typeof (*mem) ret; \
- __asm __volatile ("cmpl $0, %%gs:%P7\n\t" \
- "je 0f\n\t" \
- "lock\n" \
- "0:\tcmpxchg8b %1" \
- : "=A" (ret), "=m" (*mem) \
- : "b" (((unsigned long long int) (newval)) \
- & 0xffffffff), \
- "c" (((unsigned long long int) (newval)) >> 32), \
- "m" (*mem), "a" (((unsigned long long int) (oldval)) \
- & 0xffffffff), \
- "d" (((unsigned long long int) (oldval)) >> 32), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- ret; })
# endif
#endif
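
Both the kept and the removed 64-bit variants are built on cmpxchg8b,
which compares %edx:%eax against the 8-byte memory operand and, on a
match, stores %ecx:%ebx into it.  Under __PIC__ the macro must not ask
for the low word of newval in %ebx directly -- %ebx holds the GOT
pointer -- so it takes it in %edi or %esi (the "DS" constraint) and swaps
it into %ebx around the instruction with xchgl.  Note that the whole
#else branch is dead code: the "#if 1" above stubs the 64-bit CAS out
with abort () (the comment cites portability; the i486 itself also
predates cmpxchg8b, a Pentium instruction).  A standalone sketch of the
non-PIC form, mirroring the disabled macro:

  static inline unsigned long long
  cas_64 (volatile unsigned long long *mem,
          unsigned long long newval, unsigned long long oldval)
  {
    unsigned long long ret;
    /* If *mem == %edx:%eax, store %ecx:%ebx into *mem; either way the
       old contents of *mem end up in %edx:%eax ("=A").  */
    __asm __volatile ("lock\n\tcmpxchg8b %1"
                      : "=A" (ret), "=m" (*mem)
                      : "b" ((unsigned int) (newval & 0xffffffff)),
                        "c" ((unsigned int) (newval >> 32)),
                        "m" (*mem),
                        "a" ((unsigned int) (oldval & 0xffffffff)),
                        "d" ((unsigned int) (oldval >> 32)));
    return ret;
  }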
@@ -210,24 +139,21 @@ typedef uintmax_t uatomic_max_t;
result; })
-#define __arch_exchange_and_add_body(lock, pfx, mem, value) \
+#define atomic_exchange_and_add(mem, value) \
({ __typeof (*mem) __result; \
__typeof (value) __addval = (value); \
if (sizeof (*mem) == 1) \
- __asm __volatile (lock "xaddb %b0, %1" \
+ __asm __volatile (LOCK_PREFIX "xaddb %b0, %1" \
: "=r" (__result), "=m" (*mem) \
- : "0" (__addval), "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
+ : "0" (__addval), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (lock "xaddw %w0, %1" \
+ __asm __volatile (LOCK_PREFIX "xaddw %w0, %1" \
: "=r" (__result), "=m" (*mem) \
- : "0" (__addval), "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
+ : "0" (__addval), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (lock "xaddl %0, %1" \
+ __asm __volatile (LOCK_PREFIX "xaddl %0, %1" \
: "=r" (__result), "=m" (*mem) \
- : "0" (__addval), "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
+ : "0" (__addval), "m" (*mem)); \
else \
{ \
__typeof (mem) __memp = (mem); \
@@ -235,64 +161,41 @@ typedef uintmax_t uatomic_max_t;
__result = *__memp; \
do \
__tmpval = __result; \
- while ((__result = pfx##_compare_and_exchange_val_64_acq \
+ while ((__result = __arch_compare_and_exchange_val_64_acq \
(__memp, __result + __addval, __result)) == __tmpval); \
} \
__result; })
-#define atomic_exchange_and_add(mem, value) \
- __arch_exchange_and_add_body (LOCK_PREFIX, __arch, mem, value)
-
-#define __arch_exchange_and_add_cprefix \
- "cmpl $0, %%gs:%P4\n\tje 0f\n\tlock\n0:\t"
-
-#define catomic_exchange_and_add(mem, value) \
- __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, __arch_c, \
- mem, value)
-
-
-#define __arch_add_body(lock, pfx, mem, value) \
- do { \
- if (__builtin_constant_p (value) && (value) == 1) \
- atomic_increment (mem); \
- else if (__builtin_constant_p (value) && (value) == -1) \
- atomic_decrement (mem); \
- else if (sizeof (*mem) == 1) \
- __asm __volatile (lock "addb %b1, %0" \
- : "=m" (*mem) \
- : "ir" (value), "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- else if (sizeof (*mem) == 2) \
- __asm __volatile (lock "addw %w1, %0" \
- : "=m" (*mem) \
- : "ir" (value), "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- else if (sizeof (*mem) == 4) \
- __asm __volatile (lock "addl %1, %0" \
- : "=m" (*mem) \
- : "ir" (value), "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- else \
- { \
- __typeof (value) __addval = (value); \
- __typeof (mem) __memp = (mem); \
- __typeof (*mem) __oldval = *__memp; \
- __typeof (*mem) __tmpval; \
- do \
- __tmpval = __oldval; \
- while ((__oldval = pfx##_compare_and_exchange_val_64_acq \
- (__memp, __oldval + __addval, __oldval)) == __tmpval); \
- } \
- } while (0)
#define atomic_add(mem, value) \
- __arch_add_body (LOCK_PREFIX, __arch, mem, value)
-
-#define __arch_add_cprefix \
- "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t"
-
-#define catomic_add(mem, value) \
- __arch_add_body (__arch_add_cprefix, __arch_c, mem, value)
+ (void) ({ if (__builtin_constant_p (value) && (value) == 1) \
+ atomic_increment (mem); \
+ else if (__builtin_constant_p (value) && (value) == -1) \
+ atomic_decrement (mem); \
+ else if (sizeof (*mem) == 1) \
+ __asm __volatile (LOCK_PREFIX "addb %b1, %0" \
+ : "=m" (*mem) \
+ : "ir" (value), "m" (*mem)); \
+ else if (sizeof (*mem) == 2) \
+ __asm __volatile (LOCK_PREFIX "addw %w1, %0" \
+ : "=m" (*mem) \
+ : "ir" (value), "m" (*mem)); \
+ else if (sizeof (*mem) == 4) \
+ __asm __volatile (LOCK_PREFIX "addl %1, %0" \
+ : "=m" (*mem) \
+ : "ir" (value), "m" (*mem)); \
+ else \
+ { \
+ __typeof (value) __addval = (value); \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*mem) __oldval = *__memp; \
+ __typeof (*mem) __tmpval; \
+ do \
+ __tmpval = __oldval; \
+ while ((__oldval = __arch_compare_and_exchange_val_64_acq \
+ (__memp, __oldval + __addval, __oldval)) == __tmpval); \
+ } \
+ })
#define atomic_add_negative(mem, value) \
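
atomic_exchange_and_add maps straight onto xadd, which atomically swaps
its register operand with memory and stores the sum, so the macro yields
the value *mem held before the addition; atomic_add, which discards the
old value, can use the cheaper add (or inc/dec for constant +/-1, per the
__builtin_constant_p checks above).  A hypothetical usage sketch:

  int refcount = 1;
  int old = atomic_exchange_and_add (&refcount, 5); /* old == 1, refcount == 6 */
  atomic_add (&refcount, -4);                       /* refcount == 2 */
  atomic_add (&refcount, 1);    /* constant 1: routed to atomic_increment */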
@@ -333,42 +236,30 @@ typedef uintmax_t uatomic_max_t;
__result; })
-#define __arch_increment_body(lock, pfx, mem) \
- do { \
- if (sizeof (*mem) == 1) \
- __asm __volatile (lock "incb %b0" \
- : "=m" (*mem) \
- : "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- else if (sizeof (*mem) == 2) \
- __asm __volatile (lock "incw %w0" \
- : "=m" (*mem) \
- : "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- else if (sizeof (*mem) == 4) \
- __asm __volatile (lock "incl %0" \
- : "=m" (*mem) \
- : "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- else \
- { \
- __typeof (mem) __memp = (mem); \
- __typeof (*mem) __oldval = *__memp; \
- __typeof (*mem) __tmpval; \
- do \
- __tmpval = __oldval; \
- while ((__oldval = pfx##_compare_and_exchange_val_64_acq \
- (__memp, __oldval + 1, __oldval)) == __tmpval); \
- } \
- } while (0)
-
-#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, __arch, mem)
-
-#define __arch_increment_cprefix \
- "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t"
-
-#define catomic_increment(mem) \
- __arch_increment_body (__arch_increment_cprefix, __arch_c, mem)
+#define atomic_increment(mem) \
+ (void) ({ if (sizeof (*mem) == 1) \
+ __asm __volatile (LOCK_PREFIX "incb %b0" \
+ : "=m" (*mem) \
+ : "m" (*mem)); \
+ else if (sizeof (*mem) == 2) \
+ __asm __volatile (LOCK_PREFIX "incw %w0" \
+ : "=m" (*mem) \
+ : "m" (*mem)); \
+ else if (sizeof (*mem) == 4) \
+ __asm __volatile (LOCK_PREFIX "incl %0" \
+ : "=m" (*mem) \
+ : "m" (*mem)); \
+ else \
+ { \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*mem) __oldval = *__memp; \
+ __typeof (*mem) __tmpval; \
+ do \
+ __tmpval = __oldval; \
+ while ((__oldval = __arch_compare_and_exchange_val_64_acq \
+ (__memp, __oldval + 1, __oldval)) == __tmpval); \
+ } \
+ })
#define atomic_increment_and_test(mem) \
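
For 1-, 2- and 4-byte operands atomic_increment is a single lock-prefixed
inc of the matching width; atomic_increment_and_test (context above) is
the variant that additionally reports whether the counter reached zero.
Hypothetical usage:

  int counter = -2;
  atomic_increment (&counter);                /* counter == -1 */
  if (atomic_increment_and_test (&counter))   /* counter == 0: true */
    handle_zero ();                           /* hypothetical callback */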
@@ -390,42 +281,30 @@ typedef uintmax_t uatomic_max_t;
__result; })
-#define __arch_decrement_body(lock, pfx, mem) \
- do { \
- if (sizeof (*mem) == 1) \
- __asm __volatile (lock "decb %b0" \
- : "=m" (*mem) \
- : "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- else if (sizeof (*mem) == 2) \
- __asm __volatile (lock "decw %w0" \
- : "=m" (*mem) \
- : "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- else if (sizeof (*mem) == 4) \
- __asm __volatile (lock "decl %0" \
- : "=m" (*mem) \
- : "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- else \
- { \
- __typeof (mem) __memp = (mem); \
- __typeof (*mem) __oldval = *__memp; \
- __typeof (*mem) __tmpval; \
- do \
- __tmpval = __oldval; \
- while ((__oldval = pfx##_compare_and_exchange_val_64_acq \
- (__memp, __oldval - 1, __oldval)) == __tmpval); \
- } \
- } while (0)
-
-#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, __arch, mem)
-
-#define __arch_decrement_cprefix \
- "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t"
-
-#define catomic_decrement(mem) \
- __arch_decrement_body (__arch_decrement_cprefix, __arch_c, mem)
+#define atomic_decrement(mem) \
+ (void) ({ if (sizeof (*mem) == 1) \
+ __asm __volatile (LOCK_PREFIX "decb %b0" \
+ : "=m" (*mem) \
+ : "m" (*mem)); \
+ else if (sizeof (*mem) == 2) \
+ __asm __volatile (LOCK_PREFIX "decw %w0" \
+ : "=m" (*mem) \
+ : "m" (*mem)); \
+ else if (sizeof (*mem) == 4) \
+ __asm __volatile (LOCK_PREFIX "decl %0" \
+ : "=m" (*mem) \
+ : "m" (*mem)); \
+ else \
+ { \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*mem) __oldval = *__memp; \
+ __typeof (*mem) __tmpval; \
+ do \
+ __tmpval = __oldval; \
+ while ((__oldval = __arch_compare_and_exchange_val_64_acq \
+ (__memp, __oldval - 1, __oldval)) == __tmpval); \
+ } \
+ })
#define atomic_decrement_and_test(mem) \
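
For 8-byte operands, add, increment and decrement all share the same
compare-and-swap retry loop.  As written, the condition "== __tmpval"
keeps looping when the CAS succeeds (the value-returning CAS yields the
expected old value exactly on success) and exits when it fails, which
looks inverted; it is unreachable in practice, since the 64-bit CAS above
is stubbed to abort ().  For comparison, the conventional shape of the
loop (a sketch for the decrement case, assuming mem names an 8-byte
variable in scope):

  __typeof (mem) __memp = (mem);
  __typeof (*mem) __oldval = *__memp;
  __typeof (*mem) __tmpval;
  do
    __tmpval = __oldval;                    /* value we expect to find */
  while ((__oldval = __arch_compare_and_exchange_val_64_acq
          (__memp, __tmpval - 1, __tmpval)) != __tmpval); /* retry on failure */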
@@ -448,22 +327,21 @@ typedef uintmax_t uatomic_max_t;
#define atomic_bit_set(mem, bit) \
- do { \
- if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK_PREFIX "orb %b2, %0" \
- : "=m" (*mem) \
- : "m" (*mem), "ir" (1 << (bit))); \
- else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK_PREFIX "orw %w2, %0" \
- : "=m" (*mem) \
- : "m" (*mem), "ir" (1 << (bit))); \
- else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK_PREFIX "orl %2, %0" \
- : "=m" (*mem) \
- : "m" (*mem), "ir" (1 << (bit))); \
- else \
- abort (); \
- } while (0)
+ (void) ({ if (sizeof (*mem) == 1) \
+ __asm __volatile (LOCK_PREFIX "orb %b2, %0" \
+ : "=m" (*mem) \
+ : "m" (*mem), "ir" (1 << (bit))); \
+ else if (sizeof (*mem) == 2) \
+ __asm __volatile (LOCK_PREFIX "orw %w2, %0" \
+ : "=m" (*mem) \
+ : "m" (*mem), "ir" (1 << (bit))); \
+ else if (sizeof (*mem) == 4) \
+ __asm __volatile (LOCK_PREFIX "orl %2, %0" \
+ : "=m" (*mem) \
+ : "m" (*mem), "ir" (1 << (bit))); \
+ else \
+ abort (); \
+ })
#define atomic_bit_test_set(mem, bit) \
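
atomic_bit_set is a plain lock-prefixed or of 1 << bit and discards the
previous state of the bit; callers that need it use atomic_bit_test_set
(context above), which reports whether the bit was already set.
Hypothetical usage:

  unsigned int flags = 0;
  atomic_bit_set (&flags, 3);                 /* flags == 0x08 */
  if (atomic_bit_test_set (&flags, 3))        /* bit already set: true */
    /* lost the race to another setter */ ;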
@@ -486,51 +364,3 @@ typedef uintmax_t uatomic_max_t;
#define atomic_delay() asm ("rep; nop")
-
-
-#define atomic_and(mem, mask) \
- do { \
- if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK_PREFIX "andb %1, %b0" \
- : "=m" (*mem) \
- : "ir" (mask), "m" (*mem)); \
- else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK_PREFIX "andw %1, %w0" \
- : "=m" (*mem) \
- : "ir" (mask), "m" (*mem)); \
- else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK_PREFIX "andl %1, %0" \
- : "=m" (*mem) \
- : "ir" (mask), "m" (*mem)); \
- else \
- abort (); \
- } while (0)
-
-
-#define __arch_or_body(lock, mem, mask) \
- do { \
- if (sizeof (*mem) == 1) \
- __asm __volatile (lock "orb %1, %b0" \
- : "=m" (*mem) \
- : "ir" (mask), "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- else if (sizeof (*mem) == 2) \
- __asm __volatile (lock "orw %1, %w0" \
- : "=m" (*mem) \
- : "ir" (mask), "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- else if (sizeof (*mem) == 4) \
- __asm __volatile (lock "orl %1, %0" \
- : "=m" (*mem) \
- : "ir" (mask), "m" (*mem), \
- "i" (offsetof (tcbhead_t, multiple_threads))); \
- else \
- abort (); \
- } while (0)
-
-#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)
-
-#define __arch_or_cprefix \
- "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t"
-
-#define catomic_or(mem, mask) __arch_or_body (__arch_or_cprefix, mem, mask)
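
The removed cprefix strings show how the conditional variants were glued
together: __arch_or_body receives the lock argument as a string literal
that C string pasting places directly in front of the mnemonic, and the
%P output modifier makes GCC print the constant operand (the
offsetof (tcbhead_t, multiple_threads) passed via "i") without the "$"
immediate syntax, so it can form the absolute address %gs:OFFSET.  After
pasting, the 4-byte catomic_or was equivalent to this illustrative
expansion:

  __asm __volatile ("cmpl $0, %%gs:%P3\n\t"   /* multiple_threads set?    */
                    "je 0f\n\t"               /* no: skip the lock prefix */
                    "lock\n"
                    "0:\torl %1, %0"
                    : "=m" (*mem)
                    : "ir" (mask), "m" (*mem),
                      "i" (offsetof (tcbhead_t, multiple_threads)));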