author     Richard Braun <rbraun@sceen.net>    2014-01-03 23:54:19 +0100
committer  Richard Braun <rbraun@sceen.net>    2014-01-03 23:54:19 +0100
commit     99cef9617922016bda314a6e21b499e17e1af56e
tree       375101508499d9521ea9ba9016d8dfc94afdecb5
parent     b16a1df6c33e1b2ae1a4ecb40c781f140bbff63c
x86/atomic: support atomic operations on ints
Atomic operations are now provided for both unsigned longs and unsigned ints. This allows reducing the memory overhead of counters and synchronization objects such as spin locks and mutexes on 64-bit machines.
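For illustration only, here is a minimal sketch (not part of this commit) of how the new int-sized primitives might back a 4-byte test-and-set spin lock on x86_64. The struct and function names (example_spinlock, example_spinlock_lock, example_spinlock_unlock) and the <machine/atomic.h> include path are assumptions made for the example, not code from this tree.

/*
 * Illustrative sketch only: a test-and-set spin lock built on the
 * int-sized primitives added by this commit. The lock word shrinks
 * from 8 to 4 bytes on x86_64 compared to the unsigned long variants.
 */
#include <machine/atomic.h>         /* assumed include path for this header */

struct example_spinlock {
    volatile unsigned int locked;   /* 0 = unlocked, 1 = locked */
};

static inline void
example_spinlock_lock(struct example_spinlock *lock)
{
    /* atomic_cas_uint returns the previous value; 0 means the CAS won */
    while (atomic_cas_uint(&lock->locked, 0, 1) != 0)
        continue;                   /* spin until the owner releases */
}

static inline void
example_spinlock_unlock(struct example_spinlock *lock)
{
    /* atomic_swap_uint implies a full memory barrier on release */
    atomic_swap_uint(&lock->locked, 0);
}

A production lock would add backoff while spinning; the point here is only that the lock word can now be an unsigned int rather than an unsigned long.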
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/machine/atomic.h | 116
1 file changed, 86 insertions(+), 30 deletions(-)
diff --git a/arch/x86/machine/atomic.h b/arch/x86/machine/atomic.h
index cf92c64..fb8428d 100644
--- a/arch/x86/machine/atomic.h
+++ b/arch/x86/machine/atomic.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 Richard Braun.
+ * Copyright (c) 2012-2014 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,62 +21,118 @@
#ifndef _X86_ATOMIC_H
#define _X86_ATOMIC_H
+#define ATOMIC_ADD(ptr, delta) \
+ asm volatile("lock add %1, %0" \
+ : "+m" (*(ptr)) \
+ : "r" (delta))
+
+#define ATOMIC_AND(ptr, bits) \
+ asm volatile("lock and %1, %0" \
+ : "+m" (*(ptr)) \
+ : "r" (bits))
+
+#define ATOMIC_OR(ptr, bits) \
+ asm volatile("lock or %1, %0" \
+ : "+m" (*(ptr)) \
+ : "r" (bits))
+
+/* The xchg instruction doesn't need a lock prefix */
+#define ATOMIC_SWAP(ptr, oldval, newval) \
+ asm volatile("xchg %1, %0" \
+ : "+m" (*(ptr)), "=r" (oldval) \
+ : "1" (newval) \
+ : "memory")
+
+#define ATOMIC_CAS(ptr, oldval, predicate, newval) \
+ asm volatile("lock cmpxchg %3, %0" \
+ : "+m" (*(ptr)), "=a" (oldval) \
+ : "1" (predicate), "r" (newval) \
+ : "memory")
+
static inline void
-atomic_add(volatile unsigned long *ptr, long delta)
+atomic_add_uint(volatile unsigned int *ptr, int delta)
{
- asm volatile("lock add %1, %0"
- : "+m" (*ptr)
- : "r" (delta));
+ ATOMIC_ADD(ptr, delta);
}
static inline void
-atomic_and(volatile unsigned long *ptr, unsigned long bits)
+atomic_and_uint(volatile unsigned int *ptr, unsigned int bits)
{
- asm volatile("lock and %1, %0"
- : "+m" (*ptr)
- : "r" (bits));
+ ATOMIC_AND(ptr, bits);
}
static inline void
-atomic_or(volatile unsigned long *ptr, unsigned long bits)
+atomic_or_uint(volatile unsigned int *ptr, unsigned int bits)
{
- asm volatile("lock or %1, %0"
- : "+m" (*ptr)
- : "r" (bits));
+ ATOMIC_OR(ptr, bits);
}
/*
* Implies a full memory barrier.
*/
-static inline unsigned long
-atomic_swap(volatile unsigned long *ptr, unsigned long newval)
+static inline unsigned int
+atomic_swap_uint(volatile unsigned int *ptr, unsigned int newval)
+{
+ unsigned int oldval;
+
+ ATOMIC_SWAP(ptr, oldval, newval);
+ return oldval;
+}
+
+/*
+ * Implies a full memory barrier.
+ */
+static inline unsigned int
+atomic_cas_uint(volatile unsigned int *ptr, unsigned int predicate,
+ unsigned int newval)
{
- unsigned long prev;
+ unsigned int oldval;
- /* The xchg instruction doesn't need a lock prefix */
- asm volatile("xchg %1, %0"
- : "+m" (*ptr), "=r" (prev)
- : "1" (newval)
- : "memory");
+ ATOMIC_CAS(ptr, oldval, predicate, newval);
+ return oldval;
+}
- return prev;
+static inline void
+atomic_add_ulong(volatile unsigned long *ptr, long delta)
+{
+ ATOMIC_ADD(ptr, delta);
+}
+
+static inline void
+atomic_and_ulong(volatile unsigned long *ptr, unsigned long bits)
+{
+ ATOMIC_AND(ptr, bits);
+}
+
+static inline void
+atomic_or_ulong(volatile unsigned long *ptr, unsigned long bits)
+{
+ ATOMIC_OR(ptr, bits);
}
/*
* Implies a full memory barrier.
*/
static inline unsigned long
-atomic_cas(volatile unsigned long *ptr, unsigned long oldval,
- unsigned long newval)
+atomic_swap_ulong(volatile unsigned long *ptr, unsigned long newval)
{
- unsigned long prev;
+ unsigned long oldval;
- asm volatile("lock cmpxchg %3, %0"
- : "+m" (*ptr), "=a" (prev)
- : "1" (oldval), "r" (newval)
- : "memory");
+ ATOMIC_SWAP(ptr, oldval, newval);
+ return oldval;
+}
+
+/*
+ * Implies a full memory barrier.
+ */
+static inline unsigned long
+atomic_cas_ulong(volatile unsigned long *ptr, unsigned long predicate,
+ unsigned long newval)
+{
+ unsigned long oldval;
- return prev;
+ ATOMIC_CAS(ptr, oldval, predicate, newval);
+ return oldval;
}
#endif /* _X86_ATOMIC_H */