author     Richard Braun <rbraun@sceen.net>   2017-07-30 16:51:05 +0200
committer  Richard Braun <rbraun@sceen.net>   2017-07-30 16:51:05 +0200
commit     8f7ade85c97f2570568a65aa92d9f4a4c24da91a (patch)
tree       314b29c0414a21f144bfcebfbb6017d6ee7ab8b5
parent     993246d139ea64106749ea6a7652efa38ac200a6 (diff)
Replace x86/{atomic,mb} with stdatomic.h
-rw-r--r--  Makefile.am   |   4
-rw-r--r--  bitmap.h      |  17
-rw-r--r--  x86/atomic.h  | 178
-rw-r--r--  x86/mb.h      |  78
4 files changed, 15 insertions(+), 262 deletions(-)
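
The commit drops the hand-rolled x86 inline-assembly atomics in favor of the portable C11 <stdatomic.h> interface. As a rough illustration (not part of the commit; the wrapper names below are hypothetical), the removed full-barrier operations translate along these lines. Note that, like the bitmap.h change below, this relies on GCC's <stdatomic.h> accepting plain (non-_Atomic) integer pointers:

#include <stdatomic.h>

/* Hypothetical sketch of how the removed helpers map onto C11 atomics.
 * The commit itself calls the standard functions directly at each
 * call site instead of keeping wrappers like these. */

static inline unsigned long
fetchadd_ulong(unsigned long *ptr, long delta)
{
    /* lock xadd implied a full barrier, hence seq_cst */
    return atomic_fetch_add_explicit(ptr, delta, memory_order_seq_cst);
}

static inline unsigned long
swap_ulong(unsigned long *ptr, unsigned long newval)
{
    /* xchg also implied a full barrier */
    return atomic_exchange_explicit(ptr, newval, memory_order_seq_cst);
}
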
diff --git a/Makefile.am b/Makefile.am
index 916de95..5d4d441 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -44,9 +44,7 @@ librbraun_la_SOURCES = \
 	rdxtree_i.h \
 	slist.h \
 	shell.c \
-	shell.h \
-	x86/atomic.h \
-	x86/mb.h
+	shell.h
 
 librbraun_la_LIBADD = -lrt -lpthread
diff --git a/bitmap.h b/bitmap.h
index 0cfa392..d58b633 100644
--- a/bitmap.h
+++ b/bitmap.h
@@ -35,10 +35,10 @@
 #define _BITMAP_H
 
 #include <limits.h>
+#include <stdatomic.h>
 #include <string.h>
 
 #include "bitmap_i.h"
-#include "x86/atomic.h"
 
 #define BITMAP_DECLARE(name, nr_bits) unsigned long name[BITMAP_LONGS(nr_bits)]
@@ -88,7 +88,7 @@ bitmap_set_atomic(unsigned long *bm, int bit)
         bitmap_lookup(&bm, &bit);
     }
 
-    atomic_or_ulong(bm, bitmap_mask(bit));
+    atomic_fetch_or_explicit(bm, bitmap_mask(bit), memory_order_release);
 }
 
 static inline void
@@ -108,7 +108,7 @@ bitmap_clear_atomic(unsigned long *bm, int bit)
         bitmap_lookup(&bm, &bit);
     }
 
-    atomic_and_ulong(bm, ~bitmap_mask(bit));
+    atomic_fetch_and_explicit(bm, ~bitmap_mask(bit), memory_order_acquire);
 }
 
 static inline int
@@ -121,6 +121,17 @@ bitmap_test(const unsigned long *bm, int bit)
     return ((*bm & bitmap_mask(bit)) != 0);
 }
 
+static inline int
+bitmap_test_atomic(const unsigned long *bm, int bit)
+{
+    if (bit >= LONG_BIT) {
+        bitmap_lookup(&bm, &bit);
+    }
+
+    return ((atomic_load_explicit(bm, memory_order_acquire)
+             & bitmap_mask(bit)) != 0);
+}
+
 static inline void
 bitmap_and(unsigned long *a, const unsigned long *b, int nr_bits)
 {
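
The chosen orderings give the bitmap a publish/consume flavor: setting a bit is a release operation, while clearing and testing are acquires, so a release-set pairs with an acquire-test. A minimal usage sketch (the ready_map and worker names are hypothetical, not from the commit):

#include "bitmap.h"

#define NR_WORKERS 128

static BITMAP_DECLARE(ready_map, NR_WORKERS); /* hypothetical flag array */

/* Producer: write the worker's data, then set its bit; the release
 * ordering in bitmap_set_atomic() publishes the data writes. */
static void
mark_ready(int worker)
{
    bitmap_set_atomic(ready_map, worker);
}

/* Consumer: the acquire load in bitmap_test_atomic() pairs with the
 * release above, so the data writes are visible once the bit is seen. */
static int
is_ready(int worker)
{
    return bitmap_test_atomic(ready_map, worker);
}
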
diff --git a/x86/atomic.h b/x86/atomic.h
deleted file mode 100644
index 52a4e5e..0000000
--- a/x86/atomic.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (c) 2012-2015 Richard Braun.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Atomic operations.
- *
- * Upstream site with license notes :
- * http://git.sceen.net/rbraun/librbraun.git/
- */
-
-#ifndef _X86_ATOMIC_H
-#define _X86_ATOMIC_H
-
-#define ATOMIC_ADD(ptr, delta) \
- asm volatile("lock add %1, %0" \
- : "+m" (*(ptr)) \
- : "r" (delta))
-
-#define ATOMIC_FETCHADD(ptr, oldval, delta) \
- asm volatile("lock xadd %1, %0" \
- : "+m" (*(ptr)), "=r" (oldval) \
- : "1" (delta) \
- : "memory")
-
-#define ATOMIC_AND(ptr, bits) \
- asm volatile("lock and %1, %0" \
- : "+m" (*(ptr)) \
- : "r" (bits))
-
-#define ATOMIC_OR(ptr, bits) \
- asm volatile("lock or %1, %0" \
- : "+m" (*(ptr)) \
- : "r" (bits))
-
-/* The xchg instruction doesn't need a lock prefix */
-#define ATOMIC_SWAP(ptr, oldval, newval) \
- asm volatile("xchg %1, %0" \
- : "+m" (*(ptr)), "=r" (oldval) \
- : "1" (newval) \
- : "memory")
-
-#define ATOMIC_CAS(ptr, oldval, predicate, newval) \
- asm volatile("lock cmpxchg %3, %0" \
- : "+m" (*(ptr)), "=a" (oldval) \
- : "1" (predicate), "r" (newval) \
- : "memory")
-
-static inline void
-atomic_add_uint(volatile unsigned int *ptr, int delta)
-{
- ATOMIC_ADD(ptr, delta);
-}
-
-/*
- * Implies a full memory barrier.
- */
-static inline unsigned int
-atomic_fetchadd_uint(volatile unsigned int *ptr, int delta)
-{
- unsigned int oldval;
-
- ATOMIC_FETCHADD(ptr, oldval, delta);
- return oldval;
-}
-
-static inline void
-atomic_and_uint(volatile unsigned int *ptr, unsigned int bits)
-{
- ATOMIC_AND(ptr, bits);
-}
-
-static inline void
-atomic_or_uint(volatile unsigned int *ptr, unsigned int bits)
-{
- ATOMIC_OR(ptr, bits);
-}
-
-/*
- * Implies a full memory barrier.
- */
-static inline unsigned int
-atomic_swap_uint(volatile unsigned int *ptr, unsigned int newval)
-{
- unsigned int oldval;
-
- ATOMIC_SWAP(ptr, oldval, newval);
- return oldval;
-}
-
-/*
- * Implies a full memory barrier.
- */
-static inline unsigned int
-atomic_cas_uint(volatile unsigned int *ptr, unsigned int predicate,
- unsigned int newval)
-{
- unsigned int oldval;
-
- ATOMIC_CAS(ptr, oldval, predicate, newval);
- return oldval;
-}
-
-static inline void
-atomic_add_ulong(volatile unsigned long *ptr, long delta)
-{
- ATOMIC_ADD(ptr, delta);
-}
-
-/*
- * Implies a full memory barrier.
- */
-static inline unsigned long
-atomic_fetchadd_ulong(volatile unsigned long *ptr, long delta)
-{
- unsigned long oldval;
-
- ATOMIC_FETCHADD(ptr, oldval, delta);
- return oldval;
-}
-
-static inline void
-atomic_and_ulong(volatile unsigned long *ptr, unsigned long bits)
-{
- ATOMIC_AND(ptr, bits);
-}
-
-static inline void
-atomic_or_ulong(volatile unsigned long *ptr, unsigned long bits)
-{
- ATOMIC_OR(ptr, bits);
-}
-
-/*
- * Implies a full memory barrier.
- */
-static inline unsigned long
-atomic_swap_ulong(volatile unsigned long *ptr, unsigned long newval)
-{
- unsigned long oldval;
-
- ATOMIC_SWAP(ptr, oldval, newval);
- return oldval;
-}
-
-/*
- * Implies a full memory barrier.
- */
-static inline unsigned long
-atomic_cas_ulong(volatile unsigned long *ptr, unsigned long predicate,
- unsigned long newval)
-{
- unsigned long oldval;
-
- ATOMIC_CAS(ptr, oldval, predicate, newval);
- return oldval;
-}
-
-#endif /* _X86_ATOMIC_H */
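
Of the removed operations, only the compare-and-swap lacks a one-to-one <stdatomic.h> replacement: the old helper returned the previous value, whereas atomic_compare_exchange_strong_explicit returns a success flag and writes the observed value back through its expected argument. A sketch of an equivalent wrapper (illustrative only, not part of the commit, with the same GCC plain-pointer caveat as above):

#include <stdatomic.h>

static inline unsigned long
cas_ulong(unsigned long *ptr, unsigned long predicate, unsigned long newval)
{
    unsigned long expected = predicate;

    /* On failure, the current value is stored into expected, so
     * returning it preserves the old "return the previous value"
     * contract in both the success and failure cases. */
    atomic_compare_exchange_strong_explicit(ptr, &expected, newval,
                                            memory_order_seq_cst,
                                            memory_order_seq_cst);
    return expected;
}
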
diff --git a/x86/mb.h b/x86/mb.h
deleted file mode 100644
index 3bb30f8..0000000
--- a/x86/mb.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2012-2015 Richard Braun.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Memory barriers.
- *
- * Keep in mind that memory barriers only act on the ordering of loads and
- * stores between internal processor queues and their caches. In particular,
- * it doesn't imply a store is complete after the barrier has completed, only
- * that other processors will see a new value thanks to the cache coherency
- * protocol. Memory barriers alone aren't suitable for device communication.
- *
- * The x86 architectural memory model (total-store ordering) already enforces
- * strong ordering for almost every access. The only exception is that stores
- * can be reordered after loads. As a result, load and store memory barriers
- * are simple compiler barriers whereas full memory barriers must generate
- * a barrier instruction.
- *
- * Upstream site with license notes :
- * http://git.sceen.net/rbraun/librbraun.git/
- */
-
-#ifndef _X86_MB_H
-#define _X86_MB_H
-
-#include "../macros.h"
-
-#ifdef __LP64__
-
-static inline void
-mb_sync(void)
-{
- asm volatile("mfence" : : : "memory");
-}
-
-#else /* __LP64__ */
-
-static inline void
-mb_sync(void)
-{
- asm volatile("lock addl $0, 0(%%esp)" : : : "memory");
-}
-
-#endif /* __LP64__ */
-
-static inline void
-mb_load(void)
-{
- barrier();
-}
-
-static inline void
-mb_store(void)
-{
- barrier();
-}
-
-#endif /* _X86_MB_H */
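
The removed barriers have direct C11 counterparts in atomic_thread_fence(). A sketch, following the same x86 total-store-order reasoning as the comment in the deleted header (the _c11 names are hypothetical):

#include <stdatomic.h>

/* mb_sync(): full barrier; compiles to mfence or a locked RMW on x86 */
static inline void
mb_sync_c11(void)
{
    atomic_thread_fence(memory_order_seq_cst);
}

/* mb_load()/mb_store(): acquire and release fences, which reduce to
 * plain compiler barriers under x86's strong memory model */
static inline void
mb_load_c11(void)
{
    atomic_thread_fence(memory_order_acquire);
}

static inline void
mb_store_c11(void)
{
    atomic_thread_fence(memory_order_release);
}
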