path: root/linuxthreads/spinlock.h
author    Ulrich Drepper <drepper@redhat.com>    2000-01-05 02:09:12 +0000
committer Ulrich Drepper <drepper@redhat.com>    2000-01-05 02:09:12 +0000
commit    1d2fc9b3c59d0e83e04139ddf633731264b76ea2 (patch)
tree      c738cf2a40851dc25be2c252ba5dbb7f335b5e14 /linuxthreads/spinlock.h
parent    f19f2b34439145daf300bf12789bbc61c8d4db28 (diff)
Redesigned how cancellation unblocks a thread from internal cancellation points (sem_wait, pthread_join, pthread_cond_{wait,timedwait}). Cancellation won't eat a signal in any of these functions (*required* by POSIX and Single Unix Spec!).
2000-01-03  Kaz Kylheku  <kaz@ashi.footprints.net>

Redesigned how cancellation unblocks a thread from internal cancellation
points (sem_wait, pthread_join, pthread_cond_{wait,timedwait}).
Cancellation won't eat a signal in any of these functions (*required* by
POSIX and Single Unix Spec!).

* condvar.c: spontaneous wakeup on pthread_cond_timedwait won't eat a
simultaneous condition variable signal (not required by POSIX or Single
Unix Spec, but nice).
* spinlock.c: __pthread_lock queues back any received restarts that don't
belong to it instead of assuming ownership of lock upon any restart;
fastlock can no longer be acquired by two threads simultaneously.
* restart.h: restarts queue even on kernels that don't have queued real
time signals (2.0, early 2.1), thanks to atomic counter, avoiding a rare
race condition in pthread_cond_timedwait.
Diffstat (limited to 'linuxthreads/spinlock.h')
-rw-r--r--  linuxthreads/spinlock.h  28
1 files changed, 28 insertions, 0 deletions
diff --git a/linuxthreads/spinlock.h b/linuxthreads/spinlock.h
index 29f030406c..aae18a27b4 100644
--- a/linuxthreads/spinlock.h
+++ b/linuxthreads/spinlock.h
@@ -72,3 +72,31 @@ static inline int __pthread_trylock (struct _pthread_fastlock * lock)
}

#define LOCK_INITIALIZER {0, 0}
+
+/* Operations on pthread_atomic, which is defined in internals.h */
+
+static inline long atomic_increment(struct pthread_atomic *pa)
+{
+ long oldval;
+
+ do {
+ oldval = pa->p_count;
+ } while (!compare_and_swap(&pa->p_count, oldval, oldval + 1, &pa->p_spinlock));
+
+ return oldval;
+}
+
+
+static inline long atomic_decrement(struct pthread_atomic *pa)
+{
+ long oldval;
+
+ do {
+ oldval = pa->p_count;
+ } while (!compare_and_swap(&pa->p_count, oldval, oldval - 1, &pa->p_spinlock));
+
+ return oldval;
+}
+
+#define ATOMIC_INITIALIZER { 0, 0 }
+
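
For readers who want to try the new helpers outside the diff, the sketch below shows the compare-and-swap retry pattern that atomic_increment/atomic_decrement implement. It is not part of the patch: the stub compare_and_swap, the repeated struct definition, and the "pending" counter in main are assumptions made only so the example compiles stand-alone; in linuxthreads the real compare_and_swap is an architecture-specific primitive (with a spinlock-based emulation, which is why p_spinlock is passed along), and pthread_atomic is defined in internals.h as the patch comment notes.

    /* Illustrative sketch only -- not the linuxthreads implementation.  */
    #include <stdio.h>

    struct pthread_atomic {
      long p_count;
      int p_spinlock;
    };

    /* Stub: succeeds only if *ptr still equals oldval, then stores newval.
       Not actually atomic -- a stand-in for the per-architecture primitive. */
    static int compare_and_swap(long *ptr, long oldval, long newval,
                                int *spinlock)
    {
      (void) spinlock;              /* emulation path not shown */
      if (*ptr != oldval)
        return 0;
      *ptr = newval;
      return 1;
    }

    /* Same retry pattern as the patch: re-read the counter and retry until
       the swap succeeds against an unchanged value; return the old value.  */
    static long atomic_increment(struct pthread_atomic *pa)
    {
      long oldval;
      do {
        oldval = pa->p_count;
      } while (!compare_and_swap(&pa->p_count, oldval, oldval + 1,
                                 &pa->p_spinlock));
      return oldval;
    }

    int main(void)
    {
      struct pthread_atomic pending = { 0, 0 };   /* ATOMIC_INITIALIZER */
      atomic_increment(&pending);   /* e.g. note a restart that arrived early */
      atomic_increment(&pending);
      printf("pending count: %ld\n", pending.p_count);   /* prints 2 */
      return 0;
    }

The loop simply re-reads p_count and retries until the swap lands on an unchanged value, returning the value seen before the update. That counting behaviour is what the ChangeLog leans on when it says restarts queue "thanks to atomic counter" even on 2.0 and early 2.1 kernels without queued real time signals.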