author    Ulrich Drepper <drepper@redhat.com>  1998-06-25 19:36:00 +0000
committer Ulrich Drepper <drepper@redhat.com>  1998-06-25 19:36:00 +0000
commit    3387a425e65b839b68bd2973f6bc5ab22315cc5d (patch)
tree      375713a0b865b10b9eddd9c9877ad68cf0bdc851 /linuxthreads/spinlock.c
parent    d47aac39992cb1dd705d8c584f4d3979d7ce4602 (diff)
Finish user stack support. Change locking code to be safe in situations with different priorities.
1998-06-25 19:27  Ulrich Drepper  <drepper@cygnus.com>

	* attr.c: Finish user stack support.  Change locking code to be
	safe in situations with different priorities.
	* cancel.c: Likewise.
	* condvar.c: Likewise.
	* internals.h: Likewise.
	* join.c: Likewise.
	* manager.c: Likewise.
	* mutex.c: Likewise.
	* pthread.c: Likewise.
	* ptlongjmp.c: Likewise.
	* queue.h: Likewise.
	* rwlock.c: Likewise.
	* semaphore.c: Likewise.
	* semaphore.h: Likewise.
	* signals.c: Likewise.
	* spinlock.c: Likewise.
	* spinlock.h: Likewise.
	Patches by Xavier Leroy.

1998-06-25  Ulrich Drepper  <drepper@cygnus.com>

	* sysdeps/pthread/pthread.h: Make [sg]et_stacksize and
	[sg]et_stackaddr prototypes always available.
	* sysdeps/unix/sysv/linux/bits/posix_opt.h: Define
	_POSIX_THREAD_ATTR_STACKSIZE and _POSIX_THREAD_ATTR_STACKADDR.
Diffstat (limited to 'linuxthreads/spinlock.c')
-rw-r--r--  linuxthreads/spinlock.c  |  123
1 file changed, 120 insertions, 3 deletions
diff --git a/linuxthreads/spinlock.c b/linuxthreads/spinlock.c
index 170d9ae8d0..dba5d38fd2 100644
--- a/linuxthreads/spinlock.c
+++ b/linuxthreads/spinlock.c
@@ -12,15 +12,130 @@
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU Library General Public License for more details. */
-/* Spin locks */
+/* Internal locks */
#include <sched.h>
#include <time.h>
#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
+#include "restart.h"
-/* This function is called if the inlined test-and-set in acquire() failed */
+/* The status field of a fastlock has the following meaning:
+ 0: fastlock is free
+ 1: fastlock is taken, no thread is waiting on it
+ ADDR: fastlock is taken, ADDR is address of thread descriptor for
+ first waiting thread, other waiting threads are linked via
+ their p_nextwaiting field.
+ The waiting list is not sorted by priority order.
+ Actually, we always insert at top of list (sole insertion mode
+ that can be performed without locking).
+ For __pthread_unlock, we perform a linear search in the list
+ to find the highest-priority, oldest waiting thread.
+ This is safe because there are no concurrent __pthread_unlock
+ operations -- only the thread that locked the mutex can unlock it. */
+
+void __pthread_lock(struct _pthread_fastlock * lock)
+{
+ long oldstatus, newstatus;
+ pthread_descr self = NULL;
+
+ do {
+ oldstatus = lock->status;
+ if (oldstatus == 0) {
+ newstatus = 1;
+ } else {
+ self = thread_self();
+ self->p_nextwaiting = (pthread_descr) oldstatus;
+ newstatus = (long) self;
+ }
+ } while(! compare_and_swap(&lock->status, oldstatus, newstatus,
+ &lock->spinlock));
+ if (oldstatus != 0) suspend(self);
+}
+
+int __pthread_trylock(struct _pthread_fastlock * lock)
+{
+ long oldstatus;
+
+ do {
+ oldstatus = lock->status;
+ if (oldstatus != 0) return EBUSY;
+ } while(! compare_and_swap(&lock->status, 0, 1, &lock->spinlock));
+ return 0;
+}
+
+void __pthread_unlock(struct _pthread_fastlock * lock)
+{
+ long oldstatus;
+ pthread_descr thr, * ptr, * maxptr;
+ int maxprio;
+
+again:
+ oldstatus = lock->status;
+ if (oldstatus == 1) {
+ /* No threads are waiting for this lock */
+ if (! compare_and_swap(&lock->status, 1, 0, &lock->spinlock)) goto again;
+ return;
+ }
+ /* Find thread in waiting queue with maximal priority */
+ ptr = (pthread_descr *) &lock->status;
+ thr = (pthread_descr) oldstatus;
+ maxprio = 0;
+ maxptr = ptr;
+ while (thr != (pthread_descr) 1) {
+ if (thr->p_priority >= maxprio) {
+ maxptr = ptr;
+ maxprio = thr->p_priority;
+ }
+ ptr = &(thr->p_nextwaiting);
+ thr = *ptr;
+ }
+ /* Remove max prio thread from waiting list. */
+ if (maxptr == (pthread_descr *) &lock->status) {
+ /* If max prio thread is at head, remove it with compare-and-swap
+ to guard against concurrent lock operation */
+ thr = (pthread_descr) oldstatus;
+ if (! compare_and_swap(&lock->status,
+ oldstatus, (long)(thr->p_nextwaiting),
+ &lock->spinlock))
+ goto again;
+ } else {
+ /* No risk of concurrent access, remove max prio thread normally */
+ thr = *maxptr;
+ *maxptr = thr->p_nextwaiting;
+ }
+ /* Wake up the selected waiting thread */
+ thr->p_nextwaiting = NULL;
+ restart(thr);
+}
+
+/* Compare-and-swap emulation with a spinlock */
+
+#ifdef TEST_FOR_COMPARE_AND_SWAP
+int __pthread_has_cas = 0;
+#endif
+
+#ifndef HAS_COMPARE_AND_SWAP
+
+static void __pthread_acquire(int * spinlock);
+
+int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
+ int * spinlock)
+{
+ int res;
+ if (testandset(spinlock)) __pthread_acquire(spinlock);
+ if (*ptr == oldval) {
+ *ptr = newval; res = 1;
+ } else {
+ res = 0;
+ }
+ *spinlock = 0;
+ return res;
+}
+
+/* This function is called if the inlined test-and-set
+ in __pthread_compare_and_swap() failed */
/* The retry strategy is as follows:
- We test and set the spinlock MAX_SPIN_COUNT times, calling
@@ -40,7 +155,7 @@
- When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
sched_yield(), then sleeping again if needed. */
-void __pthread_acquire(int * spinlock)
+static void __pthread_acquire(int * spinlock)
{
int cnt = 0;
struct timespec tm;
@@ -57,3 +172,5 @@ void __pthread_acquire(int * spinlock)
}
}
}
+
+#endif
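
For readers tracing the first hunk, the status-word protocol the new comment describes can be condensed into a stand-alone sketch. The code below is an illustration, not the glibc source: it assumes GCC's __sync_bool_compare_and_swap builtin in place of the portable compare_and_swap()/spinlock pair, and POSIX semaphores in place of the suspend()/restart() primitives.

/* Sketch of the fastlock status-word protocol.  Assumed stand-ins
   (not from the patch): __sync_bool_compare_and_swap for
   compare_and_swap(), a POSIX semaphore (initialized elsewhere with
   sem_init) for suspend() and restart().  */
#include <semaphore.h>
#include <stddef.h>

struct waiter {                    /* stand-in for the thread descriptor */
  struct waiter *next;             /* plays the role of p_nextwaiting */
  int prio;                        /* plays the role of p_priority */
  sem_t wakeup;                    /* sem_wait = suspend, sem_post = restart */
};

struct fastlock { long status; };  /* 0 free, 1 taken, else waiter list */

static void lock_acquire(struct fastlock *lock, struct waiter *self)
{
  long oldstatus, newstatus;

  do {
    oldstatus = lock->status;
    if (oldstatus == 0) {
      newstatus = 1;                            /* free -> taken, no waiters */
    } else {
      self->next = (struct waiter *) oldstatus; /* push self at head of list */
      newstatus = (long) self;
    }
  } while (!__sync_bool_compare_and_swap(&lock->status,
                                         oldstatus, newstatus));
  if (oldstatus != 0)
    sem_wait(&self->wakeup);                    /* suspend until restarted */
}

static void lock_release(struct fastlock *lock)
{
  long oldstatus;
  struct waiter *thr, **ptr, **maxptr;
  int maxprio;

again:
  oldstatus = lock->status;
  if (oldstatus == 1) {                         /* no waiters: taken -> free */
    if (!__sync_bool_compare_and_swap(&lock->status, 1, 0)) goto again;
    return;
  }
  /* Linear scan for the highest-priority (and, among equals, oldest)
     waiter; the list terminates with the value 1, as in the patch.  */
  ptr = (struct waiter **) &lock->status;
  thr = (struct waiter *) oldstatus;
  maxprio = 0;
  maxptr = ptr;
  while (thr != (struct waiter *) 1) {
    if (thr->prio >= maxprio) {
      maxptr = ptr;
      maxprio = thr->prio;
    }
    ptr = &thr->next;
    thr = *ptr;
  }
  if (maxptr == (struct waiter **) &lock->status) {
    /* Removing the head races with concurrent lockers: CAS required. */
    thr = (struct waiter *) oldstatus;
    if (!__sync_bool_compare_and_swap(&lock->status,
                                      oldstatus, (long) thr->next))
      goto again;
  } else {
    /* Interior removal is race-free: only the lock holder runs this. */
    thr = *maxptr;
    *maxptr = thr->next;
  }
  thr->next = NULL;
  sem_post(&thr->wakeup);                       /* restart the chosen waiter */
}

The design point worth noting: waiters push themselves onto the head of the list with a single compare-and-swap, so only the unlocking thread ever walks or edits the interior of the list, and only the head needs CAS protection against concurrent lockers. Scanning with >= picks the last maximal-priority waiter, which, given head insertion, is the oldest one at that priority.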
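
The last two hunks make __pthread_acquire static but show only its signature and closing braces; the loop body falls in unchanged context the diff elides. Going by the retry strategy the surrounding comment spells out, a stand-alone version would look roughly like the sketch below. The MAX_SPIN_COUNT and SPIN_SLEEP_DURATION values and the testandset() definition are assumptions for illustration (the patch does not show them); GCC's __sync_lock_test_and_set stands in for the per-architecture testandset macro.

/* Sketch of the spin/yield/sleep retry loop described by the comment.
   The constants are illustrative, not taken from the patch.  */
#include <sched.h>
#include <time.h>

#define MAX_SPIN_COUNT      50
#define SPIN_SLEEP_DURATION 2000001        /* nanoseconds */

static int testandset(int *spinlock)
{
  /* Assumed stand-in for the per-architecture testandset macro:
     atomically set *spinlock to 1 and return its previous value.  */
  return __sync_lock_test_and_set(spinlock, 1);
}

static void acquire(int *spinlock)
{
  int cnt = 0;
  struct timespec tm;

  while (testandset(spinlock)) {
    if (cnt < MAX_SPIN_COUNT) {
      sched_yield();                       /* give other runnable threads
                                              of equal priority a chance */
      cnt++;
    } else {
      tm.tv_sec = 0;                       /* actually block for a while so
                                              that a lower-priority holder
                                              of the spinlock can run */
      tm.tv_nsec = SPIN_SLEEP_DURATION;
      nanosleep(&tm, NULL);
      cnt = 0;
    }
  }
}

The nanosleep() step is what makes the emulation safe across priorities, which is the point of this commit: on a uniprocessor, sched_yield() never lets a lower-priority thread that holds the spinlock run, so after MAX_SPIN_COUNT yields the waiter must genuinely block for a while.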