Diffstat (limited to 'linuxthreads')
-rw-r--r--  linuxthreads/ChangeLog    | 11
-rw-r--r--  linuxthreads/Makefile     |  4
-rw-r--r--  linuxthreads/internals.h  | 17
-rw-r--r--  linuxthreads/ptlongjmp.c  | 44
-rw-r--r--  linuxthreads/spinlock.c   | 59
-rw-r--r--  linuxthreads/spinlock.h   |  4
6 files changed, 136 insertions, 3 deletions
diff --git a/linuxthreads/ChangeLog b/linuxthreads/ChangeLog
index 7913e085f1..ba2a20f4d0 100644
--- a/linuxthreads/ChangeLog
+++ b/linuxthreads/ChangeLog
@@ -1,3 +1,14 @@
+1998-04-20 14:55 Ulrich Drepper <drepper@cygnus.com>
+
+ * Makefile (libpthread-routines): Add ptlongjmp and spinlock.
+ * internals.h: Add definitions for new spinlock implementation.
+ * ptlongjmp.c: New file.
+ * spinlock.c: New file.
+ * spinlock.h (acquire): Don't reschedule using __sched_yield; use
+ the new function __pthread_acquire to prevent deadlocks between
+ threads with different priorities.
+ Patches by Xavier Leroy <Xavier.Leroy@inria.fr>.
+
1998-03-16 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
* manager.c (__pthread_manager): Reduce first argument to select
diff --git a/linuxthreads/Makefile b/linuxthreads/Makefile
index 8813ae110f..cdfe06c66e 100644
--- a/linuxthreads/Makefile
+++ b/linuxthreads/Makefile
@@ -32,8 +32,8 @@ extra-libs := libpthread
extra-libs-others := $(extra-libs)
libpthread-routines := attr cancel condvar join manager mutex ptfork \
- pthread signals specific errno lockfile \
- semaphore wrapsyscall rwlock
+ ptlongjmp pthread signals specific errno lockfile \
+ semaphore spinlock wrapsyscall rwlock
libpthread-map := libpthread.map
include ../Rules
diff --git a/linuxthreads/internals.h b/linuxthreads/internals.h
index 0649e0d460..c56829684e 100644
--- a/linuxthreads/internals.h
+++ b/linuxthreads/internals.h
@@ -250,6 +250,23 @@ static inline pthread_descr thread_self (void)
#endif
}
+/* Max number of times we must spin on a spinlock calling sched_yield().
+ After MAX_SPIN_COUNT iterations, we put the calling thread to sleep. */
+
+#ifndef MAX_SPIN_COUNT
+#define MAX_SPIN_COUNT 50
+#endif
+
+/* Duration of sleep (in nanoseconds) when we can't acquire a spinlock
+ after MAX_SPIN_COUNT iterations of sched_yield().
+ With the 2.0 and 2.1 kernels, this MUST BE > 2ms.
+ (Otherwise the kernel does busy-waiting for realtime threads,
+ giving other threads no chance to run.) */
+
+#ifndef SPIN_SLEEP_DURATION
+#define SPIN_SLEEP_DURATION 2000001
+#endif
+
/* Debugging */
#ifdef DEBUG
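Since both constants added above are guarded by #ifndef, a port or a build can predefine them to tune the spin/sleep behavior without editing this header. A hypothetical override (values made up for illustration, not part of the patch):

/* Hypothetical build-time override, e.g. -DMAX_SPIN_COUNT=100 on the
   compiler command line or defined before internals.h is included. */
#define MAX_SPIN_COUNT      100       /* spin longer before sleeping */
#define SPIN_SLEEP_DURATION 3000000   /* 3 ms; must stay above 2 ms, see comment above */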
diff --git a/linuxthreads/ptlongjmp.c b/linuxthreads/ptlongjmp.c
new file mode 100644
index 0000000000..1397dba43a
--- /dev/null
+++ b/linuxthreads/ptlongjmp.c
@@ -0,0 +1,44 @@
+/* Linuxthreads - a simple clone()-based implementation of Posix */
+/* threads for Linux. */
+/* Copyright (C) 1998 Xavier Leroy (Xavier.Leroy@inria.fr) */
+/* */
+/* This program is free software; you can redistribute it and/or */
+/* modify it under the terms of the GNU Library General Public License */
+/* as published by the Free Software Foundation; either version 2 */
+/* of the License, or (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU Library General Public License for more details. */
+
+/* Redefine siglongjmp and longjmp so that they interact correctly
+ with cleanup handlers */
+
+#include <setjmp.h>
+#include "pthread.h"
+#include "internals.h"
+
+static void pthread_cleanup_upto(__jmp_buf target)
+{
+ pthread_descr self = thread_self();
+ struct _pthread_cleanup_buffer * c;
+
+ for (c = self->p_cleanup;
+ c != NULL && _JMPBUF_UNWINDS(target, c);
+ c = c->prev)
+ c->routine(c->arg);
+ self->p_cleanup = c;
+}
+
+void siglongjmp(sigjmp_buf env, int val)
+{
+ pthread_cleanup_upto(env->__jmpbuf);
+ __libc_siglongjmp(env, val);
+}
+
+void longjmp(jmp_buf env, int val)
+{
+ pthread_cleanup_upto(env->__jmpbuf);
+ __libc_longjmp(env, val);
+}
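A minimal usage sketch of what these redefinitions provide, assuming the LinuxThreads behavior introduced by this patch (the program below is illustrative and not part of the glibc tree): a handler registered with pthread_cleanup_push() now runs when longjmp() unwinds past its frame, because the redefined longjmp() calls pthread_cleanup_upto() before jumping.

/* Illustrative only; link with -lpthread. */
#include <pthread.h>
#include <setjmp.h>
#include <stdio.h>

static jmp_buf env;

static void report(void *arg)
{
  /* Runs during longjmp() via pthread_cleanup_upto(). */
  printf("cleanup handler ran for %s\n", (const char *) arg);
}

static void jumper(void)
{
  pthread_cleanup_push(report, (void *) "jumper");
  longjmp(env, 1);          /* unwinds past the cleanup region */
  pthread_cleanup_pop(0);   /* never reached */
}

int main(void)
{
  if (setjmp(env) == 0)
    jumper();
  else
    puts("back in main after longjmp");
  return 0;
}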
diff --git a/linuxthreads/spinlock.c b/linuxthreads/spinlock.c
new file mode 100644
index 0000000000..170d9ae8d0
--- /dev/null
+++ b/linuxthreads/spinlock.c
@@ -0,0 +1,59 @@
+/* Linuxthreads - a simple clone()-based implementation of Posix */
+/* threads for Linux. */
+/* Copyright (C) 1998 Xavier Leroy (Xavier.Leroy@inria.fr) */
+/* */
+/* This program is free software; you can redistribute it and/or */
+/* modify it under the terms of the GNU Library General Public License */
+/* as published by the Free Software Foundation; either version 2 */
+/* of the License, or (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU Library General Public License for more details. */
+
+/* Spin locks */
+
+#include <sched.h>
+#include <time.h>
+#include "pthread.h"
+#include "internals.h"
+#include "spinlock.h"
+
+/* This function is called if the inlined test-and-set in acquire() failed */
+
+/* The retry strategy is as follows:
+ - We test and set the spinlock MAX_SPIN_COUNT times, calling
+ sched_yield() each time. This gives ample opportunity for other
+ threads with priority >= our priority to make progress and
+ release the spinlock.
+ - If a thread with priority < our priority owns the spinlock,
+ calling sched_yield() repeatedly is useless, since we're preventing
+ the owning thread from making progress and releasing the spinlock.
+ So, after MAX_SPIN_COUNT attempts, we suspend the calling thread
+ using nanosleep(). This again should give time to the owning thread
+ for releasing the spinlock.
+ Notice that the nanosleep() interval must not be too small,
+ since the kernel does busy-waiting for short intervals in a realtime
+ process (!). The smallest duration that guarantees thread
+ suspension is currently 2ms.
+ - When nanosleep() returns, we try again, doing MAX_SPIN_COUNT more
+ sched_yield() calls, then sleeping again if needed. */
+
+void __pthread_acquire(int * spinlock)
+{
+ int cnt = 0;
+ struct timespec tm;
+
+ while (testandset(spinlock)) {
+ if (cnt < MAX_SPIN_COUNT) {
+ sched_yield();
+ cnt++;
+ } else {
+ tm.tv_sec = 0;
+ tm.tv_nsec = SPIN_SLEEP_DURATION;
+ nanosleep(&tm, NULL);
+ cnt = 0;
+ }
+ }
+}
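The testandset() primitive referenced above is not part of this patch; LinuxThreads gets it from the per-architecture sysdeps headers, usually as inline assembly. As a rough sketch of its contract only (using a GCC builtin as an assumption, not the library's actual code): atomically store 1 into *spinlock and return the previous value, so a nonzero result means the lock was already held.

/* Hypothetical stand-in for the architecture-specific testandset(). */
static inline int testandset(int *spinlock)
{
  return __sync_lock_test_and_set(spinlock, 1);
}

With that primitive, acquire() in spinlock.h (shown below) takes an uncontended lock with a single atomic operation and only falls back to __pthread_acquire() when the lock is already held.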
diff --git a/linuxthreads/spinlock.h b/linuxthreads/spinlock.h
index d324abbc84..1707d3e42a 100644
--- a/linuxthreads/spinlock.h
+++ b/linuxthreads/spinlock.h
@@ -15,9 +15,11 @@
/* Spin locks */
+extern void __pthread_acquire(int * spinlock);
+
static inline void acquire(int * spinlock)
{
- while (testandset(spinlock)) __sched_yield();
+ if (testandset(spinlock)) __pthread_acquire(spinlock);
}
static inline void release(int * spinlock)