Diffstat (limited to 'db2/mutex')
-rw-r--r--   db2/mutex/68020.gcc     19
-rw-r--r--   db2/mutex/README       105
-rw-r--r--   db2/mutex/alpha.dec     25
-rw-r--r--   db2/mutex/alpha.gcc     52
-rw-r--r--   db2/mutex/mutex.c      280
-rw-r--r--   db2/mutex/parisc.gcc    40
-rw-r--r--   db2/mutex/parisc.hp     29
-rw-r--r--   db2/mutex/sparc.gcc     33
-rw-r--r--   db2/mutex/uts4.cc.s     21
-rw-r--r--   db2/mutex/x86.gcc       17
10 files changed, 621 insertions, 0 deletions
diff --git a/db2/mutex/68020.gcc b/db2/mutex/68020.gcc
new file mode 100644
index 0000000000..9d8be641d8
--- /dev/null
+++ b/db2/mutex/68020.gcc
@@ -0,0 +1,19 @@
+/*
+ * @(#)68020.gcc 10.1 (Sleepycat) 4/12/97
+ *
+ * For gcc/68K, 0 is clear, 1 is set.
+ */
+#define TSL_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile("tas %1; \n \
+ seq %0" \
+ : "=dm" (__r), "=m" (*__l) \
+ : "1" (*__l) \
+ ); \
+ __r & 1; \
+})
+
+#define TSL_UNSET(tsl) (*(tsl) = 0)
+#define TSL_INIT(tsl) TSL_UNSET(tsl)
+
diff --git a/db2/mutex/README b/db2/mutex/README
new file mode 100644
index 0000000000..30d6b6a7d1
--- /dev/null
+++ b/db2/mutex/README
@@ -0,0 +1,105 @@
+# @(#)README 10.1 (Sleepycat) 4/12/97
+
+Resource locking routines: lock based on a db_mutex_t. All this gunk
+(including trying to make assembly code portable) is necessary because
+System V semaphores require system calls for uncontested locks and we
+don't want to make two system calls per resource lock.
+
+First, this is how it works. The db_mutex_t structure contains a resource
+test-and-set lock (tsl), a file offset, a pid for debugging and statistics
+information.
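+
+As a rough sketch (the real definition lives in the DB internal include
+files and may differ in field names, types, and alignment), the structure
+described above looks something like this:
+
+        typedef struct {
+                tsl_t   tsl_resource;           /* resource test-and-set lock */
+                off_t   off;                    /* file offset for fcntl locking */
+                pid_t   pid;                    /* holder's pid, for debugging */
+                u_long  mutex_set_nowait;       /* statistics: granted without waiting */
+                u_long  mutex_set_wait;         /* statistics: granted after waiting */
+        } db_mutex_t;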
+
+If HAVE_SPINLOCKS is defined (i.e. we know how to do test-and-sets for
+this compiler/architecture combination), we try and lock the resource tsl
+TSL_DEFAULT_SPINS times. If we can't acquire the lock that way, we use
+a system call to sleep for 10ms, 20ms, 40ms, etc. (The time is bounded
+at 1 second, just in case.) Using the timer backoff means that there are
+two assumptions: that locks are held for brief periods (never over system
+calls or I/O) and that locks are not hotly contested.
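+
+In outline (this is a simplification of the actual loop in mutex.c below,
+not a drop-in replacement), the acquisition path for a mutex mp looks
+like this:
+
+        u_long usecs;
+        int nspins;
+
+        for (usecs = 10000;;) {                 /* Start at 10ms. */
+                /* Spin on the resource tsl a few times. */
+                for (nspins = TSL_DEFAULT_SPINS; nspins > 0; --nspins)
+                        if (TSL_SET(&mp->tsl_resource))
+                                return (0);     /* Acquired. */
+
+                /* Failed: sleep, doubling the delay up to 1 second. */
+                (void)__db_sleep(0, usecs);
+                if ((usecs <<= 1) > 1000000)
+                        usecs = 1000000;
+        }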
+
+If HAVE_SPINLOCKS is not defined, i.e. we can't do test-and-sets, we use
+a file descriptor to do byte locking on a file at a specified offset. In
+this case, ALL of the locking is done in the kernel. Because file
+descriptors are allocated per process, we have to provide the file
+descriptor as part of the lock/unlock call. We still have to do timer
+backoff because we need to be able to block ourselves, i.e. the lock
+manager causes processes to wait by having the process acquire a mutex
+and then attempting to re-acquire the mutex. There's no way to use kernel
+locking to block yourself, i.e. if you hold a lock and attempt to
+re-acquire it, the attempt will succeed.
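+
+The kernel lock itself is a plain fcntl(2) byte lock at the mutex's file
+offset; using the caller's file descriptor fd and mutex mp, the calls
+follow the pattern in mutex.c below (error handling omitted):
+
+        struct flock k_lock;
+
+        k_lock.l_whence = SEEK_SET;             /* Lock one byte at mp->off. */
+        k_lock.l_start = mp->off;
+        k_lock.l_len = 1;
+
+        k_lock.l_type = F_WRLCK;
+        (void)fcntl(fd, F_SETLKW, &k_lock);     /* Acquire, blocking. */
+        ...
+        k_lock.l_type = F_UNLCK;
+        (void)fcntl(fd, F_SETLK, &k_lock);      /* Release. */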
+
+Next, let's talk about why it doesn't work the way a reasonable person
+would think it should work.
+
+Ideally, we'd have the ability to try to lock the resource tsl, and if
+that fails, increment a counter of waiting processes, then block in the
+kernel until the tsl is released. The process holding the resource tsl
+would see the wait counter when it went to release the resource tsl, and
+would wake any waiting processes up after releasing the lock. This would
+actually require both another tsl (call it the mutex tsl) and
+synchronization between the call that blocks in the kernel and the actual
+resource tsl. The mutex tsl would be used to protect accesses to the
+db_mutex_t itself. Locking the mutex tsl would be done by a busy loop,
+which is safe because processes would never block holding that tsl (all
+they would do is try to obtain the resource tsl and set/check the wait
+count). The problem in this model is that the blocking call into the
+kernel requires a blocking semaphore, i.e. one whose normal state is
+locked.
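+
+In pseudo-code, the scheme just described (which, as explained below, we
+cannot build portably) would look roughly like this:
+
+        lock:   acquire the mutex tsl (busy loop)
+                if the resource tsl is free:
+                        take the resource tsl, release the mutex tsl, done
+                increment the wait counter
+                release the mutex tsl
+                block in the kernel until the holder wakes us
+
+        unlock: acquire the mutex tsl (busy loop)
+                release the resource tsl
+                if the wait counter is non-zero:
+                        wake the waiting processes
+                release the mutex tsl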
+
+The only portable forms of locking under UNIX are fcntl(2) on a file
+descriptor/offset, and System V semaphores. Neither of these locking
+methods are sufficient to solve the problem.
+
+The problem with fcntl locking is that only the process that obtained the
+lock can release it. Remember, we want the normal state of the kernel
+semaphore to be locked. So, suppose the creator of the db_mutex_t
+initializes the lock to "locked", a second process then locks the
+resource tsl, and a third process needs to block, waiting for the
+resource tsl. When the second process wants to wake up the third
+process, it can't, because it's not the holder of the lock! For the
+second process to be the holder of the lock, we would have to make a
+system call per uncontested lock, which is what we were trying to get
+away from in the first place.
+
+There are some hybrid schemes, such as signaling the holder of the lock,
+or using a different blocking offset depending on which process is
+holding the lock, but it gets complicated fairly quickly. I'm open to
+suggestions, but I'm not holding my breath.
+
+Regardless, we use this form of locking when HAVE_SPINLOCKS is not
+defined (i.e. we're locking in the kernel), because it doesn't have the
+limitations found in System V semaphores, and because the normal state of
+the kernel object in that case is unlocked, so the process releasing the
+lock is also the holder of the lock.
+
+The System V semaphore design has a number of other limitations that make
+it inappropriate for this task. Namely:
+
+First, the semaphore key name space is separate from the file system name
+space (although there exist methods for using file names to create
+semaphore keys). If we use a well-known key, there's no reason to believe
+that any particular key will not already be in use, either by another
+instance of the DB application or some other application, in which case
+the DB application will fail. If we create a key, then we have to use a
+file system name to rendezvous and pass around the key.
+
+Second, System V semaphores traditionally have compile-time, system-wide
+limits on the number of semaphore keys that you can have. Typically, that
+number is far too low for any practical purpose. Since the semaphores
+permit more than a single slot per semaphore key, we could try and get
+around that limit by using multiple slots, but that means that the file
+that we're using for rendezvous is going to have to contain slot
+information as well as semaphore key information, and we're going to be
+reading/writing it on every db_mutex_t init or destroy operation. Anyhow,
+similar compile-time, system-wide limits on the numbers of slots per
+semaphore key kick in, and you're right back where you started.
+
+My fantasy is that once POSIX.1 standard mutexes are in wide-spread use,
+we can switch to them. My guess is that it won't happen, because the
+POSIX semaphores are only required to work for threads within a process,
+and not independent processes.
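+
+(For reference, "switching to them" would mean something like the
+following; this is a sketch, not code we ship, and it only works on
+systems that support the optional process-shared mutex attribute:
+
+        pthread_mutexattr_t attr;
+        pthread_mutex_t *m;             /* Allocated in shared memory. */
+
+        pthread_mutexattr_init(&attr);
+        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
+        pthread_mutex_init(m, &attr);
+        pthread_mutexattr_destroy(&attr);
+
+Until that attribute is reliably available, it doesn't help us.)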
+
+Note: there are races in the statistics code, but since it's just
+statistics, I didn't bother fixing them. (The fix requires a mutex tsl,
+so, when/if this code is fixed to do rational locking (see above), then
+change the statistics update code to acquire/release the mutex tsl.)
diff --git a/db2/mutex/alpha.dec b/db2/mutex/alpha.dec
new file mode 100644
index 0000000000..83ed371136
--- /dev/null
+++ b/db2/mutex/alpha.dec
@@ -0,0 +1,25 @@
+/*
+ * @(#)alpha.dec 8.3 (Sleepycat Software) 1/18/97
+ *
+ * The DEC C asm acts as a pseudo-call. The first argument is the assembly
+ * code, and the remaining arguments are assigned as in a procedure call, to
+ * r16, r17, etc. (represented in asm as %a0, %a1, and so forth).
+ *
+ * From: Dave Butenhof.
+ */
+
+#include <c_asm.h>
+
+#define TSL_SET(tsl) (asm ("mb; \
+ 10: ldl_l %v0,(%a0) ; \
+ bne %v0,30f ; \
+ or %v0,1,%r1 ; \
+ stl_c %r1,(%a0) ; \
+ beq %r1,20f ; \
+ mb ; \
+ br %r31,30f ; \
+ 20: br %r31,10b ; \
+ 30: ", (tsl)))
+
+THIS WAS NOT CONVERTED TO TAKE A POINTER AS AN ARGUMENT...
+#define TSL_UNSET(tsl) (asm ("mb"), *(tsl) = 0)
diff --git a/db2/mutex/alpha.gcc b/db2/mutex/alpha.gcc
new file mode 100644
index 0000000000..247d04cf31
--- /dev/null
+++ b/db2/mutex/alpha.gcc
@@ -0,0 +1,52 @@
+/*
+ * @(#)alpha.gcc 10.1 (Sleepycat) 4/12/97
+ *
+ * The code appearing below is taken from Richard L. Sites, ed. "Alpha
+ * Architecture Reference Manual", Digital Press, 1992, page 5-7 and 5-8.
+ * There are 2 modifications:
+ *
+ * 1. The jump from blbs __r1,30f to !__r1, which is dictated by the way the
+ * TSL_SET macro is used. The code suggested in Sites includes the main loop
+ * of the spin lock, whereas in this code the rest of the loop is specified in C.
+ * The generated code might be suboptimal if the compiler generates a forward
+ * branch for the usual case in which the mutex is uncontested.
+ *
+ * 2. At label 20, Sites suggests including code for testing for an excessive
+ * number of _processor_ lock conflicts.  (The stq_c instruction stores its
+ * first argument provided that no other processor has written to a byte range
+ * including its memory-location argument.) Absent such checking the code
+ * below could conceivably stall silently on a multiprocessor alpha, depending
+ * on how often processor/processor conflicts occur in a particular byte range.
+ *
+ * Note that the mb ("memory-barrier") instruction in TSL_UNSET is critical to
+ * correct operation in a multiprocessor alpha (as is, of course, the mb in
+ * the TSL_SET macro). Without the mb, changes to shared memory that occurred
+ * inside the critical section (before the TSL_UNSET) might reach shared memory
+ * _after_ the change of tsl to 0, thereby permitting another processor to see
+ * an inconsistent view of the data protected by the mutex.
+ *
+ * For gcc/alpha, 0 is clear, 1 is set.
+ */
+#define TSL_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ register tsl_t __r1, __r2; \
+ __asm__ volatile(" \n\
+ 10: ldq_l %0,(%2) \n\
+ blbs %0,30f \n\
+ or %0,1,%1 \n\
+ stq_c %1,(%2) \n\
+ beq %1,20f \n\
+ mb \n\
+ br 30f \n\
+ 20: br 10b \n\
+ 30: " \
+ : "=&r" (__r1), "=&r" (__r2) \
+ : "r" (__l)); \
+ !__r1; \
+})
+
+#define TSL_UNSET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ __asm__ volatile("mb; stq $31,(%0);" : : "r" (__l)); \
+})
+#define TSL_INIT(tsl) TSL_UNSET(tsl)
diff --git a/db2/mutex/mutex.c b/db2/mutex/mutex.c
new file mode 100644
index 0000000000..b23f738ad7
--- /dev/null
+++ b/db2/mutex/mutex.c
@@ -0,0 +1,280 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "config.h"
+
+#ifndef lint
+static const char sccsid[] = "@(#)mutex.c 10.22 (Sleepycat) 8/21/97";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "common_ext.h"
+
+#ifdef HAVE_SPINLOCKS
+
+#ifdef HAVE_FUNC_AIX
+#define TSL_INIT(x)
+#define TSL_SET(x) (!_check_lock(x, 0, 1))
+#define TSL_UNSET(x) _clear_lock(x, 0)
+#endif
+
+#ifdef HAVE_ASSEM_MC68020_GCC
+#include "68020.gcc"
+#endif
+
+#if defined(HAVE_FUNC_MSEM)
+/*
+ * XXX
+ * Should we not use MSEM_IF_NOWAIT and let the system block for us?
+ * I've no idea if this will block all threads in the process or not.
+ */
+#define TSL_INIT(x) msem_init(x, MSEM_UNLOCKED)
+#define TSL_SET(x) (!msem_lock(x, MSEM_IF_NOWAIT))
+#define TSL_UNSET(x) msem_unlock(x, 0)
+#endif
+
+#ifdef HAVE_FUNC_SGI
+#define TSL_INIT(x) init_lock(x)
+#define TSL_SET(x) (!acquire_lock(x))
+#define TSL_UNSET(x) release_lock(x)
+#endif
+
+#ifdef HAVE_FUNC_SOLARIS
+/*
+ * Semaphore calls don't work on Solaris 5.5.
+ *
+ * #define TSL_INIT(x) sema_init(x, 1, USYNC_PROCESS, NULL)
+ * #define TSL_SET(x) (sema_wait(x) == 0)
+ * #define TSL_UNSET(x) sema_post(x)
+ */
+#define TSL_INIT(x)
+#define TSL_SET(x) (_lock_try(x))
+#define TSL_UNSET(x) _lock_clear(x)
+#endif
+
+#ifdef HAVE_ASSEM_SPARC_GCC
+#include "sparc.gcc"
+#endif
+
+#ifdef HAVE_ASSEM_UTS4_CC
+#define TSL_INIT(x)
+#define TSL_SET(x) (!uts_lock(x, 1))
+#define TSL_UNSET(x) (*(x) = 0)
+#endif
+
+#ifdef HAVE_ASSEM_X86_GCC
+#include "x86.gcc"
+#endif
+
+#if defined(_WIN32)
+/* DBDB this needs to be byte-aligned!! */
+#define TSL_INIT(tsl)
+#define TSL_SET(tsl) (!InterlockedExchange((PLONG)tsl, 1))
+#define TSL_UNSET(tsl) (*(tsl) = 0)
+#endif
+
+#ifdef macintosh
+/* Mac spinlocks are simple because we cannot possibly be preempted. */
+#define TSL_INIT(tsl)
+#define TSL_SET(tsl) (*(tsl) = 1)
+#define TSL_UNSET(tsl) (*(tsl) = 0)
+#endif
+
+#endif /* HAVE_SPINLOCKS */
+
+#ifdef MORE_THAN_ONE_PROCESSOR
+#define TSL_DEFAULT_SPINS 5 /* Default spins before block. */
+#else
+#define TSL_DEFAULT_SPINS 1 /* Default spins before block. */
+#endif
+
+/*
+ * __db_mutex_init --
+ * Initialize a DB mutex structure.
+ *
+ * PUBLIC: void __db_mutex_init __P((db_mutex_t *, off_t));
+ */
+void
+__db_mutex_init(mp, off)
+ db_mutex_t *mp;
+ off_t off;
+{
+#ifdef DEBUG
+ if ((ALIGNTYPE)mp & (MUTEX_ALIGNMENT - 1)) {
+ (void)fprintf(stderr,
+ "MUTEX ERROR: mutex NOT %d-byte aligned!\n",
+ MUTEX_ALIGNMENT);
+ abort();
+ }
+#endif
+ memset(mp, 0, sizeof(db_mutex_t));
+
+#ifdef HAVE_SPINLOCKS
+ TSL_INIT(&mp->tsl_resource);
+#else
+ mp->off = off;
+#endif
+}
+
+#define MS(n) ((n) * 1000) /* Milliseconds to micro-seconds. */
+#define SECOND (MS(1000)) /* A second's worth of micro-seconds. */
+
+/*
+ * __db_mutex_lock --
+ * Lock on a mutex, logically blocking if necessary.
+ *
+ * PUBLIC: int __db_mutex_lock __P((db_mutex_t *, int, int (*)(void)));
+ */
+int
+__db_mutex_lock(mp, fd, yield)
+ db_mutex_t *mp;
+ int fd;
+ int (*yield) __P((void));
+{
+ u_long usecs;
+
+#ifdef HAVE_SPINLOCKS
+ int nspins;
+
+ for (usecs = MS(10);;) {
+ /*
+ * Try and acquire the uncontested resource lock for
+ * TSL_DEFAULT_SPINS.
+ */
+ for (nspins = TSL_DEFAULT_SPINS; nspins > 0; --nspins)
+ if (TSL_SET(&mp->tsl_resource)) {
+#ifdef DEBUG
+ if (mp->pid != 0) {
+ (void)fprintf(stderr,
+ "MUTEX ERROR: __db_mutex_lock: lock currently locked\n");
+ abort();
+ }
+ mp->pid = getpid();
+#endif
+#ifdef MUTEX_STATISTICS
+ if (usecs == MS(10))
+ ++mp->mutex_set_nowait;
+ else
+ ++mp->mutex_set_wait;
+#endif
+ return (0);
+ }
+
+ /* Yield the processor; wait 10ms initially, up to 1 second. */
+ if (yield == NULL || yield() != 0) {
+ (void)__db_sleep(0, usecs);
+ if ((usecs <<= 1) > SECOND)
+ usecs = SECOND;
+ }
+ }
+ /* NOTREACHED */
+
+#else /* !HAVE_SPINLOCKS */
+ struct flock k_lock;
+ pid_t mypid;
+ int locked;
+
+ /* Initialize the lock. */
+ k_lock.l_whence = SEEK_SET;
+ k_lock.l_start = mp->off;
+ k_lock.l_len = 1;
+
+ for (locked = 0, mypid = getpid();;) {
+ /*
+ * Wait for the lock to become available; wait 10ms initially,
+ * up to 1 second.
+ */
+ for (usecs = MS(10); mp->pid != 0;)
+ if (yield == NULL || yield() != 0) {
+ (void)__db_sleep(0, usecs);
+ if ((usecs <<= 1) > SECOND)
+ usecs = SECOND;
+ }
+
+ /* Acquire an exclusive kernel lock. */
+ k_lock.l_type = F_WRLCK;
+ if (fcntl(fd, F_SETLKW, &k_lock))
+ return (1);
+
+ /* If the resource tsl is still available, it's ours. */
+ if (mp->pid == 0) {
+ locked = 1;
+ mp->pid = mypid;
+ }
+
+ /* Release the kernel lock. */
+ k_lock.l_type = F_UNLCK;
+ if (fcntl(fd, F_SETLK, &k_lock))
+ return (1);
+
+ /*
+ * If we got the resource tsl we're done.
+ *
+ * !!!
+ * We can't check to see if the lock is ours, because we may
+ * be trying to block ourselves in the lock manager, and so
+ * the holder of the lock that's preventing us from getting
+ * the lock may be us! (Seriously.)
+ */
+ if (locked)
+ break;
+ }
+
+#ifdef MUTEX_STATISTICS
+ ++mp->mutex_set_wait;
+#endif
+ return (0);
+#endif /* !HAVE_SPINLOCKS */
+}
+
+/*
+ * __db_mutex_unlock --
+ * Release a lock.
+ *
+ * PUBLIC: int __db_mutex_unlock __P((db_mutex_t *, int));
+ */
+int
+__db_mutex_unlock(mp, fd)
+ db_mutex_t *mp;
+ int fd;
+{
+#ifdef DEBUG
+ if (mp->pid == 0) {
+ (void)fprintf(stderr,
+ "MUTEX ERROR: __db_mutex_unlock: lock already unlocked\n");
+ abort();
+ }
+#endif
+
+#ifdef HAVE_SPINLOCKS
+#ifdef DEBUG
+ mp->pid = 0;
+#endif
+
+ /* Release the resource tsl. */
+ TSL_UNSET(&mp->tsl_resource);
+#else
+ /*
+ * Release the resource tsl. We don't have to acquire any locks
+ * because processes trying to acquire the lock are checking for
+ * a pid of 0, not a specific value.
+ */
+ mp->pid = 0;
+#endif
+ return (0);
+}
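+
+/*
+ * Usage sketch (illustrative only: the names "mutexp", "lock_fd" and
+ * "offset_in_file" are made up for this comment, and error handling is
+ * omitted):
+ *
+ *      db_mutex_t *mutexp;     -- lives in a region shared by the processes
+ *      int lock_fd;            -- per-process fd, only used without spinlocks
+ *
+ *      __db_mutex_init(mutexp, offset_in_file);
+ *      ...
+ *      if (__db_mutex_lock(mutexp, lock_fd, NULL) == 0) {
+ *              -- critical section --
+ *              (void)__db_mutex_unlock(mutexp, lock_fd);
+ *      }
+ */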
diff --git a/db2/mutex/parisc.gcc b/db2/mutex/parisc.gcc
new file mode 100644
index 0000000000..e15f6f2dba
--- /dev/null
+++ b/db2/mutex/parisc.gcc
@@ -0,0 +1,40 @@
+/*
+ * @(#)parisc.gcc 8.5 (Sleepycat) 1/18/97
+ *
+ * Copyright (c) 1996-1997, The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the Computer
+ * Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ */
+
+/*
+ * The PA-RISC has a "load and clear" instead of a "test and set" instruction.
+ * The 32-bit word used by that instruction must be 16-byte aligned hence we
+ * allocate 16 bytes for a tsl_t and use the word that is properly aligned.
+ * We could use the "aligned" attribute in GCC but that doesn't work for stack
+ * variables.
+ */
+#define TSL_SET(tsl) ({ \
+ int *__l = (int *)(((int)(tsl)+15)&~15); \
+ int __r; \
+ asm volatile("ldcws 0(%1),%0" : "=r" (__r) : "r" (__l)); \
+ __r & 1; \
+})
+
+#define TSL_UNSET(tsl) ({ \
+ int *__l = (int *)(((int)(tsl)+15)&~15); \
+ *__l = -1; \
+})
diff --git a/db2/mutex/parisc.hp b/db2/mutex/parisc.hp
new file mode 100644
index 0000000000..d10807b7f1
--- /dev/null
+++ b/db2/mutex/parisc.hp
@@ -0,0 +1,29 @@
+/*
+ * @(#)parisc.hp 8.5 (Sleepycat) 1/18/97
+ *
+ * Copyright (c) 1996-1997, The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the Computer
+ * Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ */
+
+/*
+ * The PA-RISC has a "load and clear" instead of a "test and set" instruction.
+ * The 32-bit word used by that instruction must be 16-byte aligned hence we
+ * allocate 16 bytes for a tsl_t and use the word that is properly aligned.
+ */
+#define TSL_SET(tsl) tsl_set(tsl)
+#define TSL_UNSET(tsl) tsl_unset(tsl)
diff --git a/db2/mutex/sparc.gcc b/db2/mutex/sparc.gcc
new file mode 100644
index 0000000000..8445a0629b
--- /dev/null
+++ b/db2/mutex/sparc.gcc
@@ -0,0 +1,33 @@
+/*
+ * @(#)sparc.gcc 10.1 (Sleepycat) 4/12/97
+ *
+ * The ldstub instruction takes the location specified by its first argument
+ * (a register containing a memory address) and loads its contents into its
+ * second argument (a register) and atomically sets the contents the location
+ * specified by its first argument to a byte of 1s. (The value in the second
+ * argument is never read, but only overwritten.)
+ *
+ * The membar instructions are needed to ensure that writes to the lock are
+ * correctly ordered with writes that occur later in the instruction stream.
+ *
+ * For gcc/sparc, 0 is clear, 1 is set.
+ */
+
+#if defined(__sparcv9__)
+Does the following code need membar instructions for V9 processors?
+#endif
+
+#define TSL_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ register tsl_t __r; \
+ __asm__ volatile \
+ ("ldstub [%1],%0" \
+ : "=r"( __r) : "r" (__l)); \
+ !__r; \
+})
+
+#define TSL_UNSET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ __asm__ volatile ("stb %%g0,[%0]" : : "r" (__l)); \
+})
+#define TSL_INIT(tsl) TSL_UNSET(tsl)
diff --git a/db2/mutex/uts4.cc.s b/db2/mutex/uts4.cc.s
new file mode 100644
index 0000000000..ee5f4143bd
--- /dev/null
+++ b/db2/mutex/uts4.cc.s
@@ -0,0 +1,21 @@
+ /
+ / int uts_lock ( int *p, int i );
+ / Update the lock word pointed to by p with the
+ / value i, using compare-and-swap.
+ / Returns 0 if update was successful.
+ / Returns 1 if update failed.
+ /
+ entry uts_lock
+ uts_lock:
+ using .,r15
+ st r2,8(sp) / Save R2
+ l r2,64+0(sp) / R2 -> word to update
+ slr r0, r0 / R0 = current lock value must be 0
+ l r1,64+4(sp) / R1 = new lock value
+ cs r0,r1,0(r2) / Try the update ...
+ be x / ... Success. Return 0
+ la r0,1 / ... Failure. Return 1
+ x: /
+ l r2,8(sp) / Restore R2
+ b 2(,r14) / Return to caller
+ drop r15
diff --git a/db2/mutex/x86.gcc b/db2/mutex/x86.gcc
new file mode 100644
index 0000000000..886a6811a2
--- /dev/null
+++ b/db2/mutex/x86.gcc
@@ -0,0 +1,17 @@
+/*
+ * @(#)x86.gcc 10.2 (Sleepycat) 6/21/97
+ *
+ * For gcc/x86, 0 is clear, 1 is set.
+ */
+#define TSL_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile("movl $1,%%eax; xchgb %1,%%al; xorl $1,%%eax" \
+ : "=&a" (__r), "=m" (*__l) \
+ : "1" (*__l) \
+ ); \
+ __r & 1; \
+})
+
+#define TSL_UNSET(tsl) (*(tsl) = 0)
+#define TSL_INIT(tsl) TSL_UNSET(tsl)