diff options
author | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2016-08-23 20:05:04 +0200 |
---|---|---|
committer | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2016-08-23 20:06:06 +0200 |
commit | f0df4a42a77d15d4edfc95bb45035e81cb1bdbaa (patch) | |
tree | 88b352f9beb446566e08b531accd7f798205d314 /mach | |
parent | bde3bfa9f8d1b1c183e0ca66703b2fa3215eec21 (diff) |
Introduce gsync-based locks to glibc.
From: Agustina Arzille <avarzille@riseup.net>
* hurd/Makefile: Add hurdlock.
* hurd/Versions: Added new entry to export the above interface.
* hurd/hurdlock.c: New file.
* hurd/hurdlock.h: New file.
* hurd/hurdpid.c: Include <lowlevellock.h>.
(_S_msg_proc_newids): Use lll_wait to synchronize.
* hurd/hurdsig.c (reauth_proc): Use __mutex_lock and __mutex_unlock.
* hurd/setauth.c: Include <hurdlock.h>, use integer for synchronization.
* hurd/sysvshm.c: Include <hurdlock.h>, use integer for synchronization.
* mach/Makefile: Remove unneeded file.
* mach/lock-intern.h: Use lll to implement spinlocks.
* mach/lowlevellock.h: New file.
* mach/mutex-init.c: Rewrite mutex initialization.
* sysdeps/mach/Makefile: Add libmachuser as dependencies for some libs.
* sysdeps/mach/bits/libc-lock.h: Reimplemented libc internal locks
with lll; cleanup routines now use gcc's cleanup attribute.
* sysdeps/mach/hurd/bits/errno.h: New errno values.
* sysdeps/mach/hurd/bits/libc-lock.h: Removed file.
* sysdeps/mach/hurd/malloc-machine.h: Reimplemented malloc locks.
* sysdeps/mach/hurd/setpgid.c (setpgid): Use gsync for synchronization.
* sysdeps/mach/hurd/setsid.c (setsid): Likewise.
Diffstat (limited to 'mach')
-rw-r--r-- | mach/Makefile | 2 | ||||
-rw-r--r-- | mach/lock-intern.h | 84 | ||||
-rw-r--r-- | mach/lowlevellock.h | 80 | ||||
-rw-r--r-- | mach/mutex-init.c | 7 |
4 files changed, 134 insertions, 39 deletions
diff --git a/mach/Makefile b/mach/Makefile index 51ba0765be..d76f751cc0 100644 --- a/mach/Makefile +++ b/mach/Makefile @@ -23,7 +23,7 @@ headers = mach_init.h mach.h mach_error.h mach-shortcuts.h mach/mach_traps.h \ $(interface-headers) mach/mach.h mach/mig_support.h mach/error.h \ $(lock-headers) machine-sp.h lock = spin-solid spin-lock mutex-init mutex-solid -lock-headers = lock-intern.h machine-lock.h spin-lock.h +lock-headers = lock-intern.h spin-lock.h routines = $(mach-syscalls) $(mach-shortcuts) \ mach_init mig_strncpy msg \ mig-alloc mig-dealloc mig-reply \ diff --git a/mach/lock-intern.h b/mach/lock-intern.h index df3d1c4e2f..c69a7ed425 100644 --- a/mach/lock-intern.h +++ b/mach/lock-intern.h @@ -19,73 +19,92 @@ #define _LOCK_INTERN_H #include <sys/cdefs.h> -#include <machine-lock.h> +#if defined __USE_EXTERN_INLINES && defined _LIBC +#include <lowlevellock.h> +#endif #ifndef _EXTERN_INLINE #define _EXTERN_INLINE __extern_inline #endif +/* The type of a spin lock variable. */ +typedef unsigned int __spin_lock_t; + +/* Static initializer for spinlocks. */ +#define __SPIN_LOCK_INITIALIZER 0 /* Initialize LOCK. */ void __spin_lock_init (__spin_lock_t *__lock); -#ifdef __USE_EXTERN_INLINES +#if defined __USE_EXTERN_INLINES && defined _LIBC _EXTERN_INLINE void __spin_lock_init (__spin_lock_t *__lock) { - *__lock = __SPIN_LOCK_INITIALIZER; + *__lock = LLL_INITIALIZER; } #endif -/* Lock LOCK, blocking if we can't get it. */ -extern void __spin_lock_solid (__spin_lock_t *__lock); - /* Lock the spin lock LOCK. */ void __spin_lock (__spin_lock_t *__lock); -#ifdef __USE_EXTERN_INLINES +#if defined __USE_EXTERN_INLINES && defined _LIBC _EXTERN_INLINE void __spin_lock (__spin_lock_t *__lock) { - if (! __spin_try_lock (__lock)) - __spin_lock_solid (__lock); + lll_lock (__lock, 0); } #endif - -/* Name space-clean internal interface to mutex locks. - Code internal to the C library uses these functions to lock and unlock - mutex locks. 
These locks are of type `struct mutex', defined in - <cthreads.h>. The functions here are name space-clean. If the program - is linked with the cthreads library, `__mutex_lock_solid' and - `__mutex_unlock_solid' will invoke the corresponding cthreads functions - to implement real mutex locks. If not, simple stub versions just use - spin locks. */ +/* Unlock LOCK. */ +void __spin_unlock (__spin_lock_t *__lock); +#if defined __USE_EXTERN_INLINES && defined _LIBC +_EXTERN_INLINE void +__spin_unlock (__spin_lock_t *__lock) +{ + lll_unlock (__lock, 0); +} +#endif -/* Initialize the newly allocated mutex lock LOCK for further use. */ -extern void __mutex_init (void *__lock); +/* Try to lock LOCK; return nonzero if we locked it, zero if another has. */ +int __spin_try_lock (__spin_lock_t *__lock); -/* Lock LOCK, blocking if we can't get it. */ -extern void __mutex_lock_solid (void *__lock); +#if defined __USE_EXTERN_INLINES && defined _LIBC +_EXTERN_INLINE int +__spin_try_lock (__spin_lock_t *__lock) +{ + return (lll_trylock (__lock) == 0); +} +#endif + +/* Return nonzero if LOCK is locked. */ +int __spin_lock_locked (__spin_lock_t *__lock); -/* Finish unlocking LOCK, after the spin lock LOCK->held has already been - unlocked. This function will wake up any thread waiting on LOCK. */ -extern void __mutex_unlock_solid (void *__lock); +#if defined __USE_EXTERN_INLINES && defined _LIBC +_EXTERN_INLINE int +__spin_lock_locked (__spin_lock_t *__lock) +{ + return (*(volatile __spin_lock_t *)__lock != 0); +} +#endif + +/* Name space-clean internal interface to mutex locks. */ + +/* Initialize the newly allocated mutex lock LOCK for further use. */ +extern void __mutex_init (void *__lock); /* Lock the mutex lock LOCK. */ void __mutex_lock (void *__lock); -#ifdef __USE_EXTERN_INLINES +#if defined __USE_EXTERN_INLINES && defined _LIBC _EXTERN_INLINE void __mutex_lock (void *__lock) { - if (! 
__spin_try_lock ((__spin_lock_t *) __lock)) - __mutex_lock_solid (__lock); + __spin_lock ((__spin_lock_t *)__lock); } #endif @@ -93,23 +112,22 @@ __mutex_lock (void *__lock) void __mutex_unlock (void *__lock); -#ifdef __USE_EXTERN_INLINES +#if defined __USE_EXTERN_INLINES && defined _LIBC _EXTERN_INLINE void __mutex_unlock (void *__lock) { - __spin_unlock ((__spin_lock_t *) __lock); - __mutex_unlock_solid (__lock); + __spin_unlock ((__spin_lock_t *)__lock); } #endif int __mutex_trylock (void *__lock); -#ifdef __USE_EXTERN_INLINES +#if defined __USE_EXTERN_INLINES && defined _LIBC _EXTERN_INLINE int __mutex_trylock (void *__lock) { - return __spin_try_lock ((__spin_lock_t *) __lock); + return (__spin_try_lock ((__spin_lock_t *)__lock)); } #endif diff --git a/mach/lowlevellock.h b/mach/lowlevellock.h new file mode 100644 index 0000000000..b6ce93927b --- /dev/null +++ b/mach/lowlevellock.h @@ -0,0 +1,80 @@ +/* Copyright (C) 1994-2016 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef __MACH_LOWLEVELLOCK_H__ +#define __MACH_LOWLEVELLOCK_H__ 1 + +#include <mach/gnumach.h> +#include <atomic.h> + +/* Gsync flags. 
*/ +#ifndef GSYNC_SHARED + #define GSYNC_SHARED 0x01 + #define GSYNC_QUAD 0x02 + #define GSYNC_TIMED 0x04 + #define GSYNC_BROADCAST 0x08 + #define GSYNC_MUTATE 0x10 +#endif + +/* Static initializer for low-level locks. */ +#define LLL_INITIALIZER 0 + +/* Wait on address PTR, without blocking if its contents + * are different from VAL. */ +#define lll_wait(ptr, val, flags) \ + __gsync_wait (__mach_task_self (), \ + (vm_offset_t)(ptr), (val), 0, 0, (flags)) + +/* Wake one or more threads waiting on address PTR. */ +#define lll_wake(ptr, flags) \ + __gsync_wake (__mach_task_self (), (vm_offset_t)(ptr), 0, (flags)) + +/* Acquire the lock at PTR. */ +#define lll_lock(ptr, flags) \ + ({ \ + int *__iptr = (int *)(ptr); \ + int __flags = (flags); \ + if (*__iptr != 0 || \ + atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) != 0) \ + while (1) \ + { \ + if (atomic_exchange_acq (__iptr, 2) == 0) \ + break; \ + lll_wait (__iptr, 2, __flags); \ + } \ + (void)0; \ + }) + +/* Try to acquire the lock at PTR, without blocking. + * Evaluates to zero on success. */ +#define lll_trylock(ptr) \ + ({ \ + int *__iptr = (int *)(ptr); \ + *__iptr == 0 && \ + atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) == 0 ? 0 : -1; \ + }) + +/* Release the lock at PTR. */ +#define lll_unlock(ptr, flags) \ + ({ \ + int *__iptr = (int *)(ptr); \ + if (atomic_exchange_rel (__iptr, 0) == 2) \ + lll_wake (__iptr, (flags)); \ + (void)0; \ + }) + +#endif diff --git a/mach/mutex-init.c b/mach/mutex-init.c index c68320d1dd..cb9ba0e33c 100644 --- a/mach/mutex-init.c +++ b/mach/mutex-init.c @@ -17,13 +17,10 @@ <http://www.gnu.org/licenses/>. */ #include <lock-intern.h> -#include <cthreads.h> +#include <lowlevellock.h> void __mutex_init (void *lock) { - /* This happens to be name space-safe because it is a macro. - It invokes only spin_lock_init, which is a macro for __spin_lock_init; - and cthread_queue_init, which is a macro for some simple code. 
*/ - mutex_init ((struct mutex *) lock); + *(int *)lock = LLL_INITIALIZER; } |