 Makefrag.am                       |   6
 configure.ac                      |  17
 kern/mutex.h                      |   6
 kern/mutex/mutex_adaptive.c       | 138
 kern/mutex/mutex_adaptive_i.h     | 120
 kern/mutex/mutex_adaptive_types.h |  35
 kern/mutex_types.h                |   2
 kern/sleepq.c                     |  27
 kern/sleepq.h                     |   6
 kern/thread.c                     |  15
 kern/thread.h                     |   8
 11 files changed, 377 insertions(+), 3 deletions(-)
diff --git a/Makefrag.am b/Makefrag.am
index c3f8d035..2ed9f771 100644
--- a/Makefrag.am
+++ b/Makefrag.am
@@ -46,6 +46,8 @@ x15_SOURCES += \
         kern/macros.h \
         kern/mutex.h \
         kern/mutex_types.h \
+        kern/mutex/mutex_adaptive_i.h \
+        kern/mutex/mutex_adaptive_types.h \
         kern/mutex/mutex_pi_i.h \
         kern/mutex/mutex_pi_types.h \
         kern/mutex/mutex_plain_i.h \
@@ -105,9 +107,13 @@ x15_SOURCES += \
         kern/xcall.c \
         kern/xcall.h
 
+if MUTEX_ADAPTIVE
+x15_SOURCES += kern/mutex/mutex_adaptive.c
+else
 if !MUTEX_PI
 x15_SOURCES += kern/mutex/mutex_plain.c
 endif
+endif
 
 x15_SOURCES += \
         vm/vm_adv.h \
diff --git a/configure.ac b/configure.ac
index dd4607c9..520fa02c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -47,6 +47,10 @@ AC_ARG_WITH([max-cpus],
     [opt_max_cpus=$withval],
     [opt_max_cpus=128])
 
+AC_ARG_ENABLE([mutex-adaptive],
+    [AS_HELP_STRING([--enable-mutex-adaptive],
+        [enable adaptive spinning mutexes])])
+
 AC_ARG_ENABLE([mutex-pi],
     [AS_HELP_STRING([--enable-mutex-pi],
         [enable priority inheritance for regular mutexes
@@ -100,11 +104,22 @@ AC_DEFINE_UNQUOTED([X15_MAX_CPUS], [$opt_max_cpus],
                    [maximum number of supported processors])
 AC_MSG_NOTICE([maximum number of supported processors: $opt_max_cpus])
 
-AS_IF([test x"$enable_mutex_pi" = xyes],
+AS_IF([test x"$enable_mutex_adaptive" = xyes -a x"$enable_mutex_pi" = xyes],
+      [AC_MSG_ERROR([--enable-mutex-adaptive and --enable-mutex-pi are mutually exclusive])])
+
+AS_IF([test x"$enable_mutex_adaptive" = xyes],
+      [mutex_impl="adaptive spinning"],
+      [test x"$enable_mutex_pi" = xyes],
       [mutex_impl="priority inheritance"],
       [mutex_impl=plain])
 AC_MSG_NOTICE([mutex implementation: $mutex_impl])
 
+AS_IF([test x"$enable_mutex_adaptive" = xyes],
+      [AC_DEFINE_UNQUOTED([X15_MUTEX_ADAPTIVE], [],
+                          [Enable adaptive mutexes])])
+AM_CONDITIONAL([MUTEX_ADAPTIVE],
+               [test x"$enable_mutex_adaptive" = xyes])
+
 AS_IF([test x"$enable_mutex_pi" = xyes],
       [AC_DEFINE_UNQUOTED([X15_MUTEX_PI], [],
               [Enable priority inheritance for regular mutexes])]
diff --git a/kern/mutex.h b/kern/mutex.h
index e599e8a2..2936d82f 100644
--- a/kern/mutex.h
+++ b/kern/mutex.h
@@ -23,8 +23,14 @@
 #ifndef _KERN_MUTEX_H
 #define _KERN_MUTEX_H
 
+#if defined(X15_MUTEX_PI) && defined(X15_MUTEX_ADAPTIVE)
+#error "only one of X15_MUTEX_PI and X15_MUTEX_ADAPTIVE may be defined"
+#endif
+
 #if defined(X15_MUTEX_PI)
 #include <kern/mutex/mutex_pi_i.h>
+#elif defined(X15_MUTEX_ADAPTIVE)
+#include <kern/mutex/mutex_adaptive_i.h>
 #else
 #include <kern/mutex/mutex_plain_i.h>
 #endif
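With the build plumbing above in place, the implementation is selected at configure time. A minimal invocation, assuming the project's standard autoconf workflow:

    $ ./configure --enable-mutex-adaptive

Combining --enable-mutex-adaptive with --enable-mutex-pi now fails early with an explicit error instead of silently preferring one implementation.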
diff --git a/kern/mutex/mutex_adaptive.c b/kern/mutex/mutex_adaptive.c
new file mode 100644
index 00000000..f4274ce1
--- /dev/null
+++ b/kern/mutex/mutex_adaptive.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2017 Agustina Arzille.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <kern/atomic.h>
+#include <kern/mutex.h>
+#include <kern/mutex_types.h>
+#include <kern/sleepq.h>
+#include <kern/thread.h>
+#include <machine/cpu.h>
+
+static struct thread *
+mutex_adaptive_get_thread(uintptr_t owner)
+{
+    return (struct thread *)(owner & ~MUTEX_ADAPTIVE_CONTENDED);
+}
+
+static void
+mutex_adaptive_set_contended(struct mutex *mutex)
+{
+    atomic_or_acq_rel(&mutex->owner, MUTEX_ADAPTIVE_CONTENDED);
+}
+
+static inline bool
+mutex_adaptive_is_owner(struct mutex *mutex, uintptr_t owner)
+{
+    uintptr_t prev;
+
+    prev = atomic_load(&mutex->owner, ATOMIC_RELAXED);
+    return mutex_adaptive_get_thread(prev) == mutex_adaptive_get_thread(owner);
+}
+
+void
+mutex_adaptive_lock_slow(struct mutex *mutex)
+{
+    uintptr_t self, owner;
+    struct sleepq *sleepq;
+    unsigned long flags;
+
+    self = (uintptr_t)thread_self();
+
+    sleepq = sleepq_lend(mutex, false, &flags);
+
+    mutex_adaptive_set_contended(mutex);
+
+    for (;;) {
+        owner = atomic_cas_acquire(&mutex->owner, MUTEX_ADAPTIVE_CONTENDED,
+                                   self | MUTEX_ADAPTIVE_CONTENDED);
+        assert(owner & MUTEX_ADAPTIVE_CONTENDED);
+
+        if (mutex_adaptive_get_thread(owner) == NULL) {
+            break;
+        }
+
+        /*
+         * The owner may not return from the unlock function if a thread is
+         * spinning on it.
+         */
+        while (mutex_adaptive_is_owner(mutex, owner)) {
+            if (thread_is_running(mutex_adaptive_get_thread(owner))) {
+                cpu_pause();
+            } else {
+                sleepq_wait(sleepq, "mutex");
+            }
+        }
+    }
+
+    /*
+     * A potentially spinning thread wouldn't be accounted for in the sleep
+     * queue, but the only potentially spinning thread is the new owner.
+     */
+    if (sleepq_empty(sleepq)) {
+        atomic_store(&mutex->owner, self, ATOMIC_RELAXED);
+    }
+
+    sleepq_return(sleepq, flags);
+}
+
+void
+mutex_adaptive_unlock_slow(struct mutex *mutex)
+{
+    uintptr_t owner;
+    struct sleepq *sleepq;
+    unsigned long flags;
+
+    atomic_store(&mutex->owner, MUTEX_ADAPTIVE_CONTENDED, ATOMIC_RELEASE);
+
+    for (;;) {
+        owner = atomic_load(&mutex->owner, ATOMIC_RELAXED);
+
+        /*
+         * This only happens if another thread was able to become the new
+         * owner, in which case that thread isn't spinning on the current
+         * thread, i.e. there is no need for an additional reference.
+         */
+        if (owner != MUTEX_ADAPTIVE_CONTENDED) {
+            break;
+        }
+
+        /*
+         * Avoid contending with incoming threads that are about to spin/wait
+         * on the mutex. This is particularly expensive with queued locks.
+         *
+         * Also, this call returns NULL if another thread is currently spinning
+         * on the current thread, in which case the latter doesn't return,
+         * averting the need for an additional reference.
+         */
+        sleepq = sleepq_tryacquire(mutex, false, &flags);
+
+        if (sleepq != NULL) {
+            sleepq_signal(sleepq);
+            sleepq_release(sleepq, flags);
+            break;
+        }
+
+        /*
+         * Acquiring the sleep queue may fail because of contention on
+         * unrelated objects. Retry.
+         */
+    }
+}
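The core of the protocol above is a single owner word: the owner thread's address, with the low bit doubling as a "contended" flag that forces both lockers and the owner onto the slow paths. The following user-space sketch restates that protocol with C11 atomics. It is illustrative only: the toy_* names are invented, <stdatomic.h> with its sequentially consistent defaults stands in for kern/atomic.h's acquire/release calls, and because user space cannot ask the scheduler whether the owner is on a CPU, sched_yield() replaces the spin-or-sleep decision made with thread_is_running().

    #include <sched.h>
    #include <stdatomic.h>
    #include <stdint.h>

    #define TOY_CONTENDED ((uintptr_t)1)

    struct toy_mutex {
        _Atomic uintptr_t owner; /* owner id, low bit = contended flag */
    };

    /* self must be a nonzero id with the low bit clear, such as the
     * address of an aligned per-thread structure. */
    static void
    toy_mutex_lock(struct toy_mutex *mutex, uintptr_t self)
    {
        uintptr_t expected = 0;

        /* Fast path: one CAS from "unowned, uncontended" to "owned". */
        if (atomic_compare_exchange_strong(&mutex->owner, &expected, self)) {
            return;
        }

        /* Slow path: publish the contended flag so the owner also takes
         * its slow path on unlock, then compete for ownership. */
        atomic_fetch_or(&mutex->owner, TOY_CONTENDED);

        for (;;) {
            expected = TOY_CONTENDED;

            if (atomic_compare_exchange_strong(&mutex->owner, &expected,
                                               self | TOY_CONTENDED)) {
                return;
            }

            sched_yield(); /* the kernel spins or sleeps here instead */
        }
    }

    static void
    toy_mutex_unlock(struct toy_mutex *mutex, uintptr_t self)
    {
        uintptr_t expected = self;

        /* Fast path: succeeds only if no waiter set the contended flag. */
        if (atomic_compare_exchange_strong(&mutex->owner, &expected, 0)) {
            return;
        }

        /* Slow path: drop ownership but keep the flag so a competing
         * waiter wins the CAS in toy_mutex_lock(). Unlike the kernel
         * version, this sketch never clears the flag once set; the
         * kernel clears it when the sleep queue is empty. */
        atomic_store(&mutex->owner, TOY_CONTENDED);
    }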
diff --git a/kern/mutex/mutex_adaptive_i.h b/kern/mutex/mutex_adaptive_i.h
new file mode 100644
index 00000000..2dd06792
--- /dev/null
+++ b/kern/mutex/mutex_adaptive_i.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2017 Agustina Arzille.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _KERN_MUTEX_ADAPTIVE_I_H
+#define _KERN_MUTEX_ADAPTIVE_I_H
+
+#ifndef _KERN_MUTEX_H
+#error "don't include <kern/mutex/mutex_adaptive_i.h> directly," \
+       " use <kern/mutex.h> instead"
+#endif
+
+#include <stdint.h>
+
+#include <kern/assert.h>
+#include <kern/atomic.h>
+#include <kern/error.h>
+#include <kern/macros.h>
+#include <kern/mutex_types.h>
+#include <kern/thread.h>
+
+/*
+ * Mutex flags.
+ *
+ * The "contended" flag indicates that threads are waiting for the mutex
+ * to be unlocked, potentially spinning on the owner. It forces threads
+ * trying to lock the mutex as well as the owner to take the slow path.
+ */
+#define MUTEX_ADAPTIVE_CONTENDED 0x1
+
+static inline void
+mutex_adaptive_init(struct mutex *mutex)
+{
+    mutex->owner = 0;
+}
+
+#define mutex_adaptive_assert_locked(mutex) assert((mutex)->owner != 0)
+
+static inline int
+mutex_adaptive_lock_fast(struct mutex *mutex)
+{
+    uintptr_t owner;
+
+    owner = atomic_cas_acquire(&mutex->owner, 0, (uintptr_t)thread_self());
+
+    if (unlikely(owner != 0)) {
+        return ERROR_BUSY;
+    }
+
+    return 0;
+}
+
+static inline int
+mutex_adaptive_unlock_fast(struct mutex *mutex)
+{
+    uintptr_t owner;
+
+    owner = atomic_cas_release(&mutex->owner, (uintptr_t)thread_self(), 0);
+
+    if (unlikely(owner & MUTEX_ADAPTIVE_CONTENDED)) {
+        return ERROR_BUSY;
+    }
+
+    return 0;
+}
+
+void mutex_adaptive_lock_slow(struct mutex *mutex);
+void mutex_adaptive_unlock_slow(struct mutex *mutex);
+
+/*
+ * Interface exported to the public mutex header.
+ */
+
+#define mutex_impl_init mutex_adaptive_init
+#define mutex_impl_assert_locked mutex_adaptive_assert_locked
+
+static inline int
+mutex_impl_trylock(struct mutex *mutex)
+{
+    return mutex_adaptive_lock_fast(mutex);
+}
+
+static inline void
+mutex_impl_lock(struct mutex *mutex)
+{
+    int error;
+
+    error = mutex_adaptive_lock_fast(mutex);
+
+    if (unlikely(error)) {
+        mutex_adaptive_lock_slow(mutex);
+    }
+}
+
+static inline void
+mutex_impl_unlock(struct mutex *mutex)
+{
+    int error;
+
+    error = mutex_adaptive_unlock_fast(mutex);
+
+    if (unlikely(error)) {
+        mutex_adaptive_unlock_slow(mutex);
+    }
+}
+
+#endif /* _KERN_MUTEX_ADAPTIVE_I_H */
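Callers are not expected to use the mutex_adaptive_* symbols directly; they go through the public wrappers in <kern/mutex.h>, which forward to the mutex_impl_* definitions above. A hypothetical caller, assuming the usual mutex_init()/mutex_lock()/mutex_unlock() wrapper names from that header:

    #include <kern/mutex.h>

    static struct mutex example_lock;
    static unsigned long example_counter;

    static void
    example_setup(void)
    {
        mutex_init(&example_lock); /* owner word starts at 0: unowned */
    }

    static void
    example_increment(void)
    {
        mutex_lock(&example_lock);   /* uncontended: a single CAS */
        example_counter++;
        mutex_unlock(&example_lock); /* slow path only if contended */
    }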
+ *
+ *
+ * Isolated type definition used to avoid circular inclusion dependencies.
+ */
+
+#ifndef _KERN_MUTEX_ADAPTIVE_TYPES_H
+#define _KERN_MUTEX_ADAPTIVE_TYPES_H
+
+#ifndef _KERN_MUTEX_TYPES_H
+#error "don't include <kern/mutex/mutex_adaptive_types.h> directly," \
+       " use <kern/mutex_types.h> instead"
+#endif
+
+#include <stdint.h>
+
+struct mutex {
+    uintptr_t owner;
+};
+
+#endif /* _KERN_MUTEX_ADAPTIVE_TYPES_H */
diff --git a/kern/mutex_types.h b/kern/mutex_types.h
index 432eab30..eb2bc339 100644
--- a/kern/mutex_types.h
+++ b/kern/mutex_types.h
@@ -23,6 +23,8 @@
 
 #if defined(X15_MUTEX_PI)
 #include <kern/mutex/mutex_pi_types.h>
+#elif defined(X15_MUTEX_ADAPTIVE)
+#include <kern/mutex/mutex_adaptive_types.h>
 #else
 #include <kern/mutex/mutex_plain_types.h>
 #endif
diff --git a/kern/sleepq.c b/kern/sleepq.c
index 09548db6..5af3d064 100644
--- a/kern/sleepq.c
+++ b/kern/sleepq.c
@@ -257,6 +257,33 @@ sleepq_acquire(const void *sync_obj, bool condition, unsigned long *flags)
     return sleepq;
 }
 
+struct sleepq *
+sleepq_tryacquire(const void *sync_obj, bool condition, unsigned long *flags)
+{
+    struct sleepq_bucket *bucket;
+    struct sleepq *sleepq;
+    int error;
+
+    assert(sync_obj != NULL);
+
+    bucket = sleepq_bucket_get(sync_obj, condition);
+
+    error = spinlock_trylock_intr_save(&bucket->lock, flags);
+
+    if (error) {
+        return NULL;
+    }
+
+    sleepq = sleepq_bucket_lookup(bucket, sync_obj);
+
+    if (sleepq == NULL) {
+        spinlock_unlock_intr_restore(&bucket->lock, *flags);
+        return NULL;
+    }
+
+    return sleepq;
+}
+
 void
 sleepq_release(struct sleepq *sleepq, unsigned long flags)
 {
diff --git a/kern/sleepq.h b/kern/sleepq.h
index e1f7863b..f55d8c4b 100644
--- a/kern/sleepq.h
+++ b/kern/sleepq.h
@@ -68,9 +68,15 @@ void sleepq_destroy(struct sleepq *sleepq);
  *
  * The condition argument must be true if the synchronization object
  * is a condition variable.
+ *
+ * Note that, in the case of the non-blocking variant, the call may also
+ * return NULL if internal state shared by unrelated synchronization
+ * objects is locked.
  */
 struct sleepq * sleepq_acquire(const void *sync_obj, bool condition, unsigned long *flags);
+struct sleepq * sleepq_tryacquire(const void *sync_obj, bool condition,
+                                  unsigned long *flags);
 void sleepq_release(struct sleepq *sleepq, unsigned long flags);
 
 /*
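The unlock slow path depends on this non-blocking variant: sleeping on the bucket lock while the mutex sits in its "unowned but contended" state would add latency exactly where adaptive spinning tries to remove it, and the bucket may be held on behalf of unrelated objects that merely hash to it. A rough user-space analogue of that try-then-retry shape, using real pthread calls but otherwise invented names:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t bucket_cond = PTHREAD_COND_INITIALIZER;

    /* Try to wake a waiter without ever blocking on the internal lock.
     * Returns false when the lock is busy so the caller can re-check
     * its own state and retry, as mutex_adaptive_unlock_slow() does. */
    static bool
    try_signal_waiter(void)
    {
        if (pthread_mutex_trylock(&bucket_lock) != 0) {
            return false; /* EBUSY: someone else holds the bucket */
        }

        pthread_cond_signal(&bucket_cond);
        pthread_mutex_unlock(&bucket_lock);
        return true;
    }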
diff --git a/kern/thread.c b/kern/thread.c
index e6875586..7ce22fb7 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -551,7 +551,7 @@ thread_runq_get_next(struct thread_runq *runq)
         thread = thread_sched_ops[i].get_next(runq);
 
         if (thread != NULL) {
-            runq->current = thread;
+            atomic_store(&runq->current, thread, ATOMIC_RELAXED);
             return thread;
         }
     }
@@ -571,7 +571,7 @@ thread_runq_set_next(struct thread_runq *runq, struct thread *thread)
         ops->set_next(runq, thread);
     }
 
-    runq->current = thread;
+    atomic_store(&runq->current, thread, ATOMIC_RELAXED);
 }
 
 static void
@@ -2736,3 +2736,14 @@ thread_key_create(unsigned int *keyp, thread_dtor_fn_t dtor)
     thread_dtors[key] = dtor;
     *keyp = key;
 }
+
+bool
+thread_is_running(const struct thread *thread)
+{
+    const struct thread_runq *runq;
+
+    runq = thread->runq;
+
+    return (runq != NULL)
+           && (atomic_load(&runq->current, ATOMIC_RELAXED) == thread);
+}
diff --git a/kern/thread.h b/kern/thread.h
index 62708268..4d85c59d 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -724,4 +724,12 @@ thread_get_specific(unsigned int key)
     return thread_tsd_get(thread_self(), key);
 }
 
+/*
+ * Return true if the given thread is running.
+ *
+ * Note that this check is speculative, and may not return an accurate
+ * result. It may only be used for optimistic optimizations.
+ */
+bool thread_is_running(const struct thread *thread);
+
 #endif /* _KERN_THREAD_H */
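The relaxed loads and stores around runq->current are deliberate: thread_is_running() is advisory, and a stale answer only makes a waiter spin one extra round or sleep when it could have spun; it never affects mutual exclusion. A minimal C11 sketch of the same pattern, with invented toy_* names:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct toy_thread;

    struct toy_runq {
        _Atomic(struct toy_thread *) current; /* written by the scheduler */
    };

    /* Speculative check: the scheduler may change current at any moment,
     * so relaxed ordering suffices and callers must tolerate staleness. */
    static bool
    toy_thread_is_running(const struct toy_thread *thread,
                          struct toy_runq *runq)
    {
        return atomic_load_explicit(&runq->current,
                                    memory_order_relaxed) == thread;
    }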