author      Richard Braun <rbraun@sceen.net>    2017-08-30 23:27:31 +0200
committer   Richard Braun <rbraun@sceen.net>    2017-08-31 00:43:37 +0200
commit      5daf8db5209b0849a7bf43d9d823c67be9a52bcf (patch)
tree        1481da1f05f7ef7ee7202f300adf7a1b7d46f01b /kern/mutex/mutex_adaptive.c
parent      a9719450e83c2c64eecd097d82beb624948e9de9 (diff)
Add debugging code to the mutex modules
Diffstat (limited to 'kern/mutex/mutex_adaptive.c')
-rw-r--r--   kern/mutex/mutex_adaptive.c   99
1 file changed, 99 insertions(+), 0 deletions(-)
diff --git a/kern/mutex/mutex_adaptive.c b/kern/mutex/mutex_adaptive.c
index 34fdd221..ceec8f10 100644
--- a/kern/mutex/mutex_adaptive.c
+++ b/kern/mutex/mutex_adaptive.c
@@ -23,12 +23,83 @@
 #include <kern/atomic.h>
 #include <kern/clock.h>
 #include <kern/error.h>
+#include <kern/init.h>
 #include <kern/mutex.h>
 #include <kern/mutex_types.h>
 #include <kern/sleepq.h>
+#include <kern/syscnt.h>
 #include <kern/thread.h>
 #include <machine/cpu.h>
 
+/* Set to 1 to enable debugging */
+#define MUTEX_ADAPTIVE_DEBUG 0
+
+#if MUTEX_ADAPTIVE_DEBUG
+
+enum {
+    MUTEX_ADAPTIVE_SC_SPINS,
+    MUTEX_ADAPTIVE_SC_WAIT_SUCCESSES,
+    MUTEX_ADAPTIVE_SC_WAIT_ERRORS,
+    MUTEX_ADAPTIVE_SC_DOWNGRADES,
+    MUTEX_ADAPTIVE_SC_ERROR_DOWNGRADES,
+    MUTEX_ADAPTIVE_SC_ERROR_CLEARBITS,
+    MUTEX_ADAPTIVE_SC_ERROR_CLEARCONT,
+    MUTEX_ADAPTIVE_SC_FAST_UNLOCKS,
+    MUTEX_ADAPTIVE_SC_SLOW_UNLOCKS,
+    MUTEX_ADAPTIVE_SC_EXTERNAL_UNLOCKS,
+    MUTEX_ADAPTIVE_SC_SIGNALS,
+    MUTEX_ADAPTIVE_NR_SCS
+};
+
+static struct syscnt mutex_adaptive_sc_array[MUTEX_ADAPTIVE_NR_SCS];
+
+static void
+mutex_adaptive_register_sc(unsigned int index, const char *name)
+{
+    assert(index < ARRAY_SIZE(mutex_adaptive_sc_array));
+    syscnt_register(&mutex_adaptive_sc_array[index], name);
+}
+
+static void
+mutex_adaptive_setup_debug(void)
+{
+    mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_SPINS,
+                               "mutex_adaptive_spins");
+    mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_WAIT_SUCCESSES,
+                               "mutex_adaptive_wait_successes");
+    mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_WAIT_ERRORS,
+                               "mutex_adaptive_wait_errors");
+    mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_DOWNGRADES,
+                               "mutex_adaptive_downgrades");
+    mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_ERROR_DOWNGRADES,
+                               "mutex_adaptive_error_downgrades");
+    mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_ERROR_CLEARBITS,
+                               "mutex_adaptive_error_clearbits");
+    mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_ERROR_CLEARCONT,
+                               "mutex_adaptive_error_clearcont");
+    mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_FAST_UNLOCKS,
+                               "mutex_adaptive_fast_unlocks");
+    mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_SLOW_UNLOCKS,
+                               "mutex_adaptive_slow_unlocks");
+    mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_EXTERNAL_UNLOCKS,
+                               "mutex_adaptive_external_unlocks");
+    mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_SIGNALS,
+                               "mutex_adaptive_signals");
+}
+
+static void
+mutex_adaptive_inc_sc(unsigned int index)
+{
+    assert(index < ARRAY_SIZE(mutex_adaptive_sc_array));
+    syscnt_inc(&mutex_adaptive_sc_array[index]);
+}
+
+#else /* MUTEX_ADAPTIVE_DEBUG */
+#define mutex_adaptive_setup_debug()
+#define mutex_adaptive_inc_sc(x)
+#endif /* MUTEX_ADAPTIVE_DEBUG */
+
+
 static struct thread *
 mutex_adaptive_get_thread(uintptr_t owner)
 {
@@ -81,6 +152,8 @@ mutex_adaptive_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
      */
     while (mutex_adaptive_is_owner(mutex, owner)) {
         if (thread_is_running(mutex_adaptive_get_thread(owner))) {
+            mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_SPINS);
+
             if (timed && clock_time_occurred(ticks, clock_get_time())) {
                 error = ERROR_TIMEDOUT;
                 break;
             }
@@ -113,13 +186,17 @@ mutex_adaptive_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
      */
     if (error) {
+        mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_WAIT_ERRORS);
+
         if (sleepq_empty(sleepq)) {
+            mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_ERROR_DOWNGRADES);
             owner = atomic_load(&mutex->owner, ATOMIC_RELAXED);
             assert(owner & MUTEX_ADAPTIVE_CONTENDED);
             thread = mutex_adaptive_get_thread(owner);
 
             /* If there is an owner, try to clear the contended bit */
             if (thread != NULL) {
+                mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_ERROR_CLEARBITS);
                 owner = atomic_cas(&mutex->owner, owner,
                                    (uintptr_t)thread, ATOMIC_RELAXED);
                 assert(owner & MUTEX_ADAPTIVE_CONTENDED);
@@ -132,6 +209,7 @@ mutex_adaptive_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
          * value of the mutex to become different from the contended bit.
          */
         if (thread == NULL) {
+            mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_ERROR_CLEARCONT);
             owner = atomic_cas(&mutex->owner, owner, 0, ATOMIC_RELAXED);
             assert(owner == MUTEX_ADAPTIVE_CONTENDED);
         }
@@ -140,7 +218,10 @@ mutex_adaptive_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
         goto out;
     }
 
+    mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_WAIT_SUCCESSES);
+
     if (sleepq_empty(sleepq)) {
+        mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_DOWNGRADES);
         atomic_store(&mutex->owner, self, ATOMIC_RELAXED);
     }
 
@@ -193,10 +274,13 @@ mutex_adaptive_unlock_slow(struct mutex *mutex)
                 continue;
             }
 
+            mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_FAST_UNLOCKS);
             return;
         }
     }
 
+    mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_SLOW_UNLOCKS);
+
     for (;;) {
         owner = atomic_load(&mutex->owner, ATOMIC_RELAXED);
 
@@ -208,6 +292,7 @@ mutex_adaptive_unlock_slow(struct mutex *mutex)
          * 2/ A timeout cleared the contended bit.
          */
         if (owner != MUTEX_ADAPTIVE_CONTENDED) {
+            mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_EXTERNAL_UNLOCKS);
             break;
         }
 
@@ -222,6 +307,7 @@ mutex_adaptive_unlock_slow(struct mutex *mutex)
         sleepq = sleepq_tryacquire(mutex, false, &flags);
 
         if (sleepq != NULL) {
+            mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_SIGNALS);
             sleepq_signal(sleepq);
             sleepq_release(sleepq, flags);
             break;
         }
@@ -233,3 +319,16 @@ mutex_adaptive_unlock_slow(struct mutex *mutex)
          */
     }
 }
+
+static int
+mutex_adaptive_setup(void)
+{
+    mutex_adaptive_setup_debug();
+    return 0;
+}
+
+INIT_OP_DEFINE(mutex_adaptive_setup,
+#if MUTEX_ADAPTIVE_DEBUG
+               INIT_OP_DEP(syscnt_setup, true),
+#endif /* MUTEX_ADAPTIVE_DEBUG */
+);
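
The counters above follow a common pattern: the statistics are compiled in only when a debug macro is set to 1, and every call site collapses to an empty statement otherwise, because the disabled variants are empty function-like macros whose arguments are discarded by the preprocessor (which is why MUTEX_ADAPTIVE_SC_* constants can be referenced even though the enum only exists under #if MUTEX_ADAPTIVE_DEBUG). The stand-alone user-space sketch below illustrates the same idea outside the kernel; the demo_* names and the plain unsigned long counters are illustrative assumptions only, not part of the X15 tree, which uses the syscnt module shown in the diff.

/*
 * demo_counters.c - user-space sketch of conditionally compiled event
 * counters, modelled on the MUTEX_ADAPTIVE_DEBUG pattern in the patch.
 * All demo_* names are hypothetical.
 */

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

/* Set to 1 to enable the counters, 0 to compile them out entirely */
#define DEMO_DEBUG 1

#if DEMO_DEBUG

enum {
    DEMO_SC_FAST_UNLOCKS,
    DEMO_SC_SLOW_UNLOCKS,
    DEMO_NR_SCS
};

static unsigned long demo_sc_array[DEMO_NR_SCS];

static const char *demo_sc_names[DEMO_NR_SCS] = {
    "demo_fast_unlocks",
    "demo_slow_unlocks",
};

static void
demo_inc_sc(unsigned int index)
{
    assert(index < ARRAY_SIZE(demo_sc_array));
    demo_sc_array[index]++;
}

static void
demo_report_scs(void)
{
    for (size_t i = 0; i < ARRAY_SIZE(demo_sc_array); i++) {
        printf("%s: %lu\n", demo_sc_names[i], demo_sc_array[i]);
    }
}

#else /* DEMO_DEBUG */
/* Disabled: call sites become empty statements, arguments are discarded */
#define demo_inc_sc(x)
#define demo_report_scs()
#endif /* DEMO_DEBUG */

/* Toy "unlock" with a fast and a slow path, instrumented like the patch */
static void
demo_unlock(int contended)
{
    if (!contended) {
        demo_inc_sc(DEMO_SC_FAST_UNLOCKS);
        return;
    }

    demo_inc_sc(DEMO_SC_SLOW_UNLOCKS);
}

int
main(void)
{
    demo_unlock(0);
    demo_unlock(1);
    demo_unlock(1);
    demo_report_scs();
    return 0;
}

Keeping the disabled variants as empty macros, as the patch does, means call sites never need their own #if MUTEX_ADAPTIVE_DEBUG guards and a non-debug build carries no overhead.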