summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2017-08-30 23:27:31 +0200
committerRichard Braun <rbraun@sceen.net>2017-08-31 00:43:37 +0200
commit5daf8db5209b0849a7bf43d9d823c67be9a52bcf (patch)
tree1481da1f05f7ef7ee7202f300adf7a1b7d46f01b
parenta9719450e83c2c64eecd097d82beb624948e9de9 (diff)
Add debugging code to the mutex modules
-rw-r--r--kern/mutex.c2
-rw-r--r--kern/mutex/mutex_adaptive.c99
-rw-r--r--kern/mutex/mutex_adaptive_i.h5
-rw-r--r--kern/mutex/mutex_pi_i.h2
-rw-r--r--kern/mutex/mutex_plain.c68
-rw-r--r--kern/mutex/mutex_plain_i.h5
-rw-r--r--kern/rtmutex.c73
-rw-r--r--kern/rtmutex.h3
-rw-r--r--kern/syscnt.c7
9 files changed, 263 insertions, 1 deletion
diff --git a/kern/mutex.c b/kern/mutex.c
index 62609768..bd548b88 100644
--- a/kern/mutex.c
+++ b/kern/mutex.c
@@ -16,6 +16,7 @@
*/
#include <kern/init.h>
+#include <kern/mutex.h>
#include <kern/thread.h>
static int __init
@@ -25,4 +26,5 @@ mutex_setup(void)
}
INIT_OP_DEFINE(mutex_setup,
+ INIT_OP_DEP(mutex_impl_setup, true),
INIT_OP_DEP(thread_setup_booter, true));
diff --git a/kern/mutex/mutex_adaptive.c b/kern/mutex/mutex_adaptive.c
index 34fdd221..ceec8f10 100644
--- a/kern/mutex/mutex_adaptive.c
+++ b/kern/mutex/mutex_adaptive.c
@@ -23,12 +23,83 @@
#include <kern/atomic.h>
#include <kern/clock.h>
#include <kern/error.h>
+#include <kern/init.h>
#include <kern/mutex.h>
#include <kern/mutex_types.h>
#include <kern/sleepq.h>
+#include <kern/syscnt.h>
#include <kern/thread.h>
#include <machine/cpu.h>
+/* Set to 1 to enable debugging */
+#define MUTEX_ADAPTIVE_DEBUG 0
+
+#if MUTEX_ADAPTIVE_DEBUG
+
+enum {
+ MUTEX_ADAPTIVE_SC_SPINS,
+ MUTEX_ADAPTIVE_SC_WAIT_SUCCESSES,
+ MUTEX_ADAPTIVE_SC_WAIT_ERRORS,
+ MUTEX_ADAPTIVE_SC_DOWNGRADES,
+ MUTEX_ADAPTIVE_SC_ERROR_DOWNGRADES,
+ MUTEX_ADAPTIVE_SC_ERROR_CLEARBITS,
+ MUTEX_ADAPTIVE_SC_ERROR_CLEARCONT,
+ MUTEX_ADAPTIVE_SC_FAST_UNLOCKS,
+ MUTEX_ADAPTIVE_SC_SLOW_UNLOCKS,
+ MUTEX_ADAPTIVE_SC_EXTERNAL_UNLOCKS,
+ MUTEX_ADAPTIVE_SC_SIGNALS,
+ MUTEX_ADAPTIVE_NR_SCS
+};
+
+static struct syscnt mutex_adaptive_sc_array[MUTEX_ADAPTIVE_NR_SCS];
+
+static void
+mutex_adaptive_register_sc(unsigned int index, const char *name)
+{
+ assert(index < ARRAY_SIZE(mutex_adaptive_sc_array));
+ syscnt_register(&mutex_adaptive_sc_array[index], name);
+}
+
+static void
+mutex_adaptive_setup_debug(void)
+{
+ mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_SPINS,
+ "mutex_adaptive_spins");
+ mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_WAIT_SUCCESSES,
+ "mutex_adaptive_wait_successes");
+ mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_WAIT_ERRORS,
+ "mutex_adaptive_wait_errors");
+ mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_DOWNGRADES,
+ "mutex_adaptive_downgrades");
+ mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_ERROR_DOWNGRADES,
+ "mutex_adaptive_error_downgrades");
+ mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_ERROR_CLEARBITS,
+ "mutex_adaptive_error_clearbits");
+ mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_ERROR_CLEARCONT,
+ "mutex_adaptive_error_clearcont");
+ mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_FAST_UNLOCKS,
+ "mutex_adaptive_fast_unlocks");
+ mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_SLOW_UNLOCKS,
+ "mutex_adaptive_slow_unlocks");
+ mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_EXTERNAL_UNLOCKS,
+ "mutex_adaptive_external_unlocks");
+ mutex_adaptive_register_sc(MUTEX_ADAPTIVE_SC_SIGNALS,
+ "mutex_adaptive_signals");
+}
+
+static void
+mutex_adaptive_inc_sc(unsigned int index)
+{
+ assert(index < ARRAY_SIZE(mutex_adaptive_sc_array));
+ syscnt_inc(&mutex_adaptive_sc_array[index]);
+}
+
+#else /* MUTEX_ADAPTIVE_DEBUG */
+#define mutex_adaptive_setup_debug()
+#define mutex_adaptive_inc_sc(x)
+#endif /* MUTEX_ADAPTIVE_DEBUG */
+
+
static struct thread *
mutex_adaptive_get_thread(uintptr_t owner)
{
@@ -81,6 +152,8 @@ mutex_adaptive_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
*/
while (mutex_adaptive_is_owner(mutex, owner)) {
if (thread_is_running(mutex_adaptive_get_thread(owner))) {
+ mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_SPINS);
+
if (timed && clock_time_occurred(ticks, clock_get_time())) {
error = ERROR_TIMEDOUT;
break;
@@ -113,13 +186,17 @@ mutex_adaptive_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
*/
if (error) {
+ mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_WAIT_ERRORS);
+
if (sleepq_empty(sleepq)) {
+ mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_ERROR_DOWNGRADES);
owner = atomic_load(&mutex->owner, ATOMIC_RELAXED);
assert(owner & MUTEX_ADAPTIVE_CONTENDED);
thread = mutex_adaptive_get_thread(owner);
/* If there is an owner, try to clear the contended bit */
if (thread != NULL) {
+ mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_ERROR_CLEARBITS);
owner = atomic_cas(&mutex->owner, owner,
(uintptr_t)thread, ATOMIC_RELAXED);
assert(owner & MUTEX_ADAPTIVE_CONTENDED);
@@ -132,6 +209,7 @@ mutex_adaptive_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
* value of the mutex to become different from the contended bit.
*/
if (thread == NULL) {
+ mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_ERROR_CLEARCONT);
owner = atomic_cas(&mutex->owner, owner, 0, ATOMIC_RELAXED);
assert(owner == MUTEX_ADAPTIVE_CONTENDED);
}
@@ -140,7 +218,10 @@ mutex_adaptive_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
goto out;
}
+ mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_WAIT_SUCCESSES);
+
if (sleepq_empty(sleepq)) {
+ mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_DOWNGRADES);
atomic_store(&mutex->owner, self, ATOMIC_RELAXED);
}
@@ -193,10 +274,13 @@ mutex_adaptive_unlock_slow(struct mutex *mutex)
continue;
}
+ mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_FAST_UNLOCKS);
return;
}
}
+ mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_SLOW_UNLOCKS);
+
for (;;) {
owner = atomic_load(&mutex->owner, ATOMIC_RELAXED);
@@ -208,6 +292,7 @@ mutex_adaptive_unlock_slow(struct mutex *mutex)
* 2/ A timeout cleared the contended bit.
*/
if (owner != MUTEX_ADAPTIVE_CONTENDED) {
+ mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_EXTERNAL_UNLOCKS);
break;
}
@@ -222,6 +307,7 @@ mutex_adaptive_unlock_slow(struct mutex *mutex)
sleepq = sleepq_tryacquire(mutex, false, &flags);
if (sleepq != NULL) {
+ mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_SIGNALS);
sleepq_signal(sleepq);
sleepq_release(sleepq, flags);
break;
@@ -233,3 +319,16 @@ mutex_adaptive_unlock_slow(struct mutex *mutex)
*/
}
}
+
+static int
+mutex_adaptive_setup(void)
+{
+ mutex_adaptive_setup_debug();
+ return 0;
+}
+
+INIT_OP_DEFINE(mutex_adaptive_setup,
+#if MUTEX_ADAPTIVE_DEBUG
+ INIT_OP_DEP(syscnt_setup, true),
+#endif /* MUTEX_ADAPTIVE_DEBUG */
+);
diff --git a/kern/mutex/mutex_adaptive_i.h b/kern/mutex/mutex_adaptive_i.h
index be822c24..a8598e60 100644
--- a/kern/mutex/mutex_adaptive_i.h
+++ b/kern/mutex/mutex_adaptive_i.h
@@ -28,6 +28,7 @@
#include <kern/atomic.h>
#include <kern/error.h>
+#include <kern/init.h>
#include <kern/macros.h>
#include <kern/mutex_types.h>
#include <kern/thread.h>
@@ -132,4 +133,8 @@ mutex_impl_unlock(struct mutex *mutex)
}
}
+#define mutex_impl_setup mutex_adaptive_setup
+
+INIT_OP_DECLARE(mutex_adaptive_setup);
+
#endif /* _KERN_MUTEX_ADAPTIVE_I_H */
diff --git a/kern/mutex/mutex_pi_i.h b/kern/mutex/mutex_pi_i.h
index 616f09b7..2d5a2b6f 100644
--- a/kern/mutex/mutex_pi_i.h
+++ b/kern/mutex/mutex_pi_i.h
@@ -65,4 +65,6 @@ mutex_impl_unlock(struct mutex *mutex)
rtmutex_unlock(&mutex->rtmutex);
}
+#define mutex_impl_setup rtmutex_setup
+
#endif /* _KERN_MUTEX_PI_I_H */
diff --git a/kern/mutex/mutex_plain.c b/kern/mutex/mutex_plain.c
index 58fc4878..6a7cbdc3 100644
--- a/kern/mutex/mutex_plain.c
+++ b/kern/mutex/mutex_plain.c
@@ -21,9 +21,58 @@
#include <stdint.h>
#include <kern/atomic.h>
+#include <kern/init.h>
#include <kern/mutex.h>
#include <kern/mutex_types.h>
#include <kern/sleepq.h>
+#include <kern/syscnt.h>
+
+/* Set to 1 to enable debugging */
+#define MUTEX_PLAIN_DEBUG 0
+
+#if MUTEX_PLAIN_DEBUG
+
+enum {
+ MUTEX_PLAIN_SC_WAIT_SUCCESSES,
+ MUTEX_PLAIN_SC_WAIT_ERRORS,
+ MUTEX_PLAIN_SC_DOWNGRADES,
+ MUTEX_PLAIN_SC_ERROR_DOWNGRADES,
+ MUTEX_PLAIN_NR_SCS
+};
+
+static struct syscnt mutex_plain_sc_array[MUTEX_PLAIN_NR_SCS];
+
+static void
+mutex_plain_register_sc(unsigned int index, const char *name)
+{
+ assert(index < ARRAY_SIZE(mutex_plain_sc_array));
+ syscnt_register(&mutex_plain_sc_array[index], name);
+}
+
+static void
+mutex_plain_setup_debug(void)
+{
+ mutex_plain_register_sc(MUTEX_PLAIN_SC_WAIT_SUCCESSES,
+ "mutex_plain_wait_successes");
+ mutex_plain_register_sc(MUTEX_PLAIN_SC_WAIT_ERRORS,
+ "mutex_plain_wait_errors");
+ mutex_plain_register_sc(MUTEX_PLAIN_SC_DOWNGRADES,
+ "mutex_plain_downgrades");
+ mutex_plain_register_sc(MUTEX_PLAIN_SC_ERROR_DOWNGRADES,
+ "mutex_plain_error_downgrades");
+}
+
+static void
+mutex_plain_inc_sc(unsigned int index)
+{
+ assert(index < ARRAY_SIZE(mutex_plain_sc_array));
+ syscnt_inc(&mutex_plain_sc_array[index]);
+}
+
+#else /* MUTEX_PLAIN_DEBUG */
+#define mutex_plain_setup_debug()
+#define mutex_plain_inc_sc(x)
+#endif /* MUTEX_PLAIN_DEBUG */
static int
mutex_plain_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
@@ -56,7 +105,10 @@ mutex_plain_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
}
if (error) {
+ mutex_plain_inc_sc(MUTEX_PLAIN_SC_WAIT_ERRORS);
+
if (sleepq_empty(sleepq)) {
+ mutex_plain_inc_sc(MUTEX_PLAIN_SC_ERROR_DOWNGRADES);
atomic_cas(&mutex->state, MUTEX_CONTENDED,
MUTEX_LOCKED, ATOMIC_RELAXED);
}
@@ -64,7 +116,10 @@ mutex_plain_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
goto out;
}
+ mutex_plain_inc_sc(MUTEX_PLAIN_SC_WAIT_SUCCESSES);
+
if (sleepq_empty(sleepq)) {
+ mutex_plain_inc_sc(MUTEX_PLAIN_SC_DOWNGRADES);
atomic_store(&mutex->state, MUTEX_LOCKED, ATOMIC_RELAXED);
}
@@ -105,3 +160,16 @@ mutex_plain_unlock_slow(struct mutex *mutex)
sleepq_release(sleepq, flags);
}
+
+static int
+mutex_plain_setup(void)
+{
+ mutex_plain_setup_debug();
+ return 0;
+}
+
+INIT_OP_DEFINE(mutex_plain_setup,
+#if MUTEX_PLAIN_DEBUG
+ INIT_OP_DEP(syscnt_setup, true),
+#endif /* MUTEX_PLAIN_DEBUG */
+);
diff --git a/kern/mutex/mutex_plain_i.h b/kern/mutex/mutex_plain_i.h
index 58e565ed..fe973086 100644
--- a/kern/mutex/mutex_plain_i.h
+++ b/kern/mutex/mutex_plain_i.h
@@ -28,6 +28,7 @@
#include <kern/atomic.h>
#include <kern/error.h>
+#include <kern/init.h>
#include <kern/mutex_types.h>
#define MUTEX_UNLOCKED 0
@@ -126,4 +127,8 @@ mutex_impl_unlock(struct mutex *mutex)
}
}
+#define mutex_impl_setup mutex_plain_setup
+
+INIT_OP_DECLARE(mutex_plain_setup);
+
#endif /* _KERN_MUTEX_PLAIN_I_H */
diff --git a/kern/rtmutex.c b/kern/rtmutex.c
index 0070b93f..9fcc4e0e 100644
--- a/kern/rtmutex.c
+++ b/kern/rtmutex.c
@@ -21,12 +21,64 @@
#include <stdint.h>
#include <kern/atomic.h>
+#include <kern/init.h>
+#include <kern/macros.h>
#include <kern/rtmutex.h>
#include <kern/rtmutex_i.h>
#include <kern/rtmutex_types.h>
#include <kern/thread.h>
#include <kern/turnstile.h>
+/* Set to 1 to enable debugging */
+#define RTMUTEX_DEBUG 0
+
+#if RTMUTEX_DEBUG
+
+enum {
+ RTMUTEX_SC_WAIT_SUCCESSES,
+ RTMUTEX_SC_WAIT_ERRORS,
+ RTMUTEX_SC_DOWNGRADES,
+ RTMUTEX_SC_ERROR_DOWNGRADES,
+ RTMUTEX_SC_CANCELED_DOWNGRADES,
+ RTMUTEX_NR_SCS
+};
+
+static struct syscnt rtmutex_sc_array[RTMUTEX_NR_SCS];
+
+static void
+rtmutex_register_sc(unsigned int index, const char *name)
+{
+ assert(index < ARRAY_SIZE(rtmutex_sc_array));
+ syscnt_register(&rtmutex_sc_array[index], name);
+}
+
+static void
+rtmutex_setup_debug(void)
+{
+ rtmutex_register_sc(RTMUTEX_SC_WAIT_SUCCESSES,
+ "rtmutex_wait_successes");
+ rtmutex_register_sc(RTMUTEX_SC_WAIT_ERRORS,
+ "rtmutex_wait_errors");
+ rtmutex_register_sc(RTMUTEX_SC_DOWNGRADES,
+ "rtmutex_downgrades");
+ rtmutex_register_sc(RTMUTEX_SC_ERROR_DOWNGRADES,
+ "rtmutex_error_downgrades");
+ rtmutex_register_sc(RTMUTEX_SC_CANCELED_DOWNGRADES,
+ "rtmutex_canceled_downgrades");
+}
+
+static void
+rtmutex_inc_sc(unsigned int index)
+{
+ assert(index < ARRAY_SIZE(rtmutex_sc_array));
+ syscnt_inc(&rtmutex_sc_array[index]);
+}
+
+#else /* RTMUTEX_DEBUG */
+#define rtmutex_setup_debug()
+#define rtmutex_inc_sc(x)
+#endif /* RTMUTEX_DEBUG */
+
static struct thread *
rtmutex_get_thread(uintptr_t owner)
{
@@ -81,6 +133,8 @@ rtmutex_lock_slow_common(struct rtmutex *rtmutex, bool timed, uint64_t ticks)
}
if (error) {
+ rtmutex_inc_sc(RTMUTEX_SC_WAIT_ERRORS);
+
/*
* Keep in mind more than one thread may have timed out on waiting.
* These threads aren't considered waiters, making the turnstile
@@ -91,17 +145,23 @@ rtmutex_lock_slow_common(struct rtmutex *rtmutex, bool timed, uint64_t ticks)
owner = atomic_load(&rtmutex->owner, ATOMIC_RELAXED);
if (owner & RTMUTEX_CONTENDED) {
+ rtmutex_inc_sc(RTMUTEX_SC_ERROR_DOWNGRADES);
owner &= RTMUTEX_OWNER_MASK;
atomic_store(&rtmutex->owner, owner, ATOMIC_RELAXED);
+ } else {
+ rtmutex_inc_sc(RTMUTEX_SC_CANCELED_DOWNGRADES);
}
}
goto out;
}
+ rtmutex_inc_sc(RTMUTEX_SC_WAIT_SUCCESSES);
+
turnstile_own(turnstile);
if (turnstile_empty(turnstile)) {
+ rtmutex_inc_sc(RTMUTEX_SC_DOWNGRADES);
owner = atomic_swap(&rtmutex->owner, self, ATOMIC_RELAXED);
assert(owner == (self | bits));
}
@@ -166,3 +226,16 @@ rtmutex_unlock_slow(struct rtmutex *rtmutex)
/* TODO Make private, use thread_set_priority_propagation_needed instead */
thread_propagate_priority();
}
+
+static int
+rtmutex_setup(void)
+{
+ rtmutex_setup_debug();
+ return 0;
+}
+
+INIT_OP_DEFINE(rtmutex_setup,
+#if RTMUTEX_DEBUG
+ INIT_OP_DEP(syscnt_setup, true),
+#endif /* RTMUTEX_DEBUG */
+ );
diff --git a/kern/rtmutex.h b/kern/rtmutex.h
index 87cd15ad..bc76b773 100644
--- a/kern/rtmutex.h
+++ b/kern/rtmutex.h
@@ -28,6 +28,7 @@
#include <stdint.h>
#include <kern/error.h>
+#include <kern/init.h>
#include <kern/macros.h>
#include <kern/rtmutex_i.h>
#include <kern/rtmutex_types.h>
@@ -119,4 +120,6 @@ rtmutex_unlock(struct rtmutex *rtmutex)
}
}
+INIT_OP_DECLARE(rtmutex_setup);
+
#endif /* _KERN_RTMUTEX_H */
diff --git a/kern/syscnt.c b/kern/syscnt.c
index 7cceabac..0e3aff70 100644
--- a/kern/syscnt.c
+++ b/kern/syscnt.c
@@ -25,6 +25,7 @@
#include <kern/shell.h>
#include <kern/spinlock.h>
#include <kern/syscnt.h>
+#include <kern/thread.h>
/*
* Global list of all registered counters.
@@ -70,8 +71,12 @@ syscnt_setup(void)
return 0;
}
+/*
+ * Do not make initialization depend on mutex_setup, since mutex
+ * modules may use system counters for debugging.
+ */
INIT_OP_DEFINE(syscnt_setup,
- INIT_OP_DEP(mutex_setup, true));
+ INIT_OP_DEP(thread_setup_booter, true));
void __init
syscnt_register(struct syscnt *syscnt, const char *name)