author    Richard Braun <rbraun@sceen.net>  2019-05-28 23:59:29 +0200
committer Richard Braun <rbraun@sceen.net>  2019-05-28 23:59:29 +0200
commit    096b2005752bce5c8bca1256dd4c5a42e0ec059c
tree      ce18b12d88187d813d629d9bf6edb2b6da1a8980
parent    d44f65873b1a00f450346ac9506bcc65ecc2c5d5
kern/sref: add optional debugging
 kern/Kconfig  |  7
 kern/sref.c   | 95
 kern/sref_i.h | 18
 3 files changed, 85 insertions(+), 35 deletions(-)
diff --git a/kern/Kconfig b/kern/Kconfig
index 782b0ec4..cc73ef53 100644
--- a/kern/Kconfig
+++ b/kern/Kconfig
@@ -164,4 +164,11 @@ config SPINLOCK_DEBUG
---help---
Enable spinlock ownership tracking.
+config SREF_DEBUG
+ bool "Scalable reference counter debugging"
+ select ASSERT
+ default n
+ ---help---
+ Enable scalable reference counter debugging.
+
endmenu
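
For reference, a minimal sketch of how a Kconfig symbol like this is typically consumed (the actual wiring for this commit is in the kern/sref_i.h hunk further down): the symbol surfaces as a CONFIG_ macro, which turns on a file-local SREF_VERIFY switch, and because the option selects ASSERT the guarded checks compile to real assert() calls. The sref_debug_assert() wrapper below is hypothetical and purely illustrative; the commit itself wraps each check in an explicit #ifdef SREF_VERIFY block.

#include <assert.h>

/* Hypothetical convenience wrapper, for illustration only. */
#ifdef CONFIG_SREF_DEBUG
#define SREF_VERIFY
#endif

#ifdef SREF_VERIFY
#define sref_debug_assert(x) assert(x)
#else
#define sref_debug_assert(x) ((void)0)
#endif
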
diff --git a/kern/sref.c b/kern/sref.c
index 31bfa0d0..c1093dcc 100644
--- a/kern/sref.c
+++ b/kern/sref.c
@@ -319,39 +319,55 @@ sref_counter_hash(const struct sref_counter *counter)
static bool
sref_counter_is_queued(const struct sref_counter *counter)
{
- return counter->flags & SREF_QUEUED;
+ return counter->flags & SREF_CNTF_QUEUED;
}
static void
sref_counter_mark_queued(struct sref_counter *counter)
{
- counter->flags |= SREF_QUEUED;
+ counter->flags |= SREF_CNTF_QUEUED;
}
static void
sref_counter_clear_queued(struct sref_counter *counter)
{
- counter->flags &= ~SREF_QUEUED;
+ counter->flags &= ~SREF_CNTF_QUEUED;
}
static bool
sref_counter_is_dirty(const struct sref_counter *counter)
{
- return counter->flags & SREF_DIRTY;
+ return counter->flags & SREF_CNTF_DIRTY;
}
static void
sref_counter_mark_dirty(struct sref_counter *counter)
{
- counter->flags |= SREF_DIRTY;
+ counter->flags |= SREF_CNTF_DIRTY;
}
static void
sref_counter_clear_dirty(struct sref_counter *counter)
{
- counter->flags &= ~SREF_DIRTY;
+ counter->flags &= ~SREF_CNTF_DIRTY;
}
+#ifdef SREF_VERIFY
+
+static bool
+sref_counter_is_unreferenced(const struct sref_counter *counter)
+{
+ return counter->flags & SREF_CNTF_UNREF;
+}
+
+static void
+sref_counter_mark_unreferenced(struct sref_counter *counter)
+{
+ counter->flags |= SREF_CNTF_UNREF;
+}
+
+#endif /* SREF_VERIFY */
+
static void
sref_counter_mark_dying(struct sref_counter *counter)
{
@@ -726,6 +742,10 @@ sref_queue_review(struct sref_queue *queue, struct sref_cache *cache)
spinlock_lock_intr_save(&counter->lock, &flags);
+#ifdef SREF_VERIFY
+ assert(!sref_counter_is_unreferenced(counter));
+#endif /* SREF_VERIFY */
+
assert(sref_counter_is_queued(counter));
sref_counter_clear_queued(counter);
@@ -733,37 +753,48 @@ sref_queue_review(struct sref_queue *queue, struct sref_cache *cache)
sref_counter_clear_dirty(counter);
sref_counter_clear_dying(counter);
spinlock_unlock_intr_restore(&counter->lock, flags);
+ continue;
+ }
+
+ if (sref_counter_is_dirty(counter)) {
+ requeue = true;
+ nr_dirty_zeroes++;
+ sref_counter_clear_dirty(counter);
} else {
- if (sref_counter_is_dirty(counter)) {
- requeue = true;
- nr_dirty_zeroes++;
- sref_counter_clear_dirty(counter);
- } else {
- error = sref_counter_kill_weakref(counter);
-
- if (!error) {
- requeue = false;
- } else {
- requeue = true;
- nr_revives++;
- }
- }
+ error = sref_counter_kill_weakref(counter);
- if (requeue) {
- sref_cache_schedule_review(cache, counter);
- spinlock_unlock_intr_restore(&counter->lock, flags);
+ if (!error) {
+ requeue = false;
} else {
- /*
- * Keep in mind that the work structure shares memory with
- * the counter data. Unlocking isn't needed here, since this
- * counter is now really at 0, but do it for consistency.
- */
- spinlock_unlock_intr_restore(&counter->lock, flags);
- nr_true_zeroes++;
- work_init(&counter->work, sref_counter_noref);
- work_queue_push(&works, &counter->work);
+ requeue = true;
+ nr_revives++;
}
}
+
+ if (requeue) {
+ sref_cache_schedule_review(cache, counter);
+ spinlock_unlock_intr_restore(&counter->lock, flags);
+ } else {
+
+ /*
+ * Keep in mind that the work structure shares memory with
+ * the counter data.
+ */
+
+#ifdef SREF_VERIFY
+ sref_counter_mark_unreferenced(counter);
+#endif /* SREF_VERIFY */
+
+ /*
+ * Unlocking isn't needed here, since this counter is now
+ * really at 0, but do it for consistency.
+ */
+ spinlock_unlock_intr_restore(&counter->lock, flags);
+
+ nr_true_zeroes++;
+ work_init(&counter->work, sref_counter_noref);
+ work_queue_push(&works, &counter->work);
+ }
}
if (work_queue_nr_works(&works) != 0) {
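
The restructured review path above also carries the point of the new flag: once a counter is found truly at zero, it is marked unreferenced and its noref work is queued, so any later appearance of that counter in a review pass trips the assert instead of silently corrupting memory. Below is a stripped-down, standalone sketch of that mark-then-check pattern; the names counter_retire and counter_review_check are made up for the example and are not the kernel code itself.

#include <assert.h>

#define CNTF_UNREF 0x4          /* stand-in for SREF_CNTF_UNREF */

struct counter {
    unsigned int flags;
};

/* Runs once, when a review pass concludes the counter is really at 0:
 * mark it, then hand it over to the deferred noref work. */
static void
counter_retire(struct counter *c)
{
    c->flags |= CNTF_UNREF;
    /* ... work_init()/work_queue_push() would follow here ... */
}

/* Runs at the start of every review pass: a retired counter showing up
 * again means it was referenced or re-queued after its noref callback
 * was scheduled. */
static void
counter_review_check(const struct counter *c)
{
    assert(!(c->flags & CNTF_UNREF));
}
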
diff --git a/kern/sref_i.h b/kern/sref_i.h
index 7b2b07f2..ebcb51d3 100644
--- a/kern/sref_i.h
+++ b/kern/sref_i.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2018 Richard Braun.
+ * Copyright (c) 2014-2019 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,6 +24,10 @@
#include <kern/spinlock.h>
#include <kern/work.h>
+#ifdef CONFIG_SREF_DEBUG
+#define SREF_VERIFY
+#endif
+
#define SREF_WEAKREF_DYING ((uintptr_t)1)
#define SREF_WEAKREF_MASK (~SREF_WEAKREF_DYING)
@@ -42,8 +46,12 @@ struct sref_weakref {
uintptr_t addr;
};
-#define SREF_QUEUED 0x1
-#define SREF_DIRTY 0x2
+/*
+ * Counter flags.
+ */
+#define SREF_CNTF_QUEUED 0x1 /* Queued for review */
+#define SREF_CNTF_DIRTY 0x2 /* Dirty zero seen */
+#define SREF_CNTF_UNREF 0x4 /* Unreferenced, for debugging only */
/*
* Scalable reference counter.
@@ -60,7 +68,11 @@ struct sref_weakref {
struct sref_counter {
sref_noref_fn_t noref_fn;
+#ifdef SREF_VERIFY
+ struct {
+#else /* SREF_VERIFY */
union {
+#endif /* SREF_VERIFY */
struct {
struct slist_node node; /* (g) */
struct spinlock lock;
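
The struct/union switch at the top of this hunk is what makes the check above possible. In a release build the review bookkeeping (list node, lock, flags, value) and the deferred work descriptor overlap in a union, which is why the in-code comment warns that the work structure shares memory with the counter data; once the counter is dead, the bookkeeping is no longer needed. Under SREF_VERIFY they must not overlap, because SREF_CNTF_UNREF has to survive work_init() and the work queueing so that a later review pass can still see it. A minimal standalone sketch of that layout choice follows; struct work and the field set are simplified stand-ins, not the real definitions.

/* Simplified stand-in for the kernel's work descriptor. */
struct work {
    void (*fn)(struct work *);
};

struct sref_counter_sketch {
#ifdef SREF_VERIFY
    struct {                    /* debug: both views coexist */
#else
    union {                     /* release: views overlap to save space */
#endif
        struct {
            unsigned int flags; /* SREF_CNTF_* bits; must outlive
                                   work_init() in the debug layout */
            /* ... list node, spinlock, counter value ... */
        };
        struct work work;       /* reused once the counter is truly at 0 */
    };
};
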