author    Richard Braun <rbraun@sceen.net>    2018-01-10 00:56:49 +0100
committer Richard Braun <rbraun@sceen.net>    2018-01-10 22:47:45 +0100
commit    fb3beb7bf51adfbc30db1e2b1f0c6bfca08bea79 (patch)
tree      9da5a6830b8e0a08d381f8f6cf910e21a6d823bc /kern/sref.c
parent    e82501678afa48d33ea6d45c31220bf6a42f3493 (diff)
kern/sref: optimize cache flushing
Track valid deltas in a list instead of walking the entire hash table.
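
To make the shape of the change easier to see on its own, here is a minimal, self-contained C sketch of the same idea. It is not the kernel code: the intrusive list below is a simplified stand-in for kern/list.h, the counter and delta structures are reduced to what the example needs, and every identifier prefixed with "sketch_" is invented for illustration.

/*
 * Minimal standalone sketch (not the kernel code): delta slots bound to a
 * counter are also kept on a per-cache "valid" list, so a flush walks that
 * list instead of scanning the whole array.  The list is a simplified
 * stand-in for kern/list.h; every "sketch_" name is invented here.
 */
#include <assert.h>
#include <stddef.h>

#define SKETCH_MAX_DELTAS 8

struct sketch_node {
    struct sketch_node *prev, *next;
};

static void sketch_list_init(struct sketch_node *h) { h->prev = h->next = h; }
static int sketch_list_empty(const struct sketch_node *h) { return h->next == h; }

static void sketch_list_insert_tail(struct sketch_node *h, struct sketch_node *n)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}

static void sketch_list_remove(struct sketch_node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

struct sketch_counter {
    long value;
};

struct sketch_delta {
    struct sketch_node node;        /* first member: a node pointer is a delta pointer */
    struct sketch_counter *counter; /* NULL while the slot is unused */
    long value;
};

struct sketch_cache {
    struct sketch_delta deltas[SKETCH_MAX_DELTAS];
    struct sketch_node valid_deltas; /* only slots bound to a counter */
};

static void sketch_cache_init(struct sketch_cache *cache)
{
    for (size_t i = 0; i < SKETCH_MAX_DELTAS; i++) {
        cache->deltas[i].counter = NULL;
        cache->deltas[i].value = 0;
    }
    sketch_list_init(&cache->valid_deltas);
}

/* Bind a free slot to a counter and remember it in the valid list. */
static void sketch_cache_add_delta(struct sketch_cache *cache,
                                   struct sketch_delta *delta,
                                   struct sketch_counter *counter)
{
    assert(delta->counter == NULL);
    delta->counter = counter;
    sketch_list_insert_tail(&cache->valid_deltas, &delta->node);
}

/* Apply the accumulated value to the counter and unlink the slot. */
static void sketch_cache_remove_delta(struct sketch_delta *delta)
{
    assert(delta->counter != NULL);
    delta->counter->value += delta->value;
    delta->counter = NULL;
    delta->value = 0;
    sketch_list_remove(&delta->node);
}

/* The flush now visits only valid deltas, not all SKETCH_MAX_DELTAS slots. */
static void sketch_cache_flush(struct sketch_cache *cache)
{
    while (!sketch_list_empty(&cache->valid_deltas)) {
        struct sketch_delta *delta =
            (struct sketch_delta *)cache->valid_deltas.next;
        sketch_cache_remove_delta(delta);
    }
}

int main(void)
{
    struct sketch_cache cache;
    struct sketch_counter counter = { 0 };

    sketch_cache_init(&cache);
    sketch_cache_add_delta(&cache, &cache.deltas[0], &counter);
    cache.deltas[0].value = 3;
    sketch_cache_flush(&cache);
    assert(counter.value == 3); /* the accumulated delta reached the counter */
    return 0;
}

In the sketch, sketch_cache_flush only visits slots that are actually bound to a counter, which is the point of the commit. The real sref_cache_flush also re-enables preemption after each eviction; the restructured loop in the last hunk below keeps that behaviour while dropping the full array scan and the TODO comment that suggested this optimization.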
Diffstat (limited to 'kern/sref.c')
-rw-r--r--    kern/sref.c    47
1 file changed, 35 insertions, 12 deletions
diff --git a/kern/sref.c b/kern/sref.c
index b64da76..b7cdab5 100644
--- a/kern/sref.c
+++ b/kern/sref.c
@@ -51,6 +51,7 @@
 #include <kern/cpumap.h>
 #include <kern/error.h>
 #include <kern/init.h>
+#include <kern/list.h>
 #include <kern/log.h>
 #include <kern/macros.h>
 #include <kern/panic.h>
@@ -124,6 +125,7 @@ struct sref_data {
  * reliably reported.
  */
 struct sref_delta {
+    struct list node;
     struct sref_counter *counter;
     unsigned long value;
 };
@@ -156,6 +158,7 @@ struct sref_delta {
  */
 struct sref_cache {
     struct sref_delta deltas[SREF_MAX_DELTAS];
+    struct list valid_deltas;
     struct syscnt sc_collisions;
     struct syscnt sc_flushes;
     struct thread *manager;
@@ -511,6 +514,7 @@ sref_cache_init(struct sref_cache *cache, unsigned int cpu)
         sref_delta_init(delta);
     }

+    list_init(&cache->valid_deltas);
     snprintf(name, sizeof(name), "sref_collisions/%u", cpu);
     syscnt_register(&cache->sc_collisions, name);
     snprintf(name, sizeof(name), "sref_flushes/%u", cpu);
@@ -578,6 +582,26 @@ sref_cache_clear_dirty(struct sref_cache *cache)
     cache->dirty = 0;
 }

+static void
+sref_cache_add_delta(struct sref_cache *cache, struct sref_delta *delta,
+                     struct sref_counter *counter)
+{
+    assert(!sref_delta_is_valid(delta));
+    assert(counter);
+
+    sref_delta_set_counter(delta, counter);
+    list_insert_tail(&cache->valid_deltas, &delta->node);
+}
+
+static void
+sref_cache_remove_delta(struct sref_delta *delta)
+{
+    assert(sref_delta_is_valid(delta));
+
+    sref_delta_evict(delta);
+    list_remove(&delta->node);
+}
+
 static struct sref_delta *
 sref_cache_get_delta(struct sref_cache *cache, struct sref_counter *counter)
 {
@@ -586,10 +610,10 @@ sref_cache_get_delta(struct sref_cache *cache, struct sref_counter *counter)
     delta = sref_cache_delta(cache, sref_counter_index(counter));

     if (!sref_delta_is_valid(delta)) {
-        sref_delta_set_counter(delta, counter);
+        sref_cache_add_delta(cache, delta, counter);
     } else if (sref_delta_counter(delta) != counter) {
-        sref_delta_flush(delta);
-        sref_delta_set_counter(delta, counter);
+        sref_cache_remove_delta(delta);
+        sref_cache_add_delta(cache, delta, counter);
         syscnt_inc(&cache->sc_collisions);
     }
@@ -600,20 +624,19 @@ static void
 sref_cache_flush(struct sref_cache *cache, struct sref_queue *queue)
 {
     struct sref_delta *delta;
-    unsigned int i, cpu;
-
-    thread_preempt_disable();
+    unsigned int cpu;

-    /* TODO Consider a list of valid deltas to speed things up */
-    for (i = 0; i < ARRAY_SIZE(cache->deltas); i++) {
-        delta = sref_cache_delta(cache, i);
+    for (;;) {
+        thread_preempt_disable();

-        if (sref_delta_is_valid(delta)) {
-            sref_delta_evict(delta);
+        if (list_empty(&cache->valid_deltas)) {
+            break;
         }

+        delta = list_first_entry(&cache->valid_deltas, typeof(*delta), node);
+        sref_cache_remove_delta(delta);
+
         thread_preempt_enable();
-        thread_preempt_disable();
     }

     cpu = cpu_id();