summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    Richard Braun <rbraun@sceen.net> 2018-01-09 23:39:44 +0100
committer Richard Braun <rbraun@sceen.net> 2018-01-10 22:47:34 +0100
commite82501678afa48d33ea6d45c31220bf6a42f3493 (patch)
treeb5f7d03d998f156619af87d220ad5dd1d5b9e09c
parent8168a2ba783cfd3d24b201e619d29c92d6621a1e (diff)
kern/sref: use an slist for review queues
-rw-r--r-- kern/sref.c   | 41
-rw-r--r-- kern/sref_i.h |  9
2 files changed, 15 insertions(+), 35 deletions(-)
diff --git a/kern/sref.c b/kern/sref.c
index 54de6e3b..b64da76a 100644
--- a/kern/sref.c
+++ b/kern/sref.c
@@ -55,6 +55,7 @@
#include <kern/macros.h>
#include <kern/panic.h>
#include <kern/percpu.h>
+#include <kern/slist.h>
#include <kern/spinlock.h>
#include <kern/sref.h>
#include <kern/sref_i.h>
@@ -79,8 +80,7 @@
#define SREF_NR_COUNTERS_WARN 10000
struct sref_queue {
- struct sref_counter *first;
- struct sref_counter *last;
+ struct slist counters;
unsigned long size;
};
@@ -169,8 +169,7 @@ static struct sref_cache sref_cache __percpu;
static void __init
sref_queue_init(struct sref_queue *queue)
{
- queue->first = NULL;
- queue->last = NULL;
+ slist_init(&queue->counters);
queue->size = 0;
}
@@ -189,15 +188,7 @@ sref_queue_empty(const struct sref_queue *queue)
static void
sref_queue_push(struct sref_queue *queue, struct sref_counter *counter)
{
- counter->next = NULL;
-
- if (queue->last == NULL) {
- queue->first = counter;
- } else {
- queue->last->next = counter;
- }
-
- queue->last = counter;
+ slist_insert_tail(&queue->counters, &counter->node);
queue->size++;
}
@@ -206,13 +197,8 @@ sref_queue_pop(struct sref_queue *queue)
{
struct sref_counter *counter;
- counter = queue->first;
- queue->first = counter->next;
-
- if (queue->last == counter) {
- queue->last = NULL;
- }
-
+ counter = slist_first_entry(&queue->counters, struct sref_counter, node);
+ slist_remove(&queue->counters, NULL);
queue->size--;
return counter;
}
@@ -220,23 +206,14 @@ sref_queue_pop(struct sref_queue *queue)
static void
sref_queue_transfer(struct sref_queue *dest, struct sref_queue *src)
{
- *dest = *src;
+ slist_set_head(&dest->counters, &src->counters);
+ dest->size = src->size;
}
static void
sref_queue_concat(struct sref_queue *queue1, struct sref_queue *queue2)
{
- if (sref_queue_empty(queue2)) {
- return;
- }
-
- if (sref_queue_empty(queue1)) {
- sref_queue_transfer(queue1, queue2);
- return;
- }
-
- queue1->last->next = queue2->first;
- queue1->last = queue2->last;
+ slist_concat(&queue1->counters, &queue2->counters);
queue1->size += queue2->size;
}
diff --git a/kern/sref_i.h b/kern/sref_i.h
index 5b63857f..09dbbfa8 100644
--- a/kern/sref_i.h
+++ b/kern/sref_i.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 Richard Braun.
+ * Copyright (c) 2014-2018 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,6 +20,7 @@
#include <stdint.h>
+#include <kern/slist.h>
#include <kern/spinlock.h>
#include <kern/work.h>
@@ -44,15 +45,17 @@ struct sref_weakref {
/*
* Scalable reference counter.
*
- * It's tempting to merge the flags into the next member, but since they're
+ * It's tempting to merge the flags into the node member, but since they're
* not protected by the same lock, store them separately.
+ *
+ * TODO Locking keys.
*/
struct sref_counter {
sref_noref_fn_t noref_fn;
union {
struct {
- struct sref_counter *next;
+ struct slist_node node;
struct spinlock lock;
int flags;
unsigned long value;