author    Richard Braun <rbraun@sceen.net>    2018-02-20 23:00:56 +0100
committer Richard Braun <rbraun@sceen.net>    2018-02-20 23:01:36 +0100
commit    fa5142c3f383241942fb8fb6de84153fc0286a6e (patch)
tree      44525a5ed99ca1a8910c7c935f4c51a2350ce319
parent    326118bf300cf096cee04cb0a64789151ef8e273 (diff)
kern/llsync: remove module
-rw-r--r--  kern/Makefile               1
-rw-r--r--  kern/clock.c                2
-rw-r--r--  kern/hlist.h               50
-rw-r--r--  kern/list.h                58
-rw-r--r--  kern/llsync.c             341
-rw-r--r--  kern/llsync.h             174
-rw-r--r--  kern/llsync_i.h           125
-rw-r--r--  kern/rdxtree.c             47
-rw-r--r--  kern/rdxtree.h              7
-rw-r--r--  kern/slist.h               54
-rw-r--r--  kern/thread.c               7
-rw-r--r--  kern/thread.h              35
-rw-r--r--  kern/thread_i.h             4
-rw-r--r--  test/Kconfig                3
-rw-r--r--  test/Makefile               1
-rw-r--r--  test/test_llsync_defer.c  216
-rwxr-xr-x  tools/build_configs.py      2
-rw-r--r--  vm/vm_object.c              6
18 files changed, 108 insertions, 1025 deletions
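
The commit message has no body, so as a reading aid, here is the interface correspondence the hunks below establish. This summary is inferred from the patch itself; the rcu module it relies on predates this commit (thread.c already initializes thread->rcu_reader, clock.c already calls rcu_report_periodic_event()) and is not shown here:

    /*
     * llsync -> rcu correspondence, as performed by the conversions below:
     *
     *   llsync_read_enter()              ->  rcu_read_enter()
     *   llsync_read_exit()               ->  rcu_read_leave()
     *   llsync_load_ptr(ptr)             ->  rcu_load_ptr(ptr)
     *   llsync_store_ptr(ptr, value)     ->  rcu_store_ptr(ptr, value)
     *   llsync_defer(work)               ->  rcu_defer(work)
     *   llsync_report_periodic_event()   ->  rcu_report_periodic_event()
     *   {list,hlist,slist}_llsync_*()    ->  {list,hlist,slist}_rcu_*()
     */

llsync_register(), llsync_unregister(), llsync_report_context_switch(), llsync_ready() and llsync_wait() are removed without a visible one-to-one replacement in this diff, which suggests the rcu module does not need explicit processor registration from the idle loop or scheduler.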
diff --git a/kern/Makefile b/kern/Makefile
index 6c0778b..0d848cc 100644
--- a/kern/Makefile
+++ b/kern/Makefile
@@ -12,7 +12,6 @@ x15_SOURCES-y += \
kern/intr.c \
kern/kernel.c \
kern/kmem.c \
- kern/llsync.c \
kern/log.c \
kern/mutex.c \
kern/panic.c \
diff --git a/kern/clock.c b/kern/clock.c
index c69bf42..0b72a8f 100644
--- a/kern/clock.c
+++ b/kern/clock.c
@@ -22,7 +22,6 @@
#include <kern/clock.h>
#include <kern/clock_i.h>
#include <kern/init.h>
-#include <kern/llsync.h>
#include <kern/percpu.h>
#include <kern/rcu.h>
#include <kern/sref.h>
@@ -90,7 +89,6 @@ void clock_tick_intr(void)
}
timer_report_periodic_event();
- llsync_report_periodic_event();
rcu_report_periodic_event();
sref_report_periodic_event();
work_report_periodic_event();
diff --git a/kern/hlist.h b/kern/hlist.h
index e105cb8..83d64fe 100644
--- a/kern/hlist.h
+++ b/kern/hlist.h
@@ -25,8 +25,8 @@
#include <stddef.h>
#include <kern/hlist_types.h>
-#include <kern/llsync.h>
#include <kern/macros.h>
+#include <kern/rcu.h>
struct hlist;
@@ -266,25 +266,25 @@ for (entry = hlist_first_entry(list, typeof(*entry), member), \
* Return the first node of a list.
*/
static inline struct hlist_node *
-hlist_llsync_first(const struct hlist *list)
+hlist_rcu_first(const struct hlist *list)
{
- return llsync_load_ptr(list->first);
+ return rcu_load_ptr(list->first);
}
/*
* Return the node next to the given node.
*/
static inline struct hlist_node *
-hlist_llsync_next(const struct hlist_node *node)
+hlist_rcu_next(const struct hlist_node *node)
{
- return llsync_load_ptr(node->next);
+ return rcu_load_ptr(node->next);
}
/*
* Insert a node at the head of a list.
*/
static inline void
-hlist_llsync_insert_head(struct hlist *list, struct hlist_node *node)
+hlist_rcu_insert_head(struct hlist *list, struct hlist_node *node)
{
struct hlist_node *first;
@@ -296,26 +296,26 @@ hlist_llsync_insert_head(struct hlist *list, struct hlist_node *node)
first->pprev = &node->next;
}
- llsync_store_ptr(list->first, node);
+ rcu_store_ptr(list->first, node);
}
/*
* Insert a node before another node.
*/
static inline void
-hlist_llsync_insert_before(struct hlist_node *next, struct hlist_node *node)
+hlist_rcu_insert_before(struct hlist_node *next, struct hlist_node *node)
{
node->next = next;
node->pprev = next->pprev;
next->pprev = &node->next;
- llsync_store_ptr(*node->pprev, node);
+ rcu_store_ptr(*node->pprev, node);
}
/*
* Insert a node after another node.
*/
static inline void
-hlist_llsync_insert_after(struct hlist_node *prev, struct hlist_node *node)
+hlist_rcu_insert_after(struct hlist_node *prev, struct hlist_node *node)
{
node->next = prev->next;
node->pprev = &prev->next;
@@ -324,48 +324,48 @@ hlist_llsync_insert_after(struct hlist_node *prev, struct hlist_node *node)
node->next->pprev = &node->next;
}
- llsync_store_ptr(prev->next, node);
+ rcu_store_ptr(prev->next, node);
}
/*
* Remove a node from a list.
*/
static inline void
-hlist_llsync_remove(struct hlist_node *node)
+hlist_rcu_remove(struct hlist_node *node)
{
if (node->next != NULL) {
node->next->pprev = node->pprev;
}
- llsync_store_ptr(*node->pprev, node->next);
+ rcu_store_ptr(*node->pprev, node->next);
}
/*
* Macro that evaluates to the address of the structure containing the
* given node based on the given type and member.
*/
-#define hlist_llsync_entry(node, type, member) \
- structof(llsync_load_ptr(node), type, member)
+#define hlist_rcu_entry(node, type, member) \
+ structof(rcu_load_ptr(node), type, member)
/*
* Get the first entry of a list.
*/
-#define hlist_llsync_first_entry(list, type, member) \
+#define hlist_rcu_first_entry(list, type, member) \
MACRO_BEGIN \
struct hlist_node *___first; \
\
- ___first = hlist_llsync_first(list); \
+ ___first = hlist_rcu_first(list); \
hlist_end(___first) ? NULL : hlist_entry(___first, type, member); \
MACRO_END
/*
* Get the entry next to the given entry.
*/
-#define hlist_llsync_next_entry(entry, member) \
+#define hlist_rcu_next_entry(entry, member) \
MACRO_BEGIN \
struct hlist_node *___next; \
\
- ___next = hlist_llsync_next(&entry->member); \
+ ___next = hlist_rcu_next(&entry->member); \
hlist_end(___next) \
? NULL \
: hlist_entry(___next, typeof(*entry), member); \
@@ -374,17 +374,17 @@ MACRO_END
/*
* Forge a loop to process all nodes of a list.
*/
-#define hlist_llsync_for_each(list, node) \
-for (node = hlist_llsync_first(list); \
+#define hlist_rcu_for_each(list, node) \
+for (node = hlist_rcu_first(list); \
!hlist_end(node); \
- node = hlist_llsync_next(node))
+ node = hlist_rcu_next(node))
/*
* Forge a loop to process all entries of a list.
*/
-#define hlist_llsync_for_each_entry(list, entry, member) \
-for (entry = hlist_llsync_first_entry(list, typeof(*entry), member); \
+#define hlist_rcu_for_each_entry(list, entry, member) \
+for (entry = hlist_rcu_first_entry(list, typeof(*entry), member); \
entry != NULL; \
- entry = hlist_llsync_next_entry(entry, member))
+ entry = hlist_rcu_next_entry(entry, member))
#endif /* _KERN_HLIST_H */
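
A minimal usage sketch of the renamed hlist accessors, assuming a hypothetical struct obj, list and lock (none of this is part of the patch); writers keep using a conventional lock while readers traverse locklessly:

    #include <stdbool.h>

    #include <kern/hlist.h>
    #include <kern/rcu.h>
    #include <kern/spinlock.h>

    struct obj {
        struct hlist_node node;
        int key;
    };

    /* Update side: insertion is still serialized by a regular lock. */
    static void
    obj_publish(struct hlist *list, struct spinlock *lock, struct obj *obj)
    {
        spinlock_lock(lock);
        hlist_rcu_insert_head(list, &obj->node);
        spinlock_unlock(lock);
    }

    /* Read side: lockless traversal inside an RCU read-side section. */
    static bool
    obj_contains(const struct hlist *list, int key)
    {
        struct obj *obj;
        bool found;

        found = false;
        rcu_read_enter();

        hlist_rcu_for_each_entry(list, obj, node) {
            if (obj->key == key) {
                found = true;
                break;
            }
        }

        rcu_read_leave();
        return found;
    }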
diff --git a/kern/list.h b/kern/list.h
index f48d7d7..7ea3992 100644
--- a/kern/list.h
+++ b/kern/list.h
@@ -25,8 +25,8 @@
#include <stddef.h>
#include <kern/list_types.h>
-#include <kern/llsync.h>
#include <kern/macros.h>
+#include <kern/rcu.h>
/*
* Structure used as both head and node.
@@ -385,18 +385,18 @@ for (entry = list_last_entry(list, typeof(*entry), member), \
* Return the first node of a list.
*/
static inline struct list *
-list_llsync_first(const struct list *list)
+list_rcu_first(const struct list *list)
{
- return llsync_load_ptr(list->next);
+ return rcu_load_ptr(list->next);
}
/*
* Return the node next to the given node.
*/
static inline struct list *
-list_llsync_next(const struct list *node)
+list_rcu_next(const struct list *node)
{
- return llsync_load_ptr(node->next);
+ return rcu_load_ptr(node->next);
}
/*
@@ -405,11 +405,11 @@ list_llsync_next(const struct list *node)
* This function is private.
*/
static inline void
-list_llsync_add(struct list *prev, struct list *next, struct list *node)
+list_rcu_add(struct list *prev, struct list *next, struct list *node)
{
node->next = next;
node->prev = prev;
- llsync_store_ptr(prev->next, node);
+ rcu_store_ptr(prev->next, node);
next->prev = node;
}
@@ -417,36 +417,36 @@ list_llsync_add(struct list *prev, struct list *next, struct list *node)
* Insert a node at the head of a list.
*/
static inline void
-list_llsync_insert_head(struct list *list, struct list *node)
+list_rcu_insert_head(struct list *list, struct list *node)
{
- list_llsync_add(list, list->next, node);
+ list_rcu_add(list, list->next, node);
}
/*
* Insert a node at the tail of a list.
*/
static inline void
-list_llsync_insert_tail(struct list *list, struct list *node)
+list_rcu_insert_tail(struct list *list, struct list *node)
{
- list_llsync_add(list->prev, list, node);
+ list_rcu_add(list->prev, list, node);
}
/*
* Insert a node before another node.
*/
static inline void
-list_llsync_insert_before(struct list *next, struct list *node)
+list_rcu_insert_before(struct list *next, struct list *node)
{
- list_llsync_add(next->prev, next, node);
+ list_rcu_add(next->prev, next, node);
}
/*
* Insert a node after another node.
*/
static inline void
-list_llsync_insert_after(struct list *prev, struct list *node)
+list_rcu_insert_after(struct list *prev, struct list *node)
{
- list_llsync_add(prev, prev->next, node);
+ list_rcu_add(prev, prev->next, node);
}
/*
@@ -455,18 +455,18 @@ list_llsync_insert_after(struct list *prev, struct list *node)
* After completion, the node is stale.
*/
static inline void
-list_llsync_remove(struct list *node)
+list_rcu_remove(struct list *node)
{
node->next->prev = node->prev;
- llsync_store_ptr(node->prev->next, node->next);
+ rcu_store_ptr(node->prev->next, node->next);
}
/*
* Macro that evaluates to the address of the structure containing the
* given node based on the given type and member.
*/
-#define list_llsync_entry(node, type, member) \
- structof(llsync_load_ptr(node), type, member)
+#define list_rcu_entry(node, type, member) \
+ structof(rcu_load_ptr(node), type, member)
/*
* Get the first entry of a list.
@@ -475,13 +475,13 @@ list_llsync_remove(struct list *node)
* the node pointer can only be read once, preventing the combination
* of lockless list_empty()/list_first_entry() variants.
*/
-#define list_llsync_first_entry(list, type, member) \
+#define list_rcu_first_entry(list, type, member) \
MACRO_BEGIN \
struct list *___list; \
struct list *___first; \
\
___list = (list); \
- ___first = list_llsync_first(___list); \
+ ___first = list_rcu_first(___list); \
list_end(___list, ___first) \
? NULL \
: list_entry(___first, type, member); \
@@ -494,29 +494,29 @@ MACRO_END
* the node pointer can only be read once, preventing the combination
* of lockless list_empty()/list_next_entry() variants.
*/
-#define list_llsync_next_entry(entry, member) \
- list_llsync_first_entry(&entry->member, typeof(*entry), member)
+#define list_rcu_next_entry(entry, member) \
+ list_rcu_first_entry(&entry->member, typeof(*entry), member)
/*
* Forge a loop to process all nodes of a list.
*
* The node must not be altered during the loop.
*/
-#define list_llsync_for_each(list, node) \
-for (node = list_llsync_first(list); \
+#define list_rcu_for_each(list, node) \
+for (node = list_rcu_first(list); \
!list_end(list, node); \
- node = list_llsync_next(node))
+ node = list_rcu_next(node))
/*
* Forge a loop to process all entries of a list.
*
* The entry node must not be altered during the loop.
*/
-#define list_llsync_for_each_entry(list, entry, member) \
-for (entry = list_llsync_entry(list_first(list), \
+#define list_rcu_for_each_entry(list, entry, member) \
+for (entry = list_rcu_entry(list_first(list), \
typeof(*entry), member); \
!list_end(list, &entry->member); \
- entry = list_llsync_entry(list_next(&entry->member), \
+ entry = list_rcu_entry(list_next(&entry->member), \
typeof(*entry), member))
#endif /* _KERN_LIST_H */
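
The comment on list_rcu_remove() notes that the node is stale after removal: lockless readers may still be traversing it, so the memory backing it must not be released immediately. A hedged sketch of the resulting update-side pattern, reusing the work-deferral interface this patch switches to (item, item_cache and the mutex are hypothetical):

    #include <kern/kmem.h>
    #include <kern/list.h>
    #include <kern/macros.h>
    #include <kern/mutex.h>
    #include <kern/rcu.h>
    #include <kern/work.h>

    struct item {
        struct list node;
        struct work work;
    };

    static struct kmem_cache item_cache;

    static void
    item_free_deferred(struct work *work)
    {
        kmem_cache_free(&item_cache, structof(work, struct item, work));
    }

    /* Unlink under the update-side lock, then defer the release until all
     * pre-existing read-side references may have been dropped. */
    static void
    item_remove(struct mutex *lock, struct item *item)
    {
        mutex_lock(lock);
        list_rcu_remove(&item->node);
        mutex_unlock(lock);

        work_init(&item->work, item_free_deferred);
        rcu_defer(&item->work);
    }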
diff --git a/kern/llsync.c b/kern/llsync.c
deleted file mode 100644
index b7b8b2f..0000000
--- a/kern/llsync.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Copyright (c) 2013-2014 Richard Braun.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- *
- * The method used by this module is described in the expired US patent
- * 4809168, "Passive Serialization in a Multitasking Environment". It is
- * similar to "Classic RCU (Read-Copy Update)" as found in Linux 2.6, with
- * the notable difference that RCU actively starts grace periods, where
- * passive serialization waits for two sequential "multiprocess checkpoints"
- * (renamed global checkpoints in this implementation) to occur.
- *
- * It is used instead of RCU because of patents that may not allow writing
- * an implementation not based on the Linux code (see
- * http://lists.lttng.org/pipermail/lttng-dev/2013-May/020305.html). As
- * patents expire, this module could be reworked to become a true RCU
- * implementation. In the mean time, the module interface was carefully
- * designed to be similar to RCU.
- *
- * TODO Gracefully handle large amounts of deferred works.
- */
-
-#include <assert.h>
-#include <stdbool.h>
-#include <stddef.h>
-
-#include <kern/condition.h>
-#include <kern/cpumap.h>
-#include <kern/init.h>
-#include <kern/list.h>
-#include <kern/log.h>
-#include <kern/llsync.h>
-#include <kern/llsync_i.h>
-#include <kern/macros.h>
-#include <kern/mutex.h>
-#include <kern/percpu.h>
-#include <kern/spinlock.h>
-#include <kern/syscnt.h>
-#include <kern/work.h>
-#include <kern/thread.h>
-#include <machine/cpu.h>
-
-/*
- * Initial global checkpoint ID.
- *
- * Set to a high value to make sure overflows are correctly handled.
- */
-#define LLSYNC_INITIAL_GCID ((unsigned int)-10)
-
-/*
- * Number of pending works beyond which to issue a warning.
- */
-#define LLSYNC_NR_PENDING_WORKS_WARN 10000
-
-struct llsync_data llsync_data;
-struct llsync_cpu_data llsync_cpu_data __percpu;
-
-struct llsync_waiter {
- struct work work;
- struct mutex lock;
- struct condition cond;
- int done;
-};
-
-static bool llsync_is_ready __read_mostly = false;
-
-bool
-llsync_ready(void)
-{
- return llsync_is_ready;
-}
-
-static int __init
-llsync_setup(void)
-{
- struct llsync_cpu_data *cpu_data;
- unsigned int i;
-
- spinlock_init(&llsync_data.lock);
- work_queue_init(&llsync_data.queue0);
- work_queue_init(&llsync_data.queue1);
- syscnt_register(&llsync_data.sc_global_checkpoints,
- "llsync_global_checkpoints");
- syscnt_register(&llsync_data.sc_periodic_checkins,
- "llsync_periodic_checkins");
- syscnt_register(&llsync_data.sc_failed_periodic_checkins,
- "llsync_failed_periodic_checkins");
- llsync_data.gcid.value = LLSYNC_INITIAL_GCID;
-
- for (i = 0; i < cpu_count(); i++) {
- cpu_data = percpu_ptr(llsync_cpu_data, i);
- work_queue_init(&cpu_data->queue0);
- }
-
- return 0;
-}
-
-INIT_OP_DEFINE(llsync_setup,
- INIT_OP_DEP(cpu_mp_probe, true),
- INIT_OP_DEP(log_setup, true),
- INIT_OP_DEP(mutex_setup, true),
- INIT_OP_DEP(spinlock_setup, true),
- INIT_OP_DEP(syscnt_setup, true),
- INIT_OP_DEP(thread_bootstrap, true));
-
-static void
-llsync_process_global_checkpoint(void)
-{
- struct work_queue queue;
- unsigned int nr_works;
-
- assert(cpumap_find_first(&llsync_data.pending_checkpoints) == -1);
- assert(llsync_data.nr_pending_checkpoints == 0);
-
- nr_works = work_queue_nr_works(&llsync_data.queue0)
- + work_queue_nr_works(&llsync_data.queue1);
-
- /* TODO Handle hysteresis */
- if (!llsync_data.no_warning && (nr_works >= LLSYNC_NR_PENDING_WORKS_WARN)) {
- llsync_data.no_warning = 1;
- log_warning("llsync: large number of pending works\n");
- }
-
- if (llsync_data.nr_registered_cpus == 0) {
- work_queue_concat(&llsync_data.queue1, &llsync_data.queue0);
- work_queue_init(&llsync_data.queue0);
- } else {
- cpumap_copy(&llsync_data.pending_checkpoints, &llsync_data.registered_cpus);
- llsync_data.nr_pending_checkpoints = llsync_data.nr_registered_cpus;
- }
-
- work_queue_transfer(&queue, &llsync_data.queue1);
- work_queue_transfer(&llsync_data.queue1, &llsync_data.queue0);
- work_queue_init(&llsync_data.queue0);
-
- if (work_queue_nr_works(&queue) != 0) {
- work_queue_schedule(&queue, 0);
- }
-
- llsync_data.gcid.value++;
- syscnt_inc(&llsync_data.sc_global_checkpoints);
-}
-
-static void
-llsync_flush_works(struct llsync_cpu_data *cpu_data)
-{
- if (work_queue_nr_works(&cpu_data->queue0) == 0) {
- return;
- }
-
- work_queue_concat(&llsync_data.queue0, &cpu_data->queue0);
- work_queue_init(&cpu_data->queue0);
-}
-
-static void
-llsync_commit_checkpoint(unsigned int cpu)
-{
- int pending;
-
- pending = cpumap_test(&llsync_data.pending_checkpoints, cpu);
-
- if (!pending) {
- return;
- }
-
- cpumap_clear(&llsync_data.pending_checkpoints, cpu);
- llsync_data.nr_pending_checkpoints--;
-
- if (llsync_data.nr_pending_checkpoints == 0) {
- llsync_process_global_checkpoint();
- }
-}
-
-void
-llsync_register(void)
-{
- struct llsync_cpu_data *cpu_data;
- unsigned long flags;
- unsigned int cpu;
-
- if (!llsync_is_ready) {
- llsync_is_ready = true;
- }
-
- cpu = cpu_id();
- cpu_data = llsync_get_cpu_data();
-
- spinlock_lock_intr_save(&llsync_data.lock, &flags);
-
- assert(!cpu_data->registered);
- assert(work_queue_nr_works(&cpu_data->queue0) == 0);
- cpu_data->registered = 1;
- cpu_data->gcid = llsync_data.gcid.value;
-
- assert(!cpumap_test(&llsync_data.registered_cpus, cpu));
- cpumap_set(&llsync_data.registered_cpus, cpu);
- llsync_data.nr_registered_cpus++;
-
- assert(!cpumap_test(&llsync_data.pending_checkpoints, cpu));
-
- if ((llsync_data.nr_registered_cpus == 1)
- && (llsync_data.nr_pending_checkpoints == 0)) {
- llsync_process_global_checkpoint();
- }
-
- spinlock_unlock_intr_restore(&llsync_data.lock, flags);
-}
-
-void
-llsync_unregister(void)
-{
- struct llsync_cpu_data *cpu_data;
- unsigned long flags;
- unsigned int cpu;
-
- cpu = cpu_id();
- cpu_data = llsync_get_cpu_data();
-
- spinlock_lock_intr_save(&llsync_data.lock, &flags);
-
- llsync_flush_works(cpu_data);
-
- assert(cpu_data->registered);
- cpu_data->registered = 0;
-
- assert(cpumap_test(&llsync_data.registered_cpus, cpu));
- cpumap_clear(&llsync_data.registered_cpus, cpu);
- llsync_data.nr_registered_cpus--;
-
- /*
- * Processor registration qualifies as a checkpoint. Since unregistering
- * a processor also disables commits until it's registered again, perform
- * one now.
- */
- llsync_commit_checkpoint(cpu);
-
- spinlock_unlock_intr_restore(&llsync_data.lock, flags);
-}
-
-void
-llsync_report_periodic_event(void)
-{
- struct llsync_cpu_data *cpu_data;
- unsigned int gcid;
-
- assert(thread_check_intr_context());
-
- cpu_data = llsync_get_cpu_data();
-
- if (!cpu_data->registered) {
- assert(work_queue_nr_works(&cpu_data->queue0) == 0);
- return;
- }
-
- spinlock_lock(&llsync_data.lock);
-
- llsync_flush_works(cpu_data);
-
- gcid = llsync_data.gcid.value;
- assert((gcid - cpu_data->gcid) <= 1);
-
- /*
- * If the local copy of the global checkpoint ID matches the true
- * value, the current processor has checked in.
- *
- * Otherwise, there were no checkpoint since the last global checkpoint.
- * Check whether this periodic event occurred during a read-side critical
- * section, and if not, trigger a checkpoint.
- */
- if (cpu_data->gcid == gcid) {
- llsync_commit_checkpoint(cpu_id());
- } else {
- if (thread_llsync_in_read_cs()) {
- syscnt_inc(&llsync_data.sc_failed_periodic_checkins);
- } else {
- cpu_data->gcid = gcid;
- syscnt_inc(&llsync_data.sc_periodic_checkins);
- llsync_commit_checkpoint(cpu_id());
- }
- }
-
- spinlock_unlock(&llsync_data.lock);
-}
-
-void
-llsync_defer(struct work *work)
-{
- struct llsync_cpu_data *cpu_data;
- unsigned long flags;
-
- thread_preempt_disable_intr_save(&flags);
- cpu_data = llsync_get_cpu_data();
- work_queue_push(&cpu_data->queue0, work);
- thread_preempt_enable_intr_restore(flags);
-}
-
-static void
-llsync_signal(struct work *work)
-{
- struct llsync_waiter *waiter;
-
- waiter = structof(work, struct llsync_waiter, work);
-
- mutex_lock(&waiter->lock);
- waiter->done = 1;
- condition_signal(&waiter->cond);
- mutex_unlock(&waiter->lock);
-}
-
-void
-llsync_wait(void)
-{
- struct llsync_waiter waiter;
-
- work_init(&waiter.work, llsync_signal);
- mutex_init(&waiter.lock);
- condition_init(&waiter.cond);
- waiter.done = 0;
-
- llsync_defer(&waiter.work);
-
- mutex_lock(&waiter.lock);
-
- while (!waiter.done) {
- condition_wait(&waiter.cond, &waiter.lock);
- }
-
- mutex_unlock(&waiter.lock);
-}
diff --git a/kern/llsync.h b/kern/llsync.h
deleted file mode 100644
index 4a0d026..0000000
--- a/kern/llsync.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) 2013-2014 Richard Braun.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- *
- * Lockless synchronization.
- *
- * The llsync module provides services similar to RCU (Read-Copy Update).
- * As such, it can be thought of as an efficient reader-writer lock
- * replacement. It is efficient because read-side critical sections
- * don't use expensive synchronization mechanisms such as locks or atomic
- * instructions. Lockless synchronization is therefore best used for
- * read-mostly objects. Updating still requires conventional lock-based
- * synchronization.
- *
- * The basic idea is that read-side critical sections are assumed to hold
- * read-side references, and objects for which there may be read-side
- * references must exist as long as such references may be held. The llsync
- * module tracks special system events to determine when read-side references
- * can no longer exist.
- *
- * Since read-side critical sections can run concurrently with updates,
- * it is important to make sure that objects are consistent when being
- * accessed. This is achieved with a publish/subscribe mechanism that relies
- * on the natural atomicity of machine word updates in memory, i.e. all
- * supported architectures must guarantee that, when updating a word, and
- * in turn a pointer, other processors reading that word obtain a valid
- * value, that is either the previous or the next value of the word, but not
- * a mixed-up value. The llsync module provides the llsync_store_ptr() and
- * llsync_load_ptr() wrappers that take care of low level details such as
- * compiler and memory barriers, so that objects are completely built and
- * consistent when published and accessed.
- *
- * As objects are published through pointers, multiple versions can exist at
- * the same time. Previous versions cannot be deleted as long as read-side
- * references may exist. Operations that must wait for all read-side references
- * to be dropped can be either synchronous, i.e. block until it is safe to
- * proceed, or be deferred, in which case they are queued and later handed to
- * the work module. As a result, special care must be taken if using lockless
- * synchronization in the work module itself.
- *
- * The two system events tracked by the llsync module are context switches
- * and a periodic event, normally the periodic timer interrupt that drives
- * the scheduler. Context switches are used as checkpoint triggers. A
- * checkpoint is a point in execution at which no read-side reference can
- * exist, i.e. the processor isn't running any read-side critical section.
- * Since context switches can be very frequent, a checkpoint is local to
- * the processor and lightweight. The periodic event is used to commit
- * checkpoints globally so that other processors are aware of the progress
- * of one another. As the system allows situations in which two periodic
- * events can occur without a single context switch, the periodic event is
- * also used as a checkpoint trigger. When all checkpoints have been
- * committed, a global checkpoint occurs. The occurrence of global checkpoints
- * allows the llsync module to determine when it is safe to process deferred
- * work or unblock update sides.
- */
-
-#ifndef _KERN_LLSYNC_H
-#define _KERN_LLSYNC_H
-
-#include <stdbool.h>
-
-#include <kern/atomic.h>
-#include <kern/macros.h>
-#include <kern/llsync_i.h>
-#include <kern/thread.h>
-#include <kern/work.h>
-
-/*
- * Safely store a pointer.
- */
-#define llsync_store_ptr(ptr, value) atomic_store(&(ptr), value, ATOMIC_RELEASE)
-
-/*
- * Safely load a pointer.
- */
-#define llsync_load_ptr(ptr) atomic_load(&(ptr), ATOMIC_CONSUME)
-
-/*
- * Read-side critical section enter/exit functions.
- *
- * It is not allowed to block inside a read-side critical section.
- */
-
-static inline void
-llsync_read_enter(void)
-{
- int in_read_cs;
-
- in_read_cs = thread_llsync_in_read_cs();
- thread_llsync_read_inc();
-
- if (!in_read_cs) {
- thread_preempt_disable();
- }
-}
-
-static inline void
-llsync_read_exit(void)
-{
- thread_llsync_read_dec();
-
- if (!thread_llsync_in_read_cs()) {
- thread_preempt_enable();
- }
-}
-
-/*
- * Return true if the llsync module is initialized, false otherwise.
- */
-bool llsync_ready(void);
-
-/*
- * Manage registration of the current processor.
- *
- * The caller must not be allowed to migrate when calling these functions.
- *
- * Registering tells the llsync module that the current processor reports
- * context switches and periodic events.
- *
- * When a processor enters a state in which checking in becomes irrelevant,
- * it unregisters itself so that the other registered processors don't need
- * to wait for it to make progress. For example, this is done inside the
- * idle loop since it is obviously impossible to enter a read-side critical
- * section while idling.
- */
-void llsync_register(void);
-void llsync_unregister(void);
-
-/*
- * Report a context switch on the current processor.
- *
- * Interrupts and preemption must be disabled when calling this function.
- */
-static inline void
-llsync_report_context_switch(void)
-{
- llsync_checkin();
-}
-
-/*
- * Report a periodic event on the current processor.
- *
- * Interrupts and preemption must be disabled when calling this function.
- */
-void llsync_report_periodic_event(void);
-
-/*
- * Defer an operation until all existing read-side references are dropped,
- * without blocking.
- */
-void llsync_defer(struct work *work);
-
-/*
- * Wait for all existing read-side references to be dropped.
- *
- * This function sleeps, and may do so for a moderately long duration (a few
- * system timer ticks).
- */
-void llsync_wait(void);
-
-#endif /* _KERN_LLSYNC_H */
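
The publish/subscribe mechanism described in the removed header survives the change, only renamed. A minimal sketch of that pattern written against the rcu wrappers that replace llsync_store_ptr() and llsync_load_ptr() (struct config and its users are hypothetical):

    #include <stddef.h>

    #include <kern/mutex.h>
    #include <kern/rcu.h>

    struct config {
        int threshold;
        int limit;
    };

    static struct config *config_ptr;
    static struct mutex config_mutex;

    /* Update side: build the new version completely before publishing it
     * with a single pointer store; concurrent readers then see either the
     * old or the new version, never a partially initialized one. */
    static void
    config_publish(struct config *new_config)
    {
        mutex_lock(&config_mutex);
        rcu_store_ptr(config_ptr, new_config);
        mutex_unlock(&config_mutex);
    }

    /* Read side: the loaded pointer may only be dereferenced inside the
     * read-side critical section. */
    static int
    config_threshold(void)
    {
        const struct config *config;
        int threshold;

        rcu_read_enter();
        config = rcu_load_ptr(config_ptr);
        threshold = (config == NULL) ? 0 : config->threshold;
        rcu_read_leave();

        return threshold;
    }

The previous version, once unpublished, must still outlive any read-side reference, e.g. by handing its release to rcu_defer() as in the list example above.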
diff --git a/kern/llsync_i.h b/kern/llsync_i.h
deleted file mode 100644
index e043eb9..0000000
--- a/kern/llsync_i.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2013-2014 Richard Braun.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _KERN_LLSYNC_I_H
-#define _KERN_LLSYNC_I_H
-
-#include <assert.h>
-#include <stdalign.h>
-
-#include <kern/cpumap.h>
-#include <kern/macros.h>
-#include <kern/spinlock.h>
-#include <kern/syscnt.h>
-#include <kern/work.h>
-#include <machine/cpu.h>
-
-/*
- * Global data.
- *
- * The queue number matches the number of global checkpoints that occurred
- * since works contained in it were added. After two global checkpoints,
- * works are scheduled for processing.
- *
- * Interrupts must be disabled when acquiring the global data lock.
- */
-struct llsync_data {
- struct spinlock lock;
- struct cpumap registered_cpus;
- unsigned int nr_registered_cpus;
- struct cpumap pending_checkpoints;
- unsigned int nr_pending_checkpoints;
- int no_warning;
- struct work_queue queue0;
- struct work_queue queue1;
- struct syscnt sc_global_checkpoints;
- struct syscnt sc_periodic_checkins;
- struct syscnt sc_failed_periodic_checkins;
-
- /*
- * Global checkpoint ID.
- *
- * This variable can be frequently accessed from many processors so :
- * - reserve a whole cache line for it
- * - apply optimistic accesses to reduce contention
- */
- struct {
- alignas(CPU_L1_SIZE) volatile unsigned int value;
- } gcid;
-};
-
-extern struct llsync_data llsync_data;
-
-/*
- * Per-processor data.
- *
- * Every processor records whether it is registered and a local copy of the
- * global checkpoint ID, which is meaningless on unregistered processors.
- * The true global checkpoint ID is incremented when a global checkpoint occurs,
- * after which all the local copies become stale. Checking in synchronizes
- * the local copy of the global checkpoint ID.
- *
- * When works are deferred, they are initially added to a processor-local
- * queue. This queue is regularly flushed to the global data, an operation
- * that occurs every time a processor may commit a checkpoint. The downside
- * of this scalability optimization is that it introduces some additional
- * latency for works that are added to a processor queue between a flush and
- * a global checkpoint.
- *
- * Interrupts and preemption must be disabled on access.
- */
-struct llsync_cpu_data {
- int registered;
- unsigned int gcid;
- struct work_queue queue0;
-};
-
-extern struct llsync_cpu_data llsync_cpu_data;
-
-static inline struct llsync_cpu_data *
-llsync_get_cpu_data(void)
-{
- return cpu_local_ptr(llsync_cpu_data);
-}
-
-static inline void
-llsync_checkin(void)
-{
- struct llsync_cpu_data *cpu_data;
- unsigned int gcid;
-
- assert(!cpu_intr_enabled());
- assert(!thread_preempt_enabled());
-
- cpu_data = llsync_get_cpu_data();
-
- if (!cpu_data->registered) {
- assert(work_queue_nr_works(&cpu_data->queue0) == 0);
- return;
- }
-
- /*
- * The global checkpoint ID obtained might be obsolete here, in which
- * case a commit will not determine that a checkpoint actually occurred.
- * This should seldom happen.
- */
- gcid = llsync_data.gcid.value;
- assert((gcid - cpu_data->gcid) <= 1);
- cpu_data->gcid = gcid;
-}
-
-#endif /* _KERN_LLSYNC_I_H */
diff --git a/kern/rdxtree.c b/kern/rdxtree.c
index 678ec17..9e2e759 100644
--- a/kern/rdxtree.c
+++ b/kern/rdxtree.c
@@ -25,8 +25,8 @@
#include <kern/error.h>
#include <kern/init.h>
#include <kern/kmem.h>
-#include <kern/llsync.h>
#include <kern/macros.h>
+#include <kern/rcu.h>
#include <kern/rdxtree.h>
#include <kern/rdxtree_i.h>
#include <kern/work.h>
@@ -190,17 +190,7 @@ rdxtree_node_schedule_destruction(struct rdxtree_node *node)
assert(node->parent == NULL);
work_init(&node->work, rdxtree_node_destroy_deferred);
-
- /*
- * This assumes that llsync is initialized before scheduling is started
- * so that there can be no read-side reference when destroying the node.
- */
- if (!llsync_ready()) {
- rdxtree_node_destroy(node);
- return;
- }
-
- llsync_defer(&node->work);
+ rcu_defer(&node->work);
}
static inline void
@@ -238,7 +228,7 @@ rdxtree_node_insert(struct rdxtree_node *node, unsigned short index,
assert(node->entries[index] == NULL);
node->nr_entries++;
- llsync_store_ptr(node->entries[index], entry);
+ rcu_store_ptr(node->entries[index], entry);
}
static inline void
@@ -255,7 +245,7 @@ rdxtree_node_remove(struct rdxtree_node *node, unsigned short index)
assert(node->entries[index] != NULL);
node->nr_entries--;
- llsync_store_ptr(node->entries[index], NULL);
+ rcu_store_ptr(node->entries[index], NULL);
}
static inline void *
@@ -267,7 +257,7 @@ rdxtree_node_find(struct rdxtree_node *node, unsigned short *indexp)
index = *indexp;
while (index < ARRAY_SIZE(node->entries)) {
- ptr = rdxtree_entry_addr(llsync_load_ptr(node->entries[index]));
+ ptr = rdxtree_entry_addr(rcu_load_ptr(node->entries[index]));
if (ptr != NULL) {
*indexp = index;
@@ -355,7 +345,7 @@ rdxtree_shrink(struct rdxtree *tree)
rdxtree_node_unlink(rdxtree_entry_addr(entry));
}
- llsync_store_ptr(tree->root, entry);
+ rcu_store_ptr(tree->root, entry);
/*
* There is still one valid entry (the first one) in this node. It
@@ -410,7 +400,7 @@ rdxtree_grow(struct rdxtree *tree, rdxtree_key_t key)
rdxtree_node_insert(node, 0, tree->root);
tree->height++;
- llsync_store_ptr(tree->root, rdxtree_node_to_entry(node));
+ rcu_store_ptr(tree->root, rdxtree_node_to_entry(node));
root = node;
} while (new_height > tree->height);
@@ -433,7 +423,7 @@ rdxtree_cleanup(struct rdxtree *tree, struct rdxtree_node *node)
if (node->parent == NULL) {
tree->height = 0;
- llsync_store_ptr(tree->root, NULL);
+ rcu_store_ptr(tree->root, NULL);
rdxtree_node_schedule_destruction(node);
break;
}
@@ -488,7 +478,7 @@ rdxtree_insert_common(struct rdxtree *tree, rdxtree_key_t key,
return ERROR_BUSY;
}
- llsync_store_ptr(tree->root, ptr);
+ rcu_store_ptr(tree->root, ptr);
if (slotp != NULL) {
*slotp = &tree->root;
@@ -516,7 +506,7 @@ rdxtree_insert_common(struct rdxtree *tree, rdxtree_key_t key,
}
if (prev == NULL) {
- llsync_store_ptr(tree->root, rdxtree_node_to_entry(node));
+ rcu_store_ptr(tree->root, rdxtree_node_to_entry(node));
} else {
rdxtree_node_link(node, prev, index);
rdxtree_node_insert_node(prev, index, node);
@@ -565,7 +555,7 @@ rdxtree_insert_alloc_common(struct rdxtree *tree, void *ptr,
if (unlikely(height == 0)) {
if (tree->root == NULL) {
- llsync_store_ptr(tree->root, ptr);
+ rcu_store_ptr(tree->root, ptr);
*keyp = 0;
if (slotp != NULL) {
@@ -661,7 +651,7 @@ rdxtree_remove(struct rdxtree *tree, rdxtree_key_t key)
node = rdxtree_entry_addr(tree->root);
if (unlikely(height == 0)) {
- llsync_store_ptr(tree->root, NULL);
+ rcu_store_ptr(tree->root, NULL);
return node;
}
@@ -700,7 +690,7 @@ rdxtree_lookup_common(const struct rdxtree *tree, rdxtree_key_t key,
unsigned short height, shift, index;
void *entry;
- entry = llsync_load_ptr(tree->root);
+ entry = rcu_load_ptr(tree->root);
if (entry == NULL) {
node = NULL;
@@ -731,7 +721,7 @@ rdxtree_lookup_common(const struct rdxtree *tree, rdxtree_key_t key,
prev = node;
index = (unsigned short)(key >> shift) & RDXTREE_RADIX_MASK;
- entry = llsync_load_ptr(node->entries[index]);
+ entry = rcu_load_ptr(node->entries[index]);
node = rdxtree_entry_addr(entry);
shift -= RDXTREE_RADIX;
height--;
@@ -755,7 +745,7 @@ rdxtree_replace_slot(void **slot, void *ptr)
old = *slot;
assert(old != NULL);
rdxtree_assert_alignment(old);
- llsync_store_ptr(*slot, ptr);
+ rcu_store_ptr(*slot, ptr);
return old;
}
@@ -767,7 +757,7 @@ rdxtree_walk_next(struct rdxtree *tree, struct rdxtree_iter *iter)
rdxtree_key_t key;
void *entry;
- entry = llsync_load_ptr(tree->root);
+ entry = rcu_load_ptr(tree->root);
if (entry == NULL) {
return NULL;
@@ -863,7 +853,7 @@ rdxtree_remove_all(struct rdxtree *tree)
if (tree->height == 0) {
if (tree->root != NULL) {
- llsync_store_ptr(tree->root, NULL);
+ rcu_store_ptr(tree->root, NULL);
}
return;
@@ -906,4 +896,5 @@ rdxtree_setup(void)
}
INIT_OP_DEFINE(rdxtree_setup,
- INIT_OP_DEP(kmem_bootstrap, true));
+ INIT_OP_DEP(kmem_bootstrap, true),
+ INIT_OP_DEP(rcu_bootstrap, true));
diff --git a/kern/rdxtree.h b/kern/rdxtree.h
index 1430208..f19f349 100644
--- a/kern/rdxtree.h
+++ b/kern/rdxtree.h
@@ -19,8 +19,7 @@
*
* In addition to the standard insertion operation, this implementation can
* allocate keys for the caller at insertion time. It also allows lookups to
- * occur concurrently with updates through the use of lockless synchronization
- * (see the llsync module).
+ * occur concurrently with updates using RCU.
*/
#ifndef _KERN_RDXTREE_H
@@ -30,7 +29,7 @@
#include <stdint.h>
#include <kern/init.h>
-#include <kern/llsync.h>
+#include <kern/rcu.h>
typedef uint64_t rdxtree_key_t;
@@ -160,7 +159,7 @@ rdxtree_lookup_slot(const struct rdxtree *tree, rdxtree_key_t key)
static inline void *
rdxtree_load_slot(void **slot)
{
- return llsync_load_ptr(*slot);
+ return rcu_load_ptr(*slot);
}
/*
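
A hedged sketch of how the slot interface combines with the renamed accessors; the function, its lock and the assumption that rdxtree_lookup_slot() returns either the slot address or NULL are illustrative, not taken from this commit:

    #include <stddef.h>

    #include <kern/mutex.h>
    #include <kern/rdxtree.h>

    /* Update side: slots are found and replaced under the tree's regular
     * lock-based synchronization; rdxtree_replace_slot() publishes the new
     * pointer so concurrent lockless lookups see either the old or the new
     * entry. */
    static void *
    tree_replace(struct rdxtree *tree, struct mutex *lock,
                 rdxtree_key_t key, void *new_ptr)
    {
        void **slot;
        void *old_ptr;

        mutex_lock(lock);
        slot = rdxtree_lookup_slot(tree, key);
        old_ptr = (slot == NULL) ? NULL : rdxtree_replace_slot(slot, new_ptr);
        mutex_unlock(lock);

        return old_ptr;
    }

Readers, as vm_object_lookup() at the end of this patch shows, call rdxtree_lookup() between rcu_read_enter() and rcu_read_leave(); the entry returned here must therefore not be freed before a grace period, e.g. by going through rcu_defer().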
diff --git a/kern/slist.h b/kern/slist.h
index c0dcb08..d708f38 100644
--- a/kern/slist.h
+++ b/kern/slist.h
@@ -24,8 +24,8 @@
#include <stdbool.h>
#include <stddef.h>
-#include <kern/llsync.h>
#include <kern/macros.h>
+#include <kern/rcu.h>
#include <kern/slist_types.h>
struct slist;
@@ -303,46 +303,46 @@ for (entry = slist_first_entry(list, typeof(*entry), member), \
* Return the first node of a list.
*/
static inline struct slist_node *
-slist_llsync_first(const struct slist *list)
+slist_rcu_first(const struct slist *list)
{
- return llsync_load_ptr(list->first);
+ return rcu_load_ptr(list->first);
}
/*
* Return the node next to the given node.
*/
static inline struct slist_node *
-slist_llsync_next(const struct slist_node *node)
+slist_rcu_next(const struct slist_node *node)
{
- return llsync_load_ptr(node->next);
+ return rcu_load_ptr(node->next);
}
/*
* Insert a node at the head of a list.
*/
static inline void
-slist_llsync_insert_head(struct slist *list, struct slist_node *node)
+slist_rcu_insert_head(struct slist *list, struct slist_node *node)
{
if (slist_empty(list)) {
list->last = node;
}
node->next = list->first;
- llsync_store_ptr(list->first, node);
+ rcu_store_ptr(list->first, node);
}
/*
* Insert a node at the tail of a list.
*/
static inline void
-slist_llsync_insert_tail(struct slist *list, struct slist_node *node)
+slist_rcu_insert_tail(struct slist *list, struct slist_node *node)
{
node->next = NULL;
if (slist_empty(list)) {
- llsync_store_ptr(list->first, node);
+ rcu_store_ptr(list->first, node);
} else {
- llsync_store_ptr(list->last->next, node);
+ rcu_store_ptr(list->last->next, node);
}
list->last = node;
@@ -354,11 +354,11 @@ slist_llsync_insert_tail(struct slist *list, struct slist_node *node)
* The prev node must be valid.
*/
static inline void
-slist_llsync_insert_after(struct slist *list, struct slist_node *prev,
+slist_rcu_insert_after(struct slist *list, struct slist_node *prev,
struct slist_node *node)
{
node->next = prev->next;
- llsync_store_ptr(prev->next, node);
+ rcu_store_ptr(prev->next, node);
if (list->last == prev) {
list->last = node;
@@ -373,20 +373,20 @@ slist_llsync_insert_after(struct slist *list, struct slist_node *prev,
* first node is removed.
*/
static inline void
-slist_llsync_remove(struct slist *list, struct slist_node *prev)
+slist_rcu_remove(struct slist *list, struct slist_node *prev)
{
struct slist_node *node;
if (slist_end(prev)) {
node = list->first;
- llsync_store_ptr(list->first, node->next);
+ rcu_store_ptr(list->first, node->next);
if (list->last == node) {
list->last = NULL;
}
} else {
node = prev->next;
- llsync_store_ptr(prev->next, node->next);
+ rcu_store_ptr(prev->next, node->next);
if (list->last == node) {
list->last = prev;
@@ -398,28 +398,28 @@ slist_llsync_remove(struct slist *list, struct slist_node *prev)
* Macro that evaluates to the address of the structure containing the
* given node based on the given type and member.
*/
-#define slist_llsync_entry(node, type, member) \
- structof(llsync_load_ptr(node), type, member)
+#define slist_rcu_entry(node, type, member) \
+ structof(rcu_load_ptr(node), type, member)
/*
* Get the first entry of a list.
*/
-#define slist_llsync_first_entry(list, type, member) \
+#define slist_rcu_first_entry(list, type, member) \
MACRO_BEGIN \
struct slist_node *___first; \
\
- ___first = slist_llsync_first(list); \
+ ___first = slist_rcu_first(list); \
slist_end(___first) ? NULL : slist_entry(___first, type, member); \
MACRO_END
/*
* Get the entry next to the given entry.
*/
-#define slist_llsync_next_entry(entry, member) \
+#define slist_rcu_next_entry(entry, member) \
MACRO_BEGIN \
struct slist_node *___next; \
\
- ___next = slist_llsync_next(&entry->member); \
+ ___next = slist_rcu_next(&entry->member); \
slist_end(___next) \
? NULL \
: slist_entry(___next, typeof(*entry), member); \
@@ -428,17 +428,17 @@ MACRO_END
/*
* Forge a loop to process all nodes of a list.
*/
-#define slist_llsync_for_each(list, node) \
-for (node = slist_llsync_first(list); \
+#define slist_rcu_for_each(list, node) \
+for (node = slist_rcu_first(list); \
!slist_end(node); \
- node = slist_llsync_next(node))
+ node = slist_rcu_next(node))
/*
* Forge a loop to process all entries of a list.
*/
-#define slist_llsync_for_each_entry(list, entry, member) \
-for (entry = slist_llsync_first_entry(list, typeof(*entry), member); \
+#define slist_rcu_for_each_entry(list, entry, member) \
+for (entry = slist_rcu_first_entry(list, typeof(*entry), member); \
entry != NULL; \
- entry = slist_llsync_next_entry(entry, member))
+ entry = slist_rcu_next_entry(entry, member))
#endif /* _KERN_SLIST_H */
diff --git a/kern/thread.c b/kern/thread.c
index 8460f3b..926ca74 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -97,7 +97,6 @@
#include <kern/init.h>
#include <kern/kmem.h>
#include <kern/list.h>
-#include <kern/llsync.h>
#include <kern/macros.h>
#include <kern/panic.h>
#include <kern/percpu.h>
@@ -624,8 +623,6 @@ thread_runq_schedule(struct thread_runq *runq)
assert(!cpu_intr_enabled());
spinlock_assert_locked(&runq->lock);
- llsync_report_context_switch();
-
thread_clear_flag(prev, THREAD_YIELD);
thread_runq_put_prev(runq, prev);
@@ -1831,7 +1828,6 @@ thread_init(struct thread *thread, void *stack,
thread->preempt_level = THREAD_SUSPEND_PREEMPT_LEVEL;
thread->pin_level = 0;
thread->intr_level = 0;
- thread->llsync_level = 0;
rcu_reader_init(&thread->rcu_reader);
cpumap_copy(&thread->cpumap, cpumap);
thread_set_user_sched_policy(thread, attr->policy);
@@ -2120,7 +2116,6 @@ thread_idle(void *arg)
for (;;) {
thread_preempt_disable();
- llsync_unregister();
for (;;) {
cpu_intr_disable();
@@ -2133,7 +2128,6 @@ thread_idle(void *arg)
cpu_idle();
}
- llsync_register();
thread_preempt_enable();
}
}
@@ -2621,7 +2615,6 @@ thread_run_scheduler(void)
assert(thread == runq->current);
assert(thread->preempt_level == (THREAD_SUSPEND_PREEMPT_LEVEL - 1));
- llsync_register();
sref_register();
spinlock_lock(&runq->lock);
diff --git a/kern/thread.h b/kern/thread.h
index c4dbd5d..f2cbfa0 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -644,41 +644,6 @@ thread_intr_leave(void)
}
/*
- * Lockless synchronization read-side critical section level control functions.
- */
-
-static inline int
-thread_llsync_in_read_cs(void)
-{
- struct thread *thread;
-
- thread = thread_self();
- return thread->llsync_level != 0;
-}
-
-static inline void
-thread_llsync_read_inc(void)
-{
- struct thread *thread;
-
- thread = thread_self();
- thread->llsync_level++;
- assert(thread->llsync_level != 0);
- barrier();
-}
-
-static inline void
-thread_llsync_read_dec(void)
-{
- struct thread *thread;
-
- barrier();
- thread = thread_self();
- assert(thread->llsync_level != 0);
- thread->llsync_level--;
-}
-
-/*
* RCU functions.
*/
diff --git a/kern/thread_i.h b/kern/thread_i.h
index 979d4b0..f46b956 100644
--- a/kern/thread_i.h
+++ b/kern/thread_i.h
@@ -135,9 +135,7 @@ struct thread {
/* Interrupt level, in thread context if 0 */
unsigned short intr_level; /* (-) */
- /* Read-side critical section level, not in any if 0 */
- unsigned short llsync_level; /* (-) */
-
+ /* RCU per-thread data */
struct rcu_reader rcu_reader; /* (-) */
/* Processors on which this thread is allowed to run */
diff --git a/test/Kconfig b/test/Kconfig
index 6a6221b..5c3072d 100644
--- a/test/Kconfig
+++ b/test/Kconfig
@@ -9,9 +9,6 @@ if TEST_MODULE
choice
prompt "Select test module"
-config TEST_MODULE_LLSYNC_DEFER
- bool "llsync_defer"
-
config TEST_MODULE_MUTEX
bool "mutex"
select MUTEX_DEBUG
diff --git a/test/Makefile b/test/Makefile
index b7ac3d7..96541af 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -1,4 +1,3 @@
-x15_SOURCES-$(CONFIG_TEST_MODULE_LLSYNC_DEFER) += test/test_llsync_defer.c
x15_SOURCES-$(CONFIG_TEST_MODULE_MUTEX) += test/test_mutex.c
x15_SOURCES-$(CONFIG_TEST_MODULE_MUTEX_PI) += test/test_mutex_pi.c
x15_SOURCES-$(CONFIG_TEST_MODULE_PMAP_UPDATE_MP) += test/test_pmap_update_mp.c
diff --git a/test/test_llsync_defer.c b/test/test_llsync_defer.c
deleted file mode 100644
index b57efc2..0000000
--- a/test/test_llsync_defer.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Copyright (c) 2014 Richard Braun.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- *
- * This test module is a stress test, expected to never terminate, of the
- * work deferring functionality of the llsync module. It creates three
- * threads, a producer, a consumer, and a "peeker". The producer allocates
- * a page and writes it. It then transfers the page to the consumer, using
- * the llsync interface to update the global page pointer. Once at the
- * consumer, the llsync interface is used to defer the release of the page.
- * Concurrently, the peeker accesses the page and checks its content when
- * available. These accesses are performed inside a read-side critical
- * section and should therefore never fail.
- *
- * Each thread regularly prints a string to report that it's making progress.
- */
-
-#include <stddef.h>
-#include <stdio.h>
-#include <string.h>
-
-#include <kern/condition.h>
-#include <kern/error.h>
-#include <kern/init.h>
-#include <kern/kmem.h>
-#include <kern/llsync.h>
-#include <kern/macros.h>
-#include <kern/mutex.h>
-#include <kern/panic.h>
-#include <kern/thread.h>
-#include <kern/work.h>
-#include <machine/page.h>
-#include <test/test.h>
-#include <vm/vm_kmem.h>
-
-#define TEST_LOOPS_PER_PRINT 100000
-
-struct test_pdsc {
- struct work work;
- void *addr;
-};
-
-#define TEST_VALIDATION_BYTE 0xab
-
-static struct mutex test_lock;
-static struct condition test_condition;
-static struct test_pdsc *test_pdsc;
-
-static struct kmem_cache test_pdsc_cache;
-
-static void
-test_alloc(void *arg)
-{
- struct test_pdsc *pdsc;
- unsigned long i;
-
- (void)arg;
-
- i = 0;
-
- mutex_lock(&test_lock);
-
- for (;;) {
- while (test_pdsc != NULL) {
- condition_wait(&test_condition, &test_lock);
- }
-
- pdsc = kmem_cache_alloc(&test_pdsc_cache);
-
- if (pdsc != NULL) {
- pdsc->addr = vm_kmem_alloc(PAGE_SIZE);
-
- if (pdsc->addr != NULL) {
- memset(pdsc->addr, TEST_VALIDATION_BYTE, PAGE_SIZE);
- }
- }
-
- llsync_store_ptr(test_pdsc, pdsc);
- condition_signal(&test_condition);
-
- if ((i % TEST_LOOPS_PER_PRINT) == 0) {
- printf("alloc ");
- }
-
- i++;
- }
-}
-
-static void
-test_deferred_free(struct work *work)
-{
- struct test_pdsc *pdsc;
-
- pdsc = structof(work, struct test_pdsc, work);
-
- if (pdsc->addr != NULL) {
- vm_kmem_free(pdsc->addr, PAGE_SIZE);
- }
-
- kmem_cache_free(&test_pdsc_cache, pdsc);
-}
-
-static void
-test_free(void *arg)
-{
- struct test_pdsc *pdsc;
- unsigned long i;
-
- (void)arg;
-
- i = 0;
-
- mutex_lock(&test_lock);
-
- for (;;) {
- while (test_pdsc == NULL) {
- condition_wait(&test_condition, &test_lock);
- }
-
- pdsc = test_pdsc;
- llsync_store_ptr(test_pdsc, NULL);
-
- if (pdsc != NULL) {
- work_init(&pdsc->work, test_deferred_free);
- llsync_defer(&pdsc->work);
- }
-
- condition_signal(&test_condition);
-
- if ((i % TEST_LOOPS_PER_PRINT) == 0) {
- printf("free ");
- }
-
- i++;
- }
-}
-
-static void
-test_read(void *arg)
-{
- const struct test_pdsc *pdsc;
- const unsigned char *s;
- unsigned long i, j;
-
- (void)arg;
-
- i = 0;
-
- for (;;) {
- llsync_read_enter();
-
- pdsc = llsync_load_ptr(test_pdsc);
-
- if (pdsc != NULL) {
- s = (const unsigned char *)pdsc->addr;
-
- if (s != NULL) {
- for (j = 0; j < PAGE_SIZE; j++) {
- if (s[j] != TEST_VALIDATION_BYTE) {
- panic("invalid content");
- }
- }
-
- if ((i % TEST_LOOPS_PER_PRINT) == 0) {
- printf("read ");
- }
-
- i++;
- }
- }
-
- llsync_read_exit();
- }
-}
-
-void __init
-test_setup(void)
-{
- struct thread_attr attr;
- struct thread *thread;
- int error;
-
- condition_init(&test_condition);
- mutex_init(&test_lock);
-
- kmem_cache_init(&test_pdsc_cache, "test_pdsc",
- sizeof(struct test_pdsc), 0, NULL, 0);
-
- thread_attr_init(&attr, THREAD_KERNEL_PREFIX "test_alloc");
- thread_attr_set_detached(&attr);
- error = thread_create(&thread, &attr, test_alloc, NULL);
- error_check(error, "thread_create");
-
- thread_attr_init(&attr, THREAD_KERNEL_PREFIX "test_free");
- thread_attr_set_detached(&attr);
- error = thread_create(&thread, &attr, test_free, NULL);
- error_check(error, "thread_create");
-
- thread_attr_init(&attr, THREAD_KERNEL_PREFIX "test_read");
- thread_attr_set_detached(&attr);
- error = thread_create(&thread, &attr, test_read, NULL);
- error_check(error, "thread_create");
-}
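
The removed stress test maps directly onto the renamed interface; the build_configs.py hunk below adds CONFIG_TEST_MODULE_RCU_DEFER, which suggests an equivalent rcu-based test already exists in the tree (it is not part of this patch). As an illustration only, here is the peeker loop translated to the surviving primitives, reusing the declarations of the removed file above:

    static void
    test_read_rcu(void *arg)
    {
        const struct test_pdsc *pdsc;
        const unsigned char *s;
        unsigned long j;

        (void)arg;

        for (;;) {
            rcu_read_enter();

            pdsc = rcu_load_ptr(test_pdsc);

            if ((pdsc != NULL) && (pdsc->addr != NULL)) {
                s = (const unsigned char *)pdsc->addr;

                for (j = 0; j < PAGE_SIZE; j++) {
                    if (s[j] != TEST_VALIDATION_BYTE) {
                        panic("invalid content");
                    }
                }
            }

            rcu_read_leave();
        }
    }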
diff --git a/tools/build_configs.py b/tools/build_configs.py
index 1770e4b..129ed92 100755
--- a/tools/build_configs.py
+++ b/tools/build_configs.py
@@ -104,10 +104,10 @@ large_options_dict.update({
})
test_list = [
- 'CONFIG_TEST_MODULE_LLSYNC_DEFER',
'CONFIG_TEST_MODULE_MUTEX',
'CONFIG_TEST_MODULE_MUTEX_PI',
'CONFIG_TEST_MODULE_PMAP_UPDATE_MP',
+ 'CONFIG_TEST_MODULE_RCU_DEFER',
'CONFIG_TEST_MODULE_SREF_DIRTY_ZEROES',
'CONFIG_TEST_MODULE_SREF_NOREF',
'CONFIG_TEST_MODULE_SREF_WEAKREF',
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 707008e..2cb4eed 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -24,8 +24,8 @@
#include <stdint.h>
#include <kern/init.h>
-#include <kern/llsync.h>
#include <kern/mutex.h>
+#include <kern/rcu.h>
#include <kern/rdxtree.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
@@ -132,7 +132,7 @@ vm_object_lookup(struct vm_object *object, uint64_t offset)
struct vm_page *page;
int error;
- llsync_read_enter();
+ rcu_read_enter();
do {
page = rdxtree_lookup(&object->pages, vm_page_btop(offset));
@@ -144,7 +144,7 @@ vm_object_lookup(struct vm_object *object, uint64_t offset)
error = vm_page_tryref(page);
} while (error);
- llsync_read_exit();
+ rcu_read_leave();
return page;
}