summary refs log tree commit diff
diff options
context:
space:
mode:
author	Richard Braun <rbraun@sceen.net>	2018-02-20 22:57:21 +0100
committer	Richard Braun <rbraun@sceen.net>	2018-02-20 22:57:21 +0100
commit	326118bf300cf096cee04cb0a64789151ef8e273 (patch)
tree	082660557b642253771c483b1d535e8d3f95eb75
parent	da8eb9c244d27fd042adde6234ccec079681d7f4 (diff)
Rework the initialization operations of some kernel modules
In order to avoid workarounds that check whether a module is ready or not, break the initialization of some core modules into a bootstrap step for basic BSP initialization, and a setup step that completes initialization. Most users only need the bootstrap operation as a dependency, especially since scheduling isn't enabled yet.
-rw-r--r--kern/clock.c1
-rw-r--r--kern/clock.h2
-rw-r--r--kern/llsync.c3
-rw-r--r--kern/rcu.c25
-rw-r--r--kern/rcu.h5
-rw-r--r--kern/sref.c4
-rw-r--r--kern/sref.h4
-rw-r--r--kern/timer.c17
-rw-r--r--kern/timer.h3
-rw-r--r--kern/work.c64
-rw-r--r--kern/work.h5
11 files changed, 85 insertions, 48 deletions
diff --git a/kern/clock.c b/kern/clock.c
index c09ffdd..c69bf42 100644
--- a/kern/clock.c
+++ b/kern/clock.c
@@ -61,7 +61,6 @@ clock_setup(void)
}
INIT_OP_DEFINE(clock_setup,
- INIT_OP_DEP(boot_setup_intr, true),
INIT_OP_DEP(cpu_mp_probe, true),
INIT_OP_DEP(syscnt_setup, true));
diff --git a/kern/clock.h b/kern/clock.h
index 854c146..3b695d5 100644
--- a/kern/clock.h
+++ b/kern/clock.h
@@ -116,6 +116,4 @@ clock_time_occurred(uint64_t t, uint64_t ref)
void clock_tick_intr(void);
-INIT_OP_DECLARE(clock_setup);
-
#endif /* _KERN_CLOCK_H */
diff --git a/kern/llsync.c b/kern/llsync.c
index 5fcbd73..b7b8b2f 100644
--- a/kern/llsync.c
+++ b/kern/llsync.c
@@ -113,8 +113,7 @@ INIT_OP_DEFINE(llsync_setup,
INIT_OP_DEP(mutex_setup, true),
INIT_OP_DEP(spinlock_setup, true),
INIT_OP_DEP(syscnt_setup, true),
- INIT_OP_DEP(thread_bootstrap, true),
- INIT_OP_DEP(work_setup, true));
+ INIT_OP_DEP(thread_bootstrap, true));
static void
llsync_process_global_checkpoint(void)
diff --git a/kern/rcu.c b/kern/rcu.c
index 82c62df..0f8bb6f 100644
--- a/kern/rcu.c
+++ b/kern/rcu.c
@@ -750,11 +750,24 @@ rcu_wait(void)
}
static int __init
-rcu_setup(void)
+rcu_bootstrap(void)
{
rcu_data_init(&rcu_data);
+ rcu_cpu_data_init(cpu_local_ptr(rcu_cpu_data), 0);
+ return 0;
+}
+
+INIT_OP_DEFINE(rcu_bootstrap,
+ INIT_OP_DEP(spinlock_setup, true),
+ INIT_OP_DEP(sref_bootstrap, true),
+ INIT_OP_DEP(syscnt_setup, true),
+ INIT_OP_DEP(thread_bootstrap, true),
+ INIT_OP_DEP(timer_bootstrap, true));
- for (size_t i = 0; i < cpu_count(); i++) {
+static int __init
+rcu_setup(void)
+{
+ for (size_t i = 1; i < cpu_count(); i++) {
rcu_cpu_data_init(percpu_ptr(rcu_cpu_data, i), i);
}
@@ -762,11 +775,5 @@ rcu_setup(void)
}
INIT_OP_DEFINE(rcu_setup,
- INIT_OP_DEP(clock_setup, true),
INIT_OP_DEP(cpu_mp_probe, true),
- INIT_OP_DEP(spinlock_setup, true),
- INIT_OP_DEP(sref_setup, true),
- INIT_OP_DEP(syscnt_setup, true),
- INIT_OP_DEP(thread_bootstrap, true),
- INIT_OP_DEP(timer_setup, true),
- INIT_OP_DEP(work_setup, true));
+ INIT_OP_DEP(rcu_bootstrap, true));
diff --git a/kern/rcu.h b/kern/rcu.h
index 1ddae7d..48079d0 100644
--- a/kern/rcu.h
+++ b/kern/rcu.h
@@ -138,9 +138,8 @@ void rcu_wait(void);
/*
* This init operation provides :
- * - read-side critical sections may be used
- * - module fully initialized
+ * - read-side critical sections usable
*/
-INIT_OP_DECLARE(rcu_setup);
+INIT_OP_DECLARE(rcu_bootstrap);
#endif /* _KERN_RCU_H */
diff --git a/kern/sref.c b/kern/sref.c
index f145f80..2b20cb4 100644
--- a/kern/sref.c
+++ b/kern/sref.c
@@ -827,6 +827,8 @@ sref_bootstrap(void)
}
INIT_OP_DEFINE(sref_bootstrap,
+ INIT_OP_DEP(cpu_setup, true),
+ INIT_OP_DEP(spinlock_setup, true),
INIT_OP_DEP(syscnt_setup, true));
static void __init
@@ -876,10 +878,10 @@ sref_setup(void)
INIT_OP_DEFINE(sref_setup,
INIT_OP_DEP(cpu_mp_probe, true),
+ INIT_OP_DEP(cpumap_setup, true),
INIT_OP_DEP(log_setup, true),
INIT_OP_DEP(panic_setup, true),
INIT_OP_DEP(sref_bootstrap, true),
- INIT_OP_DEP(syscnt_setup, true),
INIT_OP_DEP(thread_setup, true));
void
diff --git a/kern/sref.h b/kern/sref.h
index f035a64..fb62d83 100644
--- a/kern/sref.h
+++ b/kern/sref.h
@@ -109,8 +109,8 @@ struct sref_counter * sref_weakref_get(struct sref_weakref *weakref);
/*
* This init operation provides :
- * - module fully initialized
+ * - sref counter and weakref initialization and usage
*/
-INIT_OP_DECLARE(sref_setup);
+INIT_OP_DECLARE(sref_bootstrap);
#endif /* _KERN_SREF_H */
diff --git a/kern/timer.c b/kern/timer.c
index f61ab71..cafe49f 100644
--- a/kern/timer.c
+++ b/kern/timer.c
@@ -410,9 +410,20 @@ timer_bucket_filter(struct timer_bucket *bucket, uint64_t now,
}
static int __init
+timer_bootstrap(void)
+{
+ timer_cpu_data_init(cpu_local_ptr(timer_cpu_data), 0);
+ return 0;
+}
+
+INIT_OP_DEFINE(timer_bootstrap,
+ INIT_OP_DEP(cpu_setup, true),
+ INIT_OP_DEP(spinlock_setup, true));
+
+static int __init
timer_setup(void)
{
- for (unsigned int cpu = 0; cpu < cpu_count(); cpu++) {
+ for (unsigned int cpu = 1; cpu < cpu_count(); cpu++) {
timer_cpu_data_init(percpu_ptr(timer_cpu_data, cpu), cpu);
}
@@ -420,8 +431,8 @@ timer_setup(void)
}
INIT_OP_DEFINE(timer_setup,
- INIT_OP_DEP(boot_setup_intr, true),
- INIT_OP_DEP(cpu_mp_probe, true));
+ INIT_OP_DEP(cpu_mp_probe, true),
+ INIT_OP_DEP(spinlock_setup, true));
void
timer_init(struct timer *timer, timer_fn_t fn, int flags)
diff --git a/kern/timer.h b/kern/timer.h
index ddace45..d47e5a7 100644
--- a/kern/timer.h
+++ b/kern/timer.h
@@ -99,8 +99,7 @@ void timer_report_periodic_event(void);
/*
* This init operation provides :
* - timer initialization and scheduling
- * - module fully initialized
*/
-INIT_OP_DECLARE(timer_setup);
+INIT_OP_DECLARE(timer_bootstrap);
#endif /* _KERN_TIMER_H */
diff --git a/kern/work.c b/kern/work.c
index c1bdede..d496961 100644
--- a/kern/work.c
+++ b/kern/work.c
@@ -102,7 +102,6 @@ struct work_pool {
};
static int work_thread_create(struct work_pool *pool, unsigned int id);
-static void work_thread_destroy(struct work_thread *worker);
static struct work_pool work_pool_cpu_main __percpu;
static struct work_pool work_pool_cpu_highprio __percpu;
@@ -158,7 +157,16 @@ work_pool_compute_max_threads(unsigned int nr_cpus)
}
static void __init
-work_pool_init(struct work_pool *pool, unsigned int cpu, int flags)
+work_pool_init(struct work_pool *pool)
+{
+ spinlock_init(&pool->lock);
+ work_queue_init(&pool->queue0);
+ work_queue_init(&pool->queue1);
+ pool->manager = NULL;
+}
+
+static void __init
+work_pool_build(struct work_pool *pool, unsigned int cpu, int flags)
{
char name[SYSCNT_NAME_SIZE];
const char *suffix;
@@ -180,10 +188,6 @@ work_pool_init(struct work_pool *pool, unsigned int cpu, int flags)
max_threads = work_pool_compute_max_threads(nr_cpus);
- spinlock_init(&pool->lock);
- work_queue_init(&pool->queue0);
- work_queue_init(&pool->queue1);
- pool->manager = NULL;
pool->max_threads = max_threads;
pool->nr_threads = 0;
pool->nr_available_threads = 0;
@@ -292,6 +296,13 @@ work_pool_concat_queue(struct work_pool *pool, struct work_queue *queue)
}
static void
+work_thread_destroy(struct work_thread *worker)
+{
+ thread_join(worker->thread);
+ kmem_cache_free(&work_thread_cache, worker);
+}
+
+static void
work_process(void *arg)
{
struct work_thread *self, *worker;
@@ -464,30 +475,42 @@ error_cpumap:
return error;
}
-static void
-work_thread_destroy(struct work_thread *worker)
+static int __init
+work_bootstrap(void)
{
- thread_join(worker->thread);
- kmem_cache_free(&work_thread_cache, worker);
+ work_pool_init(cpu_local_ptr(work_pool_cpu_main));
+ work_pool_init(cpu_local_ptr(work_pool_cpu_highprio));
+ return 0;
}
+INIT_OP_DEFINE(work_bootstrap,
+ INIT_OP_DEP(cpu_setup, true),
+ INIT_OP_DEP(spinlock_setup, true),
+ INIT_OP_DEP(thread_bootstrap, true));
+
static int __init
work_setup(void)
{
- unsigned int i;
-
kmem_cache_init(&work_thread_cache, "work_thread",
sizeof(struct work_thread), 0, NULL, 0);
- for (i = 0; i < cpu_count(); i++) {
- work_pool_init(percpu_ptr(work_pool_cpu_main, i), i, 0);
- work_pool_init(percpu_ptr(work_pool_cpu_highprio, i), i,
- WORK_PF_HIGHPRIO);
+ for (unsigned int i = 1; i < cpu_count(); i++) {
+ work_pool_init(percpu_ptr(work_pool_cpu_main, i));
+ work_pool_init(percpu_ptr(work_pool_cpu_highprio, i));
+ }
+
+ work_pool_init(&work_pool_main);
+ work_pool_init(&work_pool_highprio);
+
+ for (unsigned int i = 0; i < cpu_count(); i++) {
+ work_pool_build(percpu_ptr(work_pool_cpu_main, i), i, 0);
+ work_pool_build(percpu_ptr(work_pool_cpu_highprio, i), i,
+ WORK_PF_HIGHPRIO);
}
- work_pool_init(&work_pool_main, WORK_INVALID_CPU, WORK_PF_GLOBAL);
- work_pool_init(&work_pool_highprio, WORK_INVALID_CPU,
- WORK_PF_GLOBAL | WORK_PF_HIGHPRIO);
+ work_pool_build(&work_pool_main, WORK_INVALID_CPU, WORK_PF_GLOBAL);
+ work_pool_build(&work_pool_highprio, WORK_INVALID_CPU,
+ WORK_PF_GLOBAL | WORK_PF_HIGHPRIO);
log_info("work: threads per pool (per-cpu/global): %u/%u, spare: %u",
percpu_var(work_pool_cpu_main.max_threads, 0),
@@ -504,7 +527,8 @@ INIT_OP_DEFINE(work_setup,
INIT_OP_DEP(panic_setup, true),
INIT_OP_DEP(spinlock_setup, true),
INIT_OP_DEP(syscnt_setup, true),
- INIT_OP_DEP(thread_setup, true));
+ INIT_OP_DEP(thread_setup, true),
+ INIT_OP_DEP(work_bootstrap, true));
void
work_schedule(struct work *work, int flags)
diff --git a/kern/work.h b/kern/work.h
index 09aff38..ccd3c50 100644
--- a/kern/work.h
+++ b/kern/work.h
@@ -151,9 +151,8 @@ void work_report_periodic_event(void);
/*
* This init operation provides :
- * - works can be scheduled
- * - module fully initialized
+ * - work / work queue initialization and scheduling
*/
-INIT_OP_DECLARE(work_setup);
+INIT_OP_DECLARE(work_bootstrap);
#endif /* _KERN_WORK_H */