-rw-r--r--  Makefrag.am    |   3
-rw-r--r--  kern/clock.c   |   2
-rw-r--r--  kern/timer.c   | 536
-rw-r--r--  kern/timer.h   | 103
-rw-r--r--  kern/timer_i.h |  41
5 files changed, 685 insertions, 0 deletions
diff --git a/Makefrag.am b/Makefrag.am
index 336060d2..f29e5926 100644
--- a/Makefrag.am
+++ b/Makefrag.am
@@ -111,6 +111,9 @@ x15_SOURCES += \
kern/thread.c \
kern/thread.h \
kern/thread_i.h \
+ kern/timer.c \
+ kern/timer.h \
+ kern/timer_i.h \
kern/turnstile.c \
kern/turnstile.h \
kern/turnstile_types.h \
diff --git a/kern/clock.c b/kern/clock.c
index d8af3f4c..b4a6f046 100644
--- a/kern/clock.c
+++ b/kern/clock.c
@@ -27,6 +27,7 @@
#include <kern/sref.h>
#include <kern/syscnt.h>
#include <kern/thread.h>
+#include <kern/timer.h>
#include <kern/work.h>
#include <machine/boot.h>
#include <machine/cpu.h>
@@ -89,6 +90,7 @@ void clock_tick_intr(void)
#endif /* ATOMIC_HAVE_64B_OPS */
}
+ timer_report_periodic_event();
llsync_report_periodic_event();
sref_report_periodic_event();
work_report_periodic_event();
diff --git a/kern/timer.c b/kern/timer.c
new file mode 100644
index 00000000..25f59f24
--- /dev/null
+++ b/kern/timer.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * This implementation is based on "Hashed and Hierarchical Timing Wheels:
+ * Efficient Data Structures for Implementing a Timer Facility" by George
+ * Varghese and Tony Lauck. Specifically, it implements scheme 6.1.2.
+ */
+
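+/*
+ * Rough sketch of how scheme 6.1.2 maps onto the definitions below: a
+ * timer expiring at an absolute time in ticks hashes into one of
+ * TIMER_HTABLE_SIZE unsorted buckets:
+ *
+ *     bucket = &cpu_data->htable[timer->ticks & TIMER_HTABLE_MASK];
+ *
+ * Each periodic event walks one bucket per elapsed tick and only runs
+ * the timers whose absolute expiration time has been reached; the others
+ * stay in place until the wheel wraps around to their bucket again.
+ */
+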
+#include <assert.h>
+#include <stdalign.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <kern/clock.h>
+#include <kern/error.h>
+#include <kern/init.h>
+#include <kern/hlist.h>
+#include <kern/macros.h>
+#include <kern/panic.h>
+#include <kern/percpu.h>
+#include <kern/spinlock.h>
+#include <kern/thread.h>
+#include <kern/timer.h>
+#include <kern/timer_i.h>
+#include <kern/work.h>
+#include <machine/boot.h>
+#include <machine/cpu.h>
+
+/*
+ * Timer states.
+ */
+#define TIMER_TS_READY 1
+#define TIMER_TS_SCHEDULED 2
+#define TIMER_TS_RUNNING 3
+#define TIMER_TS_DONE 4
+
+/*
+ * Timer flags.
+ */
+#define TIMER_TF_DETACHED 0x1
+#define TIMER_TF_INTR 0x2
+#define TIMER_TF_HIGH_PRIO 0x4
+#define TIMER_TF_CANCELED 0x8
+
+#define TIMER_HTABLE_SIZE 2048
+
+#if !ISP2(TIMER_HTABLE_SIZE)
+#error "hash table size must be a power of two"
+#endif /* !ISP2(TIMER_HTABLE_SIZE) */
+
+#define TIMER_HTABLE_MASK (TIMER_HTABLE_SIZE - 1)
+
+struct timer_bucket {
+ struct hlist timers;
+};
+
+/*
+ * The hash table bucket matching the last_time member has already been
+ * processed; the next periodic event resumes from the following bucket.
+ *
+ * Locking order: interrupts -> timer_cpu_data.
+ */
+struct timer_cpu_data {
+ unsigned int cpu;
+ struct spinlock lock;
+ uint64_t last_time;
+ struct timer_bucket htable[TIMER_HTABLE_SIZE];
+};
+
+static struct timer_cpu_data timer_cpu_data __percpu;
+
+static struct timer_cpu_data *
+timer_lock_cpu_data(struct timer *timer, unsigned long *flagsp)
+{
+ struct timer_cpu_data *cpu_data;
+
+ cpu_data = percpu_ptr(timer_cpu_data, timer->cpu);
+ spinlock_lock_intr_save(&cpu_data->lock, flagsp);
+ return cpu_data;
+}
+
+static void
+timer_unlock_cpu_data(struct timer_cpu_data *cpu_data, unsigned long flags)
+{
+ spinlock_unlock_intr_restore(&cpu_data->lock, flags);
+}
+
+/*
+ * Timer state functions.
+ */
+
+static bool
+timer_ready(const struct timer *timer)
+{
+ return timer->state == TIMER_TS_READY;
+}
+
+static void
+timer_set_ready(struct timer *timer)
+{
+ timer->state = TIMER_TS_READY;
+}
+
+static bool
+timer_scheduled(const struct timer *timer)
+{
+ return timer->state == TIMER_TS_SCHEDULED;
+}
+
+static void
+timer_set_scheduled(struct timer *timer, unsigned int cpu)
+{
+ timer->cpu = cpu;
+ timer->state = TIMER_TS_SCHEDULED;
+}
+
+static bool
+timer_running(const struct timer *timer)
+{
+ return timer->state == TIMER_TS_RUNNING;
+}
+
+static void
+timer_set_running(struct timer *timer)
+{
+ timer->state = TIMER_TS_RUNNING;
+}
+
+static bool
+timer_done(const struct timer *timer)
+{
+ return timer->state == TIMER_TS_DONE;
+}
+
+static void
+timer_set_done(struct timer *timer)
+{
+ timer->state = TIMER_TS_DONE;
+}
+
+/*
+ * Timer flags functions.
+ */
+
+static bool
+timer_detached(const struct timer *timer)
+{
+ return timer->flags & TIMER_TF_DETACHED;
+}
+
+static void
+timer_set_detached(struct timer *timer)
+{
+ timer->flags |= TIMER_TF_DETACHED;
+}
+
+static bool
+timer_is_intr(const struct timer *timer)
+{
+ return timer->flags & TIMER_TF_INTR;
+}
+
+static void
+timer_set_intr(struct timer *timer)
+{
+ timer->flags |= TIMER_TF_INTR;
+}
+
+static bool
+timer_is_high_prio(const struct timer *timer)
+{
+ return timer->flags & TIMER_TF_HIGH_PRIO;
+}
+
+static void
+timer_set_high_prio(struct timer *timer)
+{
+ timer->flags |= TIMER_TF_HIGH_PRIO;
+}
+
+static bool
+timer_canceled(const struct timer *timer)
+{
+ return timer->flags & TIMER_TF_CANCELED;
+}
+
+static void
+timer_set_canceled(struct timer *timer)
+{
+ timer->flags |= TIMER_TF_CANCELED;
+}
+
+static void
+timer_set_time(struct timer *timer, uint64_t ticks)
+{
+ timer->ticks = ticks;
+}
+
+static bool
+timer_occurred(const struct timer *timer, uint64_t ref)
+{
+ return clock_time_occurred(timer->ticks, ref);
+}
+
+static uintptr_t
+timer_hash(uint64_t ticks)
+{
+ return ticks;
+}
+
+static void
+timer_run(struct timer *timer)
+{
+ struct timer_cpu_data *cpu_data;
+ unsigned long cpu_flags;
+ bool detached;
+
+ assert(timer_running(timer));
+
+ /*
+ * Load the detached flag before running the handler: a detached timer
+ * may release its own resources from within its handler, so it must
+ * not be dereferenced once the handler has run.
+ */
+ detached = timer_detached(timer);
+
+ timer->fn(timer);
+
+ if (detached) {
+ return;
+ }
+
+ cpu_data = timer_lock_cpu_data(timer, &cpu_flags);
+
+ /*
+ * The timer handler may have:
+ * - rescheduled itself
+ * - been canceled
+ * - none of the above
+ *
+ * If the handler didn't reschedule the timer, or if the timer was
+ * canceled, set the state to done and wake up the joiner, if any.
+ *
+ * If the handler rescheduled the timer, nothing must be done. This
+ * is also true if the timer was canceled after being rescheduled by
+ * the handler (in this case, cancellation won't wait for a signal).
+ * These cases can be identified by checking if the timer state is
+ * different from running.
+ */
+
+ if (timer_running(timer)) {
+ timer_set_done(timer);
+ thread_wakeup(timer->joiner);
+ }
+
+ timer_unlock_cpu_data(cpu_data, cpu_flags);
+}
+
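+/*
+ * The rescheduling case described above is what periodic timers rely on.
+ * For illustration only (the example_* names are hypothetical, not part
+ * of this module), a handler makes its timer periodic by calling
+ * timer_schedule() on itself before returning:
+ *
+ *     static void
+ *     example_periodic_fn(struct timer *timer)
+ *     {
+ *         example_do_work();
+ *
+ *         // Reschedule relative to the previous expiration time so the
+ *         // period doesn't drift with handler latency.
+ *         timer_schedule(timer, timer_get_time(timer) + EXAMPLE_PERIOD);
+ *     }
+ *
+ * If the handler returns without rescheduling, timer_run() marks the
+ * timer done and any concurrent timer_cancel() call stops waiting.
+ */
+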
+static void
+timer_run_work(struct work *work)
+{
+ struct timer *timer;
+
+ timer = structof(work, struct timer, work);
+ timer_run(timer);
+}
+
+static void
+timer_process(struct timer *timer)
+{
+ int work_flags;
+
+ if (timer_is_intr(timer)) {
+ timer_run(timer);
+ return;
+ }
+
+ if (timer_is_high_prio(timer)) {
+ work_flags = WORK_HIGHPRIO;
+ } else {
+ work_flags = 0;
+ }
+
+ work_init(&timer->work, timer_run_work);
+ work_schedule(&timer->work, work_flags);
+}
+
+static void
+timer_bucket_init(struct timer_bucket *bucket)
+{
+ hlist_init(&bucket->timers);
+}
+
+static void
+timer_bucket_add(struct timer_bucket *bucket, struct timer *timer)
+{
+ hlist_insert_head(&bucket->timers, &timer->node);
+}
+
+static void
+timer_bucket_remove(struct timer_bucket *bucket, struct timer *timer)
+{
+ (void)bucket;
+ hlist_remove(&timer->node);
+}
+
+static void
+timer_cpu_data_init(struct timer_cpu_data *cpu_data, unsigned int cpu)
+{
+ cpu_data->cpu = cpu;
+ spinlock_init(&cpu_data->lock);
+
+ /* See periodic event handling */
+ cpu_data->last_time = clock_get_time() - 1;
+
+ for (size_t i = 0; i < ARRAY_SIZE(cpu_data->htable); i++) {
+ timer_bucket_init(&cpu_data->htable[i]);
+ }
+}
+
+static struct timer_cpu_data *
+timer_cpu_data_acquire_local(unsigned long *flags)
+{
+ struct timer_cpu_data *cpu_data;
+
+ thread_pin();
+ cpu_data = cpu_local_ptr(timer_cpu_data);
+ spinlock_lock_intr_save(&cpu_data->lock, flags);
+ return cpu_data;
+}
+
+static void
+timer_cpu_data_release_local(struct timer_cpu_data *cpu_data,
+ unsigned long flags)
+{
+ spinlock_unlock_intr_restore(&cpu_data->lock, flags);
+ thread_unpin();
+}
+
+static struct timer_bucket *
+timer_cpu_data_get_bucket(struct timer_cpu_data *cpu_data, uint64_t ticks)
+{
+ uintptr_t index;
+
+ index = timer_hash(ticks) & TIMER_HTABLE_MASK;
+ assert(index < ARRAY_SIZE(cpu_data->htable));
+ return &cpu_data->htable[index];
+}
+
+static void
+timer_cpu_data_add(struct timer_cpu_data *cpu_data, struct timer *timer)
+{
+ struct timer_bucket *bucket;
+
+ assert(timer_ready(timer));
+
+ bucket = timer_cpu_data_get_bucket(cpu_data, timer->ticks);
+ timer_bucket_add(bucket, timer);
+}
+
+static void
+timer_cpu_data_remove(struct timer_cpu_data *cpu_data, struct timer *timer)
+{
+ struct timer_bucket *bucket;
+
+ assert(timer_scheduled(timer));
+
+ bucket = timer_cpu_data_get_bucket(cpu_data, timer->ticks);
+ timer_bucket_remove(bucket, timer);
+}
+
+static void
+timer_bucket_filter(struct timer_bucket *bucket, uint64_t now,
+ struct hlist *timers)
+{
+ struct timer *timer, *tmp;
+
+ hlist_for_each_entry_safe(&bucket->timers, timer, tmp, node) {
+ assert(timer_scheduled(timer));
+
+ if (!timer_occurred(timer, now)) {
+ continue;
+ }
+
+ hlist_remove(&timer->node);
+ timer_set_running(timer);
+ hlist_insert_head(timers, &timer->node);
+ }
+}
+
+static int __init
+timer_setup(void)
+{
+ for (unsigned int cpu = 0; cpu < cpu_count(); cpu++) {
+ timer_cpu_data_init(percpu_ptr(timer_cpu_data, cpu), cpu);
+ }
+
+ return 0;
+}
+
+INIT_OP_DEFINE(timer_setup,
+ INIT_OP_DEP(boot_setup_intr, true),
+ INIT_OP_DEP(cpu_mp_probe, true));
+
+void
+timer_init(struct timer *timer, timer_fn_t fn, int flags)
+{
+ timer->fn = fn;
+ timer->state = TIMER_TS_READY;
+ timer->flags = 0;
+ timer->joiner = NULL;
+
+ if (flags & TIMER_DETACHED) {
+ timer_set_detached(timer);
+ }
+
+ if (flags & TIMER_INTR) {
+ timer_set_intr(timer);
+ } else if (flags & TIMER_HIGH_PRIO) {
+ timer_set_high_prio(timer);
+ }
+}
+
+void
+timer_schedule(struct timer *timer, uint64_t ticks)
+{
+ struct timer_cpu_data *cpu_data;
+ unsigned long cpu_flags;
+
+ cpu_data = timer_cpu_data_acquire_local(&cpu_flags);
+
+ if (timer_canceled(timer)) {
+ goto out;
+ }
+
+ /*
+ * If called from the handler, the timer is running. If rescheduled
+ * after completion, it's done.
+ */
+ if (timer_running(timer) || timer_done(timer)) {
+ timer_set_ready(timer);
+ }
+
+ /*
+ * If the expiration time is in the past, push it to the next tick so
+ * a periodic event still processes the timer.
+ */
+ if (clock_time_occurred(ticks, cpu_data->last_time)) {
+ ticks = cpu_data->last_time + 1;
+ }
+
+ timer_set_time(timer, ticks);
+
+ timer_cpu_data_add(cpu_data, timer);
+ timer_set_scheduled(timer, cpu_data->cpu);
+
+out:
+ timer_cpu_data_release_local(cpu_data, cpu_flags);
+}
+
+void
+timer_cancel(struct timer *timer)
+{
+ struct timer_cpu_data *cpu_data;
+ unsigned long cpu_flags;
+
+ assert(!timer_detached(timer));
+
+ cpu_data = timer_lock_cpu_data(timer, &cpu_flags);
+
+ assert(timer->joiner == NULL);
+
+ timer_set_canceled(timer);
+
+ if (timer_scheduled(timer)) {
+ timer_cpu_data_remove(cpu_data, timer);
+ } else {
+ timer->joiner = thread_self();
+
+ while (!timer_done(timer)) {
+ if (timer_is_intr(timer)) {
+ timer_unlock_cpu_data(cpu_data, cpu_flags);
+ cpu_pause();
+ cpu_data = timer_lock_cpu_data(timer, &cpu_flags);
+ } else {
+ thread_sleep(&cpu_data->lock, timer, "tmr_cncl");
+ }
+ }
+
+ assert(timer_done(timer));
+
+ timer->joiner = NULL;
+ }
+
+ timer_set_ready(timer);
+
+ timer_unlock_cpu_data(cpu_data, cpu_flags);
+}
+
+void
+timer_report_periodic_event(void)
+{
+ struct timer_cpu_data *cpu_data;
+ struct timer_bucket *bucket;
+ struct timer *timer;
+ struct hlist timers;
+ uint64_t ticks, now;
+
+ assert(!cpu_intr_enabled());
+ assert(!thread_preempt_enabled());
+
+ now = clock_get_time();
+ hlist_init(&timers);
+ cpu_data = cpu_local_ptr(timer_cpu_data);
+
+ spinlock_lock(&cpu_data->lock);
+
+ for (ticks = cpu_data->last_time + 1;
+ clock_time_occurred(ticks, now);
+ ticks++) {
+ bucket = timer_cpu_data_get_bucket(cpu_data, ticks);
+ timer_bucket_filter(bucket, now, &timers);
+ }
+
+ cpu_data->last_time = now;
+
+ spinlock_unlock(&cpu_data->lock);
+
+ while (!hlist_empty(&timers)) {
+ timer = hlist_first_entry(&timers, struct timer, node);
+ hlist_remove(&timer->node);
+ timer_process(timer);
+ }
+}
diff --git a/kern/timer.h b/kern/timer.h
new file mode 100644
index 00000000..46616686
--- /dev/null
+++ b/kern/timer.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Low resolution timer system.
+ */
+
+#ifndef _KERN_TIMER_H
+#define _KERN_TIMER_H
+
+#include <stdint.h>
+
+#include <kern/init.h>
+
+/*
+ * Scheduling flags.
+ */
+#define TIMER_DETACHED 0x1 /* Timer completion isn't synchronized */
+#define TIMER_INTR 0x2 /* Handler is run from interrupt context */
+#define TIMER_HIGH_PRIO 0x4 /* Handler is run in high priority thread */
+
+struct timer;
+
+/*
+ * Type for timer functions.
+ */
+typedef void (*timer_fn_t)(struct timer *);
+
+#include <kern/timer_i.h>
+
+/*
+ * Return the absolute expiration time of the timer, in ticks.
+ */
+static inline uint64_t
+timer_get_time(const struct timer *timer)
+{
+ return timer->ticks; /* TODO atomic */
+}
+
+/*
+ * Initialize a timer.
+ *
+ * Timers that are responsible for releasing their own resources must
+ * be detached.
+ */
+void timer_init(struct timer *timer, timer_fn_t fn, int flags);
+
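+/*
+ * For illustration only (struct example and the example_* names are
+ * hypothetical, not part of this module): a detached, dynamically
+ * allocated one-shot timer releases its own resources from its handler,
+ * since no other thread synchronizes with its completion:
+ *
+ *     static void
+ *     example_oneshot_fn(struct timer *timer)
+ *     {
+ *         struct example *ex = structof(timer, struct example, timer);
+ *
+ *         example_finish(ex);
+ *         kmem_free(ex, sizeof(*ex));
+ *     }
+ *
+ *     timer_init(&ex->timer, example_oneshot_fn, TIMER_DETACHED);
+ *     timer_schedule(&ex->timer, clock_get_time() + delay);
+ */
+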
+/*
+ * Schedule a timer.
+ *
+ * The time of expiration is an absolute time in ticks.
+ *
+ * Timers may safely be rescheduled after completion. Periodic timers are
+ * implemented by rescheduling from the handler.
+ *
+ * If the timer has been canceled, this function does nothing. A
+ * canceled timer must be reinitialized before being scheduled again.
+ */
+void timer_schedule(struct timer *timer, uint64_t ticks);
+
+/*
+ * Cancel a timer.
+ *
+ * The given timer must not be detached.
+ *
+ * If the timer has already expired, this function waits until the timer
+ * function completes, or returns immediately if the function has already
+ * completed.
+ *
+ * This function may safely be called from the timer handler, but not on
+ * the current timer. Canceling a timer from the handler is achieved by
+ * simply not rescheduling it.
+ */
+void timer_cancel(struct timer *timer);
+
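+/*
+ * For illustration only (the example_* names are hypothetical): tearing
+ * down an object that embeds a non-detached timer typically cancels the
+ * timer first, which guarantees its handler isn't and won't be running:
+ *
+ *     timer_cancel(&ex->timer);   // waits for a running handler
+ *     example_destroy(ex);        // now safe to release resources
+ *
+ * Once canceled, the timer must be reinitialized with timer_init()
+ * before it can be scheduled again.
+ */
+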
+/*
+ * Report a periodic event on the current processor.
+ *
+ * Interrupts and preemption must be disabled when calling this function.
+ */
+void timer_report_periodic_event(void);
+
+/*
+ * This init operation provides:
+ * - timer initialization and scheduling
+ * - module fully initialized
+ */
+INIT_OP_DECLARE(timer_setup);
+
+#endif /* _KERN_TIMER_H */
diff --git a/kern/timer_i.h b/kern/timer_i.h
new file mode 100644
index 00000000..4ed01f22
--- /dev/null
+++ b/kern/timer_i.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _KERN_TIMER_I_H
+#define _KERN_TIMER_I_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <kern/hlist.h>
+#include <kern/work.h>
+
+struct timer {
+ union {
+ struct hlist_node node;
+ struct work work;
+ };
+
+ uint64_t ticks;
+ timer_fn_t fn;
+ unsigned int cpu;
+ unsigned short state;
+ unsigned short flags;
+ struct thread *joiner;
+};
+
+#endif /* _KERN_TIMER_I_H */