-rw-r--r--   Makefrag.am      |   2
-rw-r--r--   kern/kernel.c    |   2
-rw-r--r--   kern/sleepq.c    | 382
-rw-r--r--   kern/sleepq.h    | 126
-rw-r--r--   kern/thread.c    |  16
-rw-r--r--   kern/thread.h    |  23
-rw-r--r--   kern/thread_i.h  |   8
7 files changed, 557 insertions, 2 deletions
diff --git a/Makefrag.am b/Makefrag.am
index a42c5ea8..3463dcdc 100644
--- a/Makefrag.am
+++ b/Makefrag.am
@@ -49,6 +49,8 @@ x15_SOURCES += \
 	kern/rdxtree.c \
 	kern/rdxtree.h \
 	kern/rdxtree_i.h \
+	kern/sleepq.c \
+	kern/sleepq.h \
 	kern/spinlock.h \
 	kern/spinlock_i.h \
 	kern/spinlock_types.h \
diff --git a/kern/kernel.c b/kern/kernel.c
index b63be5d2..9b8d4eec 100644
--- a/kern/kernel.c
+++ b/kern/kernel.c
@@ -20,6 +20,7 @@
 #include <kern/kernel.h>
 #include <kern/llsync.h>
 #include <kern/percpu.h>
+#include <kern/sleepq.h>
 #include <kern/sref.h>
 #include <kern/task.h>
 #include <kern/thread.h>
@@ -41,6 +42,7 @@ kernel_main(void)
     cpumap_setup();
     xcall_setup();
     task_setup();
+    sleepq_setup();
     thread_setup();
     work_setup();
     llsync_setup();
diff --git a/kern/sleepq.c b/kern/sleepq.c
new file mode 100644
index 00000000..0c41caef
--- /dev/null
+++ b/kern/sleepq.c
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * TODO Analyse hash parameters.
+ */
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <kern/assert.h>
+#include <kern/init.h>
+#include <kern/kmem.h>
+#include <kern/list.h>
+#include <kern/macros.h>
+#include <kern/param.h>
+#include <kern/sleepq.h>
+#include <kern/spinlock.h>
+#include <kern/thread.h>
+
+struct sleepq_bucket {
+    struct spinlock lock;
+    struct list list;
+} __aligned(CPU_L1_SIZE);
+
+struct sleepq {
+    struct sleepq_bucket *bucket;
+    struct list node;
+    const void *sync_obj;
+    struct list waiters;
+    struct sleepq *next_free;
+};
+
+struct sleepq_waiter {
+    struct list node;
+    struct thread *thread;
+};
+
+#define SLEEPQ_HTABLE_SIZE      128
+#define SLEEPQ_COND_HTABLE_SIZE 64
+
+#if !ISP2(SLEEPQ_HTABLE_SIZE) || !ISP2(SLEEPQ_COND_HTABLE_SIZE)
+#error hash table size must be a power of two
+#endif /* !ISP2(SLEEPQ_HTABLE_SIZE) */
+
+#define SLEEPQ_HTABLE_MASK      (SLEEPQ_HTABLE_SIZE - 1)
+#define SLEEPQ_COND_HTABLE_MASK (SLEEPQ_COND_HTABLE_SIZE - 1)
+
+static struct sleepq_bucket sleepq_htable[SLEEPQ_HTABLE_SIZE];
+static struct sleepq_bucket sleepq_cond_htable[SLEEPQ_COND_HTABLE_SIZE];
+
+static struct kmem_cache sleepq_cache;
+
+static uintptr_t
+sleepq_hash(const void *addr)
+{
+    return ((uintptr_t)addr >> 8) ^ (uintptr_t)addr;
+}
+
+static void
+sleepq_waiter_init(struct sleepq_waiter *waiter, struct thread *thread)
+{
+    waiter->thread = thread;
+}
+
+static void
+sleepq_waiter_wakeup(struct sleepq_waiter *waiter)
+{
+    thread_wakeup(waiter->thread);
+}
+
+static void
+sleepq_assert_init_state(const struct sleepq *sleepq)
+{
+    assert(sleepq->bucket == NULL);
+    assert(sleepq->sync_obj == NULL);
+    assert(list_empty(&sleepq->waiters));
+    assert(sleepq->next_free == NULL);
+}
+
+static void
+sleepq_use(struct sleepq *sleepq, const void *sync_obj)
+{
+    assert(sleepq->sync_obj == NULL);
+    sleepq->sync_obj = sync_obj;
+}
+
+static void
+sleepq_unuse(struct sleepq *sleepq)
+{
+    assert(sleepq->sync_obj != NULL);
+    sleepq->sync_obj = NULL;
+}
+
+static bool
+sleepq_in_use(const struct sleepq *sleepq)
+{
+    return sleepq->sync_obj != NULL;
+}
+
+static bool
+sleepq_in_use_by(const struct sleepq *sleepq, const void *sync_obj)
+{
+    return sleepq->sync_obj == sync_obj;
+}
+
+static void
+sleepq_bucket_init(struct sleepq_bucket *bucket)
+{
+    spinlock_init(&bucket->lock);
+    list_init(&bucket->list);
+}
+
+static struct sleepq_bucket *
+sleepq_bucket_get_cond(const void *sync_obj)
+{
+    uintptr_t index;
+
+    index = sleepq_hash(sync_obj) & SLEEPQ_COND_HTABLE_MASK;
+    assert(index < ARRAY_SIZE(sleepq_cond_htable));
+    return &sleepq_cond_htable[index];
+}
+
+static struct sleepq_bucket *
+sleepq_bucket_get(const void *sync_obj, bool condition)
+{
+    uintptr_t index;
+
+    if (condition) {
+        return sleepq_bucket_get_cond(sync_obj);
+    }
+
+    index = sleepq_hash(sync_obj) & SLEEPQ_HTABLE_MASK;
+    assert(index < ARRAY_SIZE(sleepq_htable));
+    return &sleepq_htable[index];
+}
+
+static void
+sleepq_bucket_add(struct sleepq_bucket *bucket, struct sleepq *sleepq)
+{
+    assert(sleepq->bucket == NULL);
+    sleepq->bucket = bucket;
+    list_insert_tail(&bucket->list, &sleepq->node);
+}
+
+static void
+sleepq_bucket_remove(struct sleepq_bucket *bucket, struct sleepq *sleepq)
+{
+    assert(sleepq->bucket == bucket);
+    sleepq->bucket = NULL;
+    list_remove(&sleepq->node);
+}
+
+static struct sleepq *
+sleepq_bucket_lookup(const struct sleepq_bucket *bucket, const void *sync_obj)
+{
+    struct sleepq *sleepq;
+
+    list_for_each_entry(&bucket->list, sleepq, node) {
+        if (sleepq_in_use_by(sleepq, sync_obj)) {
+            assert(sleepq->bucket == bucket);
+            return sleepq;
+        }
+    }
+
+    return NULL;
+}
+
+static void
+sleepq_ctor(void *ptr)
+{
+    struct sleepq *sleepq;
+
+    sleepq = ptr;
+    sleepq->bucket = NULL;
+    sleepq->sync_obj = NULL;
+    list_init(&sleepq->waiters);
+    sleepq->next_free = NULL;
+}
+
+void __init
+sleepq_setup(void)
+{
+    unsigned int i;
+
+    for (i = 0; i < ARRAY_SIZE(sleepq_htable); i++) {
+        sleepq_bucket_init(&sleepq_htable[i]);
+    }
+
+    for (i = 0; i < ARRAY_SIZE(sleepq_cond_htable); i++) {
+        sleepq_bucket_init(&sleepq_cond_htable[i]);
+    }
+
+    kmem_cache_init(&sleepq_cache, "sleepq", sizeof(struct sleepq),
+                    CPU_L1_SIZE, sleepq_ctor, 0);
+}
+
+struct sleepq *
+sleepq_create(void)
+{
+    struct sleepq *sleepq;
+
+    sleepq = kmem_cache_alloc(&sleepq_cache);
+
+    if (sleepq == NULL) {
+        return NULL;
+    }
+
+    sleepq_assert_init_state(sleepq);
+    return sleepq;
+}
+
+void
+sleepq_destroy(struct sleepq *sleepq)
+{
+    sleepq_assert_init_state(sleepq);
+    kmem_cache_free(&sleepq_cache, sleepq);
+}
+
+struct sleepq *
+sleepq_acquire(const void *sync_obj, bool condition)
+{
+    struct sleepq_bucket *bucket;
+    struct sleepq *sleepq;
+
+    assert(sync_obj != NULL);
+
+    bucket = sleepq_bucket_get(sync_obj, condition);
+
+    spinlock_lock(&bucket->lock);
+
+    sleepq = sleepq_bucket_lookup(bucket, sync_obj);
+
+    if (sleepq == NULL) {
+        spinlock_unlock(&bucket->lock);
+        return NULL;
+    }
+
+    return sleepq;
+}
+
+void
+sleepq_release(struct sleepq *sleepq)
+{
+    spinlock_unlock(&sleepq->bucket->lock);
+}
+
+static void
+sleepq_push_free(struct sleepq *sleepq, struct sleepq *free_sleepq)
+{
+    assert(free_sleepq->next_free == NULL);
+    free_sleepq->next_free = sleepq->next_free;
+    sleepq->next_free = free_sleepq;
+}
+
+static struct sleepq *
+sleepq_pop_free(struct sleepq *sleepq)
+{
+    struct sleepq *free_sleepq;
+
+    free_sleepq = sleepq->next_free;
+
+    if (free_sleepq == NULL) {
+        return NULL;
+    }
+
+    sleepq->next_free = free_sleepq->next_free;
+    free_sleepq->next_free = NULL;
+    return free_sleepq;
+}
+
+struct sleepq *
+sleepq_lend(const void *sync_obj, bool condition)
+{
+    struct sleepq_bucket *bucket;
+    struct sleepq *sleepq, *prev;
+
+    assert(sync_obj != NULL);
+
+    sleepq = thread_sleepq_lend();
+    sleepq_assert_init_state(sleepq);
+
+    bucket = sleepq_bucket_get(sync_obj, condition);
+
+    spinlock_lock(&bucket->lock);
+
+    prev = sleepq_bucket_lookup(bucket, sync_obj);
+
+    if (prev == NULL) {
+        sleepq_use(sleepq, sync_obj);
+        sleepq_bucket_add(bucket, sleepq);
+    } else {
+        sleepq_push_free(prev, sleepq);
+        sleepq = prev;
+    }
+
+    return sleepq;
+}
+
+void
+sleepq_return(struct sleepq *sleepq)
+{
+    struct sleepq_bucket *bucket;
+    struct sleepq *free_sleepq;
+
+    assert(sleepq_in_use(sleepq));
+
+    bucket = sleepq->bucket;
+    free_sleepq = sleepq_pop_free(sleepq);
+
+    if (free_sleepq == NULL) {
+        sleepq_bucket_remove(bucket, sleepq);
+        sleepq_unuse(sleepq);
+        free_sleepq = sleepq;
+    }
+
+    spinlock_unlock(&bucket->lock);
+
+    sleepq_assert_init_state(free_sleepq);
+    thread_sleepq_return(free_sleepq);
+}
+
+static void
+sleepq_add_waiter(struct sleepq *sleepq, struct sleepq_waiter *waiter)
+{
+    list_insert_head(&sleepq->waiters, &waiter->node);
+}
+
+static void
+sleepq_remove_waiter(struct sleepq *sleepq, struct sleepq_waiter *waiter)
+{
+    (void)sleepq;
+    list_remove(&waiter->node);
+}
+
+bool
+sleepq_empty(const struct sleepq *sleepq)
+{
+    return list_empty(&sleepq->waiters);
+}
+
+void
+sleepq_wait(struct sleepq *sleepq, const char *wchan)
+{
+    struct sleepq_waiter waiter;
+    struct thread *thread;
+
+    thread = thread_self();
+    sleepq_waiter_init(&waiter, thread);
+    sleepq_add_waiter(sleepq, &waiter);
+
+    thread_sleep(&sleepq->bucket->lock, sleepq->sync_obj, wchan);
+
+    sleepq_remove_waiter(sleepq, &waiter);
+}
+
+void
+sleepq_signal(struct sleepq *sleepq)
+{
+    struct sleepq_waiter *waiter;
+
+    if (sleepq_empty(sleepq)) {
+        return;
+    }
+
+    waiter = list_last_entry(&sleepq->waiters, struct sleepq_waiter, node);
+    sleepq_waiter_wakeup(waiter);
+}
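
The wait side of the API implemented above is meant to be used roughly as follows. This is a sketch, not code from the commit: the example_gate type, example_gate_wait() and the "exgate" wait channel name are made up for illustration; only the sleepq_* calls and the locking behaviour documented in this file are taken from the diff.

#include <stdbool.h>

#include <kern/sleepq.h>

/*
 * Sketch only: a trivial "gate" threads can wait on until it is opened.
 * The gate itself is the synchronization object; it is not a condition
 * variable, so the condition argument is false.
 */
struct example_gate {
    bool open; /* protected by the sleep queue serialization */
};

static void
example_gate_wait(struct example_gate *gate)
{
    struct sleepq *sleepq;

    /* Lend the calling thread's private queue; the queue obtained is
       already acquired, which serializes access to the gate state. */
    sleepq = sleepq_lend(gate, false);

    while (!gate->open) {
        /* The queue is released while sleeping and reacquired before
           sleepq_wait() returns, so the predicate is rechecked safely. */
        sleepq_wait(sleepq, "exgate");
    }

    /* Give a (possibly different) queue back to the thread. */
    sleepq_return(sleepq);
}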
diff --git a/kern/sleepq.h b/kern/sleepq.h
new file mode 100644
index 00000000..1ba4a319
--- /dev/null
+++ b/kern/sleepq.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Generic sleep queues.
+ *
+ * Sleep queues are used to build sleeping synchronization primitives
+ * such as mutexes and condition variables.
+ *
+ * Although the sleep queues are mostly generic, this implementation
+ * relies on knowing whether a synchronization object is a condition
+ * variable or not, because waiting on a condition variable unlocks
+ * the associated mutex, at which point two sleep queues are locked.
+ * Handling condition variable sleep queues slightly differently
+ * allows preventing deadlocks while keeping overall complexity low.
+ *
+ * In addition, despite being used to implement condition variables,
+ * this implementation doesn't provide a broadcast call. The rationale
+ * is to force users to implement "chained waking" in order to avoid
+ * the thundering herd effect.
+ */
+
+#ifndef _KERN_SLEEPQ_H
+#define _KERN_SLEEPQ_H
+
+#include <stdbool.h>
+
+struct sleepq;
+
+/*
+ * Initialize the sleepq module.
+ */
+void sleepq_setup(void);
+
+/*
+ * Create/destroy a sleep queue.
+ */
+struct sleepq * sleepq_create(void);
+void sleepq_destroy(struct sleepq *sleepq);
+
+/*
+ * Acquire/release a sleep queue.
+ *
+ * Acquiring a sleep queue serializes all access and disables preemption.
+ *
+ * The condition argument must be true if the synchronization object
+ * is a condition variable.
+ */
+struct sleepq * sleepq_acquire(const void *sync_obj, bool condition);
+void sleepq_release(struct sleepq *sleepq);
+
+/*
+ * Lend/return a sleep queue.
+ *
+ * A thread lends its private sleep queue to the sleepq module in
+ * order to prepare its sleep. The sleep queue obtained on lending
+ * is either the thread's queue, or an already existing queue for
+ * this synchronization object if another thread is waiting.
+ *
+ * When multiple threads are waiting on the same queue, the extra
+ * queues lent are kept in an internal free list, used when threads
+ * are awaken to return a queue to them.
+ *
+ * Note that the sleep queue returned may not be the one lent.
+ *
+ * The sleep queue obtained when lending is automatically acquired.
+ *
+ * The condition argument must be true if the synchronization object
+ * is a condition variable.
+ */
+struct sleepq * sleepq_lend(const void *sync_obj, bool condition);
+void sleepq_return(struct sleepq *sleepq);
+
+/*
+ * Return true if the given sleep queue has no waiters.
+ *
+ * The sleep queue must be acquired when calling this function.
+ */
+bool sleepq_empty(const struct sleepq *sleepq);
+
+/*
+ * Wait for a wake up on the given sleep queue.
+ *
+ * The sleep queue must be lent when calling this function. It is
+ * released and later reacquired before returning from this function.
+ *
+ * The calling thread is considered a waiter as long as it didn't
+ * reacquire the sleep queue. This means that signalling a sleep queue
+ * has no visible effect on the number of waiters until the queue is
+ * released, e.g. if a single thread is waiting and another signals
+ * the queue, the queue is not immediately considered empty.
+ *
+ * Threads are queued in FIFO order.
+ */
+void sleepq_wait(struct sleepq *sleepq, const char *wchan);
+
+/*
+ * Wake up a thread waiting on the given sleep queue, if any.
+ *
+ * The sleep queue must be acquired when calling this function.
+ *
+ * Since a sleep queue must be lent (and in turn is automatically
+ * acquired) when waiting, and acquired in order to signal it,
+ * wake-ups are serialized and cannot be missed.
+ *
+ * Threads are queued in FIFO order, which means signalling a sleep
+ * queue multiple times always awakens the same thread, regardless
+ * of new waiters, as long as that first thread didn't reacquire the
+ * sleep queue.
+ */
+void sleepq_signal(struct sleepq *sleepq);
+
+#endif /* _KERN_SLEEPQ_H */
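
For the wake side documented in the header above, the matching pattern would look like the sketch below, reusing the hypothetical example_gate from the previous sketch. A NULL return from sleepq_acquire() simply means no thread ever lent a queue for this object, i.e. there is nobody to wake; how the flag is published in that case is an assumption, not something this commit shows.

static void
example_gate_open(struct example_gate *gate)
{
    struct sleepq *sleepq;

    /* Look up and acquire the queue lent by waiters, if any. */
    sleepq = sleepq_acquire(gate, false);

    if (sleepq == NULL) {
        /* No waiter ever lent a queue. A real primitive would update
           its state with an atomic operation here so that later
           waiters observe it (assumption for this sketch). */
        gate->open = true;
        return;
    }

    /* The queue is acquired: the update and the wake-up are serialized
       with sleepq_lend()/sleepq_wait() on the wait side, so the wake-up
       cannot be missed. */
    gate->open = true;
    sleepq_signal(sleepq);
    sleepq_release(sleepq);
}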
diff --git a/kern/thread.c b/kern/thread.c
index 1631d9a5..e2b09ff4 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -98,6 +98,7 @@
 #include <kern/panic.h>
 #include <kern/param.h>
 #include <kern/percpu.h>
+#include <kern/sleepq.h>
 #include <kern/spinlock.h>
 #include <kern/sprintf.h>
 #include <kern/sref.h>
@@ -1705,6 +1706,13 @@ thread_init(struct thread *thread, void *stack, const struct thread_attr *attr,
     thread->runq = NULL;
     thread_set_wchan(thread, thread, "init");
     thread->state = THREAD_SLEEPING;
+    thread->priv_sleepq = sleepq_create();
+
+    if (thread->priv_sleepq == NULL) {
+        error = ERROR_NOMEM;
+        goto error_sleepq;
+    }
+
     thread->preempt = THREAD_SUSPEND_PREEMPT_LEVEL;
     thread->pinned = 0;
     thread->llsync_read = 0;
@@ -1729,15 +1737,17 @@ thread_init(struct thread *thread, void *stack, const struct thread_attr *attr,
     error = tcb_init(&thread->tcb, stack, thread_main);
 
     if (error) {
-        goto error_tsd;
+        goto error_tcb;
     }
 
     task_add_thread(task, thread);
 
     return 0;
 
-error_tsd:
+error_tcb:
     thread_destroy_tsd(thread);
+    sleepq_destroy(thread->priv_sleepq);
+error_sleepq:
     return error;
 }
 
@@ -1781,6 +1791,8 @@ thread_destroy(struct thread *thread)
     thread_destroy_tsd(thread);
 
     task_remove_thread(thread->task, thread);
+
+    sleepq_destroy(thread->priv_sleepq);
     kmem_cache_free(&thread_stack_cache, thread->stack);
     kmem_cache_free(&thread_cache, thread);
 }
diff --git a/kern/thread.h b/kern/thread.h
index 21be2ab7..1542e7ae 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -367,6 +367,29 @@ thread_schedule(void)
 }
 
 /*
+ * Sleep queue lending functions.
+ */
+
+static inline struct sleepq *
+thread_sleepq_lend(void)
+{
+    struct sleepq *sleepq;
+
+    sleepq = thread_self()->priv_sleepq;
+    assert(sleepq != NULL);
+    thread_self()->priv_sleepq = NULL;
+    return sleepq;
+}
+
+static inline void
+thread_sleepq_return(struct sleepq *sleepq)
+{
+    assert(sleepq != NULL);
+    assert(thread_self()->priv_sleepq == NULL);
+    thread_self()->priv_sleepq = sleepq;
+}
+
+/*
  * Migration control functions.
  *
  * Functions that change the migration state are implicit compiler barriers.
diff --git a/kern/thread_i.h b/kern/thread_i.h
index a062773d..af278b26 100644
--- a/kern/thread_i.h
+++ b/kern/thread_i.h
@@ -27,6 +27,11 @@
 #include <machine/atomic.h>
 #include <machine/tcb.h>
 
+/*
+ * Forward declarations.
+ */
+struct sleepq;
+
 struct thread_runq;
 struct thread_fs_runq;
 
@@ -92,6 +97,9 @@ struct thread {
     const char *wchan_desc;
     unsigned short state;
 
+    /* Sleep queue available for lending */
+    struct sleepq *priv_sleepq;
+
     /* Thread-local members */
     unsigned short preempt;
     unsigned short pinned;
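
Since the interface deliberately provides no broadcast call, waking every thread blocked on an object relies on the "chained waking" mentioned in the kern/sleepq.h comment: each awakened waiter passes the wake-up along before returning its queue. A sketch of the wait side of the hypothetical gate doing this (the function name is again made up; only the sleepq_* calls come from the commit):

static void
example_gate_wait_all(struct example_gate *gate)
{
    struct sleepq *sleepq;

    sleepq = sleepq_lend(gate, false);

    while (!gate->open) {
        sleepq_wait(sleepq, "exgate");
    }

    /* Chained waking: wake the next waiter, if any, instead of having
       the opener broadcast to all waiters at once. */
    sleepq_signal(sleepq);

    sleepq_return(sleepq);
}

Waking one thread at a time this way avoids the thundering herd effect the header comment refers to, at the cost of a slightly longer wake-up chain for the last waiters.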