author     Richard Braun <rbraun@sceen.net>  2014-06-18 23:35:45 +0200
committer  Richard Braun <rbraun@sceen.net>  2014-06-18 23:35:45 +0200
commit     701a5d9cf5a9416eb8303d703049bf1d4c6c69f0 (patch)
tree       0b0fa70492a01df7fb9511ccd388e220f7fc150d /kern
parent     6b5c9352dc34ef945259994b16c3e1fa1783aef4 (diff)
kern/thread: add thread_join
This change touches more files than one would expect at first glance. This
is because circular dependencies are easily created between the thread,
mutex, condition and spinlock modules. To break these inclusion loops, the
structure types of these modules (struct spinlock, struct mutex and
struct condition) are now defined in kern/types.h.
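
For illustration, the new interface can be used as in the following sketch.
This is not part of the commit: worker_run() and the example_* functions are
hypothetical, and the thread_create() argument order (entry function, then
argument) is assumed from how thread_init() consumes them.

#include <kern/thread.h>

/* Hypothetical thread body used by the examples below. */
static void
worker_run(void *arg)
{
    (void)arg;

    /* ... perform some work ... */

    thread_exit();
}

/* Create a joinable thread (the default), then wait for it and reclaim it. */
static int
example_join(void)
{
    struct thread_attr attr;
    struct thread *thread;
    int error;

    thread_attr_init(&attr, "x15_example_worker");
    error = thread_create(&thread, &attr, worker_run, NULL);

    if (error)
        return error;

    /* Blocks until worker_run() calls thread_exit(), then releases it. */
    thread_join(thread);
    return 0;
}

/* Create a detached thread; the reaper releases its resources on exit. */
static int
example_detached(void)
{
    struct thread_attr attr;
    struct thread *thread;

    thread_attr_init(&attr, "x15_example_detached");
    thread_attr_set_detached(&attr);
    return thread_create(&thread, &attr, worker_run, NULL);
}

Note that the reaper reclaims detached threads by calling thread_join() on
them itself, so only threads left joinable (the default) should be joined by
their creator.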
Diffstat (limited to 'kern')
-rw-r--r--  kern/condition.c   |   1
-rw-r--r--  kern/condition.h   |   9
-rw-r--r--  kern/mutex.c       |   1
-rw-r--r--  kern/mutex.h       |   1
-rw-r--r--  kern/mutex_i.h     |   7
-rw-r--r--  kern/spinlock.h    |   3
-rw-r--r--  kern/spinlock_i.h  |   5
-rw-r--r--  kern/thread.c      | 194
-rw-r--r--  kern/thread.h      |  25
-rw-r--r--  kern/types.h       |  24

10 files changed, 168 insertions, 102 deletions
diff --git a/kern/condition.c b/kern/condition.c
index 9faab3c6..0012aa18 100644
--- a/kern/condition.c
+++ b/kern/condition.c
@@ -30,6 +30,7 @@
 #include <kern/spinlock.h>
 #include <kern/stddef.h>
 #include <kern/thread.h>
+#include <kern/types.h>
 
 void
 condition_wait(struct condition *condition, struct mutex *mutex)
diff --git a/kern/condition.h b/kern/condition.h
index 6802063b..9cd581a9 100644
--- a/kern/condition.h
+++ b/kern/condition.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Richard Braun.
+ * Copyright (c) 2013-2014 Richard Braun.
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -25,12 +25,9 @@
 #include <kern/mutex.h>
 #include <kern/spinlock.h>
 #include <kern/stddef.h>
+#include <kern/types.h>
 
-struct condition {
-    struct spinlock lock;
-    struct mutex *mutex;
-    struct list waiters;
-};
+struct condition;
 
 #define CONDITION_INITIALIZER(condition) \
     { SPINLOCK_INITIALIZER, NULL, LIST_INITIALIZER((condition).waiters) }
diff --git a/kern/mutex.c b/kern/mutex.c
index 58e24513..3c76f50c 100644
--- a/kern/mutex.c
+++ b/kern/mutex.c
@@ -19,6 +19,7 @@
 #include <kern/mutex_i.h>
 #include <kern/spinlock.h>
 #include <kern/thread.h>
+#include <kern/types.h>
 
 void
 mutex_lock_slow(struct mutex *mutex)
diff --git a/kern/mutex.h b/kern/mutex.h
index 1ed5234e..011f7033 100644
--- a/kern/mutex.h
+++ b/kern/mutex.h
@@ -27,6 +27,7 @@
 #include <kern/list.h>
 #include <kern/mutex_i.h>
 #include <kern/spinlock.h>
+#include <kern/types.h>
 
 struct mutex;
diff --git a/kern/mutex_i.h b/kern/mutex_i.h
index 85b827d8..a5c1ba31 100644
--- a/kern/mutex_i.h
+++ b/kern/mutex_i.h
@@ -22,6 +22,7 @@
 #include <kern/list.h>
 #include <kern/spinlock.h>
 #include <kern/thread.h>
+#include <kern/types.h>
 #include <machine/atomic.h>
 
 #define MUTEX_UNLOCKED 0
@@ -33,12 +34,6 @@ struct mutex_waiter {
     struct thread *thread;
 };
 
-struct mutex {
-    unsigned int state;
-    struct spinlock lock;
-    struct list waiters;
-};
-
 void mutex_lock_slow(struct mutex *mutex);
 void mutex_unlock_slow(struct mutex *mutex);
diff --git a/kern/spinlock.h b/kern/spinlock.h
index 4c983471..8e67df3f 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013 Richard Braun.
+ * Copyright (c) 2012-2014 Richard Braun.
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -27,6 +27,7 @@
 #include <kern/macros.h>
 #include <kern/spinlock_i.h>
 #include <kern/thread.h>
+#include <kern/types.h>
 #include <machine/cpu.h>
 
 struct spinlock;
diff --git a/kern/spinlock_i.h b/kern/spinlock_i.h
index 63018afa..c07f6615 100644
--- a/kern/spinlock_i.h
+++ b/kern/spinlock_i.h
@@ -19,13 +19,10 @@
 #define _KERN_SPINLOCK_I_H
 
 #include <kern/assert.h>
+#include <kern/types.h>
 #include <machine/atomic.h>
 #include <machine/cpu.h>
 
-struct spinlock {
-    unsigned int locked;
-};
-
 /*
  * Return 0 on success, 1 if busy.
  */
diff --git a/kern/thread.c b/kern/thread.c
index 4449fe8c..0532321b 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -291,10 +291,10 @@ static thread_dtor_fn_t thread_dtors[THREAD_KEYS_MAX] __read_mostly;
  * List of threads pending for destruction by the reaper.
  */
 static struct mutex thread_reap_lock;
-static struct condition thread_reap_condition;
+static struct condition thread_reap_cond;
 static struct list thread_reap_list;
 
-struct thread_reap_waiter {
+struct thread_zombie {
     struct list node;
     struct thread *thread;
 };
@@ -1473,12 +1473,18 @@ thread_init(struct thread *thread, void *stack, const struct thread_attr *attr,
     cpumap_copy(&thread->cpumap, cpumap);
     thread_init_sched(thread, attr->priority);
     memset(thread->tsd, 0, sizeof(thread->tsd));
+    mutex_init(&thread->join_lock);
+    condition_init(&thread->join_cond);
+    thread->exited = 0;
     thread->task = task;
     thread->stack = stack;
     strlcpy(thread->name, attr->name, sizeof(thread->name));
     thread->fn = fn;
     thread->arg = arg;
 
+    if (attr->flags & THREAD_ATTR_DETACHED)
+        thread->flags |= THREAD_DETACHED;
+
     /*
      * This call may initialize thread-local data, do it once the thread is
      * mostly initialized.
@@ -1493,74 +1499,11 @@ thread_init(struct thread *thread, void *stack, const struct thread_attr *attr,
     return 0;
 }
 
-static struct thread_runq *
-thread_lock_runq(struct thread *thread, unsigned long *flags)
-{
-    struct thread_runq *runq;
-
-    assert(thread != thread_self());
-
-    for (;;) {
-        runq = thread->runq;
-
-        spinlock_lock_intr_save(&runq->lock, flags);
-
-        if (runq == thread->runq)
-            return runq;
-
-        spinlock_unlock_intr_restore(&runq->lock, *flags);
-    }
-}
-
-static void
-thread_unlock_runq(struct thread_runq *runq, unsigned long flags)
-{
-    spinlock_unlock_intr_restore(&runq->lock, flags);
-}
-
-static void
-thread_destroy(struct thread *thread)
-{
-    struct thread_runq *runq;
-    unsigned long flags, state;
-    unsigned int i;
-    void *ptr;
-
-    do {
-        runq = thread_lock_runq(thread, &flags);
-        state = thread->state;
-        thread_unlock_runq(runq, flags);
-    } while (state != THREAD_DEAD);
-
-    i = 0;
-
-    while (i < thread_nr_keys) {
-        if ((thread->tsd[i] == NULL)
-            || (thread_dtors[i] == NULL))
-            continue;
-
-        /*
-         * Follow the POSIX description of TSD: set the key to NULL before
-         * calling the destructor and repeat as long as it's not NULL.
-         */
-        ptr = thread->tsd[i];
-        thread->tsd[i] = NULL;
-        thread_dtors[i](ptr);
-
-        if (thread->tsd[i] == NULL)
-            i++;
-    }
-
-    task_remove_thread(thread->task, thread);
-    kmem_cache_free(&thread_stack_cache, thread->stack);
-    kmem_cache_free(&thread_cache, thread);
-}
-
 static void
 thread_reap(void *arg)
 {
-    struct thread_reap_waiter *tmp;
-    struct list waiters;
+    struct thread_zombie *zombie;
+    struct list zombies;
 
     (void)arg;
@@ -1568,17 +1511,17 @@ thread_reap(void *arg)
         mutex_lock(&thread_reap_lock);
 
         while (list_empty(&thread_reap_list))
-            condition_wait(&thread_reap_condition, &thread_reap_lock);
+            condition_wait(&thread_reap_cond, &thread_reap_lock);
 
-        list_set_head(&waiters, &thread_reap_list);
+        list_set_head(&zombies, &thread_reap_list);
         list_init(&thread_reap_list);
         mutex_unlock(&thread_reap_lock);
 
-        while (!list_empty(&waiters)) {
-            tmp = list_first_entry(&waiters, struct thread_reap_waiter, node);
-            list_remove(&tmp->node);
-            thread_destroy(tmp->thread);
+        while (!list_empty(&zombies)) {
+            zombie = list_first_entry(&zombies, struct thread_zombie, node);
+            list_remove(&zombie->node);
+            thread_join(zombie->thread);
         }
     }
@@ -1593,7 +1536,7 @@ thread_setup_reaper(void)
     int error;
 
     mutex_init(&thread_reap_lock);
-    condition_init(&thread_reap_condition);
+    condition_init(&thread_reap_cond);
     list_init(&thread_reap_list);
 
     thread_attr_init(&attr, "x15_thread_reap");
@@ -1835,35 +1778,120 @@ error_thread:
 void
 thread_exit(void)
 {
-    struct thread_reap_waiter waiter;
+    struct thread_zombie zombie;
     struct thread_runq *runq;
     struct thread *thread;
     unsigned long flags;
 
     thread = thread_self();
-    waiter.thread = thread;
 
-    mutex_lock(&thread_reap_lock);
+    if (thread_test_flag(thread, THREAD_DETACHED)) {
+        zombie.thread = thread;
+
+        mutex_lock(&thread_reap_lock);
+        list_insert_tail(&thread_reap_list, &zombie.node);
+        condition_signal(&thread_reap_cond);
+        mutex_unlock(&thread_reap_lock);
+    }
 
-    list_insert_tail(&thread_reap_list, &waiter.node);
-    condition_signal(&thread_reap_condition);
+    mutex_lock(&thread->join_lock);
+    thread->exited = 1;
+    condition_signal(&thread->join_cond);
 
     /*
      * Disable preemption before releasing the mutex to make sure the current
      * thread becomes dead as soon as possible. This is important because the
-     * reaper thread actively polls the thread state before destroying it.
+     * joining thread actively polls the thread state before destroying it.
      */
     thread_preempt_disable();
-    mutex_unlock(&thread_reap_lock);
+    mutex_unlock(&thread->join_lock);
 
     runq = thread_runq_local();
     spinlock_lock_intr_save(&runq->lock, &flags);
     thread->state = THREAD_DEAD;
-    runq = thread_runq_schedule(runq, thread);
-    panic("thread: dead thread running");
+    thread_runq_schedule(runq, thread);
+    panic("thread: dead thread walking");
+}
+
+static struct thread_runq *
+thread_lock_runq(struct thread *thread, unsigned long *flags)
+{
+    struct thread_runq *runq;
+
+    assert(thread != thread_self());
+
+    for (;;) {
+        runq = thread->runq;
+
+        spinlock_lock_intr_save(&runq->lock, flags);
+
+        if (runq == thread->runq)
+            return runq;
+
+        spinlock_unlock_intr_restore(&runq->lock, *flags);
+    }
+}
+
+static void
+thread_unlock_runq(struct thread_runq *runq, unsigned long flags)
+{
+    spinlock_unlock_intr_restore(&runq->lock, flags);
+}
+
+static void
+thread_destroy(struct thread *thread)
+{
+    struct thread_runq *runq;
+    unsigned long flags, state;
+    unsigned int i;
+    void *ptr;
+
+    do {
+        runq = thread_lock_runq(thread, &flags);
+        state = thread->state;
+        thread_unlock_runq(runq, flags);
+    } while (state != THREAD_DEAD);
+
+    i = 0;
+
+    while (i < thread_nr_keys) {
+        if ((thread->tsd[i] == NULL)
+            || (thread_dtors[i] == NULL))
+            continue;
+
+        /*
+         * Follow the POSIX description of TSD: set the key to NULL before
+         * calling the destructor and repeat as long as it's not NULL.
+         */
+        ptr = thread->tsd[i];
+        thread->tsd[i] = NULL;
+        thread_dtors[i](ptr);
+
+        if (thread->tsd[i] == NULL)
+            i++;
+    }
+
+    task_remove_thread(thread->task, thread);
+    kmem_cache_free(&thread_stack_cache, thread->stack);
+    kmem_cache_free(&thread_cache, thread);
+}
+
+void
+thread_join(struct thread *thread)
+{
+    assert(thread != thread_self());
+
+    mutex_lock(&thread->join_lock);
+
+    while (!thread->exited)
+        condition_wait(&thread->join_cond, &thread->join_lock);
+
+    mutex_unlock(&thread->join_lock);
+
+    thread_destroy(thread);
 }
 
 void
diff --git a/kern/thread.h b/kern/thread.h
index aa5dc4f7..d1c5f53a 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -39,6 +39,7 @@
 #include <kern/list.h>
 #include <kern/macros.h>
 #include <kern/param.h>
+#include <kern/types.h>
 #include <machine/atomic.h>
 #include <machine/cpu.h>
 #include <machine/tcb.h>
@@ -59,7 +60,8 @@ struct thread_ts_runq;
 /*
  * Thread flags.
  */
-#define THREAD_YIELD 0x1UL /* Must yield the processor ASAP */
+#define THREAD_YIELD    0x1UL /* Must yield the processor ASAP */
+#define THREAD_DETACHED 0x2UL /* Resources automatically released on exit */
 
 /*
  * Thread states.
@@ -172,6 +174,11 @@ struct thread {
     /* Thread-specific data */
     void *tsd[THREAD_KEYS_MAX];
 
+    /* Members related to termination */
+    struct mutex join_lock;
+    struct condition join_cond;
+    int exited;
+
     /* Read-only members */
     struct task *task;
     struct list task_node;
@@ -181,11 +188,14 @@ struct thread {
     void *arg;
 } __aligned(CPU_L1_SIZE);
 
+#define THREAD_ATTR_DETACHED 0x1
+
 /*
  * Thread creation attributes.
  */
 struct thread_attr {
     const char *name;
+    unsigned long flags;
     struct cpumap *cpumap;
     struct task *task;
     unsigned char policy;
@@ -196,6 +206,7 @@ struct thread_attr {
  * Initialize thread creation attributes with default values.
  *
  * It is guaranteed that these default values include :
+ *  - thread is joinable
  *  - no processor affinity
  *  - task is inherited from parent thread
 *  - policy is time-sharing
@@ -208,6 +219,7 @@ static inline void
 thread_attr_init(struct thread_attr *attr, const char *name)
 {
     attr->name = name;
+    attr->flags = 0;
     attr->cpumap = NULL;
     attr->task = NULL;
     attr->policy = THREAD_SCHED_POLICY_TS;
@@ -215,6 +227,12 @@ thread_attr_init(struct thread_attr *attr, const char *name)
 }
 
 static inline void
+thread_attr_set_detached(struct thread_attr *attr)
+{
+    attr->flags |= THREAD_ATTR_DETACHED;
+}
+
+static inline void
 thread_attr_set_cpumap(struct thread_attr *attr, struct cpumap *cpumap)
 {
     attr->cpumap = cpumap;
@@ -268,6 +286,11 @@ int thread_create(struct thread **threadp, const struct thread_attr *attr,
 void __noreturn thread_exit(void);
 
 /*
+ * Wait for the given thread to terminate and release its resources.
+ */
+void thread_join(struct thread *thread);
+
+/*
  * Make the current thread sleep while waiting for an event.
  *
  * The interlock is used to synchronize the thread state with respect to
diff --git a/kern/types.h b/kern/types.h
index 5fc3cb59..5e8a7db4 100644
--- a/kern/types.h
+++ b/kern/types.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Richard Braun.
+ * Copyright (c) 2012-2014 Richard Braun.
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -20,4 +20,26 @@
 #include <machine/types.h>
 
+/*
+ * Types defined here to avoid inclusion loops.
+ */
+
+#include <kern/list.h>
+
+struct spinlock {
+    unsigned int locked;
+};
+
+struct mutex {
+    unsigned int state;
+    struct spinlock lock;
+    struct list waiters;
+};
+
+struct condition {
+    struct spinlock lock;
+    struct mutex *mutex;
+    struct list waiters;
+};
+
 #endif /* _KERN_TYPES_H */