summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2018-07-10 19:40:05 +0200
committerRichard Braun <rbraun@sceen.net>2018-07-10 19:40:05 +0200
commit9ea1595a9156f2818b216d883e25f63bd74459c0 (patch)
treeaacf50829edcdce7a356c6bc8d629be82238e5fd
parent7cab594590002ff3737629b76083a298616b665a (diff)
kern/sleepq: make disabling interrupts optional
Commit d2a89f7f6e976d022527c2a5a1c75268aab8cd49 changed sleep queues to allow semaphores to be signalled from interrupt handlers. However, this implied disabling interrupts for all synchronization objects, even though most of them do not require interrupts to be disabled. The sleep queue interface is therefore augmented with interrupt-related versions of the acquire/release and lend/return functions.
-rw-r--r--kern/condition.c15
-rw-r--r--kern/mutex/mutex_adaptive.c10
-rw-r--r--kern/mutex/mutex_plain.c10
-rw-r--r--kern/semaphore.c8
-rw-r--r--kern/sleepq.c114
-rw-r--r--kern/sleepq.h37
6 files changed, 144 insertions(+), 50 deletions(-)
diff --git a/kern/condition.c b/kern/condition.c
index c407e949..e2c26b62 100644
--- a/kern/condition.c
+++ b/kern/condition.c
@@ -33,12 +33,11 @@ condition_wait_common(struct condition *condition, struct mutex *mutex,
bool timed, uint64_t ticks)
{
struct sleepq *sleepq;
- unsigned long flags;
int error;
mutex_assert_locked(mutex);
- sleepq = sleepq_lend(condition, true, &flags);
+ sleepq = sleepq_lend(condition, true);
mutex_unlock(mutex);
@@ -49,7 +48,7 @@ condition_wait_common(struct condition *condition, struct mutex *mutex,
error = 0;
}
- sleepq_return(sleepq, flags);
+ sleepq_return(sleepq);
mutex_lock(mutex);
@@ -76,9 +75,8 @@ void
condition_signal(struct condition *condition)
{
struct sleepq *sleepq;
- unsigned long flags;
- sleepq = sleepq_acquire(condition, true, &flags);
+ sleepq = sleepq_acquire(condition, true);
if (sleepq == NULL) {
return;
@@ -86,16 +84,15 @@ condition_signal(struct condition *condition)
sleepq_signal(sleepq);
- sleepq_release(sleepq, flags);
+ sleepq_release(sleepq);
}
void
condition_broadcast(struct condition *condition)
{
struct sleepq *sleepq;
- unsigned long flags;
- sleepq = sleepq_acquire(condition, true, &flags);
+ sleepq = sleepq_acquire(condition, true);
if (sleepq == NULL) {
return;
@@ -103,5 +100,5 @@ condition_broadcast(struct condition *condition)
sleepq_broadcast(sleepq);
- sleepq_release(sleepq, flags);
+ sleepq_release(sleepq);
}
diff --git a/kern/mutex/mutex_adaptive.c b/kern/mutex/mutex_adaptive.c
index d17ad9d4..6c99eaf9 100644
--- a/kern/mutex/mutex_adaptive.c
+++ b/kern/mutex/mutex_adaptive.c
@@ -124,13 +124,12 @@ mutex_adaptive_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
uintptr_t self, owner;
struct sleepq *sleepq;
struct thread *thread;
- unsigned long flags;
int error;
error = 0;
self = (uintptr_t)thread_self();
- sleepq = sleepq_lend(mutex, false, &flags);
+ sleepq = sleepq_lend(mutex, false);
mutex_adaptive_set_contended(mutex);
@@ -223,7 +222,7 @@ mutex_adaptive_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
}
out:
- sleepq_return(sleepq, flags);
+ sleepq_return(sleepq);
return error;
}
@@ -248,7 +247,6 @@ mutex_adaptive_unlock_slow(struct mutex *mutex)
{
uintptr_t self, owner;
struct sleepq *sleepq;
- unsigned long flags;
int error;
self = (uintptr_t)thread_self() | MUTEX_ADAPTIVE_CONTENDED;
@@ -301,12 +299,12 @@ mutex_adaptive_unlock_slow(struct mutex *mutex)
* on the current thread, in which case the latter doesn't return,
* averting the need for an additional reference.
*/
- sleepq = sleepq_tryacquire(mutex, false, &flags);
+ sleepq = sleepq_tryacquire(mutex, false);
if (sleepq != NULL) {
mutex_adaptive_inc_sc(MUTEX_ADAPTIVE_SC_SIGNALS);
sleepq_signal(sleepq);
- sleepq_release(sleepq, flags);
+ sleepq_release(sleepq);
break;
}
diff --git a/kern/mutex/mutex_plain.c b/kern/mutex/mutex_plain.c
index 55e7a251..bc49ca0b 100644
--- a/kern/mutex/mutex_plain.c
+++ b/kern/mutex/mutex_plain.c
@@ -76,12 +76,11 @@ mutex_plain_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
{
unsigned int state;
struct sleepq *sleepq;
- unsigned long flags;
int error;
error = 0;
- sleepq = sleepq_lend(mutex, false, &flags);
+ sleepq = sleepq_lend(mutex, false);
for (;;) {
state = atomic_swap(&mutex->state, MUTEX_CONTENDED, ATOMIC_RELEASE);
@@ -121,7 +120,7 @@ mutex_plain_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
}
out:
- sleepq_return(sleepq, flags);
+ sleepq_return(sleepq);
return error;
}
@@ -145,9 +144,8 @@ void
mutex_plain_unlock_slow(struct mutex *mutex)
{
struct sleepq *sleepq;
- unsigned long flags;
- sleepq = sleepq_acquire(mutex, false, &flags);
+ sleepq = sleepq_acquire(mutex, false);
if (sleepq == NULL) {
return;
@@ -155,7 +153,7 @@ mutex_plain_unlock_slow(struct mutex *mutex)
sleepq_signal(sleepq);
- sleepq_release(sleepq, flags);
+ sleepq_release(sleepq);
}
static int
diff --git a/kern/semaphore.c b/kern/semaphore.c
index 72e843a9..a95f17fa 100644
--- a/kern/semaphore.c
+++ b/kern/semaphore.c
@@ -35,7 +35,7 @@ semaphore_wait_slow_common(struct semaphore *semaphore,
error = 0;
- sleepq = sleepq_lend(semaphore, false, &flags);
+ sleepq = sleepq_lend_intr_save(semaphore, false, &flags);
for (;;) {
prev = semaphore_dec(semaphore);
@@ -55,7 +55,7 @@ semaphore_wait_slow_common(struct semaphore *semaphore,
}
}
- sleepq_return(sleepq, flags);
+ sleepq_return_intr_restore(sleepq, flags);
return error;
}
@@ -81,7 +81,7 @@ semaphore_post_slow(struct semaphore *semaphore)
struct sleepq *sleepq;
unsigned long flags;
- sleepq = sleepq_acquire(semaphore, false, &flags);
+ sleepq = sleepq_acquire_intr_save(semaphore, false, &flags);
if (sleepq == NULL) {
return;
@@ -89,5 +89,5 @@ semaphore_post_slow(struct semaphore *semaphore)
sleepq_signal(sleepq);
- sleepq_release(sleepq, flags);
+ sleepq_release_intr_restore(sleepq, flags);
}
diff --git a/kern/sleepq.c b/kern/sleepq.c
index 44ab53bc..df37a49d 100644
--- a/kern/sleepq.c
+++ b/kern/sleepq.c
@@ -265,8 +265,8 @@ sleepq_destroy(struct sleepq *sleepq)
kmem_cache_free(&sleepq_cache, sleepq);
}
-struct sleepq *
-sleepq_acquire(const void *sync_obj, bool condition, unsigned long *flags)
+static struct sleepq *
+sleepq_acquire_common(const void *sync_obj, bool condition, unsigned long *flags)
{
struct sleepq_bucket *bucket;
struct sleepq *sleepq;
@@ -275,20 +275,30 @@ sleepq_acquire(const void *sync_obj, bool condition, unsigned long *flags)
bucket = sleepq_bucket_get(sync_obj, condition);
- spinlock_lock_intr_save(&bucket->lock, flags);
+ if (flags) {
+ spinlock_lock_intr_save(&bucket->lock, flags);
+ } else {
+ spinlock_lock(&bucket->lock);
+ }
sleepq = sleepq_bucket_lookup(bucket, sync_obj);
if (sleepq == NULL) {
- spinlock_unlock_intr_restore(&bucket->lock, *flags);
+ if (flags) {
+ spinlock_unlock_intr_restore(&bucket->lock, *flags);
+ } else {
+ spinlock_unlock(&bucket->lock);
+ }
+
return NULL;
}
return sleepq;
}
-struct sleepq *
-sleepq_tryacquire(const void *sync_obj, bool condition, unsigned long *flags)
+static struct sleepq *
+sleepq_tryacquire_common(const void *sync_obj, bool condition,
+ unsigned long *flags)
{
struct sleepq_bucket *bucket;
struct sleepq *sleepq;
@@ -298,7 +308,11 @@ sleepq_tryacquire(const void *sync_obj, bool condition, unsigned long *flags)
bucket = sleepq_bucket_get(sync_obj, condition);
- error = spinlock_trylock_intr_save(&bucket->lock, flags);
+ if (flags) {
+ error = spinlock_trylock_intr_save(&bucket->lock, flags);
+ } else {
+ error = spinlock_trylock(&bucket->lock);
+ }
if (error) {
return NULL;
@@ -307,15 +321,52 @@ sleepq_tryacquire(const void *sync_obj, bool condition, unsigned long *flags)
sleepq = sleepq_bucket_lookup(bucket, sync_obj);
if (sleepq == NULL) {
- spinlock_unlock_intr_restore(&bucket->lock, *flags);
+ if (flags) {
+ spinlock_unlock_intr_restore(&bucket->lock, *flags);
+ } else {
+ spinlock_unlock(&bucket->lock);
+ }
+
return NULL;
}
return sleepq;
}
+struct sleepq *
+sleepq_acquire(const void *sync_obj, bool condition)
+{
+ return sleepq_acquire_common(sync_obj, condition, NULL);
+}
+
+struct sleepq *
+sleepq_tryacquire(const void *sync_obj, bool condition)
+{
+ return sleepq_tryacquire_common(sync_obj, condition, NULL);
+}
+
void
-sleepq_release(struct sleepq *sleepq, unsigned long flags)
+sleepq_release(struct sleepq *sleepq)
+{
+ spinlock_unlock(&sleepq->bucket->lock);
+}
+
+struct sleepq *
+sleepq_acquire_intr_save(const void *sync_obj, bool condition,
+ unsigned long *flags)
+{
+ return sleepq_acquire_common(sync_obj, condition, flags);
+}
+
+struct sleepq *
+sleepq_tryacquire_intr_save(const void *sync_obj, bool condition,
+ unsigned long *flags)
+{
+ return sleepq_tryacquire_common(sync_obj, condition, flags);
+}
+
+void
+sleepq_release_intr_restore(struct sleepq *sleepq, unsigned long flags)
{
spinlock_unlock_intr_restore(&sleepq->bucket->lock, flags);
}
@@ -344,8 +395,8 @@ sleepq_pop_free(struct sleepq *sleepq)
return free_sleepq;
}
-struct sleepq *
-sleepq_lend(const void *sync_obj, bool condition, unsigned long *flags)
+static struct sleepq *
+sleepq_lend_common(const void *sync_obj, bool condition, unsigned long *flags)
{
struct sleepq_bucket *bucket;
struct sleepq *sleepq, *prev;
@@ -357,7 +408,11 @@ sleepq_lend(const void *sync_obj, bool condition, unsigned long *flags)
bucket = sleepq_bucket_get(sync_obj, condition);
- spinlock_lock_intr_save(&bucket->lock, flags);
+ if (flags) {
+ spinlock_lock_intr_save(&bucket->lock, flags);
+ } else {
+ spinlock_lock(&bucket->lock);
+ }
prev = sleepq_bucket_lookup(bucket, sync_obj);
@@ -372,8 +427,8 @@ sleepq_lend(const void *sync_obj, bool condition, unsigned long *flags)
return sleepq;
}
-void
-sleepq_return(struct sleepq *sleepq, unsigned long flags)
+static void
+sleepq_return_common(struct sleepq *sleepq, unsigned long *flags)
{
struct sleepq_bucket *bucket;
struct sleepq *free_sleepq;
@@ -389,12 +444,41 @@ sleepq_return(struct sleepq *sleepq, unsigned long flags)
free_sleepq = sleepq;
}
- spinlock_unlock_intr_restore(&bucket->lock, flags);
+ if (flags) {
+ spinlock_unlock_intr_restore(&bucket->lock, *flags);
+ } else {
+ spinlock_unlock(&bucket->lock);
+ }
sleepq_assert_init_state(free_sleepq);
thread_sleepq_return(free_sleepq);
}
+struct sleepq *
+sleepq_lend(const void *sync_obj, bool condition)
+{
+ return sleepq_lend_common(sync_obj, condition, NULL);
+}
+
+void
+sleepq_return(struct sleepq *sleepq)
+{
+ sleepq_return_common(sleepq, NULL);
+}
+
+struct sleepq *
+sleepq_lend_intr_save(const void *sync_obj, bool condition,
+ unsigned long *flags)
+{
+ return sleepq_lend_common(sync_obj, condition, flags);
+}
+
+void
+sleepq_return_intr_restore(struct sleepq *sleepq, unsigned long flags)
+{
+ sleepq_return_common(sleepq, &flags);
+}
+
static void
sleepq_shift_oldest_waiter(struct sleepq *sleepq)
{
diff --git a/kern/sleepq.h b/kern/sleepq.h
index 10b139fb..72f39843 100644
--- a/kern/sleepq.h
+++ b/kern/sleepq.h
@@ -47,8 +47,7 @@ void sleepq_destroy(struct sleepq *sleepq);
/*
* Acquire/release a sleep queue.
*
- * Acquiring a sleep queue serializes all access and disables both
- * preemption and interrupts.
+ * Acquiring a sleep queue serializes all access and disables preemption.
*
* The condition argument must be true if the synchronization object
* is a condition variable.
@@ -57,11 +56,22 @@ void sleepq_destroy(struct sleepq *sleepq);
* return NULL if internal state shared by unrelated synchronization
* objects is locked.
*/
-struct sleepq * sleepq_acquire(const void *sync_obj, bool condition,
- unsigned long *flags);
-struct sleepq * sleepq_tryacquire(const void *sync_obj, bool condition,
- unsigned long *flags);
-void sleepq_release(struct sleepq *sleepq, unsigned long flags);
+struct sleepq * sleepq_acquire(const void *sync_obj, bool condition);
+struct sleepq * sleepq_tryacquire(const void *sync_obj, bool condition);
+void sleepq_release(struct sleepq *sleepq);
+
+/*
+ * Versions of the sleep queue acquisition functions that also disable
+ * interrupts.
+ */
+struct sleepq * sleepq_acquire_intr_save(const void *sync_obj,
+ bool condition,
+ unsigned long *flags);
+struct sleepq * sleepq_tryacquire_intr_save(const void *sync_obj,
+ bool condition,
+ unsigned long *flags);
+void sleepq_release_intr_restore(struct sleepq *sleepq,
+ unsigned long flags);
/*
* Lend/return a sleep queue.
@@ -82,9 +92,16 @@ void sleepq_release(struct sleepq *sleepq, unsigned long flags);
* The condition argument must be true if the synchronization object
* is a condition variable.
*/
-struct sleepq * sleepq_lend(const void *sync_obj, bool condition,
- unsigned long *flags);
-void sleepq_return(struct sleepq *sleepq, unsigned long flags);
+struct sleepq * sleepq_lend(const void *sync_obj, bool condition);
+void sleepq_return(struct sleepq *sleepq);
+
+/*
+ * Versions of the sleep queue lending functions that also disable
+ * interrupts.
+ */
+struct sleepq * sleepq_lend_intr_save(const void *sync_obj, bool condition,
+ unsigned long *flags);
+void sleepq_return_intr_restore(struct sleepq *sleepq, unsigned long flags);
/*
* Return true if the given sleep queue has no waiters.