authorRichard Braun <rbraun@sceen.net>2012-11-17 20:37:24 +0100
committerRichard Braun <rbraun@sceen.net>2012-11-17 20:49:31 +0100
commit3119195c519d1289362b9530785709b6a398590e (patch)
tree5999970edc8ad172dbeb88734e72feba8bb2b576 /kern
parent19d64b4177f7720c6e2aab520436af14db0ffa54 (diff)
kern/spinlock: don't disable interrupts
Let users deal with interrupts themselves. This allows greater freedom to modules that need to carefully change the machine state.
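As a rough sketch of the new calling convention (the example_lock and example_increment names are hypothetical and not part of this commit, and initialization is elided), a caller that previously relied on spinlock_lock() to disable interrupts now brackets the lock with cpu_intr_save()/cpu_intr_restore() itself, exactly as the printk.c hunk below does:

#include <kern/spinlock.h>
#include <machine/cpu.h>

static struct spinlock example_lock;     /* hypothetical lock */
static unsigned long example_counter;    /* hypothetical shared state */

void
example_increment(void)
{
    unsigned long flags;

    /* Interrupts are now the caller's responsibility: disable them
     * before acquiring the lock if the critical section must also be
     * safe against local interrupt handlers. */
    flags = cpu_intr_save();
    spinlock_lock(&example_lock);

    example_counter++;

    spinlock_unlock(&example_lock);
    cpu_intr_restore(flags);
}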
Diffstat (limited to 'kern')
-rw-r--r--kern/printk.c7
-rw-r--r--kern/spinlock.h30
2 files changed, 10 insertions, 27 deletions
diff --git a/kern/printk.c b/kern/printk.c
index 386ac979..6ea8bd35 100644
--- a/kern/printk.c
+++ b/kern/printk.c
@@ -18,6 +18,7 @@
#include <kern/printk.h>
#include <kern/spinlock.h>
#include <kern/sprintf.h>
+#include <machine/cpu.h>
/*
* Size of the static buffer.
@@ -52,14 +53,16 @@ vprintk(const char *format, va_list ap)
int length;
char *ptr;
- flags = spinlock_lock(&printk_lock);
+ flags = cpu_intr_save();
+ spinlock_lock(&printk_lock);
length = vsnprintf(printk_buffer, sizeof(printk_buffer), format, ap);
for (ptr = printk_buffer; *ptr != '\0'; ptr++)
console_write_byte(*ptr);
- spinlock_unlock(&printk_lock, flags);
+ spinlock_unlock(&printk_lock);
+ cpu_intr_restore(flags);
return length;
}
diff --git a/kern/spinlock.h b/kern/spinlock.h
index 1169900e..6ae3ed6b 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -18,10 +18,7 @@
* Spin lock.
*
* This implementation relies on the availability of hardware compare-and-swap
- * support. In addition, spin locks are reserved for the special cases where
- * a critical section must work in every context (thread, interrupt, or even
- * during early boot). As a result, interrupts are disabled when a spin lock
- * is acquired.
+ * support.
*/
#ifndef _KERN_SPINLOCK_H
@@ -54,45 +51,28 @@ spinlock_init(struct spinlock *lock)
* Return false if acquired, true if busy.
*/
static inline int
-spinlock_trylock(struct spinlock *lock, unsigned long *flagsp)
+spinlock_trylock(struct spinlock *lock)
{
- unsigned long flags, locked;
-
- flags = cpu_intr_save();
- locked = atomic_cas(&lock->locked, 0, 1);
-
- if (locked)
- cpu_intr_restore(flags);
- else
- *flagsp = flags;
-
- return locked;
+ return atomic_cas(&lock->locked, 0, 1);
}
/*
* Acquire a spin lock.
*/
-static inline unsigned long
+static inline void
spinlock_lock(struct spinlock *lock)
{
- unsigned long flags;
-
- flags = cpu_intr_save();
-
while (atomic_cas(&lock->locked, 0, 1))
cpu_pause();
-
- return flags;
}
/*
* Release a spin lock.
*/
static inline void
-spinlock_unlock(struct spinlock *lock, unsigned long flags)
+spinlock_unlock(struct spinlock *lock)
{
atomic_swap(&lock->locked, 0);
- cpu_intr_restore(flags);
}
#endif /* _KERN_SPINLOCK_H */
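For the revised spinlock_trylock(), which now returns only the compare-and-swap result (zero when the lock was acquired, nonzero when it is busy) and no longer hands back saved flags, a minimal caller sketch, again with hypothetical names (stats_lock, stats_try_account) and with interrupt handling left to the caller as this commit requires:

#include <kern/spinlock.h>
#include <machine/cpu.h>

static struct spinlock stats_lock;    /* hypothetical lock */
static unsigned long stats_count;     /* hypothetical shared state */

void
stats_try_account(void)
{
    unsigned long flags;

    flags = cpu_intr_save();

    /* Zero means the lock was acquired, nonzero means it is busy. */
    if (spinlock_trylock(&stats_lock)) {
        cpu_intr_restore(flags);
        return;     /* busy; the caller decides how to proceed */
    }

    stats_count++;

    spinlock_unlock(&stats_lock);
    cpu_intr_restore(flags);
}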