Diffstat (limited to 'kern')
 kern/rbtree.h | 2 ++
 kern/thread.c | 2 +-
 kern/thread.h | 1 +
 kern/work.h   | 2 ++
 kern/xcall.c  | 1 +
 5 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/kern/rbtree.h b/kern/rbtree.h
index 4ae8353f..3de240b6 100644
--- a/kern/rbtree.h
+++ b/kern/rbtree.h
@@ -265,6 +265,8 @@ rbtree_insert_slot(struct rbtree *tree, rbtree_slot_t slot,
* Remove a node from a tree.
*
* After completion, the node is stale.
+ *
+ * TODO rbtree_replace.
*/
void rbtree_remove(struct rbtree *tree, struct rbtree_node *node);
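
Note: the TODO above asks for a replace operation. As a rough illustration of
what such an operation typically does, here is a minimal sketch: substitute a
new node for an old node occupying the same position (the caller guarantees an
equivalent key), so the links and color are inherited and no rebalancing is
needed. The member names (root, parent, children) are assumptions made for
illustration; the actual x15 node layout differs, e.g. by packing the color
into the parent pointer.

static void
rbtree_replace_sketch(struct rbtree *tree, struct rbtree_node *old_node,
                      struct rbtree_node *new_node)
{
    *new_node = *old_node;              /* inherit links and color */

    if (new_node->parent == NULL) {
        tree->root = new_node;          /* old node was the root */
    } else if (new_node->parent->children[0] == old_node) {
        new_node->parent->children[0] = new_node;
    } else {
        new_node->parent->children[1] = new_node;
    }

    for (int i = 0; i < 2; i++) {
        if (new_node->children[i] != NULL) {
            new_node->children[i]->parent = new_node;
        }
    }
}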
diff --git a/kern/thread.c b/kern/thread.c
index 043f14de..e4d8f7cf 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -1861,7 +1861,7 @@ thread_lock_runq(struct thread *thread, unsigned long *flags)
struct thread_runq *runq;
for (;;) {
- runq = thread->runq;
+ runq = thread->runq; /* TODO Atomic access */
spinlock_lock_intr_save(&runq->lock, flags);
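
Note: the new comment flags a plain read of thread->runq that can race with
thread migration. A hedged sketch of the fix it calls for, shown with the
GCC/Clang atomic builtin (the kernel would presumably route this through its
own atomic wrappers); the recheck-and-retry structure below is an assumption
about the rest of this function:

    for (;;) {
        runq = __atomic_load_n(&thread->runq, __ATOMIC_RELAXED);
        spinlock_lock_intr_save(&runq->lock, flags);

        /* Recheck under the lock: the thread may have migrated meanwhile. */
        if (runq == __atomic_load_n(&thread->runq, __ATOMIC_RELAXED)) {
            return runq;
        }

        spinlock_unlock_intr_restore(&runq->lock, *flags);
    }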
diff --git a/kern/thread.h b/kern/thread.h
index e84300c9..a3f2670d 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -652,6 +652,7 @@ thread_intr_leave(void)
}
}
+/* TODO Use in interrupt handlers instead of manual interrupt/preemption checks */
static inline void
thread_assert_interrupted(void)
{
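
Note: the TODO added here suggests that interrupt handlers call this assertion
on entry instead of open-coding checks on the interrupt/preemption state. A
hypothetical handler (the name and signature are illustrative only):

    static void
    some_device_intr(void *arg)
    {
        /* Assert interrupt context up front, as the TODO suggests. */
        thread_assert_interrupted();

        /* ... service the device ... */
        (void)arg;
    }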
diff --git a/kern/work.h b/kern/work.h
index a8df1f7e..2d7bd62f 100644
--- a/kern/work.h
+++ b/kern/work.h
@@ -46,6 +46,8 @@ typedef void (*work_fn_t)(struct work *);
* This structure should be embedded in objects related to the work. It
* stores the work function and is passed to it as its only parameter.
* The function can then find the containing object with the structof macro.
+ *
+ * TODO Make private.
*/
struct work {
struct work *next;
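
Note: the comment above describes the embedding pattern this structure is
meant for. A short usage sketch; the container type is made up, and the
work_init/work_schedule names are assumptions about the rest of this header:

    struct my_request {
        int data;
        struct work work;   /* embedded work item */
    };

    static void
    my_request_process(struct work *work)
    {
        struct my_request *req;

        /* Recover the containing object from the embedded member. */
        req = structof(work, struct my_request, work);
        /* ... process req->data ... */
    }

    /* At submission time:
     *   work_init(&req->work, my_request_process);
     *   work_schedule(&req->work, 0);
     */

Making the structure private, as the TODO proposes, would hide these members
behind the work API while keeping this embedding pattern intact.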
diff --git a/kern/xcall.c b/kern/xcall.c
index 44bd41f5..b5ed4b24 100644
--- a/kern/xcall.c
+++ b/kern/xcall.c
@@ -133,6 +133,7 @@ xcall_call(xcall_fn_t fn, void *arg, unsigned int cpu)
thread_preempt_disable();
+ /* TODO Fix to match interrupt context semantics */
if (cpu == cpu_id()) {
unsigned long flags;
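
Note: the hunk truncates just before the local-call body. The TODO's concern
is that calling the function directly on the local CPU does not behave like
the genuine cross-call interrupt taken on a remote CPU. One conceivable shape
of a fix, sketched under the assumption that thread_intr_enter()/
thread_intr_leave() maintain the interrupt context accounting (thread_intr_leave
appears in the thread.h hunk above); this is not the project's actual change:

    if (cpu == cpu_id()) {
        unsigned long flags;

        cpu_intr_save(&flags);
        thread_intr_enter();    /* make this look like interrupt context */
        fn(arg);
        thread_intr_leave();
        cpu_intr_restore(flags);
    } else {
        /* ... queue the call and send the cross-call IPI ... */
    }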