author     Richard Braun <rbraun@sceen.net>  2013-05-15 23:18:16 +0200
committer  Richard Braun <rbraun@sceen.net>  2013-05-15 23:18:16 +0200
commit     683050a0eebf8678496bdd8200408361266d6f6e
tree       ac7f2bb6c7bcbf7c22e2b88ebc519a1fbe43616b
parent     cf572e25ec9bf2d56c1fef5eeced991dff72db5d
kern/list: rename list_insert to list_insert_head
This change increases clarity.
 kern/kmem.c   | 8 ++++----
 kern/list.h   | 2 +-
 kern/task.c   | 4 ++--
 kern/thread.c | 2 +-
 vm/vm_phys.c  | 4 ++--
 5 files changed, 10 insertions(+), 10 deletions(-)
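The rename makes list_insert_head the explicit counterpart of list_insert_tail, which already exists (see the thread.c hunk below). A minimal usage sketch of the resulting API, assuming the circular, intrusive doubly linked list that the list_add() call in the list.h hunk implies; struct item and its stack-allocated nodes are illustrative only:

struct item {
    struct list node;
};

static void
example(void)
{
    struct list items;
    struct item a, b, c;

    list_init(&items);
    list_insert_head(&items, &a.node);  /* items: a */
    list_insert_tail(&items, &b.node);  /* items: a b */
    list_insert_head(&items, &c.node);  /* items: c a b */

    /* Head insertion puts the most recently inserted node first,
     * tail insertion preserves insertion (FIFO) order. */
}
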
diff --git a/kern/kmem.c b/kern/kmem.c
index ea75d7d1..46c9c10b 100644
--- a/kern/kmem.c
+++ b/kern/kmem.c
@@ -635,7 +635,7 @@ kmem_cache_grow(struct kmem_cache *cache)
mutex_lock(&cache->lock);
if (slab != NULL) {
- list_insert(&cache->free_slabs, &slab->node);
+ list_insert_head(&cache->free_slabs, &slab->node);
cache->nr_bufs += cache->bufs_per_slab;
cache->nr_slabs++;
cache->nr_free_slabs++;
@@ -688,7 +688,7 @@ kmem_cache_alloc_from_slab(struct kmem_cache *cache)
} else if (slab->nr_refs == 1) {
/* The slab has become partial */
list_remove(&slab->node);
- list_insert(&cache->partial_slabs, &slab->node);
+ list_insert_head(&cache->partial_slabs, &slab->node);
cache->nr_free_slabs--;
}
@@ -735,11 +735,11 @@ kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
if (cache->bufs_per_slab != 1)
list_remove(&slab->node);
- list_insert(&cache->free_slabs, &slab->node);
+ list_insert_head(&cache->free_slabs, &slab->node);
cache->nr_free_slabs++;
} else if (slab->nr_refs == (cache->bufs_per_slab - 1)) {
/* The slab has become partial */
- list_insert(&cache->partial_slabs, &slab->node);
+ list_insert_head(&cache->partial_slabs, &slab->node);
}
}
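
Taken together, the three kmem.c hunks cover every point where a slab changes list; head insertion keeps the most recently touched slab first, so it is the first candidate examined on the next allocation. A summary of the transitions as read from the hunks (n is slab->nr_refs, B is cache->bufs_per_slab):

/*
 * kmem_cache_grow:            new slab (n == 0)   -> head of free_slabs
 * kmem_cache_alloc_from_slab: n reaches 1:
 *                             free_slabs          -> head of partial_slabs
 * kmem_cache_free_to_slab:    n drops to B - 1:   -> head of partial_slabs
 * kmem_cache_free_to_slab:    n drops to 0:       -> head of free_slabs
 */

The absence of a list_remove() in the n == B - 1 branch suggests that fully allocated slabs sit on no list at all, which is also why the n == 0 branch skips the removal when B == 1: such a slab goes straight from full to free without ever being on partial_slabs.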
diff --git a/kern/list.h b/kern/list.h
index 647e94c9..fef0370b 100644
--- a/kern/list.h
+++ b/kern/list.h
@@ -241,7 +241,7 @@ list_add(struct list *prev, struct list *next, struct list *node)
* Insert a node at the head of a list.
*/
static inline void
-list_insert(struct list *list, struct list *node)
+list_insert_head(struct list *list, struct list *node)
{
list_add(list, list->next, node);
}
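
list_insert_head() is a thin wrapper: it splices the node between the sentinel and the current first element. list_add() itself is outside this diff; the following is a sketch consistent with its signature (visible in the hunk header) and the usual doubly linked splice, offered as an assumption rather than the tree's actual code:

static inline void
list_add(struct list *prev, struct list *next, struct list *node)
{
    node->prev = prev;
    node->next = next;
    prev->next = node;
    next->prev = node;
}

Under that assumption, the list_insert_tail() counterpart would simply be list_add(list->prev, list, node).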
diff --git a/kern/task.c b/kern/task.c
index 000a723e..88edda3e 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -61,7 +61,7 @@ task_setup(void)
list_init(&task_list);
spinlock_init(&task_list_lock);
task_init(kernel_task, "x15", kernel_map);
- list_insert(&task_list, &kernel_task->node);
+ list_insert_head(&task_list, &kernel_task->node);
}
int
@@ -86,7 +86,7 @@ task_create(struct task **taskp, const char *name)
task_init(task, name, map);
spinlock_lock(&task_list_lock);
- list_insert(&task_list, &kernel_task->node);
+ list_insert_head(&task_list, &kernel_task->node);
spinlock_unlock(&task_list_lock);
*taskp = task;
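
Worth noting: the task_create() hunk inserts &kernel_task->node where &task->node looks intended; the mechanical rename faithfully carries that pre-existing slip forward. The presumably intended call would be:

    list_insert_head(&task_list, &task->node);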
diff --git a/kern/thread.c b/kern/thread.c
index ecabbe54..6cd56ee9 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -784,7 +784,7 @@ thread_sched_ts_enqueue(struct thread_ts_runq *ts_runq, unsigned long round,
group->weight = group_weight;
/* Insert at the front of the group to improve interactivity */
- list_insert(&group->threads, &thread->ts_ctx.group_node);
+ list_insert_head(&group->threads, &thread->ts_ctx.group_node);
list_insert_tail(&ts_runq->threads, &thread->ts_ctx.runq_node);
thread->ts_ctx.ts_runq = ts_runq;
}
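
The two insertions in thread_sched_ts_enqueue() deliberately differ, as the interactivity comment hints:

/*
 * group->threads:   head insertion -> the thread that just became
 *                   runnable is visited first within its group.
 * ts_runq->threads: tail insertion -> threads keep FIFO order across
 *                   the run queue as a whole.
 */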
diff --git a/vm/vm_phys.c b/vm/vm_phys.c
index 9fa6e0a5..1cfc2290 100644
--- a/vm/vm_phys.c
+++ b/vm/vm_phys.c
@@ -172,7 +172,7 @@ vm_phys_free_list_insert(struct vm_phys_free_list *free_list,
assert(page->order == VM_PHYS_ORDER_ALLOCATED);
free_list->size++;
- list_insert(&free_list->blocks, &page->node);
+ list_insert_head(&free_list->blocks, &page->node);
}
static inline void
@@ -294,7 +294,7 @@ vm_phys_cpu_pool_push(struct vm_phys_cpu_pool *cpu_pool, struct vm_page *page)
{
assert(cpu_pool->nr_pages < cpu_pool->size);
cpu_pool->nr_pages++;
- list_insert(&cpu_pool->pages, &page->node);
+ list_insert_head(&cpu_pool->pages, &page->node);
}
static int
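
Head insertion makes both the free lists and the per-CPU pools LIFO: the page pushed most recently, which tends to be the most cache-warm, is the first one taken back out. A sketch of the matching pop, assuming a list_first_entry() helper in the Linux list.h style; neither the pop function nor the helper is part of this diff:

static struct vm_page *
vm_phys_cpu_pool_pop(struct vm_phys_cpu_pool *cpu_pool)
{
    struct vm_page *page;

    assert(cpu_pool->nr_pages != 0);
    cpu_pool->nr_pages--;
    /* Take the most recently pushed page from the head. */
    page = list_first_entry(&cpu_pool->pages, struct vm_page, node);
    list_remove(&page->node);
    return page;
}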