summaryrefslogtreecommitdiff
path: root/vm
diff options
context:
space:
mode:
author Richard Braun <rbraun@sceen.net> 2013-04-17 22:23:03 +0200
committer Richard Braun <rbraun@sceen.net> 2013-04-17 22:25:53 +0200
commit 833ec80f937dd7080291907031f9984e3195a5f1 (patch)
tree deacf59b9e06871d8666e82f140f6b50fec06fa2 /vm
parent 7fbccc553e8d43a19e68d24aaad41978d58b9696 (diff)
vm/vm_map: serialize access to the kentry slabs list
Diffstat (limited to 'vm')
-rw-r--r-- vm/vm_map.c | 10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/vm/vm_map.c b/vm/vm_map.c
index f533a627..70fcf874 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -104,6 +104,7 @@ static struct vm_map_entry vm_map_kentry_entry;
/*
* Kentry slab free list.
*/
+static struct mutex vm_map_kentry_free_slabs_lock;
static struct vm_map_kentry_slab *vm_map_kentry_free_slabs;
#ifdef NDEBUG
@@ -131,9 +132,12 @@ vm_map_kentry_alloc_slab(void)
if (vm_map_kentry_free_slabs == NULL)
panic("vm_map: kentry area exhausted");
+ mutex_lock(&vm_map_kentry_free_slabs_lock);
slab = vm_map_kentry_free_slabs;
- assert(slab->next != VM_MAP_KENTRY_ALLOCATED);
vm_map_kentry_free_slabs = slab->next;
+ mutex_unlock(&vm_map_kentry_free_slabs_lock);
+
+ assert(slab->next != VM_MAP_KENTRY_ALLOCATED);
slab->next = VM_MAP_KENTRY_ALLOCATED;
return slab;
}
@@ -142,8 +146,11 @@ static void
vm_map_kentry_free_slab(struct vm_map_kentry_slab *slab)
{
assert(slab->next == VM_MAP_KENTRY_ALLOCATED);
+
+ mutex_lock(&vm_map_kentry_free_slabs_lock);
slab->next = vm_map_kentry_free_slabs;
vm_map_kentry_free_slabs = slab;
+ mutex_unlock(&vm_map_kentry_free_slabs_lock);
}
static struct vm_map_kentry_slab *
@@ -279,6 +286,7 @@ vm_map_kentry_setup(void)
pmap_kupdate(table_va, table_va + (nr_pages * PAGE_SIZE));
+ mutex_init(&vm_map_kentry_free_slabs_lock);
slabs = (struct vm_map_kentry_slab *)table_va;
vm_map_kentry_free_slabs = &slabs[nr_slabs - 1];
vm_map_kentry_free_slabs->next = NULL;