author     Richard Braun <rbraun@sceen.net>    2017-01-11 21:31:53 +0100
committer  Richard Braun <rbraun@sceen.net>    2017-01-11 21:31:53 +0100
commit     135f428f0a50eb9988f0b40a60357dfedbcc7f18 (patch)
tree       38eb76350879b55227295a2fa31c7bdaffae08f9 /vm/vm_map.c
parent     0a7bb2b9e2441cd0610a0687f39a38b5c66a6f46 (diff)
kern/kmem: rework slab allocation
Allocating slabs from the page allocator only is likely to cause
fragmentation. Instead, allocate larger-than-page slabs from
kernel virtual memory, and page-sized slabs from the page allocator.
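
To make the backing policy concrete, here is a minimal, self-contained C sketch of the decision described above. It is an illustration under stated assumptions, not the actual kmem implementation: the structure, field names, helper functions, page size and flag value are all invented for the example; only the identifier KMEM_CACHE_PAGE_ONLY is taken from the diff below.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE            4096   /* page size assumed for the example */
#define KMEM_CACHE_PAGE_ONLY 0x1    /* flag value assumed for illustration */

/* Simplified stand-in for a kmem cache; field names are assumptions. */
struct cache_sketch {
    const char *name;
    size_t slab_size;
    int flags;
};

/* Stand-in for the physical page allocator. */
static void *
page_alloc_sketch(size_t size)
{
    printf("%zu bytes from the page allocator\n", size);
    return malloc(size);
}

/* Stand-in for an allocation of mapped kernel virtual memory. */
static void *
kva_alloc_sketch(size_t size)
{
    printf("%zu bytes from kernel virtual memory\n", size);
    return malloc(size);
}

/*
 * Backing policy from the commit message: page-sized slabs (and caches
 * explicitly restricted to the page allocator) are backed by physical
 * pages, while larger slabs are backed by kernel virtual memory so that
 * no physically contiguous multi-page allocation is required.
 */
static void *
slab_alloc_sketch(const struct cache_sketch *cache)
{
    if ((cache->slab_size <= PAGE_SIZE)
        || (cache->flags & KMEM_CACHE_PAGE_ONLY)) {
        return page_alloc_sketch(cache->slab_size);
    }

    return kva_alloc_sketch(cache->slab_size);
}

int
main(void)
{
    struct cache_sketch small = { "vm_map_entry", PAGE_SIZE, KMEM_CACHE_PAGE_ONLY };
    struct cache_sketch large = { "large_object", 4 * PAGE_SIZE, 0 };

    free(slab_alloc_sketch(&small));
    free(slab_alloc_sketch(&large));
    return 0;
}

Backing multi-page slabs with kernel virtual memory means the underlying physical pages no longer have to be contiguous, which is what relieves the fragmentation pressure on the page allocator.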
Diffstat (limited to 'vm/vm_map.c')
-rw-r--r--  vm/vm_map.c | 3
1 file changed, 2 insertions, 1 deletion
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 2c8c31a4..78ff2cc3 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -720,7 +720,8 @@ vm_map_setup(void)
     vm_map_init(kernel_map, kernel_pmap,
                 VM_MIN_KMEM_ADDRESS, VM_MAX_KMEM_ADDRESS);
     kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
-                    sizeof(struct vm_map_entry), 0, NULL, 0);
+                    sizeof(struct vm_map_entry), 0, NULL,
+                    KMEM_CACHE_PAGE_ONLY);
     kmem_cache_init(&vm_map_cache, "vm_map",
                     sizeof(struct vm_map), 0, NULL, 0);
 }
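
The vm/vm_map.c change itself is small: the vm_map_entry cache is now created with KMEM_CACHE_PAGE_ONLY. A plausible reason, stated here as an assumption rather than taken from the commit message, is bootstrapping: allocating kernel virtual memory creates new vm_map entries, so the cache backing those entries cannot itself depend on kernel virtual memory. The sketch below only illustrates that cycle; every name in it except KMEM_CACHE_PAGE_ONLY is hypothetical.

#include <stdio.h>

static void map_entry_alloc_sketch(int page_only);

/* Allocating kernel virtual memory needs a fresh vm_map entry. */
static void
kva_alloc_sketch(void)
{
    printf("kernel VM allocation -> needs a vm_map entry\n");
    map_entry_alloc_sketch(1);
}

/*
 * If vm_map_entry slabs came from kernel virtual memory, this would
 * recurse back into kva_alloc_sketch(); a cache restricted to the page
 * allocator (KMEM_CACHE_PAGE_ONLY in the diff above) breaks the cycle.
 */
static void
map_entry_alloc_sketch(int page_only)
{
    if (page_only) {
        printf("vm_map_entry slab from the page allocator\n");
    } else {
        printf("vm_map_entry slab from kernel virtual memory\n");
        kva_alloc_sketch();
    }
}

int
main(void)
{
    kva_alloc_sketch();
    return 0;
}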