Diffstat (limited to 'vm')
 -rw-r--r--  vm/vm_kmem.c |  30
 -rw-r--r--  vm/vm_map.c  | 131
 -rw-r--r--  vm/vm_page.c |  67
 3 files changed, 145 insertions(+), 83 deletions(-)
diff --git a/vm/vm_kmem.c b/vm/vm_kmem.c
index 3a0f9a93..a3d95613 100644
--- a/vm/vm_kmem.c
+++ b/vm/vm_kmem.c
@@ -40,8 +40,9 @@ static int
 vm_kmem_alloc_check(size_t size)
 {
     if (!vm_page_aligned(size)
-        || (size == 0))
+        || (size == 0)) {
         return -1;
+    }
 
     return 0;
 }
@@ -49,8 +50,9 @@ vm_kmem_alloc_check(size_t size)
 static int
 vm_kmem_free_check(unsigned long va, size_t size)
 {
-    if (!vm_page_aligned(va))
+    if (!vm_page_aligned(va)) {
         return -1;
+    }
 
     return vm_kmem_alloc_check(size);
 }
@@ -68,8 +70,9 @@ vm_kmem_alloc_va(size_t size)
                          VM_ADV_DEFAULT, 0);
     error = vm_map_enter(kernel_map, &va, size, 0, flags, NULL, 0);
 
-    if (error)
+    if (error) {
         return 0;
+    }
 
     return (void *)va;
 }
@@ -93,14 +96,16 @@ vm_kmem_alloc(size_t size)
     size = vm_page_round(size);
     va = (unsigned long)vm_kmem_alloc_va(size);
 
-    if (va == 0)
+    if (va == 0) {
         return 0;
+    }
 
     for (start = va, end = va + size; start < end; start += PAGE_SIZE) {
         page = vm_page_alloc(0, VM_PAGE_SEL_HIGHMEM, VM_PAGE_KERNEL);
 
-        if (page == NULL)
+        if (page == NULL) {
             goto error_page;
+        }
 
         pmap_enter(kernel_pmap, start, vm_page_to_pa(page),
                    VM_PROT_READ | VM_PROT_WRITE, PMAP_PEF_GLOBAL);
@@ -119,8 +124,9 @@ error_page:
 
     size = end - start;
 
-    if (size != 0)
+    if (size != 0) {
         vm_kmem_free_va((void *)start, size);
+    }
 
     return NULL;
 }
@@ -165,8 +171,9 @@ vm_kmem_map_pa(phys_addr_t pa, size_t size,
     map_size = vm_page_round(pa + size) - start;
     map_va = (unsigned long)vm_kmem_alloc_va(map_size);
 
-    if (map_va == 0)
+    if (map_va == 0) {
         return NULL;
+    }
 
     for (offset = 0; offset < map_size; offset += PAGE_SIZE)
         pmap_enter(kernel_pmap, map_va + offset, start + offset,
@@ -174,11 +181,13 @@ vm_kmem_map_pa(phys_addr_t pa, size_t size,
 
     pmap_update(kernel_pmap);
 
-    if (map_vap != NULL)
+    if (map_vap != NULL) {
         *map_vap = map_va;
+    }
 
-    if (map_sizep != NULL)
+    if (map_sizep != NULL) {
         *map_sizep = map_size;
+    }
 
     return (void *)(map_va + (unsigned long)(pa & PAGE_MASK));
 }
@@ -192,8 +201,9 @@ vm_kmem_unmap_pa(unsigned long map_va, size_t map_size)
     cpumap = cpumap_all();
     end = map_va + map_size;
 
-    for (va = map_va; va < end; va += PAGE_SIZE)
+    for (va = map_va; va < end; va += PAGE_SIZE) {
         pmap_remove(kernel_pmap, va, cpumap);
+    }
 
     pmap_update(kernel_pmap);
     vm_kmem_free_va((void *)map_va, map_size);
diff --git a/vm/vm_map.c b/vm/vm_map.c
index a5c37ec5..2c8c31a4 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -81,8 +81,9 @@ vm_map_entry_create(void)
     entry = kmem_cache_alloc(&vm_map_entry_cache);
 
     /* TODO Handle error */
-    if (entry == NULL)
+    if (entry == NULL) {
         panic("vm_map: can't create map entry");
+    }
 
     return entry;
 }
@@ -100,11 +101,13 @@ vm_map_entry_cmp_lookup(unsigned long addr, const struct rbtree_node *node)
 
     entry = rbtree_entry(node, struct vm_map_entry, tree_node);
 
-    if (addr >= entry->end)
+    if (addr >= entry->end) {
         return 1;
+    }
 
-    if (addr >= entry->start)
+    if (addr >= entry->start) {
         return 0;
+    }
 
     return -1;
 }
@@ -159,8 +162,9 @@ vm_map_lookup_nearest(struct vm_map *map, unsigned long addr)
 
     entry = map->lookup_cache;
 
-    if ((entry != NULL) && (addr >= entry->start) && (addr < entry->end))
+    if ((entry != NULL) && (addr >= entry->start) && (addr < entry->end)) {
         return entry;
+    }
 
     node = rbtree_lookup_nearest(&map->entry_tree, addr,
                                  vm_map_entry_cmp_lookup, RBTREE_RIGHT);
@@ -192,21 +196,24 @@ vm_map_find_fixed(struct vm_map *map, struct vm_map_request *request)
     start = request->start;
     size = request->size;
 
-    if ((start < map->start) || (start + size) > map->end)
+    if ((start < map->start) || (start + size) > map->end) {
         return ERROR_NOMEM;
+    }
 
     next = vm_map_lookup_nearest(map, start);
 
     if (next == NULL) {
-        if ((map->end - start) < size)
+        if ((map->end - start) < size) {
             return ERROR_NOMEM;
+        }
 
         request->next = NULL;
         return 0;
     }
 
-    if ((start >= next->start) || ((next->start - start) < size))
+    if ((start >= next->start) || ((next->start - start) < size)) {
         return ERROR_NOMEM;
+    }
 
     request->next = next;
     return 0;
@@ -225,16 +232,17 @@ vm_map_find_avail(struct vm_map *map, struct vm_map_request *request)
     if (request->start != 0) {
         error = vm_map_find_fixed(map, request);
 
-        if (!error)
+        if (!error) {
             return 0;
+        }
     }
 
     size = request->size;
     align = request->align;
 
-    if (size > map->find_cache_threshold)
+    if (size > map->find_cache_threshold) {
         base = map->find_cache;
-    else {
+    } else {
         base = map->start;
 
         /*
@@ -252,8 +260,9 @@ retry:
     for (;;) {
         assert(start <= map->end);
 
-        if (align != 0)
+        if (align != 0) {
             start = P2ROUND(start, align);
+        }
 
         /*
          * The end of the map has been reached, and no space could be found.
@@ -270,12 +279,13 @@ retry:
             return ERROR_NOMEM;
         }
 
-        if (next == NULL)
+        if (next == NULL) {
             space = map->end - start;
-        else if (start >= next->start)
+        } else if (start >= next->start) {
             space = 0;
-        else
+        } else {
             space = next->start - start;
+        }
 
         if (space >= size) {
             map->find_cache = start + size;
@@ -284,16 +294,18 @@ retry:
             return 0;
         }
 
-        if (space > map->find_cache_threshold)
+        if (space > map->find_cache_threshold) {
             map->find_cache_threshold = space;
+        }
 
         start = next->end;
         node = list_next(&next->list_node);
 
-        if (list_end(&map->entry_list, node))
+        if (list_end(&map->entry_list, node)) {
             next = NULL;
-        else
+        } else {
             next = list_entry(node, struct vm_map_entry, list_node);
+        }
     }
 }
 
@@ -304,10 +316,11 @@ vm_map_prev(struct vm_map *map, struct vm_map_entry *entry)
 
     node = list_prev(&entry->list_node);
 
-    if (list_end(&map->entry_list, node))
+    if (list_end(&map->entry_list, node)) {
         return NULL;
-    else
+    } else {
         return list_entry(node, struct vm_map_entry, list_node);
+    }
 }
 
 static inline struct vm_map_entry *
@@ -317,10 +330,11 @@ vm_map_next(struct vm_map *map, struct vm_map_entry *entry)
 
     node = list_next(&entry->list_node);
 
-    if (list_end(&map->entry_list, node))
+    if (list_end(&map->entry_list, node)) {
         return NULL;
-    else
+    } else {
         return list_entry(node, struct vm_map_entry, list_node);
+    }
 }
 
 static void
@@ -329,12 +343,13 @@ vm_map_link(struct vm_map *map, struct vm_map_entry *entry,
 {
     assert(entry->start < entry->end);
 
-    if ((prev == NULL) && (next == NULL))
+    if ((prev == NULL) && (next == NULL)) {
         list_insert_tail(&map->entry_list, &entry->list_node);
-    else if (prev == NULL)
+    } else if (prev == NULL) {
         list_insert_before(&next->list_node, &entry->list_node);
-    else
+    } else {
         list_insert_after(&prev->list_node, &entry->list_node);
+    }
 
     rbtree_insert(&map->entry_tree, &entry->tree_node, vm_map_entry_cmp_insert);
     map->nr_entries++;
@@ -345,8 +360,9 @@ vm_map_unlink(struct vm_map *map, struct vm_map_entry *entry)
 {
     assert(entry->start < entry->end);
 
-    if (map->lookup_cache == entry)
+    if (map->lookup_cache == entry) {
         map->lookup_cache = NULL;
+    }
 
     list_remove(&entry->list_node);
     rbtree_remove(&map->entry_tree, &entry->tree_node);
@@ -373,10 +389,11 @@ vm_map_prepare(struct vm_map *map, unsigned long start,
     request->offset = offset;
     vm_map_request_assert_valid(request);
 
-    if (flags & VM_MAP_FIXED)
+    if (flags & VM_MAP_FIXED) {
         error = vm_map_find_fixed(map, request);
-    else
+    } else {
         error = vm_map_find_avail(map, request);
+    }
 
     return error;
 }
@@ -408,11 +425,13 @@ vm_map_try_merge_prev(struct vm_map *map, const struct vm_map_request *request,
 
     assert(entry != NULL);
 
-    if (!vm_map_try_merge_compatible(request, entry))
+    if (!vm_map_try_merge_compatible(request, entry)) {
         return NULL;
+    }
 
-    if (entry->end != request->start)
+    if (entry->end != request->start) {
         return NULL;
+    }
 
     prev = vm_map_prev(map, entry);
     next = vm_map_next(map, entry);
@@ -431,13 +450,15 @@ vm_map_try_merge_next(struct vm_map *map, const struct vm_map_request *request,
 
     assert(entry != NULL);
 
-    if (!vm_map_try_merge_compatible(request, entry))
+    if (!vm_map_try_merge_compatible(request, entry)) {
         return NULL;
+    }
 
     end = request->start + request->size;
 
-    if (end != entry->start)
+    if (end != entry->start) {
         return NULL;
+    }
 
     prev = vm_map_prev(map, entry);
     next = vm_map_next(map, entry);
@@ -474,8 +495,9 @@ vm_map_try_merge_near(struct vm_map *map, const struct vm_map_request *request,
 
     entry = vm_map_try_merge_prev(map, request, first);
 
-    if (entry != NULL)
+    if (entry != NULL) {
         return entry;
+    }
 
     return vm_map_try_merge_next(map, request, second);
 }
@@ -492,18 +514,18 @@ vm_map_try_merge(struct vm_map *map, const struct vm_map_request *request)
     if (request->next == NULL) {
         node = list_last(&map->entry_list);
 
-        if (list_end(&map->entry_list, node))
+        if (list_end(&map->entry_list, node)) {
             entry = NULL;
-        else {
+        } else {
             prev = list_entry(node, struct vm_map_entry, list_node);
             entry = vm_map_try_merge_prev(map, request, prev);
         }
     } else {
         node = list_prev(&request->next->list_node);
 
-        if (list_end(&map->entry_list, node))
+        if (list_end(&map->entry_list, node)) {
             entry = vm_map_try_merge_next(map, request, request->next);
-        else {
+        } else {
             prev = list_entry(node, struct vm_map_entry, list_node);
             entry = vm_map_try_merge_near(map, request, prev, request->next);
         }
@@ -524,8 +546,9 @@ vm_map_insert(struct vm_map *map, struct vm_map_entry *entry,
     if (entry == NULL) {
         entry = vm_map_try_merge(map, request);
 
-        if (entry != NULL)
+        if (entry != NULL) {
             goto out;
+        }
 
         entry = vm_map_entry_create();
     }
@@ -555,13 +578,15 @@ vm_map_enter(struct vm_map *map, unsigned long *startp,
     error = vm_map_prepare(map, *startp, size, align, flags, object, offset,
                            &request);
 
-    if (error)
+    if (error) {
         goto error_enter;
+    }
 
     error = vm_map_insert(map, NULL, &request);
 
-    if (error)
+    if (error) {
         goto error_enter;
+    }
 
     mutex_unlock(&map->lock);
 
@@ -584,8 +609,9 @@ vm_map_split_entries(struct vm_map_entry *prev, struct vm_map_entry *next,
     prev->end = split_addr;
     next->start = split_addr;
 
-    if (next->object != NULL)
+    if (next->object != NULL) {
         next->offset += delta;
+    }
 }
 
 static void
@@ -594,8 +620,9 @@ vm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
 {
     struct vm_map_entry *new_entry, *next;
 
-    if ((start <= entry->start) || (start >= entry->end))
+    if ((start <= entry->start) || (start >= entry->end)) {
         return;
+    }
 
     next = vm_map_next(map, entry);
     vm_map_unlink(map, entry);
@@ -612,8 +639,9 @@ vm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry,
 {
     struct vm_map_entry *new_entry, *prev;
 
-    if ((end <= entry->start) || (end >= entry->end))
+    if ((end <= entry->start) || (end >= entry->end)) {
         return;
+    }
 
     prev = vm_map_prev(map, entry);
     vm_map_unlink(map, entry);
@@ -638,8 +666,9 @@ vm_map_remove(struct vm_map *map, unsigned long start, unsigned long end)
 
     entry = vm_map_lookup_nearest(map, start);
 
-    if (entry == NULL)
+    if (entry == NULL) {
         goto out;
+    }
 
     vm_map_clip_start(map, entry, start);
 
@@ -652,8 +681,9 @@ vm_map_remove(struct vm_map *map, unsigned long start, unsigned long end)
         /* TODO Defer destruction to shorten critical section */
         vm_map_entry_destroy(entry);
 
-        if (list_end(&map->entry_list, node))
+        if (list_end(&map->entry_list, node)) {
             break;
+        }
 
         entry = list_entry(node, struct vm_map_entry, list_node);
     }
@@ -711,8 +741,9 @@ vm_map_create(struct vm_map **mapp)
 
     error = pmap_create(&pmap);
 
-    if (error)
+    if (error) {
         goto error_pmap;
+    }
 
     vm_map_init(map, pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
     *mapp = map;
@@ -730,10 +761,11 @@ vm_map_info(struct vm_map *map)
     struct vm_map_entry *entry;
     const char *type, *name;
 
-    if (map == kernel_map)
+    if (map == kernel_map) {
         name = "kernel map";
-    else
+    } else {
         name = "map";
+    }
 
     mutex_lock(&map->lock);
 
@@ -742,10 +774,11 @@ vm_map_info(struct vm_map *map)
            "size offset flags type\n", name, map->start, map->end);
 
     list_for_each_entry(&map->entry_list, entry, list_node) {
-        if (entry->object == NULL)
+        if (entry->object == NULL) {
             type = "null";
-        else
+        } else {
             type = "object";
+        }
 
         printk("vm_map: %016lx %016lx %8luk %08llx %08x %s\n", entry->start,
                entry->end, (entry->end - entry->start) >> 10, entry->offset,
diff --git a/vm/vm_page.c b/vm/vm_page.c
index 21fdfe37..8dec59e8 100644
--- a/vm/vm_page.c
+++ b/vm/vm_page.c
@@ -175,8 +175,9 @@ vm_page_set_type(struct vm_page *page, unsigned int order, unsigned short type)
 
     nr_pages = 1 << order;
 
-    for (i = 0; i < nr_pages; i++)
+    for (i = 0; i < nr_pages; i++) {
         page[i].type = type;
+    }
 }
 
 static void __init
@@ -218,12 +219,14 @@ vm_page_seg_alloc_from_buddy(struct vm_page_seg *seg, unsigned int order)
     for (i = order; i < VM_PAGE_NR_FREE_LISTS; i++) {
         free_list = &seg->free_lists[i];
 
-        if (free_list->size != 0)
+        if (free_list->size != 0) {
             break;
+        }
     }
 
-    if (i == VM_PAGE_NR_FREE_LISTS)
+    if (i == VM_PAGE_NR_FREE_LISTS) {
         return NULL;
+    }
 
     page = list_first_entry(&free_list->blocks, struct vm_page, node);
     vm_page_free_list_remove(free_list, page);
@@ -259,13 +262,15 @@ vm_page_seg_free_to_buddy(struct vm_page_seg *seg, struct vm_page *page,
     while (order < (VM_PAGE_NR_FREE_LISTS - 1)) {
         buddy_pa = pa ^ vm_page_ptoa(1 << order);
 
-        if ((buddy_pa < seg->start) || (buddy_pa >= seg->end))
+        if ((buddy_pa < seg->start) || (buddy_pa >= seg->end)) {
             break;
+        }
 
         buddy = &seg->pages[vm_page_atop(buddy_pa - seg->start)];
 
-        if (buddy->order != order)
+        if (buddy->order != order) {
             break;
+        }
 
         vm_page_free_list_remove(&seg->free_lists[order], buddy);
         buddy->order = VM_PAGE_ORDER_UNLISTED;
@@ -330,8 +335,9 @@ vm_page_cpu_pool_fill(struct vm_page_cpu_pool *cpu_pool,
     for (i = 0; i < cpu_pool->transfer_size; i++) {
         page = vm_page_seg_alloc_from_buddy(seg, 0);
 
-        if (page == NULL)
+        if (page == NULL) {
             break;
+        }
 
         vm_page_cpu_pool_push(cpu_pool, page);
     }
@@ -373,10 +379,11 @@ vm_page_seg_compute_pool_size(struct vm_page_seg *seg)
 
     size = vm_page_atop(vm_page_seg_size(seg)) / VM_PAGE_CPU_POOL_RATIO;
 
-    if (size == 0)
+    if (size == 0) {
         size = 1;
-    else if (size > VM_PAGE_CPU_POOL_MAX_SIZE)
+    } else if (size > VM_PAGE_CPU_POOL_MAX_SIZE) {
         size = VM_PAGE_CPU_POOL_MAX_SIZE;
+    }
 
     return size;
 }
@@ -393,21 +400,24 @@ vm_page_seg_init(struct vm_page_seg *seg, phys_addr_t start, phys_addr_t end,
     seg->end = end;
     pool_size = vm_page_seg_compute_pool_size(seg);
 
-    for (i = 0; i < ARRAY_SIZE(seg->cpu_pools); i++)
+    for (i = 0; i < ARRAY_SIZE(seg->cpu_pools); i++) {
         vm_page_cpu_pool_init(&seg->cpu_pools[i], pool_size);
+    }
 
     seg->pages = pages;
     seg->pages_end = pages + vm_page_atop(vm_page_seg_size(seg));
     mutex_init(&seg->lock);
 
-    for (i = 0; i < ARRAY_SIZE(seg->free_lists); i++)
+    for (i = 0; i < ARRAY_SIZE(seg->free_lists); i++) {
         vm_page_free_list_init(&seg->free_lists[i]);
+    }
 
     seg->nr_free_pages = 0;
     i = seg - vm_page_segs;
 
-    for (pa = seg->start; pa < seg->end; pa += PAGE_SIZE)
+    for (pa = seg->start; pa < seg->end; pa += PAGE_SIZE) {
         vm_page_init(&pages[vm_page_atop(pa - seg->start)], i, pa);
+    }
 }
 
 static struct vm_page *
@@ -443,8 +453,9 @@ vm_page_seg_alloc(struct vm_page_seg *seg, unsigned int order,
         page = vm_page_seg_alloc_from_buddy(seg, order);
         mutex_unlock(&seg->lock);
 
-        if (page == NULL)
+        if (page == NULL) {
             return NULL;
+        }
     }
 
     assert(page->type == VM_PAGE_FREE);
@@ -468,8 +479,9 @@ vm_page_seg_free(struct vm_page_seg *seg, struct vm_page *page,
         cpu_pool = vm_page_cpu_pool_get(seg);
         mutex_lock(&cpu_pool->lock);
 
-        if (cpu_pool->nr_pages == cpu_pool->size)
+        if (cpu_pool->nr_pages == cpu_pool->size) {
             vm_page_cpu_pool_drain(cpu_pool, seg);
+        }
 
         vm_page_cpu_pool_push(cpu_pool, page);
         mutex_unlock(&cpu_pool->lock);
@@ -574,14 +586,16 @@ vm_page_check_boot_segs(void)
     unsigned int i;
     int expect_loaded;
 
-    if (vm_page_segs_size == 0)
+    if (vm_page_segs_size == 0) {
         panic("vm_page: no physical memory loaded");
+    }
 
     for (i = 0; i < ARRAY_SIZE(vm_page_boot_segs); i++) {
         expect_loaded = (i < vm_page_segs_size);
 
-        if (vm_page_boot_seg_loaded(&vm_page_boot_segs[i]) == expect_loaded)
+        if (vm_page_boot_seg_loaded(&vm_page_boot_segs[i]) == expect_loaded) {
             continue;
+        }
 
         panic("vm_page: invalid boot segment table");
     }
@@ -643,8 +657,9 @@ vm_page_setup(void)
      */
    nr_pages = 0;
 
-    for (i = 0; i < vm_page_segs_size; i++)
+    for (i = 0; i < vm_page_segs_size; i++) {
         nr_pages += vm_page_atop(vm_page_boot_seg_size(&vm_page_boot_segs[i]));
+    }
 
     table_size = vm_page_round(nr_pages * sizeof(struct vm_page));
     printk("vm_page: page table size: %zu entries (%zuk)\n", nr_pages,
@@ -705,8 +720,9 @@ vm_page_lookup(phys_addr_t pa)
     for (i = 0; i < vm_page_segs_size; i++) {
         seg = &vm_page_segs[i];
 
-        if ((pa >= seg->start) && (pa < seg->end))
+        if ((pa >= seg->start) && (pa < seg->end)) {
             return &seg->pages[vm_page_atop(pa - seg->start)];
+        }
     }
 
     return NULL;
@@ -721,12 +737,14 @@ vm_page_alloc(unsigned int order, unsigned int selector, unsigned short type)
     for (i = vm_page_select_alloc_seg(selector); i < vm_page_segs_size; i--) {
         page = vm_page_seg_alloc(&vm_page_segs[i], order, type);
 
-        if (page != NULL)
+        if (page != NULL) {
             return page;
+        }
     }
 
-    if (type == VM_PAGE_PMAP)
+    if (type == VM_PAGE_PMAP) {
         panic("vm_page: unable to allocate pmap page");
+    }
 
     return NULL;
 }
@@ -743,16 +761,17 @@ const char *
 vm_page_seg_name(unsigned int seg_index)
 {
     /* Don't use a switch statement since segments can be aliased */
-    if (seg_index == VM_PAGE_SEG_HIGHMEM)
+    if (seg_index == VM_PAGE_SEG_HIGHMEM) {
         return "HIGHMEM";
-    else if (seg_index == VM_PAGE_SEG_DIRECTMAP)
+    } else if (seg_index == VM_PAGE_SEG_DIRECTMAP) {
         return "DIRECTMAP";
-    else if (seg_index == VM_PAGE_SEG_DMA32)
+    } else if (seg_index == VM_PAGE_SEG_DMA32) {
        return "DMA32";
-    else if (seg_index == VM_PAGE_SEG_DMA)
+    } else if (seg_index == VM_PAGE_SEG_DMA) {
         return "DMA";
-    else
+    } else {
         panic("vm_page: invalid segment index");
+    }
 }
 
 void
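Every hunk above applies the same mechanical change: single-statement if/else and loop bodies gain explicit braces, with no change in behavior. As a minimal standalone sketch of the before/after pattern (the check_size function below is hypothetical, not part of the commit):

    #include <stddef.h> /* size_t */

    /* Before: unbraced single-statement body, as the old code was written. */
    static int
    check_size_old(size_t size)
    {
        if (size == 0)
            return -1;

        return 0;
    }

    /* After: the body is braced, matching the style this commit enforces. */
    static int
    check_size_new(size_t size)
    {
        if (size == 0) {
            return -1;
        }

        return 0;
    }

Always bracing bodies makes later insertions into a branch harder to get wrong and keeps the diff of such a change to the added lines only, which is presumably why the change is applied uniformly across all three files.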