Diffstat (limited to 'vm/vm_map.c')
-rw-r--r--	vm/vm_map.c	586
1 file changed, 255 insertions, 331 deletions
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 2be71471..89a2b382 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -58,20 +58,6 @@
 #include <vm/vm_print.h>
 #endif	/* MACH_KDB */
 
-
-/* Forward declarations */
-kern_return_t vm_map_delete(
-	vm_map_t	map,
-	vm_offset_t	start,
-	vm_offset_t	end);
-
-kern_return_t vm_map_copyout_page_list(
-	vm_map_t	dst_map,
-	vm_offset_t	*dst_addr,	/* OUT */
-	vm_map_copy_t	copy);
-
-void vm_map_copy_page_discard (vm_map_copy_t copy);
-
 /*
  *	Macros to copy a vm_map_entry. We must be careful to correctly
  *	manage the wired page count. vm_map_entry_copy() creates a new
@@ -140,11 +126,8 @@ MACRO_END
 
 struct kmem_cache	vm_map_cache;		/* cache for vm_map structures */
 struct kmem_cache	vm_map_entry_cache;	/* cache for vm_map_entry structures */
-struct kmem_cache	vm_map_kentry_cache;	/* cache for kernel entry structures */
 struct kmem_cache	vm_map_copy_cache;	/* cache for vm_map_copy structures */
 
-boolean_t	vm_map_lookup_entry();	/* forward declaration */
-
 /*
  *	Placeholder object for submap operations.  This object is dropped
  *	into the range by a call to vm_map_find, and removed when
@@ -163,58 +146,36 @@ vm_object_t	vm_submap_object = &vm_submap_object_store;
  *	Map and entry structures are allocated from caches -- we must
  *	initialize those caches.
  *
- *	There are three caches of interest:
+ *	There are two caches of interest:
  *
  *	vm_map_cache:		used to allocate maps.
  *	vm_map_entry_cache:	used to allocate map entries.
- *	vm_map_kentry_cache:	used to allocate map entries for the kernel.
 *
- *	Kernel map entries are allocated from a special cache, using a custom
- *	page allocation function to avoid recursion.  It would be difficult
- *	(perhaps impossible) for the kernel to allocate more memory to an entry
- *	cache when it became empty since the very act of allocating memory
- *	implies the creation of a new entry.
+ *	We make sure the map entry cache allocates memory directly from the
+ *	physical allocator to avoid recursion with this module.
 */
-vm_offset_t	kentry_data;
-vm_size_t	kentry_data_size = KENTRY_DATA_SIZE;
-
-static vm_offset_t kentry_pagealloc(vm_size_t size)
-{
-	vm_offset_t result;
-
-	if (size > kentry_data_size)
-		panic("vm_map: kentry memory exhausted");
-
-	result = kentry_data;
-	kentry_data += size;
-	kentry_data_size -= size;
-	return result;
-}
-
 void vm_map_init(void)
 {
 	kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
-			NULL, NULL, NULL, 0);
+			NULL, 0);
 	kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
-			sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);
-	kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
-			sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc,
-			NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB
-			      | KMEM_CACHE_NORECLAIM);
+			sizeof(struct vm_map_entry), 0, NULL,
+			KMEM_CACHE_NOOFFSLAB | KMEM_CACHE_PHYSMEM);
 	kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
-			sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0);
+			sizeof(struct vm_map_copy), 0, NULL, 0);
 
 	/*
 	 *	Submap object is initialized by vm_object_init.
	 */
 }
 
-void vm_map_setup(map, pmap, min, max, pageable)
-	vm_map_t	map;
-	pmap_t		pmap;
-	vm_offset_t	min, max;
-	boolean_t	pageable;
+void vm_map_setup(
+	vm_map_t	map,
+	pmap_t		pmap,
+	vm_offset_t	min,
+	vm_offset_t	max,
+	boolean_t	pageable)
 {
 	vm_map_first_entry(map) = vm_map_to_entry(map);
 	vm_map_last_entry(map)  = vm_map_to_entry(map);
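Note: the removed kentry_pagealloc() bootstrapped kernel map entries out of a fixed static reserve precisely because growing a cache through the mapping layer would itself consume a map entry. The KMEM_CACHE_PHYSMEM flag moves that responsibility into the slab allocator, which takes pages straight from the physical allocator. A minimal user-space sketch of the underlying bootstrap problem follows; all names in it are hypothetical, not kernel API.

	/*
	 * Illustration only, not kernel code: a cache that refuses to go
	 * through the mapping layer for backing memory, because doing so
	 * would recurse into the very code that needs the allocation.
	 */
	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	#define PAGE_SIZE	4096
	#define POOL_PAGES	64

	static char	fixed_pool[POOL_PAGES * PAGE_SIZE];	/* stands in for physical pages */
	static size_t	pool_used;

	/* Hand out memory without creating new mappings, as the removed
	 * kentry_pagealloc() did with its static reserve and as
	 * KMEM_CACHE_PHYSMEM now arranges via the physical allocator. */
	static void *entry_backing_alloc(size_t size)
	{
		void *p;

		assert(pool_used + size <= sizeof(fixed_pool));
		p = fixed_pool + pool_used;
		pool_used += size;
		return p;
	}

	int main(void)
	{
		struct { unsigned long start, end; } *e1, *e2;

		e1 = entry_backing_alloc(sizeof(*e1));
		e2 = entry_backing_alloc(sizeof(*e2));
		printf("entries at %p and %p\n", (void *)e1, (void *)e2);
		return 0;
	}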
@@ -223,6 +184,7 @@ void vm_map_setup(map, pmap, min, max, pageable)
 	rbtree_init(&map->hdr.tree);
 
 	map->size = 0;
+	map->user_wired = 0;
 	map->ref_count = 1;
 	map->pmap = pmap;
 	map->min_offset = min;
@@ -243,12 +205,13 @@ void vm_map_setup(map, pmap, min, max, pageable)
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
-vm_map_t vm_map_create(pmap, min, max, pageable)
-	pmap_t		pmap;
-	vm_offset_t	min, max;
-	boolean_t	pageable;
+vm_map_t vm_map_create(
+	pmap_t		pmap,
+	vm_offset_t	min,
+	vm_offset_t	max,
+	boolean_t	pageable)
 {
-	register vm_map_t	result;
+	vm_map_t	result;
 
 	result = (vm_map_t) kmem_cache_alloc(&vm_map_cache);
 	if (result == VM_MAP_NULL)
@@ -272,17 +235,11 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
 	_vm_map_entry_create(&(copy)->cpy_hdr)
 
 vm_map_entry_t _vm_map_entry_create(map_header)
-	register struct vm_map_header *map_header;
+	const struct vm_map_header *map_header;
 {
-	register kmem_cache_t cache;
-	register vm_map_entry_t entry;
-
-	if (map_header->entries_pageable)
-		cache = &vm_map_entry_cache;
-	else
-		cache = &vm_map_kentry_cache;
+	vm_map_entry_t	entry;
 
-	entry = (vm_map_entry_t) kmem_cache_alloc(cache);
+	entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
 	if (entry == VM_MAP_ENTRY_NULL)
 		panic("vm_map_entry_create");
 
@@ -301,17 +258,12 @@ vm_map_entry_t _vm_map_entry_create(map_header)
 	_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))
 
 void _vm_map_entry_dispose(map_header, entry)
-	register struct vm_map_header *map_header;
-	register vm_map_entry_t entry;
+	const struct vm_map_header *map_header;
+	vm_map_entry_t	entry;
 {
-	register kmem_cache_t cache;
-
-	if (map_header->entries_pageable)
-		cache = &vm_map_entry_cache;
-	else
-		cache = &vm_map_kentry_cache;
+	(void)map_header;
 
-	kmem_cache_free(cache, (vm_offset_t) entry);
+	kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry);
 }
 
 /*
@@ -386,8 +338,7 @@ static inline int vm_map_entry_cmp_insert(const struct rbtree_node *a,
 *	Creates another valid reference to the given map.
 *
 */
-void vm_map_reference(map)
-	register vm_map_t	map;
+void vm_map_reference(vm_map_t map)
 {
 	if (map == VM_MAP_NULL)
 		return;
@@ -404,10 +355,9 @@ void vm_map_reference(map)
 *	destroying it if no references remain.
 *	The map should not be locked.
 */
-void vm_map_deallocate(map)
-	register vm_map_t	map;
+void vm_map_deallocate(vm_map_t map)
 {
-	register int		c;
+	int		c;
 
 	if (map == VM_MAP_NULL)
 		return;
@@ -449,13 +399,13 @@ void vm_map_deallocate(map)
 *	result indicates whether the address is
 *	actually contained in the map.
 */
-boolean_t vm_map_lookup_entry(map, address, entry)
-	register vm_map_t	map;
-	register vm_offset_t	address;
-	vm_map_entry_t		*entry;		/* OUT */
+boolean_t vm_map_lookup_entry(
+	vm_map_t	map,
+	vm_offset_t	address,
+	vm_map_entry_t	*entry)		/* OUT */
 {
-	register struct rbtree_node	*node;
-	register vm_map_entry_t		hint;
+	struct rbtree_node	*node;
+	vm_map_entry_t		hint;
 
 	/*
 	 *	First, make a quick check to see if we are already
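For reference, vm_map_lookup_entry() returns TRUE along with the entry containing the address, or FALSE along with the entry immediately preceding the hole. Below is a rough user-space sketch of that contract over a plain sorted array, deliberately ignoring the hint and red-black tree the kernel uses; all names are illustrative.

	#include <stdio.h>

	struct range { unsigned long start, end; };	/* [start, end) */

	/* Return 1 and the index of the entry containing addr, or 0 and
	 * the index of the entry preceding the hole (-1 for the head). */
	static int lookup(const struct range *r, int n, unsigned long addr, int *prev)
	{
		int i;

		*prev = -1;
		for (i = 0; i < n; i++) {
			if (addr < r[i].start)
				break;		/* addr falls in the hole after *prev */
			if (addr < r[i].end) {
				*prev = i;
				return 1;	/* addr inside entry i */
			}
			*prev = i;
		}
		return 0;
	}

	int main(void)
	{
		struct range map[] = { { 0x1000, 0x3000 }, { 0x5000, 0x6000 } };
		int prev;

		printf("%d\n", lookup(map, 2, 0x2000, &prev));	/* 1: inside entry 0 */
		printf("%d\n", lookup(map, 2, 0x4000, &prev));	/* 0: hole after entry 0 */
		return 0;
	}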
@@ -506,10 +456,11 @@ boolean_t vm_map_lookup_entry(map, address, entry)
 */
 boolean_t
-invalid_user_access(map, start, end, prot)
-	vm_map_t	map;
-	vm_offset_t	start, end;
-	vm_prot_t	prot;
+invalid_user_access(
+	vm_map_t	map,
+	vm_offset_t	start,
+	vm_offset_t	end,
+	vm_prot_t	prot)
 {
 	vm_map_entry_t entry;
 
@@ -533,17 +484,17 @@ invalid_user_access(map, start, end, prot)
 *	are initialized to zero.  If an object is supplied,
 *	then an existing entry may be extended.
 */
-kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
-	register vm_map_t	map;
-	vm_offset_t		*address;	/* OUT */
-	vm_size_t		size;
-	vm_offset_t		mask;
-	vm_object_t		object;
-	vm_map_entry_t		*o_entry;	/* OUT */
+kern_return_t vm_map_find_entry(
+	vm_map_t	map,
+	vm_offset_t	*address,	/* OUT */
+	vm_size_t	size,
+	vm_offset_t	mask,
+	vm_object_t	object,
+	vm_map_entry_t	*o_entry)	/* OUT */
 {
-	register vm_map_entry_t	entry, new_entry;
-	register vm_offset_t	start;
-	register vm_offset_t	end;
+	vm_map_entry_t	entry, new_entry;
+	vm_offset_t	start;
+	vm_offset_t	end;
 
 	/*
 	 *	Look for the first possible address;
@@ -562,7 +513,7 @@ kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
 	 */
 
 	while (TRUE) {
-		register vm_map_entry_t	next;
+		vm_map_entry_t	next;
 
 		/*
 		 *	Find the end of the proposed new region.
@@ -687,8 +638,8 @@ kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
 	return(KERN_SUCCESS);
 }
 
-int vm_map_pmap_enter_print = FALSE;
-int vm_map_pmap_enter_enable = FALSE;
+boolean_t vm_map_pmap_enter_print = FALSE;
+boolean_t vm_map_pmap_enter_enable = FALSE;
 
 /*
 *	Routine:	vm_map_pmap_enter
@@ -705,19 +656,16 @@ int vm_map_pmap_enter_enable = FALSE;
 *		The source map should not be locked on entry.
 */
 void
-vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
-	vm_map_t	map;
-	register
-	vm_offset_t	addr;
-	register
-	vm_offset_t	end_addr;
-	register
-	vm_object_t	object;
-	vm_offset_t	offset;
-	vm_prot_t	protection;
+vm_map_pmap_enter(
+	vm_map_t	map,
+	vm_offset_t	addr,
+	vm_offset_t	end_addr,
+	vm_object_t	object,
+	vm_offset_t	offset,
+	vm_prot_t	protection)
 {
 	while (addr < end_addr) {
-		register vm_page_t	m;
+		vm_page_t	m;
 
 		vm_object_lock(object);
 		vm_object_paging_begin(object);
@@ -766,27 +714,22 @@ vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
 *	Arguments are as defined in the vm_map call.
 */
 kern_return_t vm_map_enter(
-		map,
-		address, size, mask, anywhere,
-		object, offset, needs_copy,
-		cur_protection, max_protection,	inheritance)
-	register
-	vm_map_t	map;
-	vm_offset_t	*address;	/* IN/OUT */
-	vm_size_t	size;
-	vm_offset_t	mask;
-	boolean_t	anywhere;
-	vm_object_t	object;
-	vm_offset_t	offset;
-	boolean_t	needs_copy;
-	vm_prot_t	cur_protection;
-	vm_prot_t	max_protection;
-	vm_inherit_t	inheritance;
+	vm_map_t	map,
+	vm_offset_t	*address,	/* IN/OUT */
+	vm_size_t	size,
+	vm_offset_t	mask,
+	boolean_t	anywhere,
+	vm_object_t	object,
+	vm_offset_t	offset,
+	boolean_t	needs_copy,
+	vm_prot_t	cur_protection,
+	vm_prot_t	max_protection,
+	vm_inherit_t	inheritance)
 {
-	register vm_map_entry_t	entry;
-	register vm_offset_t	start;
-	register vm_offset_t	end;
-	kern_return_t		result = KERN_SUCCESS;
+	vm_map_entry_t	entry;
+	vm_offset_t	start;
+	vm_offset_t	end;
+	kern_return_t	result = KERN_SUCCESS;
 
 #define	RETURN(value)	{ result = value; goto BailOut; }
 
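Most of this commit is the same mechanical change seen above: K&R parameter lists become ANSI prototypes, and register qualifiers are dropped. The point of the conversion is compile-time argument checking, which K&R definitions never provided. A hedged, hypothetical illustration (not kernel code):

	#include <stdio.h>

	/* Old style: a definition but not a prototype, so call sites
	 * are not checked against the parameter list. */
	int krn_add(a, b)
		int a, b;
	{
		return a + b;
	}

	/* New style: the argument types are part of the declaration,
	 * so a mismatched call is a hard compile-time error. */
	int ansi_add(int a, int b)
	{
		return a + b;
	}

	int main(void)
	{
		/* krn_add(1) would still compile under K&R rules and pass
		 * garbage for b; ansi_add(1) is rejected by the compiler. */
		printf("%d %d\n", krn_add(1, 2), ansi_add(3, 4));
		return 0;
	}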
@@ -832,7 +775,7 @@ kern_return_t vm_map_enter(
 	 */
 
 	while (TRUE) {
-		register vm_map_entry_t	next;
+		vm_map_entry_t	next;
 
 		/*
 		 *	Find the end of the proposed new region.
@@ -980,7 +923,7 @@ kern_return_t vm_map_enter(
 	 */
 
 /**/	{
-	register vm_map_entry_t	new_entry;
+	vm_map_entry_t	new_entry;
 
 	new_entry = vm_map_entry_create(map);
 
@@ -1051,14 +994,12 @@ kern_return_t vm_map_enter(
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
-void _vm_map_clip_start();
 #define vm_map_clip_start(map, entry, startaddr) \
 	MACRO_BEGIN \
 	if ((startaddr) > (entry)->vme_start) \
 		_vm_map_clip_start(&(map)->hdr,(entry),(startaddr)); \
 	MACRO_END
 
-void _vm_map_copy_clip_start();
 #define vm_map_copy_clip_start(copy, entry, startaddr) \
 	MACRO_BEGIN \
 	if ((startaddr) > (entry)->vme_start) \
@@ -1069,12 +1010,12 @@ void _vm_map_copy_clip_start();
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
-void _vm_map_clip_start(map_header, entry, start)
-	register struct vm_map_header *map_header;
-	register vm_map_entry_t	entry;
-	register vm_offset_t	start;
+void _vm_map_clip_start(
+	struct vm_map_header	*map_header,
+	vm_map_entry_t		entry,
+	vm_offset_t		start)
 {
-	register vm_map_entry_t	new_entry;
+	vm_map_entry_t	new_entry;
 
 	/*
 	 *	Split off the front portion --
@@ -1106,14 +1047,12 @@ void _vm_map_clip_start(map_header, entry, start)
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
-void _vm_map_clip_end();
 #define vm_map_clip_end(map, entry, endaddr) \
 	MACRO_BEGIN \
 	if ((endaddr) < (entry)->vme_end) \
 		_vm_map_clip_end(&(map)->hdr,(entry),(endaddr)); \
 	MACRO_END
 
-void _vm_map_copy_clip_end();
 #define vm_map_copy_clip_end(copy, entry, endaddr) \
 	MACRO_BEGIN \
 	if ((endaddr) < (entry)->vme_end) \
@@ -1124,12 +1063,12 @@ void _vm_map_copy_clip_end();
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
-void _vm_map_clip_end(map_header, entry, end)
-	register struct vm_map_header *map_header;
-	register vm_map_entry_t	entry;
-	register vm_offset_t	end;
+void _vm_map_clip_end(
+	struct vm_map_header	*map_header,
+	vm_map_entry_t		entry,
+	vm_offset_t		end)
 {
-	register vm_map_entry_t	new_entry;
+	vm_map_entry_t	new_entry;
 
 	/*
 	 *	Create a new entry and insert it
@@ -1184,15 +1123,15 @@ void _vm_map_clip_end(map_header, entry, end)
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
-kern_return_t vm_map_submap(map, start, end, submap)
-	register vm_map_t	map;
-	register vm_offset_t	start;
-	register vm_offset_t	end;
-	vm_map_t		submap;
+kern_return_t vm_map_submap(
+	vm_map_t	map,
+	vm_offset_t	start,
+	vm_offset_t	end,
+	vm_map_t	submap)
 {
 	vm_map_entry_t		entry;
-	register kern_return_t	result = KERN_INVALID_ARGUMENT;
-	register vm_object_t	object;
+	kern_return_t		result = KERN_INVALID_ARGUMENT;
+	vm_object_t		object;
 
 	vm_map_lock(map);
 
@@ -1232,15 +1171,15 @@ kern_return_t vm_map_submap(map, start, end, submap)
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
-kern_return_t vm_map_protect(map, start, end, new_prot, set_max)
-	register vm_map_t	map;
-	register vm_offset_t	start;
-	register vm_offset_t	end;
-	register vm_prot_t	new_prot;
-	register boolean_t	set_max;
+kern_return_t vm_map_protect(
+	vm_map_t	map,
+	vm_offset_t	start,
+	vm_offset_t	end,
+	vm_prot_t	new_prot,
+	boolean_t	set_max)
 {
-	register vm_map_entry_t		current;
-	vm_map_entry_t			entry;
+	vm_map_entry_t		current;
+	vm_map_entry_t		entry;
 
 	vm_map_lock(map);
 
@@ -1320,13 +1259,13 @@ kern_return_t vm_map_protect(map, start, end, new_prot, set_max)
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
-kern_return_t vm_map_inherit(map, start, end, new_inheritance)
-	register vm_map_t	map;
-	register vm_offset_t	start;
-	register vm_offset_t	end;
-	register vm_inherit_t	new_inheritance;
+kern_return_t vm_map_inherit(
+	vm_map_t	map,
+	vm_offset_t	start,
+	vm_offset_t	end,
+	vm_inherit_t	new_inheritance)
 {
-	register vm_map_entry_t	entry;
+	vm_map_entry_t	entry;
 	vm_map_entry_t	temp_entry;
 
 	vm_map_lock(map);
@@ -1369,14 +1308,14 @@ kern_return_t vm_map_inherit(map, start, end, new_inheritance)
 *	Callers should use macros in vm/vm_map.h (i.e. vm_map_pageable,
 *	or vm_map_pageable_user); don't call vm_map_pageable directly.
 */
-kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
-	register vm_map_t	map;
-	register vm_offset_t	start;
-	register vm_offset_t	end;
-	register vm_prot_t	access_type;
-	boolean_t		user_wire;
+kern_return_t vm_map_pageable_common(
+	vm_map_t	map,
+	vm_offset_t	start,
+	vm_offset_t	end,
+	vm_prot_t	access_type,
+	boolean_t	user_wire)
 {
-	register vm_map_entry_t	entry;
+	vm_map_entry_t	entry;
 	vm_map_entry_t	start_entry;
 
 	vm_map_lock(map);
@@ -1436,7 +1375,10 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
 
 		if (user_wire) {
 			if (--(entry->user_wired_count) == 0)
+			{
+				map->user_wired -= entry->vme_end - entry->vme_start;
 				entry->wired_count--;
+			}
 		}
 		else {
 			entry->wired_count--;
@@ -1513,7 +1455,10 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
 
 			if (user_wire) {
 				if ((entry->user_wired_count)++ == 0)
+				{
+					map->user_wired += entry->vme_end - entry->vme_start;
 					entry->wired_count++;
+				}
 			}
 			else {
 				entry->wired_count++;
@@ -1539,7 +1484,10 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
 			    (entry->vme_end > start)) {
 				if (user_wire) {
 					if (--(entry->user_wired_count) == 0)
+					{
+						map->user_wired -= entry->vme_end - entry->vme_start;
 						entry->wired_count--;
+					}
 				}
 				else {
 					entry->wired_count--;
@@ -1618,12 +1566,12 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
 *
 *	Deallocate the given entry from the target map.
 */
-void vm_map_entry_delete(map, entry)
-	register vm_map_t	map;
-	register vm_map_entry_t	entry;
+void vm_map_entry_delete(
+	vm_map_t	map,
+	vm_map_entry_t	entry)
 {
-	register vm_offset_t	s, e;
-	register vm_object_t	object;
+	vm_offset_t		s, e;
+	vm_object_t		object;
 	extern vm_object_t	kernel_object;
 
 	s = entry->vme_start;
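The new map->user_wired field counts bytes wired at user request, and every hunk touching user_wired_count updates it only on the 0<->1 transition, so nested wire requests are not double-counted. A self-contained sketch of that invariant follows; the field names mirror the diff, but this is an illustration, not kernel code.

	#include <assert.h>

	struct entry {
		unsigned long	vme_start, vme_end;
		int		wired_count;
		int		user_wired_count;
	};

	struct map {
		unsigned long	user_wired;	/* bytes wired by user requests */
	};

	static void user_wire(struct map *m, struct entry *e)
	{
		/* Only the first user wire contributes to the totals. */
		if (e->user_wired_count++ == 0) {
			m->user_wired += e->vme_end - e->vme_start;
			e->wired_count++;
		}
	}

	static void user_unwire(struct map *m, struct entry *e)
	{
		assert(e->user_wired_count > 0);
		/* Only the last user unwire takes the range back out. */
		if (--e->user_wired_count == 0) {
			m->user_wired -= e->vme_end - e->vme_start;
			e->wired_count--;
		}
	}

	int main(void)
	{
		struct map m = { 0 };
		struct entry e = { 0x1000, 0x3000, 0, 0 };

		user_wire(&m, &e);
		user_wire(&m, &e);		/* nested wire: no double counting */
		assert(m.user_wired == 0x2000);
		user_unwire(&m, &e);
		user_unwire(&m, &e);
		assert(m.user_wired == 0);
		return 0;
	}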
@@ -1654,6 +1602,8 @@ void vm_map_entry_delete(map, entry)
 	if (entry->wired_count != 0) {
 		vm_fault_unwire(map, entry);
 		entry->wired_count = 0;
+		if (entry->user_wired_count)
+			map->user_wired -= entry->vme_end - entry->vme_start;
 		entry->user_wired_count = 0;
 	}
 
@@ -1702,10 +1652,10 @@ void vm_map_entry_delete(map, entry)
 *	map.
 */
-kern_return_t vm_map_delete(map, start, end)
-	register vm_map_t	map;
-	register vm_offset_t	start;
-	register vm_offset_t	end;
+kern_return_t vm_map_delete(
+	vm_map_t	map,
+	vm_offset_t	start,
+	vm_offset_t	end)
 {
 	vm_map_entry_t		entry;
 	vm_map_entry_t		first_entry;
@@ -1785,12 +1735,12 @@ kern_return_t vm_map_delete(map, start, end)
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
-kern_return_t vm_map_remove(map, start, end)
-	register vm_map_t	map;
-	register vm_offset_t	start;
-	register vm_offset_t	end;
+kern_return_t vm_map_remove(
+	vm_map_t	map,
+	vm_offset_t	start,
+	vm_offset_t	end)
 {
-	register kern_return_t	result;
+	kern_return_t	result;
 
 	vm_map_lock(map);
 	VM_MAP_RANGE_CHECK(map, start, end);
@@ -1808,12 +1758,11 @@ kern_return_t vm_map_remove(map, start, end)
 *	that have not already been stolen.
 */
 void
-vm_map_copy_steal_pages(copy)
-vm_map_copy_t	copy;
+vm_map_copy_steal_pages(vm_map_copy_t copy)
 {
-	register vm_page_t	m, new_m;
-	register int		i;
-	vm_object_t		object;
+	vm_page_t	m, new_m;
+	int		i;
+	vm_object_t	object;
 
 	for (i = 0; i < copy->cpy_npages; i++) {
 
@@ -1855,8 +1804,7 @@ vm_map_copy_t	copy;
 *	stolen, they are freed.  If the pages are not stolen, they
 *	are unbusied, and associated state is cleaned up.
 */
-void vm_map_copy_page_discard(copy)
-vm_map_copy_t	copy;
+void vm_map_copy_page_discard(vm_map_copy_t copy)
 {
 	while (copy->cpy_npages > 0) {
 		vm_page_t	m;
@@ -1901,8 +1849,7 @@ vm_map_copy_t	copy;
 *	vm_map_copyin).
 */
 void
-vm_map_copy_discard(copy)
-	vm_map_copy_t	copy;
+vm_map_copy_discard(vm_map_copy_t copy)
 {
 free_next_copy:
 	if (copy == VM_MAP_COPY_NULL)
@@ -1943,7 +1890,7 @@ free_next_copy:
 	 *	here to avoid tail recursion.
 	 */
 	if (copy->cpy_cont == vm_map_copy_discard_cont) {
-		register vm_map_copy_t	new_copy;
+		vm_map_copy_t	new_copy;
 
 		new_copy = (vm_map_copy_t) copy->cpy_cont_args;
 		kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
@@ -1978,8 +1925,7 @@ free_next_copy:
 *	deallocation will not fail.
 */
 vm_map_copy_t
-vm_map_copy_copy(copy)
-	vm_map_copy_t	copy;
+vm_map_copy_copy(vm_map_copy_t copy)
 {
 	vm_map_copy_t	new_copy;
 
@@ -2025,9 +1971,9 @@ vm_map_copy_copy(copy)
 *	A version of vm_map_copy_discard that can be called
 *	as a continuation from a vm_map_copy page list.
 */
-kern_return_t	vm_map_copy_discard_cont(cont_args, copy_result)
-vm_map_copyin_args_t	cont_args;
-vm_map_copy_t		*copy_result;	/* OUT */
+kern_return_t	vm_map_copy_discard_cont(
+vm_map_copyin_args_t	cont_args,
+vm_map_copy_t		*copy_result)	/* OUT */
 {
 	vm_map_copy_discard((vm_map_copy_t) cont_args);
 	if (copy_result != (vm_map_copy_t *)0)
@@ -2082,11 +2028,11 @@ vm_map_copy_t		*copy_result;	/* OUT */
 *	atomically and interruptibly, an error indication is
 *	returned.
 */
-kern_return_t vm_map_copy_overwrite(dst_map, dst_addr, copy, interruptible)
-	vm_map_t	dst_map;
-	vm_offset_t	dst_addr;
-	vm_map_copy_t	copy;
-	boolean_t	interruptible;
+kern_return_t vm_map_copy_overwrite(
+	vm_map_t	dst_map,
+	vm_offset_t	dst_addr,
+	vm_map_copy_t	copy,
+	boolean_t	interruptible)
 {
 	vm_size_t	size;
 	vm_offset_t	start;
@@ -2305,6 +2251,8 @@ start_pass_1:
 		entry->offset = copy_entry->offset;
 		entry->needs_copy = copy_entry->needs_copy;
 		entry->wired_count = 0;
+		if (entry->user_wired_count)
+			dst_map->user_wired -= entry->vme_end - entry->vme_start;
 		entry->user_wired_count = 0;
 
 		vm_map_copy_entry_unlink(copy, copy_entry);
@@ -2459,19 +2407,16 @@ start_pass_1:
 *	If successful, consumes the copy object.
 *	Otherwise, the caller is responsible for it.
 */
-kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
-	register
-	vm_map_t	dst_map;
-	vm_offset_t	*dst_addr;	/* OUT */
-	register
-	vm_map_copy_t	copy;
+kern_return_t vm_map_copyout(
+	vm_map_t	dst_map,
+	vm_offset_t	*dst_addr,	/* OUT */
+	vm_map_copy_t	copy)
 {
 	vm_size_t	size;
 	vm_size_t	adjustment;
 	vm_offset_t	start;
 	vm_offset_t	vm_copy_start;
 	vm_map_entry_t	last;
-	register
 	vm_map_entry_t	entry;
 
 	/*
@@ -2559,15 +2504,8 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
 		 *	Mismatches occur when dealing with the default
 		 *	pager.
 		 */
-		kmem_cache_t	old_cache;
 		vm_map_entry_t	next, new;
 
-		/*
-		 *	Find the cache that the copies were allocated from
-		 */
-		old_cache = (copy->cpy_hdr.entries_pageable)
-				? &vm_map_entry_cache
-				: &vm_map_kentry_cache;
 		entry = vm_map_copy_first_entry(copy);
 
 		/*
@@ -2576,6 +2514,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
 		 */
 		copy->cpy_hdr.nentries = 0;
 		copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
+		rbtree_init(&copy->cpy_hdr.tree);
 		vm_map_copy_first_entry(copy) =
 		 vm_map_copy_last_entry(copy) =
			vm_map_copy_to_entry(copy);
@@ -2590,7 +2529,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
 				vm_map_copy_last_entry(copy), new);
 			next = entry->vme_next;
-			kmem_cache_free(old_cache, (vm_offset_t) entry);
+			kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry);
 			entry = next;
 		}
 	}
@@ -2617,9 +2556,9 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
 		 *	map the pages into the destination map.
 		 */
 		if (entry->wired_count != 0) {
-		    register vm_offset_t	va;
-		    vm_offset_t			offset;
-		    register vm_object_t	object;
+		    vm_offset_t		va;
+		    vm_offset_t		offset;
+		    vm_object_t		object;
 
 		    object = entry->object.vm_object;
 		    offset = entry->offset;
@@ -2631,7 +2570,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
 				 TRUE);
 
 		    while (va < entry->vme_end) {
-			register vm_page_t	m;
+			vm_page_t	m;
 
 			/*
 			 *	Look up the page in the object.
@@ -2716,19 +2655,16 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
 *	Version of vm_map_copyout() for page list vm map copies.
 *
 */
-kern_return_t vm_map_copyout_page_list(dst_map, dst_addr, copy)
-	register
-	vm_map_t	dst_map;
-	vm_offset_t	*dst_addr;	/* OUT */
-	register
-	vm_map_copy_t	copy;
+kern_return_t vm_map_copyout_page_list(
+	vm_map_t	dst_map,
+	vm_offset_t	*dst_addr,	/* OUT */
+	vm_map_copy_t	copy)
 {
 	vm_size_t	size;
 	vm_offset_t	start;
 	vm_offset_t	end;
 	vm_offset_t	offset;
 	vm_map_entry_t	last;
-	register
 	vm_object_t	object;
 	vm_page_t	*page_list, m;
 	vm_map_entry_t	entry;
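With vm_map_kentry_cache gone, entries always come from vm_map_entry_cache, so the old_cache selection above disappears. The loop still re-creates each entry, and the added rbtree_init() resets the copy's lookup tree before the new entries are linked back in, since the old nodes (and their tree linkage) are freed. A reduced, illustrative model of that re-create-and-relink pattern, using a plain singly linked list in place of the kernel's structures:

	#include <assert.h>
	#include <stdlib.h>
	#include <string.h>

	struct node {
		struct node	*next;
		int		payload;
	};

	/* Replace every old node with a freshly allocated copy: reset the
	 * head first (as rbtree_init() does for the copy's tree above),
	 * then copy contents, relink, and free the originals. */
	static struct node *recreate_all(struct node *old)
	{
		struct node *head = NULL, **tail = &head;

		while (old != NULL) {
			struct node *new = malloc(sizeof(*new));
			struct node *next = old->next;

			assert(new != NULL);
			memcpy(new, old, sizeof(*new));
			new->next = NULL;
			*tail = new;
			tail = &new->next;
			free(old);
			old = next;
		}
		return head;
	}

	int main(void)
	{
		struct node *a = malloc(sizeof(*a));
		struct node *b = malloc(sizeof(*b));
		struct node *l;

		a->payload = 1; a->next = b;
		b->payload = 2; b->next = NULL;
		l = recreate_all(a);	/* a and b are freed inside */
		return l->payload + l->next->payload - 3;	/* 0 on success */
	}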
@@ -2906,6 +2842,7 @@ create_object:
 
 	if (must_wire) {
 		entry->wired_count = 1;
+		dst_map->user_wired += entry->vme_end - entry->vme_start;
 		entry->user_wired_count = 1;
 	} else {
 		entry->wired_count = 0;
@@ -3106,12 +3043,12 @@ error:
 *	In/out conditions:
 *		The source map should not be locked on entry.
 */
-kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
-	vm_map_t	src_map;
-	vm_offset_t	src_addr;
-	vm_size_t	len;
-	boolean_t	src_destroy;
-	vm_map_copy_t	*copy_result;	/* OUT */
+kern_return_t vm_map_copyin(
+	vm_map_t	src_map,
+	vm_offset_t	src_addr,
+	vm_size_t	len,
+	boolean_t	src_destroy,
+	vm_map_copy_t	*copy_result)	/* OUT */
 {
 	vm_map_entry_t	tmp_entry;	/* Result of last map lookup --
 					 * in multi-level lookup, this
@@ -3125,7 +3062,6 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
 	vm_offset_t	src_end;	/* End of entire region to be
 					 * copied */
-	register
 	vm_map_copy_t	copy;		/* Resulting copy */
 
 	/*
@@ -3192,14 +3128,12 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
 	 */
 
 	while (TRUE) {
-		register
 		vm_map_entry_t	src_entry = tmp_entry;	/* Top-level entry */
 		vm_size_t	src_size;		/* Size of source
 							 * map entry (in both
 							 * maps)
 							 */
-		register
 		vm_object_t	src_object;		/* Object to copy */
 		vm_offset_t	src_offset;
 
@@ -3208,7 +3142,6 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
 							 * for copy-on-write?
 							 */
-		register
 		vm_map_entry_t	new_entry;		/* Map entry for copy */
 		boolean_t	new_entry_needs_copy;	/* Will new entry be COW? */
 
@@ -3472,11 +3405,11 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
 *	Our caller donates an object reference.
 */
-kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
-	vm_object_t	object;
-	vm_offset_t	offset;		/* offset of region in object */
-	vm_size_t	size;		/* size of region in object */
-	vm_map_copy_t	*copy_result;	/* OUT */
+kern_return_t vm_map_copyin_object(
+	vm_object_t	object,
+	vm_offset_t	offset,		/* offset of region in object */
+	vm_size_t	size,		/* size of region in object */
+	vm_map_copy_t	*copy_result)	/* OUT */
 {
 	vm_map_copy_t	copy;		/* Resulting copy */
 
@@ -3517,12 +3450,12 @@ kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
 *	the scheduler.
 */
-kern_return_t	vm_map_copyin_page_list_cont(cont_args, copy_result)
-vm_map_copyin_args_t	cont_args;
-vm_map_copy_t		*copy_result;	/* OUT */
+kern_return_t	vm_map_copyin_page_list_cont(
+	vm_map_copyin_args_t	cont_args,
+	vm_map_copy_t		*copy_result)	/* OUT */
 {
 	kern_return_t	result = 0; /* '=0' to quiet gcc warnings */
-	register boolean_t	do_abort, src_destroy, src_destroy_only;
+	boolean_t	do_abort, src_destroy, src_destroy_only;
 
 	/*
 	 *	Check for cases that only require memory destruction.
@@ -3573,27 +3506,23 @@ vm_map_copy_t		*copy_result;	/* OUT */
 *	the recipient of this copy_result must be prepared to deal with it.
 */
-kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy,
-				      steal_pages, copy_result, is_cont)
-	vm_map_t	src_map;
-	vm_offset_t	src_addr;
-	vm_size_t	len;
-	boolean_t	src_destroy;
-	boolean_t	steal_pages;
-	vm_map_copy_t	*copy_result;	/* OUT */
-	boolean_t	is_cont;
+kern_return_t vm_map_copyin_page_list(
+	vm_map_t	src_map,
+	vm_offset_t	src_addr,
+	vm_size_t	len,
+	boolean_t	src_destroy,
+	boolean_t	steal_pages,
+	vm_map_copy_t	*copy_result,	/* OUT */
+	boolean_t	is_cont)
 {
 	vm_map_entry_t	src_entry;
 	vm_page_t	m;
 	vm_offset_t	src_start;
 	vm_offset_t	src_end;
 	vm_size_t	src_size;
-	register
 	vm_object_t	src_object;
-	register
 	vm_offset_t	src_offset;
 	vm_offset_t	src_last_offset;
-	register
 	vm_map_copy_t	copy;		/* Resulting copy */
 	kern_return_t	result = KERN_SUCCESS;
 	boolean_t	need_map_lookup;
@@ -3927,7 +3856,7 @@ retry:
 	 */
 	src_start = trunc_page(src_addr);
 	if (steal_pages) {
-		register int	i;
+		int		i;
 		vm_offset_t	unwire_end;
 
 		unwire_end = src_start;
@@ -3999,6 +3928,8 @@ retry:
 
 				assert(src_entry->wired_count > 0);
 				src_entry->wired_count = 0;
+				if (src_entry->user_wired_count)
+					src_map->user_wired -= src_entry->vme_end - src_entry->vme_start;
 				src_entry->user_wired_count = 0;
 				unwire_end = src_entry->vme_end;
 				pmap_pageable(vm_map_pmap(src_map),
@@ -4104,18 +4035,14 @@ error:
 *
 *	The source map must not be locked.
 */
-vm_map_t vm_map_fork(old_map)
-	vm_map_t	old_map;
+vm_map_t vm_map_fork(vm_map_t old_map)
 {
 	vm_map_t	new_map;
-	register
 	vm_map_entry_t	old_entry;
-	register
 	vm_map_entry_t	new_entry;
 	pmap_t		new_pmap = pmap_create((vm_size_t) 0);
 	vm_size_t	new_size = 0;
 	vm_size_t	entry_size;
-	register
 	vm_object_t	object;
 
 	vm_map_lock(old_map);
@@ -4378,21 +4305,20 @@ vm_map_t vm_map_fork(old_map)
 *	copying operations, although the data referenced will
 *	remain the same.
 */
-kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
-				object, offset, out_prot, wired)
-	vm_map_t		*var_map;	/* IN/OUT */
-	register vm_offset_t	vaddr;
-	register vm_prot_t	fault_type;
-
-	vm_map_version_t	*out_version;	/* OUT */
-	vm_object_t		*object;	/* OUT */
-	vm_offset_t		*offset;	/* OUT */
-	vm_prot_t		*out_prot;	/* OUT */
-	boolean_t		*wired;		/* OUT */
+kern_return_t vm_map_lookup(
+	vm_map_t		*var_map,	/* IN/OUT */
+	vm_offset_t		vaddr,
+	vm_prot_t		fault_type,
+
+	vm_map_version_t	*out_version,	/* OUT */
+	vm_object_t		*object,	/* OUT */
+	vm_offset_t		*offset,	/* OUT */
+	vm_prot_t		*out_prot,	/* OUT */
+	boolean_t		*wired)		/* OUT */
 {
-	register vm_map_entry_t		entry;
-	register vm_map_t		map = *var_map;
-	register vm_prot_t		prot;
+	vm_map_entry_t		entry;
+	vm_map_t		map = *var_map;
+	vm_prot_t		prot;
 
 	RetryLookup: ;
 
@@ -4560,11 +4486,9 @@ kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
 *	since the given version.  If successful, the map
 *	will not change until vm_map_verify_done() is called.
 */
-boolean_t	vm_map_verify(map, version)
-	register
-	vm_map_t	map;
-	register
-	vm_map_version_t *version;	/* REF */
+boolean_t	vm_map_verify(
+	vm_map_t	map,
+	vm_map_version_t *version)	/* REF */
 {
 	boolean_t	result;
 
@@ -4593,24 +4517,19 @@ boolean_t	vm_map_verify(map, version)
 *	a task's address map.
 */
-kern_return_t	vm_region(map, address, size,
-				protection, max_protection,
-				inheritance, is_shared,
-				object_name, offset_in_object)
-	vm_map_t	map;
-	vm_offset_t	*address;		/* IN/OUT */
-	vm_size_t	*size;			/* OUT */
-	vm_prot_t	*protection;		/* OUT */
-	vm_prot_t	*max_protection;	/* OUT */
-	vm_inherit_t	*inheritance;		/* OUT */
-	boolean_t	*is_shared;		/* OUT */
-	ipc_port_t	*object_name;		/* OUT */
-	vm_offset_t	*offset_in_object;	/* OUT */
+kern_return_t	vm_region(
+	vm_map_t	map,
+	vm_offset_t	*address,		/* IN/OUT */
+	vm_size_t	*size,			/* OUT */
+	vm_prot_t	*protection,		/* OUT */
+	vm_prot_t	*max_protection,	/* OUT */
+	vm_inherit_t	*inheritance,		/* OUT */
+	boolean_t	*is_shared,		/* OUT */
+	ipc_port_t	*object_name,		/* OUT */
+	vm_offset_t	*offset_in_object)	/* OUT */
 {
 	vm_map_entry_t	tmp_entry;
-	register
 	vm_map_entry_t	entry;
-	register
 	vm_offset_t	tmp_offset;
 	vm_offset_t	start;
 
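The vm_map_lookup()/vm_map_verify() pair converted above records a map version and revalidates it after the map lock has been dropped, which is plain optimistic concurrency. A minimal sketch of the idea using a bare counter; all names here are illustrative, not the kernel's types.

	#include <assert.h>
	#include <stdbool.h>

	struct map {
		unsigned	timestamp;	/* bumped on every map modification */
	};

	struct version {
		unsigned	main_timestamp;
	};

	static void record_version(const struct map *m, struct version *v)
	{
		v->main_timestamp = m->timestamp;
	}

	/* Succeeds only if the map is unchanged since the version was
	 * taken; on failure the caller redoes the lookup. */
	static bool verify(const struct map *m, const struct version *v)
	{
		return m->timestamp == v->main_timestamp;
	}

	int main(void)
	{
		struct map m = { 0 };
		struct version v;

		record_version(&m, &v);
		assert(verify(&m, &v));		/* nothing changed: still valid */
		m.timestamp++;			/* concurrent map change */
		assert(!verify(&m, &v));	/* caller must retry the lookup */
		return 0;
	}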
@@ -4667,9 +4586,9 @@ kern_return_t	vm_region(map, address, size,
 *	at allocation time because the adjacent entry
 *	is often wired down.
 */
-void vm_map_simplify(map, start)
-	vm_map_t	map;
-	vm_offset_t	start;
+void vm_map_simplify(
+	vm_map_t	map,
+	vm_offset_t	start)
 {
 	vm_map_entry_t	this_entry;
 	vm_map_entry_t	prev_entry;
@@ -4728,12 +4647,12 @@ void vm_map_simplify(map, start)
 *	it itself.  [This assumes that attributes do not
 *	need to be inherited, which seems ok to me]
 */
-kern_return_t vm_map_machine_attribute(map, address, size, attribute, value)
-	vm_map_t	map;
-	vm_offset_t	address;
-	vm_size_t	size;
-	vm_machine_attribute_t	attribute;
-	vm_machine_attribute_val_t* value;		/* IN/OUT */
+kern_return_t vm_map_machine_attribute(
+	vm_map_t	map,
+	vm_offset_t	address,
+	vm_size_t	size,
+	vm_machine_attribute_t	attribute,
+	vm_machine_attribute_val_t* value)		/* IN/OUT */
 {
 	kern_return_t	ret;
 
@@ -4758,25 +4677,30 @@ kern_return_t vm_map_machine_attribute(map, address, size, attribute, value)
 /*
 *	vm_map_print:	[ debug ]
 */
-void vm_map_print(map)
-	register vm_map_t	map;
+void vm_map_print(db_expr_t addr, boolean_t have_addr, db_expr_t count, const char *modif)
 {
-	register vm_map_entry_t	entry;
+	vm_map_t	map;
+	vm_map_entry_t	entry;
+
+	if (!have_addr)
+		map = current_thread()->task->map;
+	else
+		map = (vm_map_t)addr;
 
-	iprintf("Task map 0x%X: pmap=0x%X,",
+	iprintf("Map 0x%X: pmap=0x%X,",
 		(vm_offset_t) map, (vm_offset_t) (map->pmap));
 	 printf("ref=%d,nentries=%d,", map->ref_count, map->hdr.nentries);
 	 printf("version=%d\n",	map->timestamp);
-	indent += 2;
+	indent += 1;
 	for (entry = vm_map_first_entry(map);
 	     entry != vm_map_to_entry(map);
 	     entry = entry->vme_next) {
 		static char *inheritance_name[3] = { "share", "copy", "none"};
 
 		iprintf("map entry 0x%X: ", (vm_offset_t) entry);
-		printf("start=0x%X, end=0x%X, ",
+		printf("start=0x%X, end=0x%X\n",
			(vm_offset_t) entry->vme_start, (vm_offset_t) entry->vme_end);
-		printf("prot=%X/%X/%s, ",
+		iprintf("prot=%X/%X/%s, ",
			entry->protection,
			entry->max_protection,
			inheritance_name[entry->inheritance]);
@@ -4811,13 +4735,13 @@ void vm_map_print(map)
 		    if ((entry->vme_prev == vm_map_to_entry(map)) ||
			(entry->vme_prev->object.vm_object != entry->object.vm_object)) {
-			indent += 2;
+			indent += 1;
			vm_object_print(entry->object.vm_object);
-			indent -= 2;
+			indent -= 1;
		    }
		}
	}
-	indent -= 2;
+	indent -= 1;
 }
 
 /*
@@ -4827,13 +4751,13 @@ void vm_map_print(map)
 */
 void vm_map_copy_print(copy)
-	vm_map_copy_t	copy;
+	const vm_map_copy_t	copy;
 {
	int	i, npages;
 
	printf("copy object 0x%x\n", copy);
 
-	indent += 2;
+	indent += 1;
 
	iprintf("type=%d", copy->type);
	switch (copy->type) {
@@ -4887,6 +4811,6 @@ void vm_map_copy_print(copy)
		break;
	}
 
-	indent -=2;
+	indent -= 1;
 }
 #endif	/* MACH_KDB */