Diffstat (limited to 'vm')
-rw-r--r--  vm/memory_object.c         161
-rw-r--r--  vm/memory_object_proxy.c     8
-rw-r--r--  vm/memory_object_proxy.h    15
-rw-r--r--  vm/pmap.h                   24
-rw-r--r--  vm/vm_debug.c               35
-rw-r--r--  vm/vm_external.c            18
-rw-r--r--  vm/vm_fault.c              152
-rw-r--r--  vm/vm_fault.h                6
-rw-r--r--  vm/vm_init.c                 4
-rw-r--r--  vm/vm_init.h                25
-rw-r--r--  vm/vm_kern.c               319
-rw-r--r--  vm/vm_kern.h                16
-rw-r--r--  vm/vm_map.c                502
-rw-r--r--  vm/vm_map.h                 37
-rw-r--r--  vm/vm_object.c             140
-rw-r--r--  vm/vm_object.h              14
-rw-r--r--  vm/vm_page.h                24
-rw-r--r--  vm/vm_pageout.c             65
-rw-r--r--  vm/vm_pageout.h              6
-rw-r--r--  vm/vm_print.h               40
-rw-r--r--  vm/vm_resident.c           101
-rw-r--r--  vm/vm_resident.h             6
-rw-r--r--  vm/vm_user.c               150
23 files changed, 884 insertions, 984 deletions
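
The bulk of this commit is a mechanical conversion from K&R-style function definitions to ANSI C prototypes, together with the removal of "register" storage-class qualifiers (modern compilers make their own register-allocation decisions, so the keyword is dead weight). A minimal sketch of the conversion, using a hypothetical function rather than kernel code:

    /* K&R (old) style: parameter types are declared between the
     * parameter list and the body; call sites are not type-checked. */
    int scale_old(x, factor)
    	int x;
    	int factor;
    {
    	return x * factor;
    }

    /* ANSI style, as adopted throughout this diff: the definition
     * itself is a prototype, so argument count and types are
     * checked at every call site. */
    int scale_new(int x, int factor)
    {
    	return x * factor;
    }
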
diff --git a/vm/memory_object.c b/vm/memory_object.c
index e281c6a3..097ed23d 100644
--- a/vm/memory_object.c
+++ b/vm/memory_object.c
@@ -82,24 +82,19 @@ decl_simple_lock_data(,memory_manager_default_lock)
* argument conversion. Explicit deallocation is necessary.
*/
-kern_return_t memory_object_data_supply(object, offset, data_copy, data_cnt,
- lock_value, precious, reply_to, reply_to_type)
- register
- vm_object_t object;
- register
- vm_offset_t offset;
- vm_map_copy_t data_copy;
- unsigned int data_cnt;
- vm_prot_t lock_value;
- boolean_t precious;
- ipc_port_t reply_to;
- mach_msg_type_name_t reply_to_type;
+kern_return_t memory_object_data_supply(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_map_copy_t data_copy,
+ unsigned int data_cnt,
+ vm_prot_t lock_value,
+ boolean_t precious,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
{
kern_return_t result = KERN_SUCCESS;
vm_offset_t error_offset = 0;
- register
vm_page_t m;
- register
vm_page_t data_m;
vm_size_t original_length;
vm_offset_t original_offset;
@@ -307,29 +302,26 @@ retry_lookup:
return(result);
}
-
/*
* If successful, destroys the map copy object.
*/
-kern_return_t memory_object_data_provided(object, offset, data, data_cnt,
- lock_value)
- vm_object_t object;
- vm_offset_t offset;
- pointer_t data;
- unsigned int data_cnt;
- vm_prot_t lock_value;
+kern_return_t memory_object_data_provided(
+ vm_object_t object,
+ vm_offset_t offset,
+ pointer_t data,
+ unsigned int data_cnt,
+ vm_prot_t lock_value)
{
return memory_object_data_supply(object, offset, (vm_map_copy_t) data,
data_cnt, lock_value, FALSE, IP_NULL,
0);
}
-
-kern_return_t memory_object_data_error(object, offset, size, error_value)
- vm_object_t object;
- vm_offset_t offset;
- vm_size_t size;
- kern_return_t error_value;
+kern_return_t memory_object_data_error(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ kern_return_t error_value)
{
if (object == VM_OBJECT_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -337,16 +329,11 @@ kern_return_t memory_object_data_error(object, offset, size, error_value)
if (size != round_page(size))
return(KERN_INVALID_ARGUMENT);
-#ifdef lint
- /* Error value is ignored at this time */
- error_value++;
-#endif
-
vm_object_lock(object);
offset -= object->paging_offset;
while (size != 0) {
- register vm_page_t m;
+ vm_page_t m;
m = vm_page_lookup(object, offset);
if ((m != VM_PAGE_NULL) && m->busy && m->absent) {
@@ -370,10 +357,10 @@ kern_return_t memory_object_data_error(object, offset, size, error_value)
return(KERN_SUCCESS);
}
-kern_return_t memory_object_data_unavailable(object, offset, size)
- vm_object_t object;
- vm_offset_t offset;
- vm_size_t size;
+kern_return_t memory_object_data_unavailable(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size)
{
#if MACH_PAGEMAP
vm_external_t existence_info = VM_EXTERNAL_NULL;
@@ -406,7 +393,7 @@ kern_return_t memory_object_data_unavailable(object, offset, size)
offset -= object->paging_offset;
while (size != 0) {
- register vm_page_t m;
+ vm_page_t m;
/*
* We're looking for pages that are both busy and
@@ -453,12 +440,11 @@ kern_return_t memory_object_data_unavailable(object, offset, size)
#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
-memory_object_lock_result_t memory_object_lock_page(m, should_return,
- should_flush, prot)
- vm_page_t m;
- memory_object_return_t should_return;
- boolean_t should_flush;
- vm_prot_t prot;
+memory_object_lock_result_t memory_object_lock_page(
+ vm_page_t m,
+ memory_object_return_t should_return,
+ boolean_t should_flush,
+ vm_prot_t prot)
{
/*
* Don't worry about pages for which the kernel
@@ -656,19 +642,17 @@ memory_object_lock_result_t memory_object_lock_page(m, should_return,
*/
kern_return_t
-memory_object_lock_request(object, offset, size,
- should_return, should_flush, prot,
- reply_to, reply_to_type)
- register vm_object_t object;
- register vm_offset_t offset;
- register vm_size_t size;
- memory_object_return_t should_return;
- boolean_t should_flush;
- vm_prot_t prot;
- ipc_port_t reply_to;
- mach_msg_type_name_t reply_to_type;
+memory_object_lock_request(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ memory_object_return_t should_return,
+ boolean_t should_flush,
+ vm_prot_t prot,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
{
- register vm_page_t m;
+ vm_page_t m;
vm_offset_t original_offset = offset;
vm_size_t original_size = size;
vm_offset_t paging_offset = 0;
@@ -720,8 +704,8 @@ memory_object_lock_request(object, offset, size,
#define PAGEOUT_PAGES \
MACRO_BEGIN \
vm_map_copy_t copy; \
- register int i; \
- register vm_page_t hp; \
+ int i; \
+ vm_page_t hp; \
\
vm_object_unlock(object); \
\
@@ -892,13 +876,12 @@ MACRO_END
}
kern_return_t
-memory_object_set_attributes_common(object, object_ready, may_cache,
- copy_strategy, use_old_pageout)
- vm_object_t object;
- boolean_t object_ready;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
- boolean_t use_old_pageout;
+memory_object_set_attributes_common(
+ vm_object_t object,
+ boolean_t object_ready,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy,
+ boolean_t use_old_pageout)
{
if (object == VM_OBJECT_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -959,13 +942,12 @@ memory_object_set_attributes_common(object, object_ready, may_cache,
* XXX stub that made change_attributes an RPC. Need investigation.
*/
-kern_return_t memory_object_change_attributes(object, may_cache,
- copy_strategy, reply_to, reply_to_type)
- vm_object_t object;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
- ipc_port_t reply_to;
- mach_msg_type_name_t reply_to_type;
+kern_return_t memory_object_change_attributes(
+ vm_object_t object,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
{
kern_return_t result;
@@ -995,33 +977,32 @@ kern_return_t memory_object_change_attributes(object, may_cache,
}
kern_return_t
-memory_object_set_attributes(object, object_ready, may_cache, copy_strategy)
- vm_object_t object;
- boolean_t object_ready;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
+memory_object_set_attributes(
+ vm_object_t object,
+ boolean_t object_ready,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy)
{
return memory_object_set_attributes_common(object, object_ready,
may_cache, copy_strategy,
TRUE);
}
-kern_return_t memory_object_ready(object, may_cache, copy_strategy)
- vm_object_t object;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
+kern_return_t memory_object_ready(
+ vm_object_t object,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy)
{
return memory_object_set_attributes_common(object, TRUE,
may_cache, copy_strategy,
FALSE);
}
-kern_return_t memory_object_get_attributes(object, object_ready,
- may_cache, copy_strategy)
- vm_object_t object;
- boolean_t *object_ready;
- boolean_t *may_cache;
- memory_object_copy_strategy_t *copy_strategy;
+kern_return_t memory_object_get_attributes(
+ vm_object_t object,
+ boolean_t *object_ready,
+ boolean_t *may_cache,
+ memory_object_copy_strategy_t *copy_strategy)
{
if (object == VM_OBJECT_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -1041,7 +1022,7 @@ kern_return_t memory_object_get_attributes(object, object_ready,
* If successful, consumes the supplied naked send right.
*/
kern_return_t vm_set_default_memory_manager(host, default_manager)
- host_t host;
+ const host_t host;
ipc_port_t *default_manager;
{
ipc_port_t current_manager;
@@ -1123,7 +1104,7 @@ ipc_port_t memory_manager_default_reference(void)
*/
boolean_t memory_manager_default_port(port)
- ipc_port_t port;
+ const ipc_port_t port;
{
ipc_port_t current;
boolean_t result;
diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c
index 4fed312e..a64bfcce 100644
--- a/vm/memory_object_proxy.c
+++ b/vm/memory_object_proxy.c
@@ -115,11 +115,11 @@ memory_object_proxy_notify (mach_msg_header_t *msg)
given OBJECT at OFFSET in the new object with the maximum
protection MAX_PROTECTION and return it in *PORT. */
kern_return_t
-memory_object_create_proxy (ipc_space_t space, vm_prot_t max_protection,
+memory_object_create_proxy (const ipc_space_t space, vm_prot_t max_protection,
ipc_port_t *object, natural_t object_count,
- vm_offset_t *offset, natural_t offset_count,
- vm_offset_t *start, natural_t start_count,
- vm_offset_t *len, natural_t len_count,
+ const vm_offset_t *offset, natural_t offset_count,
+ const vm_offset_t *start, natural_t start_count,
+ const vm_offset_t *len, natural_t len_count,
ipc_port_t *port)
{
memory_object_proxy_t proxy;
diff --git a/vm/memory_object_proxy.h b/vm/memory_object_proxy.h
index f4be0d0d..dc0ea747 100644
--- a/vm/memory_object_proxy.h
+++ b/vm/memory_object_proxy.h
@@ -19,7 +19,7 @@
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
#ifndef _VM_MEMORY_OBJECT_PROXY_H_
-#define _VM_MEMORY_OBJECT_PROXT_H_
+#define _VM_MEMORY_OBJECT_PROXY_H_
#include <ipc/ipc_types.h>
#include <mach/boolean.h>
@@ -30,19 +30,8 @@
extern void memory_object_proxy_init (void);
extern boolean_t memory_object_proxy_notify (mach_msg_header_t *msg);
-extern kern_return_t memory_object_create_proxy (ipc_space_t space,
- vm_prot_t max_protection,
- ipc_port_t *object,
- natural_t object_count,
- vm_offset_t *offset,
- natural_t offset_count,
- vm_offset_t *start,
- natural_t start_count,
- vm_offset_t *len,
- natural_t len_count,
- ipc_port_t *port);
extern kern_return_t memory_object_proxy_lookup (ipc_port_t port,
ipc_port_t *object,
vm_prot_t *max_protection);
-#endif /* _VM_MEMORY_OBJECT_PROXT_H_ */
+#endif /* _VM_MEMORY_OBJECT_PROXY_H_ */
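
The guard fix above is more than cosmetic: the old header tested _VM_MEMORY_OBJECT_PROXY_H_ but defined the misspelled _VM_MEMORY_OBJECT_PROXT_H_, so the tested macro was never defined and the guard never engaged; including the header twice would have redeclared its contents. A correct guard pairs the same macro in both directives, as in this minimal sketch:

    #ifndef _VM_MEMORY_OBJECT_PROXY_H_
    #define _VM_MEMORY_OBJECT_PROXY_H_

    /* declarations go here */

    #endif /* _VM_MEMORY_OBJECT_PROXY_H_ */
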
diff --git a/vm/pmap.h b/vm/pmap.h
index 59fd03ab..134f9c64 100644
--- a/vm/pmap.h
+++ b/vm/pmap.h
@@ -163,38 +163,16 @@ void pmap_clear_modify(vm_offset_t pa);
/* Return modify bit */
boolean_t pmap_is_modified(vm_offset_t pa);
-
-/*
- * Statistics routines
- */
-
-#ifndef pmap_resident_count
-extern int pmap_resident_count();
-#endif /* pmap_resident_count */
-
/*
* Sundry required routines
*/
/* Return a virtual-to-physical mapping, if possible. */
extern vm_offset_t pmap_extract(pmap_t, vm_offset_t);
-/* Is virtual address valid? */
-extern boolean_t pmap_access();
/* Perform garbage collection, if any. */
extern void pmap_collect(pmap_t);
/* Specify pageability. */
extern void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
-#ifndef pmap_phys_address
-/* Transform address returned by device driver mapping function to physical
- * address known to this module. */
-extern vm_offset_t pmap_phys_address();
-#endif /* pmap_phys_address */
-#ifndef pmap_phys_to_frame
-/* Inverse of pmap_phys_address, for use by device driver mapping function in
- * machine-independent pseudo-devices. */
-extern int pmap_phys_to_frame();
-#endif /* pmap_phys_to_frame */
-
/*
* Optional routines
*/
@@ -205,7 +183,7 @@ extern void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t,
#endif /* pmap_copy */
#ifndef pmap_attribute
/* Get/Set special memory attributes. */
-extern kern_return_t pmap_attribute();
+extern kern_return_t pmap_attribute(void);
#endif /* pmap_attribute */
/*
diff --git a/vm/vm_debug.c b/vm/vm_debug.c
index 0af58b69..227090e6 100644
--- a/vm/vm_debug.c
+++ b/vm/vm_debug.c
@@ -65,8 +65,7 @@
*/
ipc_port_t
-vm_object_real_name(object)
- vm_object_t object;
+vm_object_real_name(vm_object_t object)
{
ipc_port_t port = IP_NULL;
@@ -94,11 +93,11 @@ vm_object_real_name(object)
*/
kern_return_t
-mach_vm_region_info(map, address, regionp, portp)
- vm_map_t map;
- vm_offset_t address;
- vm_region_info_t *regionp;
- ipc_port_t *portp;
+mach_vm_region_info(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_region_info_t *regionp,
+ ipc_port_t *portp)
{
vm_map_t cmap; /* current map in traversal */
vm_map_t nmap; /* next map to look at */
@@ -184,11 +183,11 @@ mach_vm_region_info(map, address, regionp, portp)
*/
kern_return_t
-mach_vm_object_info(object, infop, shadowp, copyp)
- vm_object_t object;
- vm_object_info_t *infop;
- ipc_port_t *shadowp;
- ipc_port_t *copyp;
+mach_vm_object_info(
+ vm_object_t object,
+ vm_object_info_t *infop,
+ ipc_port_t *shadowp,
+ ipc_port_t *copyp)
{
vm_object_info_t info;
vm_object_info_state_t state;
@@ -278,10 +277,10 @@ mach_vm_object_info(object, infop, shadowp, copyp)
*/
kern_return_t
-mach_vm_object_pages(object, pagesp, countp)
- vm_object_t object;
- vm_page_info_array_t *pagesp;
- natural_t *countp;
+mach_vm_object_pages(
+ vm_object_t object,
+ vm_page_info_array_t *pagesp,
+ natural_t *countp)
{
vm_size_t size;
vm_offset_t addr;
@@ -404,7 +403,7 @@ mach_vm_object_pages(object, pagesp, countp)
addr + rsize_used, size - rsize_used);
if (size_used != rsize_used)
- memset((char *) (addr + size_used), 0,
+ memset((void *) (addr + size_used), 0,
rsize_used - size_used);
kr = vm_map_copyin(ipc_kernel_map, addr, rsize_used,
@@ -434,7 +433,7 @@ mach_vm_object_pages(object, pagesp, countp)
kern_return_t
host_virtual_physical_table_info(host, infop, countp)
- host_t host;
+ const host_t host;
hash_info_bucket_array_t *infop;
natural_t *countp;
{
diff --git a/vm/vm_external.c b/vm/vm_external.c
index e9643ffc..2e2593b1 100644
--- a/vm/vm_external.c
+++ b/vm/vm_external.c
@@ -56,8 +56,7 @@ struct kmem_cache vm_object_small_existence_map_cache;
struct kmem_cache vm_object_large_existence_map_cache;
-vm_external_t vm_external_create(size)
- vm_offset_t size;
+vm_external_t vm_external_create(vm_offset_t size)
{
vm_external_t result;
vm_size_t bytes;
@@ -78,8 +77,7 @@ vm_external_t vm_external_create(size)
return(result);
}
-void vm_external_destroy(e)
- vm_external_t e;
+void vm_external_destroy(vm_external_t e)
{
if (e == VM_EXTERNAL_NULL)
return;
@@ -97,8 +95,8 @@ void vm_external_destroy(e)
}
vm_external_state_t _vm_external_state_get(e, offset)
- vm_external_t e;
- vm_offset_t offset;
+ const vm_external_t e;
+ vm_offset_t offset;
{
unsigned
int bit, byte;
@@ -115,10 +113,10 @@ vm_external_state_t _vm_external_state_get(e, offset)
VM_EXTERNAL_STATE_EXISTS : VM_EXTERNAL_STATE_ABSENT );
}
-void vm_external_state_set(e, offset, state)
- vm_external_t e;
- vm_offset_t offset;
- vm_external_state_t state;
+void vm_external_state_set(
+ vm_external_t e,
+ vm_offset_t offset,
+ vm_external_state_t state)
{
unsigned
int bit, byte;
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 7e849616..0fa4d6af 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -51,7 +51,7 @@
#include <mach/memory_object.h>
#include <vm/memory_object_user.user.h>
/* For memory_object_data_{request,unlock} */
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <kern/slab.h>
#if MACH_PCSAMPLE
@@ -88,8 +88,6 @@ struct kmem_cache vm_fault_state_cache;
int vm_object_absent_max = 50;
-int vm_fault_debug = 0;
-
boolean_t vm_fault_dirty_handling = FALSE;
boolean_t vm_fault_interruptible = TRUE;
@@ -125,9 +123,9 @@ void vm_fault_init(void)
* "object" must be locked.
*/
void
-vm_fault_cleanup(object, top_page)
- register vm_object_t object;
- register vm_page_t top_page;
+vm_fault_cleanup(
+ vm_object_t object,
+ vm_page_t top_page)
{
vm_object_paging_end(object);
vm_object_unlock(object);
@@ -204,33 +202,26 @@ vm_fault_cleanup(object, top_page)
* The "result_page" is also left busy. It is not removed
* from the pageout queues.
*/
-vm_fault_return_t vm_fault_page(first_object, first_offset,
- fault_type, must_be_resident, interruptible,
- protection,
- result_page, top_page,
- resume, continuation)
+vm_fault_return_t vm_fault_page(
/* Arguments: */
- vm_object_t first_object; /* Object to begin search */
- vm_offset_t first_offset; /* Offset into object */
- vm_prot_t fault_type; /* What access is requested */
- boolean_t must_be_resident;/* Must page be resident? */
- boolean_t interruptible; /* May fault be interrupted? */
+ vm_object_t first_object, /* Object to begin search */
+ vm_offset_t first_offset, /* Offset into object */
+ vm_prot_t fault_type, /* What access is requested */
+ boolean_t must_be_resident,/* Must page be resident? */
+ boolean_t interruptible, /* May fault be interrupted? */
/* Modifies in place: */
- vm_prot_t *protection; /* Protection for mapping */
+ vm_prot_t *protection, /* Protection for mapping */
/* Returns: */
- vm_page_t *result_page; /* Page found, if successful */
- vm_page_t *top_page; /* Page in top object, if
+ vm_page_t *result_page, /* Page found, if successful */
+ vm_page_t *top_page, /* Page in top object, if
* not result_page.
*/
/* More arguments: */
- boolean_t resume; /* We are restarting. */
- void (*continuation)(); /* Continuation for blocking. */
+ boolean_t resume, /* We are restarting. */
+ void (*continuation)()) /* Continuation for blocking. */
{
- register
vm_page_t m;
- register
vm_object_t object;
- register
vm_offset_t offset;
vm_page_t first_m;
vm_object_t next_object;
@@ -239,7 +230,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
vm_prot_t access_required;
if (resume) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
if (state->vmfp_backoff)
@@ -357,7 +348,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
PAGE_ASSERT_WAIT(m, interruptible);
vm_object_unlock(object);
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -1094,7 +1085,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
vm_fault_cleanup(object, first_m);
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -1141,9 +1132,9 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
*/
void
-vm_fault_continue()
+vm_fault_continue(void)
{
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
(void) vm_fault(state->vmf_map,
@@ -1154,14 +1145,13 @@ vm_fault_continue()
/*NOTREACHED*/
}
-kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
- resume, continuation)
- vm_map_t map;
- vm_offset_t vaddr;
- vm_prot_t fault_type;
- boolean_t change_wiring;
- boolean_t resume;
- void (*continuation)();
+kern_return_t vm_fault(
+ vm_map_t map,
+ vm_offset_t vaddr,
+ vm_prot_t fault_type,
+ boolean_t change_wiring,
+ boolean_t resume,
+ void (*continuation)())
{
vm_map_version_t version; /* Map version for verificiation */
boolean_t wired; /* Should mapping be wired down? */
@@ -1173,11 +1163,10 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
vm_page_t top_page; /* Placeholder page */
kern_return_t kr;
- register
vm_page_t m; /* Fast access to result_page */
if (resume) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -1253,7 +1242,7 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
vm_object_paging_begin(object);
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -1307,7 +1296,7 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
goto done;
case VM_FAULT_MEMORY_SHORTAGE:
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -1490,7 +1479,7 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
done:
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
kmem_cache_free(&vm_fault_state_cache, (vm_offset_t) state);
@@ -1501,21 +1490,19 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
return(kr);
}
-kern_return_t vm_fault_wire_fast();
-
/*
* vm_fault_wire:
*
* Wire down a range of virtual addresses in a map.
*/
-void vm_fault_wire(map, entry)
- vm_map_t map;
- vm_map_entry_t entry;
+void vm_fault_wire(
+ vm_map_t map,
+ vm_map_entry_t entry)
{
- register vm_offset_t va;
- register pmap_t pmap;
- register vm_offset_t end_addr = entry->vme_end;
+ vm_offset_t va;
+ pmap_t pmap;
+ vm_offset_t end_addr = entry->vme_end;
pmap = vm_map_pmap(map);
@@ -1544,14 +1531,14 @@ void vm_fault_wire(map, entry)
*
* Unwire a range of virtual addresses in a map.
*/
-void vm_fault_unwire(map, entry)
- vm_map_t map;
- vm_map_entry_t entry;
+void vm_fault_unwire(
+ vm_map_t map,
+ vm_map_entry_t entry)
{
- register vm_offset_t va;
- register pmap_t pmap;
- register vm_offset_t end_addr = entry->vme_end;
- vm_object_t object;
+ vm_offset_t va;
+ pmap_t pmap;
+ vm_offset_t end_addr = entry->vme_end;
+ vm_object_t object;
pmap = vm_map_pmap(map);
@@ -1633,14 +1620,14 @@ void vm_fault_unwire(map, entry)
* other than the common case will return KERN_FAILURE, and the caller
* is expected to call vm_fault().
*/
-kern_return_t vm_fault_wire_fast(map, va, entry)
- vm_map_t map;
- vm_offset_t va;
- vm_map_entry_t entry;
+kern_return_t vm_fault_wire_fast(
+ vm_map_t map,
+ vm_offset_t va,
+ vm_map_entry_t entry)
{
vm_object_t object;
vm_offset_t offset;
- register vm_page_t m;
+ vm_page_t m;
vm_prot_t prot;
vm_stat.faults++; /* needs lock XXX */
@@ -1782,9 +1769,9 @@ kern_return_t vm_fault_wire_fast(map, va, entry)
* Release a page used by vm_fault_copy.
*/
-void vm_fault_copy_cleanup(page, top_page)
- vm_page_t page;
- vm_page_t top_page;
+void vm_fault_copy_cleanup(
+ vm_page_t page,
+ vm_page_t top_page)
{
vm_object_t object = page->object;
@@ -1825,23 +1812,14 @@ void vm_fault_copy_cleanup(page, top_page)
* requested.
*/
kern_return_t vm_fault_copy(
- src_object,
- src_offset,
- src_size,
- dst_object,
- dst_offset,
- dst_map,
- dst_version,
- interruptible
- )
- vm_object_t src_object;
- vm_offset_t src_offset;
- vm_size_t *src_size; /* INOUT */
- vm_object_t dst_object;
- vm_offset_t dst_offset;
- vm_map_t dst_map;
- vm_map_version_t *dst_version;
- boolean_t interruptible;
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t *src_size, /* INOUT */
+ vm_object_t dst_object,
+ vm_offset_t dst_offset,
+ vm_map_t dst_map,
+ vm_map_version_t *dst_version,
+ boolean_t interruptible)
{
vm_page_t result_page;
vm_prot_t prot;
@@ -2022,13 +2000,11 @@ kern_return_t vm_fault_copy(
* XXX Untested. Also unused. Eventually, this technology
* could be used in vm_fault_copy() to advantage.
*/
-vm_fault_return_t vm_fault_page_overwrite(dst_object, dst_offset, result_page)
- register
- vm_object_t dst_object;
- vm_offset_t dst_offset;
- vm_page_t *result_page; /* OUT */
+vm_fault_return_t vm_fault_page_overwrite(
+ vm_object_t dst_object,
+ vm_offset_t dst_offset,
+ vm_page_t *result_page) /* OUT */
{
- register
vm_page_t dst_page;
#define interruptible FALSE /* XXX */
diff --git a/vm/vm_fault.h b/vm/vm_fault.h
index 0492ccf4..7fdbc417 100644
--- a/vm/vm_fault.h
+++ b/vm/vm_fault.h
@@ -69,4 +69,10 @@ extern void vm_fault_unwire(vm_map_t, vm_map_entry_t);
extern kern_return_t vm_fault_copy(vm_object_t, vm_offset_t, vm_size_t *,
vm_object_t, vm_offset_t, vm_map_t,
vm_map_version_t *, boolean_t);
+
+kern_return_t vm_fault_wire_fast(
+ vm_map_t map,
+ vm_offset_t va,
+ vm_map_entry_t entry);
+
#endif /* _VM_VM_FAULT_H_ */
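
The prototype added to vm_fault.h above replaces an empty-parentheses forward declaration that previously lived in vm_fault.c (the removed "kern_return_t vm_fault_wire_fast();"). The distinction matters: in pre-C23 C, empty parentheses in a declaration say nothing about the arguments, so the compiler cannot check calls. A short illustration with hypothetical functions:

    int f();        /* old style: callable with any arguments, unchecked */
    int g(void);    /* prototype: takes no arguments, calls are checked  */
    int h(int x);   /* prototype: takes exactly one int                  */
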
diff --git a/vm/vm_init.c b/vm/vm_init.c
index 89eb0984..3d1081cc 100644
--- a/vm/vm_init.c
+++ b/vm/vm_init.c
@@ -51,7 +51,7 @@
* This is done only by the first cpu up.
*/
-void vm_mem_bootstrap()
+void vm_mem_bootstrap(void)
{
vm_offset_t start, end;
@@ -79,7 +79,7 @@ void vm_mem_bootstrap()
memory_manager_default_init();
}
-void vm_mem_init()
+void vm_mem_init(void)
{
vm_object_init();
memory_object_proxy_init();
diff --git a/vm/vm_init.h b/vm/vm_init.h
new file mode 100644
index 00000000..42ef48b2
--- /dev/null
+++ b/vm/vm_init.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _VM_VM_INIT_H_
+#define _VM_VM_INIT_H_
+
+extern void vm_mem_init(void);
+extern void vm_mem_bootstrap(void);
+
+#endif /* _VM_VM_INIT_H_ */
diff --git a/vm/vm_kern.c b/vm/vm_kern.c
index fd46e982..9c0a20b7 100644
--- a/vm/vm_kern.c
+++ b/vm/vm_kern.c
@@ -42,6 +42,7 @@
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/lock.h>
+#include <kern/slab.h>
#include <kern/thread.h>
#include <kern/printf.h>
#include <vm/pmap.h>
@@ -62,9 +63,6 @@ static struct vm_map kernel_map_store;
vm_map_t kernel_map = &kernel_map_store;
vm_map_t kernel_pageable_map;
-extern void kmem_alloc_pages();
-extern void kmem_remap_pages();
-
/*
* projected_buffer_allocate
*
@@ -82,15 +80,14 @@ extern void kmem_remap_pages();
*/
kern_return_t
-projected_buffer_allocate(map, size, persistence, kernel_p,
- user_p, protection, inheritance)
- vm_map_t map;
- vm_size_t size;
- int persistence;
- vm_offset_t *kernel_p;
- vm_offset_t *user_p;
- vm_prot_t protection;
- vm_inherit_t inheritance; /*Currently only VM_INHERIT_NONE supported*/
+projected_buffer_allocate(
+ vm_map_t map,
+ vm_size_t size,
+ int persistence,
+ vm_offset_t *kernel_p,
+ vm_offset_t *user_p,
+ vm_prot_t protection,
+ vm_inherit_t inheritance) /*Currently only VM_INHERIT_NONE supported*/
{
vm_object_t object;
vm_map_entry_t u_entry, k_entry;
@@ -180,13 +177,13 @@ projected_buffer_allocate(map, size, persistence, kernel_p,
*/
kern_return_t
-projected_buffer_map(map, kernel_addr, size, user_p, protection, inheritance)
- vm_map_t map;
- vm_offset_t kernel_addr;
- vm_size_t size;
- vm_offset_t *user_p;
- vm_prot_t protection;
- vm_inherit_t inheritance; /*Currently only VM_INHERIT_NONE supported*/
+projected_buffer_map(
+ vm_map_t map,
+ vm_offset_t kernel_addr,
+ vm_size_t size,
+ vm_offset_t *user_p,
+ vm_prot_t protection,
+ vm_inherit_t inheritance) /*Currently only VM_INHERIT_NONE supported*/
{
vm_map_entry_t u_entry, k_entry;
vm_offset_t physical_addr, user_addr;
@@ -253,15 +250,18 @@ projected_buffer_map(map, kernel_addr, size, user_p, protection, inheritance)
*/
kern_return_t
-projected_buffer_deallocate(map, start, end)
- vm_map_t map;
- vm_offset_t start, end;
+projected_buffer_deallocate(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_entry_t entry, k_entry;
+ if (map == VM_MAP_NULL || map == kernel_map)
+ return KERN_INVALID_ARGUMENT;
+
vm_map_lock(map);
- if (map == VM_MAP_NULL || map == kernel_map ||
- !vm_map_lookup_entry(map, start, &entry) ||
+ if (!vm_map_lookup_entry(map, start, &entry) ||
end > entry->vme_end ||
/*Check corresponding kernel entry*/
(k_entry = entry->projected_on) == 0) {
@@ -303,8 +303,7 @@ projected_buffer_deallocate(map, start, end)
*/
kern_return_t
-projected_buffer_collect(map)
- vm_map_t map;
+projected_buffer_collect(vm_map_t map)
{
vm_map_entry_t entry, next;
@@ -330,9 +329,10 @@ projected_buffer_collect(map)
*/
boolean_t
-projected_buffer_in_range(map, start, end)
- vm_map_t map;
- vm_offset_t start, end;
+projected_buffer_in_range(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_entry_t entry;
@@ -359,14 +359,15 @@ projected_buffer_in_range(map, start, end)
*/
kern_return_t
-kmem_alloc(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_object_t object;
vm_map_entry_t entry;
vm_offset_t addr;
+ unsigned int attempts;
kern_return_t kr;
/*
@@ -385,12 +386,22 @@ kmem_alloc(map, addrp, size)
size = round_page(size);
object = vm_object_allocate(size);
+ attempts = 0;
+
+retry:
vm_map_lock(map);
kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
VM_OBJECT_NULL, &entry);
if (kr != KERN_SUCCESS) {
- printf_once("no more room for kmem_alloc in %p\n", map);
vm_map_unlock(map);
+
+ if (attempts == 0) {
+ attempts++;
+ slab_collect();
+ goto retry;
+ }
+
+ printf_once("no more room for kmem_alloc in %p\n", map);
vm_object_deallocate(object);
return kr;
}
@@ -420,113 +431,25 @@ kmem_alloc(map, addrp, size)
}
/*
- * kmem_realloc:
- *
- * Reallocate wired-down memory in the kernel's address map
- * or a submap. Newly allocated pages are not zeroed.
- * This can only be used on regions allocated with kmem_alloc.
- *
- * If successful, the pages in the old region are mapped twice.
- * The old region is unchanged. Use kmem_free to get rid of it.
- */
-kern_return_t kmem_realloc(map, oldaddr, oldsize, newaddrp, newsize)
- vm_map_t map;
- vm_offset_t oldaddr;
- vm_size_t oldsize;
- vm_offset_t *newaddrp;
- vm_size_t newsize;
-{
- vm_offset_t oldmin, oldmax;
- vm_offset_t newaddr;
- vm_object_t object;
- vm_map_entry_t oldentry, newentry;
- kern_return_t kr;
-
- oldmin = trunc_page(oldaddr);
- oldmax = round_page(oldaddr + oldsize);
- oldsize = oldmax - oldmin;
- newsize = round_page(newsize);
-
- /*
- * Find space for the new region.
- */
-
- vm_map_lock(map);
- kr = vm_map_find_entry(map, &newaddr, newsize, (vm_offset_t) 0,
- VM_OBJECT_NULL, &newentry);
- if (kr != KERN_SUCCESS) {
- vm_map_unlock(map);
- printf_once("no more room for kmem_realloc in %p\n", map);
- return kr;
- }
-
- /*
- * Find the VM object backing the old region.
- */
-
- if (!vm_map_lookup_entry(map, oldmin, &oldentry))
- panic("kmem_realloc");
- object = oldentry->object.vm_object;
-
- /*
- * Increase the size of the object and
- * fill in the new region.
- */
-
- vm_object_reference(object);
- vm_object_lock(object);
- if (object->size != oldsize)
- panic("kmem_realloc");
- object->size = newsize;
- vm_object_unlock(object);
-
- newentry->object.vm_object = object;
- newentry->offset = 0;
-
- /*
- * Since we have not given out this address yet,
- * it is safe to unlock the map. We are trusting
- * that nobody will play with either region.
- */
-
- vm_map_unlock(map);
-
- /*
- * Remap the pages in the old region and
- * allocate more pages for the new region.
- */
-
- kmem_remap_pages(object, 0,
- newaddr, newaddr + oldsize,
- VM_PROT_DEFAULT);
- kmem_alloc_pages(object, oldsize,
- newaddr + oldsize, newaddr + newsize,
- VM_PROT_DEFAULT);
-
- *newaddrp = newaddr;
- return KERN_SUCCESS;
-}
-
-/*
* kmem_alloc_wired:
*
* Allocate wired-down memory in the kernel's address map
* or a submap. The memory is not zero-filled.
*
* The memory is allocated in the kernel_object.
- * It may not be copied with vm_map_copy, and
- * it may not be reallocated with kmem_realloc.
+ * It may not be copied with vm_map_copy.
*/
kern_return_t
-kmem_alloc_wired(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_wired(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_map_entry_t entry;
vm_offset_t offset;
vm_offset_t addr;
+ unsigned int attempts;
kern_return_t kr;
/*
@@ -537,12 +460,22 @@ kmem_alloc_wired(map, addrp, size)
*/
size = round_page(size);
+ attempts = 0;
+
+retry:
vm_map_lock(map);
kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
kernel_object, &entry);
if (kr != KERN_SUCCESS) {
- printf_once("no more room for kmem_alloc_wired in %p\n", map);
vm_map_unlock(map);
+
+ if (attempts == 0) {
+ attempts++;
+ slab_collect();
+ goto retry;
+ }
+
+ printf_once("no more room for kmem_alloc_wired in %p\n", map);
return kr;
}
@@ -591,14 +524,15 @@ kmem_alloc_wired(map, addrp, size)
*/
kern_return_t
-kmem_alloc_aligned(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_aligned(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_map_entry_t entry;
vm_offset_t offset;
vm_offset_t addr;
+ unsigned int attempts;
kern_return_t kr;
if ((size & (size - 1)) != 0)
@@ -612,12 +546,22 @@ kmem_alloc_aligned(map, addrp, size)
*/
size = round_page(size);
+ attempts = 0;
+
+retry:
vm_map_lock(map);
kr = vm_map_find_entry(map, &addr, size, size - 1,
kernel_object, &entry);
if (kr != KERN_SUCCESS) {
- printf_once("no more rooom for kmem_alloc_aligned in %p\n", map);
vm_map_unlock(map);
+
+ if (attempts == 0) {
+ attempts++;
+ slab_collect();
+ goto retry;
+ }
+
+ printf_once("no more room for kmem_alloc_aligned in %p\n", map);
return kr;
}
@@ -665,10 +609,10 @@ kmem_alloc_aligned(map, addrp, size)
*/
kern_return_t
-kmem_alloc_pageable(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_pageable(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_offset_t addr;
kern_return_t kr;
@@ -696,10 +640,10 @@ kmem_alloc_pageable(map, addrp, size)
*/
void
-kmem_free(map, addr, size)
- vm_map_t map;
- vm_offset_t addr;
- vm_size_t size;
+kmem_free(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size)
{
kern_return_t kr;
@@ -714,11 +658,12 @@ kmem_free(map, addr, size)
* a submap.
*/
void
-kmem_alloc_pages(object, offset, start, end, protection)
- register vm_object_t object;
- register vm_offset_t offset;
- register vm_offset_t start, end;
- vm_prot_t protection;
+kmem_alloc_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection)
{
/*
* Mark the pmap region as not pageable.
@@ -726,7 +671,7 @@ kmem_alloc_pages(object, offset, start, end, protection)
pmap_pageable(kernel_pmap, start, end, FALSE);
while (start < end) {
- register vm_page_t mem;
+ vm_page_t mem;
vm_object_lock(object);
@@ -769,11 +714,12 @@ kmem_alloc_pages(object, offset, start, end, protection)
* a submap.
*/
void
-kmem_remap_pages(object, offset, start, end, protection)
- register vm_object_t object;
- register vm_offset_t offset;
- register vm_offset_t start, end;
- vm_prot_t protection;
+kmem_remap_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection)
{
/*
* Mark the pmap region as not pageable.
@@ -781,7 +727,7 @@ kmem_remap_pages(object, offset, start, end, protection)
pmap_pageable(kernel_pmap, start, end, FALSE);
while (start < end) {
- register vm_page_t mem;
+ vm_page_t mem;
vm_object_lock(object);
@@ -827,11 +773,13 @@ kmem_remap_pages(object, offset, start, end, protection)
*/
void
-kmem_submap(map, parent, min, max, size, pageable)
- vm_map_t map, parent;
- vm_offset_t *min, *max;
- vm_size_t size;
- boolean_t pageable;
+kmem_submap(
+ vm_map_t map,
+ vm_map_t parent,
+ vm_offset_t *min,
+ vm_offset_t *max,
+ vm_size_t size,
+ boolean_t pageable)
{
vm_offset_t addr;
kern_return_t kr;
@@ -845,7 +793,7 @@ kmem_submap(map, parent, min, max, size, pageable)
*/
vm_object_reference(vm_submap_object);
- addr = (vm_offset_t) vm_map_min(parent);
+ addr = vm_map_min(parent);
kr = vm_map_enter(parent, &addr, size,
(vm_offset_t) 0, TRUE,
vm_submap_object, (vm_offset_t) 0, FALSE,
@@ -869,9 +817,9 @@ kmem_submap(map, parent, min, max, size, pageable)
* Initialize the kernel's virtual memory map, taking
* into account all memory allocated up to this time.
*/
-void kmem_init(start, end)
- vm_offset_t start;
- vm_offset_t end;
+void kmem_init(
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_setup(kernel_map, pmap_kernel(), VM_MIN_KERNEL_ADDRESS, end,
FALSE);
@@ -879,7 +827,6 @@ void kmem_init(start, end)
/*
* Reserve virtual memory allocated up to this time.
*/
-
if (start != VM_MIN_KERNEL_ADDRESS) {
kern_return_t rc;
vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;
@@ -890,7 +837,7 @@ void kmem_init(start, end)
VM_PROT_DEFAULT, VM_PROT_ALL,
VM_INHERIT_DEFAULT);
if (rc)
- panic("%s:%d: vm_map_enter failed (%d)\n", rc);
+ panic("vm_map_enter failed (%d)\n", rc);
}
}
@@ -907,21 +854,19 @@ void kmem_init(start, end)
*/
kern_return_t
-kmem_io_map_copyout(map, addr, alloc_addr, alloc_size, copy, min_size)
- vm_map_t map;
- vm_offset_t *addr; /* actual addr of data */
- vm_offset_t *alloc_addr; /* page aligned addr */
- vm_size_t *alloc_size; /* size allocated */
- vm_map_copy_t copy;
- vm_size_t min_size; /* Do at least this much */
+kmem_io_map_copyout(
+ vm_map_t map,
+ vm_offset_t *addr, /* actual addr of data */
+ vm_offset_t *alloc_addr, /* page aligned addr */
+ vm_size_t *alloc_size, /* size allocated */
+ vm_map_copy_t copy,
+ vm_size_t min_size) /* Do at least this much */
{
vm_offset_t myaddr, offset;
vm_size_t mysize, copy_size;
kern_return_t ret;
- register
vm_page_t *page_list;
vm_map_copy_t new_copy;
- register
int i;
assert(copy->type == VM_MAP_COPY_PAGE_LIST);
@@ -1013,10 +958,10 @@ kmem_io_map_copyout(map, addr, alloc_addr, alloc_size, copy, min_size)
*/
void
-kmem_io_map_deallocate(map, addr, size)
- vm_map_t map;
- vm_offset_t addr;
- vm_size_t size;
+kmem_io_map_deallocate(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size)
{
/*
* Remove the mappings. The pmap_remove is needed.
@@ -1035,10 +980,11 @@ kmem_io_map_deallocate(map, addr, size)
* and the kernel map/submaps.
*/
-int copyinmap(map, fromaddr, toaddr, length)
- vm_map_t map;
- char *fromaddr, *toaddr;
- int length;
+int copyinmap(
+ vm_map_t map,
+ char *fromaddr,
+ char *toaddr,
+ int length)
{
if (vm_map_pmap(map) == kernel_pmap) {
/* assume a correct copy */
@@ -1061,10 +1007,11 @@ int copyinmap(map, fromaddr, toaddr, length)
* and the kernel map/submaps.
*/
-int copyoutmap(map, fromaddr, toaddr, length)
- vm_map_t map;
- char *fromaddr, *toaddr;
- int length;
+int copyoutmap(
+ vm_map_t map,
+ char *fromaddr,
+ char *toaddr,
+ int length)
{
if (vm_map_pmap(map) == kernel_pmap) {
/* assume a correct copy */
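
A behavioral change worth noting in vm_kern.c above: kmem_alloc, kmem_alloc_wired, and kmem_alloc_aligned no longer fail outright when vm_map_find_entry cannot find room; they first call slab_collect() to reclaim cached slab memory and retry once. A compact sketch of that control flow, with hypothetical stand-ins for the kernel routines:

    #include <stdbool.h>

    bool try_reserve(unsigned long size);  /* stands in for vm_map_find_entry() */
    void reclaim_caches(void);             /* stands in for slab_collect()      */

    int alloc_with_retry(unsigned long size)
    {
    	unsigned int attempts = 0;

    retry:
    	if (!try_reserve(size)) {
    		if (attempts == 0) {
    			attempts++;
    			reclaim_caches();  /* reclaim once, then retry */
    			goto retry;
    		}
    		return -1;                 /* still no room: give up */
    	}
    	return 0;
    }
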
diff --git a/vm/vm_kern.h b/vm/vm_kern.h
index 22b7c123..fb8ac7f8 100644
--- a/vm/vm_kern.h
+++ b/vm/vm_kern.h
@@ -54,8 +54,6 @@ extern kern_return_t kmem_alloc_pageable(vm_map_t, vm_offset_t *,
vm_size_t);
extern kern_return_t kmem_alloc_wired(vm_map_t, vm_offset_t *, vm_size_t);
extern kern_return_t kmem_alloc_aligned(vm_map_t, vm_offset_t *, vm_size_t);
-extern kern_return_t kmem_realloc(vm_map_t, vm_offset_t, vm_size_t,
- vm_offset_t *, vm_size_t);
extern void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
extern void kmem_submap(vm_map_t, vm_map_t, vm_offset_t *,
@@ -82,4 +80,18 @@ extern boolean_t projected_buffer_in_range(
vm_offset_t start,
vm_offset_t end);
+extern void kmem_alloc_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection);
+
+extern void kmem_remap_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection);
+
#endif /* _VM_VM_KERN_H_ */
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 47db118f..ae3ce21f 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -55,22 +55,9 @@
#if MACH_KDB
#include <ddb/db_output.h>
+#include <vm/vm_print.h>
#endif /* MACH_KDB */
-
-/* Forward declarations */
-kern_return_t vm_map_delete(
- vm_map_t map,
- vm_offset_t start,
- vm_offset_t end);
-
-kern_return_t vm_map_copyout_page_list(
- vm_map_t dst_map,
- vm_offset_t *dst_addr, /* OUT */
- vm_map_copy_t copy);
-
-void vm_map_copy_page_discard (vm_map_copy_t copy);
-
/*
* Macros to copy a vm_map_entry. We must be careful to correctly
* manage the wired page count. vm_map_entry_copy() creates a new
@@ -142,8 +129,6 @@ struct kmem_cache vm_map_entry_cache; /* cache for vm_map_entry structures */
struct kmem_cache vm_map_kentry_cache; /* cache for kernel entry structures */
struct kmem_cache vm_map_copy_cache; /* cache for vm_map_copy structures */
-boolean_t vm_map_lookup_entry(); /* forward declaration */
-
/*
* Placeholder object for submap operations. This object is dropped
* into the range by a call to vm_map_find, and removed when
@@ -209,11 +194,12 @@ void vm_map_init(void)
*/
}
-void vm_map_setup(map, pmap, min, max, pageable)
- vm_map_t map;
- pmap_t pmap;
- vm_offset_t min, max;
- boolean_t pageable;
+void vm_map_setup(
+ vm_map_t map,
+ pmap_t pmap,
+ vm_offset_t min,
+ vm_offset_t max,
+ boolean_t pageable)
{
vm_map_first_entry(map) = vm_map_to_entry(map);
vm_map_last_entry(map) = vm_map_to_entry(map);
@@ -222,6 +208,7 @@ void vm_map_setup(map, pmap, min, max, pageable)
rbtree_init(&map->hdr.tree);
map->size = 0;
+ map->user_wired = 0;
map->ref_count = 1;
map->pmap = pmap;
map->min_offset = min;
@@ -242,12 +229,13 @@ void vm_map_setup(map, pmap, min, max, pageable)
* the given physical map structure, and having
* the given lower and upper address bounds.
*/
-vm_map_t vm_map_create(pmap, min, max, pageable)
- pmap_t pmap;
- vm_offset_t min, max;
- boolean_t pageable;
+vm_map_t vm_map_create(
+ pmap_t pmap,
+ vm_offset_t min,
+ vm_offset_t max,
+ boolean_t pageable)
{
- register vm_map_t result;
+ vm_map_t result;
result = (vm_map_t) kmem_cache_alloc(&vm_map_cache);
if (result == VM_MAP_NULL)
@@ -271,10 +259,10 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
_vm_map_entry_create(&(copy)->cpy_hdr)
vm_map_entry_t _vm_map_entry_create(map_header)
- register struct vm_map_header *map_header;
+ const struct vm_map_header *map_header;
{
- register kmem_cache_t cache;
- register vm_map_entry_t entry;
+ kmem_cache_t cache;
+ vm_map_entry_t entry;
if (map_header->entries_pageable)
cache = &vm_map_entry_cache;
@@ -300,10 +288,10 @@ vm_map_entry_t _vm_map_entry_create(map_header)
_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))
void _vm_map_entry_dispose(map_header, entry)
- register struct vm_map_header *map_header;
- register vm_map_entry_t entry;
+ const struct vm_map_header *map_header;
+ vm_map_entry_t entry;
{
- register kmem_cache_t cache;
+ kmem_cache_t cache;
if (map_header->entries_pageable)
cache = &vm_map_entry_cache;
@@ -385,8 +373,7 @@ static inline int vm_map_entry_cmp_insert(const struct rbtree_node *a,
* Creates another valid reference to the given map.
*
*/
-void vm_map_reference(map)
- register vm_map_t map;
+void vm_map_reference(vm_map_t map)
{
if (map == VM_MAP_NULL)
return;
@@ -403,10 +390,9 @@ void vm_map_reference(map)
* destroying it if no references remain.
* The map should not be locked.
*/
-void vm_map_deallocate(map)
- register vm_map_t map;
+void vm_map_deallocate(vm_map_t map)
{
- register int c;
+ int c;
if (map == VM_MAP_NULL)
return;
@@ -448,13 +434,13 @@ void vm_map_deallocate(map)
* result indicates whether the address is
* actually contained in the map.
*/
-boolean_t vm_map_lookup_entry(map, address, entry)
- register vm_map_t map;
- register vm_offset_t address;
- vm_map_entry_t *entry; /* OUT */
+boolean_t vm_map_lookup_entry(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_map_entry_t *entry) /* OUT */
{
- register struct rbtree_node *node;
- register vm_map_entry_t hint;
+ struct rbtree_node *node;
+ vm_map_entry_t hint;
/*
* First, make a quick check to see if we are already
@@ -505,10 +491,11 @@ boolean_t vm_map_lookup_entry(map, address, entry)
*/
boolean_t
-invalid_user_access(map, start, end, prot)
- vm_map_t map;
- vm_offset_t start, end;
- vm_prot_t prot;
+invalid_user_access(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t prot)
{
vm_map_entry_t entry;
@@ -532,17 +519,17 @@ invalid_user_access(map, start, end, prot)
* are initialized to zero. If an object is supplied,
* then an existing entry may be extended.
*/
-kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
- register vm_map_t map;
- vm_offset_t *address; /* OUT */
- vm_size_t size;
- vm_offset_t mask;
- vm_object_t object;
- vm_map_entry_t *o_entry; /* OUT */
+kern_return_t vm_map_find_entry(
+ vm_map_t map,
+ vm_offset_t *address, /* OUT */
+ vm_size_t size,
+ vm_offset_t mask,
+ vm_object_t object,
+ vm_map_entry_t *o_entry) /* OUT */
{
- register vm_map_entry_t entry, new_entry;
- register vm_offset_t start;
- register vm_offset_t end;
+ vm_map_entry_t entry, new_entry;
+ vm_offset_t start;
+ vm_offset_t end;
/*
* Look for the first possible address;
@@ -561,7 +548,7 @@ kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
*/
while (TRUE) {
- register vm_map_entry_t next;
+ vm_map_entry_t next;
/*
* Find the end of the proposed new region.
@@ -686,8 +673,8 @@ kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
return(KERN_SUCCESS);
}
-int vm_map_pmap_enter_print = FALSE;
-int vm_map_pmap_enter_enable = FALSE;
+boolean_t vm_map_pmap_enter_print = FALSE;
+boolean_t vm_map_pmap_enter_enable = FALSE;
/*
* Routine: vm_map_pmap_enter
@@ -704,19 +691,16 @@ int vm_map_pmap_enter_enable = FALSE;
* The source map should not be locked on entry.
*/
void
-vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
- vm_map_t map;
- register
- vm_offset_t addr;
- register
- vm_offset_t end_addr;
- register
- vm_object_t object;
- vm_offset_t offset;
- vm_prot_t protection;
+vm_map_pmap_enter(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_offset_t end_addr,
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_prot_t protection)
{
while (addr < end_addr) {
- register vm_page_t m;
+ vm_page_t m;
vm_object_lock(object);
vm_object_paging_begin(object);
@@ -765,27 +749,22 @@ vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
* Arguments are as defined in the vm_map call.
*/
kern_return_t vm_map_enter(
- map,
- address, size, mask, anywhere,
- object, offset, needs_copy,
- cur_protection, max_protection, inheritance)
- register
- vm_map_t map;
- vm_offset_t *address; /* IN/OUT */
- vm_size_t size;
- vm_offset_t mask;
- boolean_t anywhere;
- vm_object_t object;
- vm_offset_t offset;
- boolean_t needs_copy;
- vm_prot_t cur_protection;
- vm_prot_t max_protection;
- vm_inherit_t inheritance;
+ vm_map_t map,
+ vm_offset_t *address, /* IN/OUT */
+ vm_size_t size,
+ vm_offset_t mask,
+ boolean_t anywhere,
+ vm_object_t object,
+ vm_offset_t offset,
+ boolean_t needs_copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
{
- register vm_map_entry_t entry;
- register vm_offset_t start;
- register vm_offset_t end;
- kern_return_t result = KERN_SUCCESS;
+ vm_map_entry_t entry;
+ vm_offset_t start;
+ vm_offset_t end;
+ kern_return_t result = KERN_SUCCESS;
#define RETURN(value) { result = value; goto BailOut; }
@@ -831,7 +810,7 @@ kern_return_t vm_map_enter(
*/
while (TRUE) {
- register vm_map_entry_t next;
+ vm_map_entry_t next;
/*
* Find the end of the proposed new region.
@@ -979,7 +958,7 @@ kern_return_t vm_map_enter(
*/
/**/ {
- register vm_map_entry_t new_entry;
+ vm_map_entry_t new_entry;
new_entry = vm_map_entry_create(map);
@@ -1050,14 +1029,12 @@ kern_return_t vm_map_enter(
* the specified address; if necessary,
* it splits the entry into two.
*/
-void _vm_map_clip_start();
#define vm_map_clip_start(map, entry, startaddr) \
MACRO_BEGIN \
if ((startaddr) > (entry)->vme_start) \
_vm_map_clip_start(&(map)->hdr,(entry),(startaddr)); \
MACRO_END
-void _vm_map_copy_clip_start();
#define vm_map_copy_clip_start(copy, entry, startaddr) \
MACRO_BEGIN \
if ((startaddr) > (entry)->vme_start) \
@@ -1068,12 +1045,12 @@ void _vm_map_copy_clip_start();
* This routine is called only when it is known that
* the entry must be split.
*/
-void _vm_map_clip_start(map_header, entry, start)
- register struct vm_map_header *map_header;
- register vm_map_entry_t entry;
- register vm_offset_t start;
+void _vm_map_clip_start(
+ struct vm_map_header *map_header,
+ vm_map_entry_t entry,
+ vm_offset_t start)
{
- register vm_map_entry_t new_entry;
+ vm_map_entry_t new_entry;
/*
* Split off the front portion --
@@ -1105,14 +1082,12 @@ void _vm_map_clip_start(map_header, entry, start)
* the specified address; if necessary,
* it splits the entry into two.
*/
-void _vm_map_clip_end();
#define vm_map_clip_end(map, entry, endaddr) \
MACRO_BEGIN \
if ((endaddr) < (entry)->vme_end) \
_vm_map_clip_end(&(map)->hdr,(entry),(endaddr)); \
MACRO_END
-void _vm_map_copy_clip_end();
#define vm_map_copy_clip_end(copy, entry, endaddr) \
MACRO_BEGIN \
if ((endaddr) < (entry)->vme_end) \
@@ -1123,12 +1098,12 @@ void _vm_map_copy_clip_end();
* This routine is called only when it is known that
* the entry must be split.
*/
-void _vm_map_clip_end(map_header, entry, end)
- register struct vm_map_header *map_header;
- register vm_map_entry_t entry;
- register vm_offset_t end;
+void _vm_map_clip_end(
+ struct vm_map_header *map_header,
+ vm_map_entry_t entry,
+ vm_offset_t end)
{
- register vm_map_entry_t new_entry;
+ vm_map_entry_t new_entry;
/*
* Create a new entry and insert it
@@ -1183,15 +1158,15 @@ void _vm_map_clip_end(map_header, entry, end)
* range from the superior map, and then destroy the
* submap (if desired). [Better yet, don't try it.]
*/
-kern_return_t vm_map_submap(map, start, end, submap)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
- vm_map_t submap;
+kern_return_t vm_map_submap(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_map_t submap)
{
vm_map_entry_t entry;
- register kern_return_t result = KERN_INVALID_ARGUMENT;
- register vm_object_t object;
+ kern_return_t result = KERN_INVALID_ARGUMENT;
+ vm_object_t object;
vm_map_lock(map);
@@ -1231,15 +1206,15 @@ kern_return_t vm_map_submap(map, start, end, submap)
* specified, the maximum protection is to be set;
* otherwise, only the current protection is affected.
*/
-kern_return_t vm_map_protect(map, start, end, new_prot, set_max)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
- register vm_prot_t new_prot;
- register boolean_t set_max;
+kern_return_t vm_map_protect(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t new_prot,
+ boolean_t set_max)
{
- register vm_map_entry_t current;
- vm_map_entry_t entry;
+ vm_map_entry_t current;
+ vm_map_entry_t entry;
vm_map_lock(map);
@@ -1319,13 +1294,13 @@ kern_return_t vm_map_protect(map, start, end, new_prot, set_max)
* affects how the map will be shared with
* child maps at the time of vm_map_fork.
*/
-kern_return_t vm_map_inherit(map, start, end, new_inheritance)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
- register vm_inherit_t new_inheritance;
+kern_return_t vm_map_inherit(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_inherit_t new_inheritance)
{
- register vm_map_entry_t entry;
+ vm_map_entry_t entry;
vm_map_entry_t temp_entry;
vm_map_lock(map);
@@ -1368,14 +1343,14 @@ kern_return_t vm_map_inherit(map, start, end, new_inheritance)
* Callers should use macros in vm/vm_map.h (i.e. vm_map_pageable,
* or vm_map_pageable_user); don't call vm_map_pageable directly.
*/
-kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
- register vm_prot_t access_type;
- boolean_t user_wire;
+kern_return_t vm_map_pageable_common(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t access_type,
+ boolean_t user_wire)
{
- register vm_map_entry_t entry;
+ vm_map_entry_t entry;
vm_map_entry_t start_entry;
vm_map_lock(map);
@@ -1435,7 +1410,10 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
if (user_wire) {
if (--(entry->user_wired_count) == 0)
+ {
+ map->user_wired -= entry->vme_end - entry->vme_start;
entry->wired_count--;
+ }
}
else {
entry->wired_count--;
@@ -1512,7 +1490,10 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
if (user_wire) {
if ((entry->user_wired_count)++ == 0)
+ {
+ map->user_wired += entry->vme_end - entry->vme_start;
entry->wired_count++;
+ }
}
else {
entry->wired_count++;
@@ -1538,6 +1519,7 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
(entry->vme_end > start)) {
if (user_wire) {
if (--(entry->user_wired_count) == 0)
+ map->user_wired -= entry->vme_end - entry->vme_start;
entry->wired_count--;
}
else {
@@ -1617,12 +1599,12 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
*
* Deallocate the given entry from the target map.
*/
-void vm_map_entry_delete(map, entry)
- register vm_map_t map;
- register vm_map_entry_t entry;
+void vm_map_entry_delete(
+ vm_map_t map,
+ vm_map_entry_t entry)
{
- register vm_offset_t s, e;
- register vm_object_t object;
+ vm_offset_t s, e;
+ vm_object_t object;
extern vm_object_t kernel_object;
s = entry->vme_start;
@@ -1653,6 +1635,8 @@ void vm_map_entry_delete(map, entry)
if (entry->wired_count != 0) {
vm_fault_unwire(map, entry);
entry->wired_count = 0;
+ if (entry->user_wired_count)
+ map->user_wired -= entry->vme_end - entry->vme_start;
entry->user_wired_count = 0;
}
@@ -1701,10 +1685,10 @@ void vm_map_entry_delete(map, entry)
* map.
*/
-kern_return_t vm_map_delete(map, start, end)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
+kern_return_t vm_map_delete(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_entry_t entry;
vm_map_entry_t first_entry;
@@ -1784,12 +1768,12 @@ kern_return_t vm_map_delete(map, start, end)
* Remove the given address range from the target map.
* This is the exported form of vm_map_delete.
*/
-kern_return_t vm_map_remove(map, start, end)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
+kern_return_t vm_map_remove(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
- register kern_return_t result;
+ kern_return_t result;
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
@@ -1807,12 +1791,11 @@ kern_return_t vm_map_remove(map, start, end)
* that have not already been stolen.
*/
void
-vm_map_copy_steal_pages(copy)
-vm_map_copy_t copy;
+vm_map_copy_steal_pages(vm_map_copy_t copy)
{
- register vm_page_t m, new_m;
- register int i;
- vm_object_t object;
+ vm_page_t m, new_m;
+ int i;
+ vm_object_t object;
for (i = 0; i < copy->cpy_npages; i++) {
@@ -1854,8 +1837,7 @@ vm_map_copy_t copy;
* stolen, they are freed. If the pages are not stolen, they
* are unbusied, and associated state is cleaned up.
*/
-void vm_map_copy_page_discard(copy)
-vm_map_copy_t copy;
+void vm_map_copy_page_discard(vm_map_copy_t copy)
{
while (copy->cpy_npages > 0) {
vm_page_t m;
@@ -1900,8 +1882,7 @@ vm_map_copy_t copy;
* vm_map_copyin).
*/
void
-vm_map_copy_discard(copy)
- vm_map_copy_t copy;
+vm_map_copy_discard(vm_map_copy_t copy)
{
free_next_copy:
if (copy == VM_MAP_COPY_NULL)
@@ -1942,7 +1923,7 @@ free_next_copy:
* here to avoid tail recursion.
*/
if (copy->cpy_cont == vm_map_copy_discard_cont) {
- register vm_map_copy_t new_copy;
+ vm_map_copy_t new_copy;
new_copy = (vm_map_copy_t) copy->cpy_cont_args;
kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
@@ -1977,8 +1958,7 @@ free_next_copy:
* deallocation will not fail.
*/
vm_map_copy_t
-vm_map_copy_copy(copy)
- vm_map_copy_t copy;
+vm_map_copy_copy(vm_map_copy_t copy)
{
vm_map_copy_t new_copy;
@@ -2024,9 +2004,9 @@ vm_map_copy_copy(copy)
* A version of vm_map_copy_discard that can be called
* as a continuation from a vm_map_copy page list.
*/
-kern_return_t vm_map_copy_discard_cont(cont_args, copy_result)
-vm_map_copyin_args_t cont_args;
-vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copy_discard_cont(
+vm_map_copyin_args_t cont_args,
+vm_map_copy_t *copy_result) /* OUT */
{
vm_map_copy_discard((vm_map_copy_t) cont_args);
if (copy_result != (vm_map_copy_t *)0)
@@ -2081,11 +2061,11 @@ vm_map_copy_t *copy_result; /* OUT */
* atomically and interruptibly, an error indication is
* returned.
*/
-kern_return_t vm_map_copy_overwrite(dst_map, dst_addr, copy, interruptible)
- vm_map_t dst_map;
- vm_offset_t dst_addr;
- vm_map_copy_t copy;
- boolean_t interruptible;
+kern_return_t vm_map_copy_overwrite(
+ vm_map_t dst_map,
+ vm_offset_t dst_addr,
+ vm_map_copy_t copy,
+ boolean_t interruptible)
{
vm_size_t size;
vm_offset_t start;
@@ -2304,6 +2284,8 @@ start_pass_1:
entry->offset = copy_entry->offset;
entry->needs_copy = copy_entry->needs_copy;
entry->wired_count = 0;
+ if (entry->user_wired_count)
+ dst_map->user_wired -= entry->vme_end - entry->vme_start;
entry->user_wired_count = 0;
vm_map_copy_entry_unlink(copy, copy_entry);
@@ -2458,19 +2440,16 @@ start_pass_1:
* If successful, consumes the copy object.
* Otherwise, the caller is responsible for it.
*/
-kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
- register
- vm_map_t dst_map;
- vm_offset_t *dst_addr; /* OUT */
- register
- vm_map_copy_t copy;
+kern_return_t vm_map_copyout(
+ vm_map_t dst_map,
+ vm_offset_t *dst_addr, /* OUT */
+ vm_map_copy_t copy)
{
vm_size_t size;
vm_size_t adjustment;
vm_offset_t start;
vm_offset_t vm_copy_start;
vm_map_entry_t last;
- register
vm_map_entry_t entry;
/*
@@ -2616,9 +2595,9 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
* map the pages into the destination map.
*/
if (entry->wired_count != 0) {
- register vm_offset_t va;
- vm_offset_t offset;
- register vm_object_t object;
+ vm_offset_t va;
+ vm_offset_t offset;
+ vm_object_t object;
object = entry->object.vm_object;
offset = entry->offset;
@@ -2630,7 +2609,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
TRUE);
while (va < entry->vme_end) {
- register vm_page_t m;
+ vm_page_t m;
/*
* Look up the page in the object.
@@ -2715,19 +2694,16 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
* Version of vm_map_copyout() for page list vm map copies.
*
*/
-kern_return_t vm_map_copyout_page_list(dst_map, dst_addr, copy)
- register
- vm_map_t dst_map;
- vm_offset_t *dst_addr; /* OUT */
- register
- vm_map_copy_t copy;
+kern_return_t vm_map_copyout_page_list(
+ vm_map_t dst_map,
+ vm_offset_t *dst_addr, /* OUT */
+ vm_map_copy_t copy)
{
vm_size_t size;
vm_offset_t start;
vm_offset_t end;
vm_offset_t offset;
vm_map_entry_t last;
- register
vm_object_t object;
vm_page_t *page_list, m;
vm_map_entry_t entry;
@@ -2905,6 +2881,7 @@ create_object:
if (must_wire) {
entry->wired_count = 1;
+ dst_map->user_wired += entry->vme_end - entry->vme_start;
entry->user_wired_count = 1;
} else {
entry->wired_count = 0;
@@ -3105,12 +3082,12 @@ error:
* In/out conditions:
* The source map should not be locked on entry.
*/
-kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
- vm_map_t src_map;
- vm_offset_t src_addr;
- vm_size_t len;
- boolean_t src_destroy;
- vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copyin(
+ vm_map_t src_map,
+ vm_offset_t src_addr,
+ vm_size_t len,
+ boolean_t src_destroy,
+ vm_map_copy_t *copy_result) /* OUT */
{
vm_map_entry_t tmp_entry; /* Result of last map lookup --
* in multi-level lookup, this
@@ -3124,7 +3101,6 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
vm_offset_t src_end; /* End of entire region to be
* copied */
- register
vm_map_copy_t copy; /* Resulting copy */
/*
@@ -3191,14 +3167,12 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
*/
while (TRUE) {
- register
vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */
vm_size_t src_size; /* Size of source
* map entry (in both
* maps)
*/
- register
vm_object_t src_object; /* Object to copy */
vm_offset_t src_offset;
@@ -3207,7 +3181,6 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
* for copy-on-write?
*/
- register
vm_map_entry_t new_entry; /* Map entry for copy */
boolean_t new_entry_needs_copy; /* Will new entry be COW? */
@@ -3471,11 +3444,11 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
* Our caller donates an object reference.
*/
-kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
- vm_object_t object;
- vm_offset_t offset; /* offset of region in object */
- vm_size_t size; /* size of region in object */
- vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copyin_object(
+ vm_object_t object,
+ vm_offset_t offset, /* offset of region in object */
+ vm_size_t size, /* size of region in object */
+ vm_map_copy_t *copy_result) /* OUT */
{
vm_map_copy_t copy; /* Resulting copy */
@@ -3516,12 +3489,12 @@ kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
* the scheduler.
*/
-kern_return_t vm_map_copyin_page_list_cont(cont_args, copy_result)
-vm_map_copyin_args_t cont_args;
-vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copyin_page_list_cont(
+ vm_map_copyin_args_t cont_args,
+ vm_map_copy_t *copy_result) /* OUT */
{
kern_return_t result = 0; /* '=0' to quiet gcc warnings */
- register boolean_t do_abort, src_destroy, src_destroy_only;
+ boolean_t do_abort, src_destroy, src_destroy_only;
/*
* Check for cases that only require memory destruction.
@@ -3572,27 +3545,23 @@ vm_map_copy_t *copy_result; /* OUT */
* the recipient of this copy_result must be prepared to deal with it.
*/
-kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy,
- steal_pages, copy_result, is_cont)
- vm_map_t src_map;
- vm_offset_t src_addr;
- vm_size_t len;
- boolean_t src_destroy;
- boolean_t steal_pages;
- vm_map_copy_t *copy_result; /* OUT */
- boolean_t is_cont;
+kern_return_t vm_map_copyin_page_list(
+ vm_map_t src_map,
+ vm_offset_t src_addr,
+ vm_size_t len,
+ boolean_t src_destroy,
+ boolean_t steal_pages,
+ vm_map_copy_t *copy_result, /* OUT */
+ boolean_t is_cont)
{
vm_map_entry_t src_entry;
vm_page_t m;
vm_offset_t src_start;
vm_offset_t src_end;
vm_size_t src_size;
- register
vm_object_t src_object;
- register
vm_offset_t src_offset;
vm_offset_t src_last_offset;
- register
vm_map_copy_t copy; /* Resulting copy */
kern_return_t result = KERN_SUCCESS;
boolean_t need_map_lookup;
@@ -3926,7 +3895,7 @@ retry:
*/
src_start = trunc_page(src_addr);
if (steal_pages) {
- register int i;
+ int i;
vm_offset_t unwire_end;
unwire_end = src_start;
@@ -3998,6 +3967,8 @@ retry:
assert(src_entry->wired_count > 0);
src_entry->wired_count = 0;
+ if (src_entry->user_wired_count)
+ src_map->user_wired -= src_entry->vme_end - src_entry->vme_start;
src_entry->user_wired_count = 0;
unwire_end = src_entry->vme_end;
pmap_pageable(vm_map_pmap(src_map),
@@ -4103,18 +4074,14 @@ error:
*
* The source map must not be locked.
*/
-vm_map_t vm_map_fork(old_map)
- vm_map_t old_map;
+vm_map_t vm_map_fork(vm_map_t old_map)
{
vm_map_t new_map;
- register
vm_map_entry_t old_entry;
- register
vm_map_entry_t new_entry;
pmap_t new_pmap = pmap_create((vm_size_t) 0);
vm_size_t new_size = 0;
vm_size_t entry_size;
- register
vm_object_t object;
vm_map_lock(old_map);
@@ -4377,21 +4344,20 @@ vm_map_t vm_map_fork(old_map)
* copying operations, although the data referenced will
* remain the same.
*/
-kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
- object, offset, out_prot, wired)
- vm_map_t *var_map; /* IN/OUT */
- register vm_offset_t vaddr;
- register vm_prot_t fault_type;
-
- vm_map_version_t *out_version; /* OUT */
- vm_object_t *object; /* OUT */
- vm_offset_t *offset; /* OUT */
- vm_prot_t *out_prot; /* OUT */
- boolean_t *wired; /* OUT */
+kern_return_t vm_map_lookup(
+ vm_map_t *var_map, /* IN/OUT */
+ vm_offset_t vaddr,
+ vm_prot_t fault_type,
+
+ vm_map_version_t *out_version, /* OUT */
+ vm_object_t *object, /* OUT */
+ vm_offset_t *offset, /* OUT */
+ vm_prot_t *out_prot, /* OUT */
+ boolean_t *wired) /* OUT */
{
- register vm_map_entry_t entry;
- register vm_map_t map = *var_map;
- register vm_prot_t prot;
+ vm_map_entry_t entry;
+ vm_map_t map = *var_map;
+ vm_prot_t prot;
RetryLookup: ;
@@ -4559,11 +4525,9 @@ kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
* since the given version. If successful, the map
* will not change until vm_map_verify_done() is called.
*/
-boolean_t vm_map_verify(map, version)
- register
- vm_map_t map;
- register
- vm_map_version_t *version; /* REF */
+boolean_t vm_map_verify(
+ vm_map_t map,
+ vm_map_version_t *version) /* REF */
{
boolean_t result;
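A successful vm_map_verify() therefore pins the map until
vm_map_verify_done() releases it. A caller-side sketch of the
protocol (error handling and the read-lock hand-off abbreviated;
the local variable names are illustrative):

	vm_map_version_t version;
	vm_object_t object;
	vm_offset_t offset;
	vm_prot_t prot;
	boolean_t wired;
	kern_return_t kr;

	do {
		kr = vm_map_lookup(&map, vaddr, VM_PROT_READ, &version,
				   &object, &offset, &prot, &wired);
		if (kr != KERN_SUCCESS)
			return kr;
		/* ... work that may block and race with map changes ... */
	} while (!vm_map_verify(map, &version));	/* retry on change */
	/* The map is now stable until vm_map_verify_done(map, &version). */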
@@ -4592,24 +4556,19 @@ boolean_t vm_map_verify(map, version)
* a task's address map.
*/
-kern_return_t vm_region(map, address, size,
- protection, max_protection,
- inheritance, is_shared,
- object_name, offset_in_object)
- vm_map_t map;
- vm_offset_t *address; /* IN/OUT */
- vm_size_t *size; /* OUT */
- vm_prot_t *protection; /* OUT */
- vm_prot_t *max_protection; /* OUT */
- vm_inherit_t *inheritance; /* OUT */
- boolean_t *is_shared; /* OUT */
- ipc_port_t *object_name; /* OUT */
- vm_offset_t *offset_in_object; /* OUT */
+kern_return_t vm_region(
+ vm_map_t map,
+ vm_offset_t *address, /* IN/OUT */
+ vm_size_t *size, /* OUT */
+ vm_prot_t *protection, /* OUT */
+ vm_prot_t *max_protection, /* OUT */
+ vm_inherit_t *inheritance, /* OUT */
+ boolean_t *is_shared, /* OUT */
+ ipc_port_t *object_name, /* OUT */
+ vm_offset_t *offset_in_object) /* OUT */
{
vm_map_entry_t tmp_entry;
- register
vm_map_entry_t entry;
- register
vm_offset_t tmp_offset;
vm_offset_t start;
@@ -4666,9 +4625,9 @@ kern_return_t vm_region(map, address, size,
* at allocation time because the adjacent entry
* is often wired down.
*/
-void vm_map_simplify(map, start)
- vm_map_t map;
- vm_offset_t start;
+void vm_map_simplify(
+ vm_map_t map,
+ vm_offset_t start)
{
vm_map_entry_t this_entry;
vm_map_entry_t prev_entry;
@@ -4727,12 +4686,12 @@ void vm_map_simplify(map, start)
* it itself. [This assumes that attributes do not
* need to be inherited, which seems ok to me]
*/
-kern_return_t vm_map_machine_attribute(map, address, size, attribute, value)
- vm_map_t map;
- vm_offset_t address;
- vm_size_t size;
- vm_machine_attribute_t attribute;
- vm_machine_attribute_val_t* value; /* IN/OUT */
+kern_return_t vm_map_machine_attribute(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_size_t size,
+ vm_machine_attribute_t attribute,
+ vm_machine_attribute_val_t* value) /* IN/OUT */
{
kern_return_t ret;
@@ -4757,10 +4716,9 @@ kern_return_t vm_map_machine_attribute(map, address, size, attribute, value)
/*
* vm_map_print: [ debug ]
*/
-void vm_map_print(map)
- register vm_map_t map;
+void vm_map_print(vm_map_t map)
{
- register vm_map_entry_t entry;
+ vm_map_entry_t entry;
iprintf("Task map 0x%X: pmap=0x%X,",
(vm_offset_t) map, (vm_offset_t) (map->pmap));
@@ -4826,7 +4784,7 @@ void vm_map_print(map)
*/
void vm_map_copy_print(copy)
- vm_map_copy_t copy;
+ const vm_map_copy_t copy;
{
int i, npages;
@@ -4886,6 +4844,6 @@ void vm_map_copy_print(copy)
break;
}
- indent -=2;
+ indent -= 2;
}
#endif /* MACH_KDB */
diff --git a/vm/vm_map.h b/vm/vm_map.h
index a15e681b..9b31f90a 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -52,10 +52,10 @@
#include <vm/vm_types.h>
#include <kern/lock.h>
#include <kern/rbtree.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
/* TODO: make it dynamic */
-#define KENTRY_DATA_SIZE (64*PAGE_SIZE)
+#define KENTRY_DATA_SIZE (256*PAGE_SIZE)
/*
* Types defined:
@@ -170,14 +170,18 @@ struct vm_map {
#define max_offset hdr.links.end /* end of range */
pmap_t pmap; /* Physical map */
vm_size_t size; /* virtual size */
+ vm_size_t user_wired; /* wired by user size */
int ref_count; /* Reference count */
decl_simple_lock_data(, ref_lock) /* Lock for ref_count field */
vm_map_entry_t hint; /* hint for quick lookups */
decl_simple_lock_data(, hint_lock) /* lock for hint storage */
vm_map_entry_t first_free; /* First free space hint */
- boolean_t wait_for_space; /* Should callers wait
+
+ /* Flags */
+ unsigned int wait_for_space:1, /* Should callers wait
for space? */
- boolean_t wiring_required;/* All memory wired? */
+ /* boolean_t */ wiring_required:1; /* All memory wired? */
+
unsigned int timestamp; /* Version number */
};
@@ -397,9 +401,6 @@ extern kern_return_t vm_map_protect(vm_map_t, vm_offset_t, vm_offset_t,
extern kern_return_t vm_map_inherit(vm_map_t, vm_offset_t, vm_offset_t,
vm_inherit_t);
-/* Debugging: print a map */
-extern void vm_map_print(vm_map_t);
-
/* Look up an address */
extern kern_return_t vm_map_lookup(vm_map_t *, vm_offset_t, vm_prot_t,
vm_map_version_t *, vm_object_t *,
@@ -440,6 +441,23 @@ extern kern_return_t vm_map_machine_attribute(vm_map_t, vm_offset_t,
/* Delete entry from map */
extern void vm_map_entry_delete(vm_map_t, vm_map_entry_t);
+kern_return_t vm_map_delete(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end);
+
+kern_return_t vm_map_copyout_page_list(
+ vm_map_t dst_map,
+ vm_offset_t *dst_addr, /* OUT */
+ vm_map_copy_t copy);
+
+void vm_map_copy_page_discard(vm_map_copy_t copy);
+
+boolean_t vm_map_lookup_entry(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_map_entry_t *entry); /* OUT */
+
/*
* Functions implemented as macros
*/
@@ -541,6 +559,9 @@ extern void _vm_map_clip_start(
* the specified address; if necessary,
* it splits the entry into two.
*/
-void _vm_map_clip_end();
+void _vm_map_clip_end(
+ struct vm_map_header *map_header,
+ vm_map_entry_t entry,
+ vm_offset_t end);
#endif /* _VM_VM_MAP_H_ */
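For illustration (not part of the patch), the clip operations split an
entry at a boundary; given an entry spanning [0x1000, 0x5000):

	/* _vm_map_clip_end(&map->hdr, entry, 0x3000) leaves:
	 *   entry     -> [0x1000, 0x3000)  (truncated in place)
	 *   new entry -> [0x3000, 0x5000)  (object offset advanced
	 *                                   by 0x2000)
	 */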
diff --git a/vm/vm_object.c b/vm/vm_object.c
index d83c39fe..deac0c2a 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -59,14 +59,6 @@
#include <ddb/db_output.h>
#endif /* MACH_KDB */
-
-void memory_object_release(
- ipc_port_t pager,
- pager_request_t pager_request,
- ipc_port_t pager_name); /* forward */
-
-void vm_object_deactivate_pages(vm_object_t);
-
/*
* Virtual memory objects maintain the actual data
* associated with allocated virtual memory. A given
@@ -233,7 +225,7 @@ static void _vm_object_setup(
vm_object_t _vm_object_allocate(
vm_size_t size)
{
- register vm_object_t object;
+ vm_object_t object;
object = (vm_object_t) kmem_cache_alloc(&vm_object_cache);
@@ -245,8 +237,8 @@ vm_object_t _vm_object_allocate(
vm_object_t vm_object_allocate(
vm_size_t size)
{
- register vm_object_t object;
- register ipc_port_t port;
+ vm_object_t object;
+ ipc_port_t port;
object = _vm_object_allocate(size);
port = ipc_port_alloc_kernel();
@@ -353,7 +345,7 @@ void vm_object_init(void)
* Gets another reference to the given object.
*/
void vm_object_reference(
- register vm_object_t object)
+ vm_object_t object)
{
if (object == VM_OBJECT_NULL)
return;
@@ -376,7 +368,7 @@ void vm_object_reference(
* No object may be locked.
*/
void vm_object_deallocate(
- register vm_object_t object)
+ vm_object_t object)
{
vm_object_t temp;
@@ -530,8 +522,6 @@ void vm_object_deallocate(
}
}
-boolean_t vm_object_terminate_remove_all = FALSE;
-
/*
* Routine: vm_object_terminate
* Purpose:
@@ -546,10 +536,10 @@ boolean_t vm_object_terminate_remove_all = FALSE;
* object will cease to exist.
*/
void vm_object_terminate(
- register vm_object_t object)
+ vm_object_t object)
{
- register vm_page_t p;
- vm_object_t shadow_object;
+ vm_page_t p;
+ vm_object_t shadow_object;
/*
* Make sure the object isn't already being terminated
@@ -598,10 +588,6 @@ void vm_object_terminate(
VM_PAGE_CHECK(p);
- if (p->busy && !p->absent)
- panic("vm_object_terminate.2 0x%x 0x%x",
- object, p);
-
VM_PAGE_FREE(p);
}
} else while (!queue_empty(&object->memq)) {
@@ -609,9 +595,6 @@ void vm_object_terminate(
VM_PAGE_CHECK(p);
- if (p->busy && !p->absent)
- panic("vm_object_terminate.3 0x%x 0x%x", object, p);
-
vm_page_lock_queues();
VM_PAGE_QUEUES_REMOVE(p);
vm_page_unlock_queues();
@@ -629,9 +612,6 @@ void vm_object_terminate(
goto free_page;
}
- if (p->fictitious)
- panic("vm_object_terminate.4 0x%x 0x%x", object, p);
-
if (!p->dirty)
p->dirty = pmap_is_modified(p->phys_addr);
@@ -752,7 +732,6 @@ void memory_object_release(
void vm_object_abort_activity(
vm_object_t object)
{
- register
vm_page_t p;
vm_page_t next;
@@ -806,17 +785,12 @@ void vm_object_abort_activity(
* or from port destruction handling (via vm_object_destroy).
*/
kern_return_t memory_object_destroy(
- register
vm_object_t object,
kern_return_t reason)
{
ipc_port_t old_object, old_name;
pager_request_t old_control;
-#ifdef lint
- reason++;
-#endif /* lint */
-
if (object == VM_OBJECT_NULL)
return KERN_SUCCESS;
@@ -892,9 +866,9 @@ kern_return_t memory_object_destroy(
* The object must be locked.
*/
void vm_object_deactivate_pages(
- register vm_object_t object)
+ vm_object_t object)
{
- register vm_page_t p;
+ vm_page_t p;
queue_iterate(&object->memq, p, vm_page_t, listq) {
vm_page_lock_queues();
@@ -931,8 +905,8 @@ void vm_object_deactivate_pages(
boolean_t vm_object_pmap_protect_by_page = FALSE;
void vm_object_pmap_protect(
- register vm_object_t object,
- register vm_offset_t offset,
+ vm_object_t object,
+ vm_offset_t offset,
vm_size_t size,
pmap_t pmap,
vm_offset_t pmap_start,
@@ -954,8 +928,8 @@ void vm_object_pmap_protect(
}
{
- register vm_page_t p;
- register vm_offset_t end;
+ vm_page_t p;
+ vm_offset_t end;
end = offset + size;
@@ -986,7 +960,7 @@ void vm_object_pmap_protect(
* Must follow shadow chain to remove access
* to pages in shadowed objects.
*/
- register vm_object_t next_object;
+ vm_object_t next_object;
next_object = object->shadow;
if (next_object != VM_OBJECT_NULL) {
@@ -1023,11 +997,11 @@ void vm_object_pmap_protect(
* The object must *not* be locked.
*/
void vm_object_pmap_remove(
- register vm_object_t object,
- register vm_offset_t start,
- register vm_offset_t end)
+ vm_object_t object,
+ vm_offset_t start,
+ vm_offset_t end)
{
- register vm_page_t p;
+ vm_page_t p;
if (object == VM_OBJECT_NULL)
return;
@@ -1073,7 +1047,6 @@ void vm_object_pmap_remove(
* VM_OBJECT_NULL.
*/
kern_return_t vm_object_copy_slowly(
- register
vm_object_t src_object,
vm_offset_t src_offset,
vm_size_t size,
@@ -1127,7 +1100,6 @@ kern_return_t vm_object_copy_slowly(
vm_prot_t prot = VM_PROT_READ;
vm_page_t _result_page;
vm_page_t top_page;
- register
vm_page_t result_page;
vm_object_lock(src_object);
@@ -1247,8 +1219,6 @@ kern_return_t vm_object_copy_slowly(
* The object should be unlocked on entry and exit.
*/
-vm_object_t vm_object_copy_delayed(); /* forward declaration */
-
boolean_t vm_object_copy_temporary(
vm_object_t *_object, /* INOUT */
vm_offset_t *_offset, /* INOUT */
@@ -1257,10 +1227,6 @@ boolean_t vm_object_copy_temporary(
{
vm_object_t object = *_object;
-#ifdef lint
- ++*_offset;
-#endif /* lint */
-
if (object == VM_OBJECT_NULL) {
*_src_needs_copy = FALSE;
*_dst_needs_copy = FALSE;
@@ -1360,16 +1326,6 @@ kern_return_t vm_object_copy_call(
vm_page_t p;
/*
- * Set the backing object for the new
- * temporary object.
- */
-
- assert(src_object->ref_count > 0);
- src_object->ref_count++;
- vm_object_paging_begin(src_object);
- vm_object_unlock(src_object);
-
- /*
* Create a memory object port to be associated
* with this new vm_object.
*
@@ -1382,10 +1338,18 @@ kern_return_t vm_object_copy_call(
*/
new_memory_object = ipc_port_alloc_kernel();
- if (new_memory_object == IP_NULL) {
- panic("vm_object_copy_call: allocate memory object port");
- /* XXX Shouldn't panic here. */
- }
+ if (new_memory_object == IP_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ /*
+ * Set the backing object for the new
+ * temporary object.
+ */
+
+ assert(src_object->ref_count > 0);
+ src_object->ref_count++;
+ vm_object_paging_begin(src_object);
+ vm_object_unlock(src_object);
/* we hold a naked receive right for new_memory_object */
(void) ipc_port_make_send(new_memory_object);
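The swap above is the usual cure for the panic it replaces: allocate
the fallible resource first, and only then take state that would need
unwinding. Schematically (names as in the hunk):

	new_memory_object = ipc_port_alloc_kernel();
	if (new_memory_object == IP_NULL)
		return KERN_RESOURCE_SHORTAGE;	/* nothing to roll back */
	/* Only now pin the source object: */
	assert(src_object->ref_count > 0);
	src_object->ref_count++;
	vm_object_paging_begin(src_object);
	vm_object_unlock(src_object);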
@@ -1490,7 +1454,7 @@ vm_object_t vm_object_copy_delayed(
* synchronization required in the "push"
* operation described above.
*
- * The copy-on-write is said to be assymetric because
+ * The copy-on-write is said to be asymmetric because
* the original object is *not* marked copy-on-write.
* A copied page is pushed to the copy object, regardless
* which party attempted to modify the page.
@@ -1623,7 +1587,6 @@ vm_object_t vm_object_copy_delayed(
* and may be interrupted.
*/
kern_return_t vm_object_copy_strategically(
- register
vm_object_t src_object,
vm_offset_t src_offset,
vm_size_t size,
@@ -1736,8 +1699,8 @@ void vm_object_shadow(
vm_offset_t *offset, /* IN/OUT */
vm_size_t length)
{
- register vm_object_t source;
- register vm_object_t result;
+ vm_object_t source;
+ vm_object_t result;
source = *object;
@@ -2009,7 +1972,6 @@ vm_object_t vm_object_enter(
vm_size_t size,
boolean_t internal)
{
- register
vm_object_t object;
vm_object_t new_object;
boolean_t must_init;
@@ -2227,7 +2189,6 @@ restart:
* daemon will be using this routine.
*/
void vm_object_pager_create(
- register
vm_object_t object)
{
ipc_port_t pager;
@@ -2372,14 +2333,14 @@ boolean_t vm_object_collapse_bypass_allowed = TRUE;
* so the caller should hold a reference for the object.
*/
void vm_object_collapse(
- register vm_object_t object)
+ vm_object_t object)
{
- register vm_object_t backing_object;
- register vm_offset_t backing_offset;
- register vm_size_t size;
- register vm_offset_t new_offset;
- register vm_page_t p, pp;
- ipc_port_t old_name_port;
+ vm_object_t backing_object;
+ vm_offset_t backing_offset;
+ vm_size_t size;
+ vm_offset_t new_offset;
+ vm_page_t p, pp;
+ ipc_port_t old_name_port;
if (!vm_object_collapse_allowed)
return;
@@ -2749,11 +2710,11 @@ unsigned int vm_object_page_remove_lookup = 0;
unsigned int vm_object_page_remove_iterate = 0;
void vm_object_page_remove(
- register vm_object_t object,
- register vm_offset_t start,
- register vm_offset_t end)
+ vm_object_t object,
+ vm_offset_t start,
+ vm_offset_t end)
{
- register vm_page_t p, next;
+ vm_page_t p, next;
/*
* One and two page removals are most popular.
@@ -2814,7 +2775,7 @@ void vm_object_page_remove(
*/
boolean_t vm_object_coalesce(
- register vm_object_t prev_object,
+ vm_object_t prev_object,
vm_object_t next_object,
vm_offset_t prev_offset,
vm_offset_t next_offset,
@@ -2823,10 +2784,6 @@ boolean_t vm_object_coalesce(
{
vm_size_t newsize;
-#ifdef lint
- next_offset++;
-#endif /* lint */
-
if (next_object != VM_OBJECT_NULL) {
return FALSE;
}
@@ -2969,6 +2926,7 @@ vm_object_page_map(
#if MACH_KDB
+#include <vm/vm_print.h>
#define printf kdbprintf
boolean_t vm_object_print_pages = FALSE;
@@ -2979,9 +2937,9 @@ boolean_t vm_object_print_pages = FALSE;
void vm_object_print(
vm_object_t object)
{
- register vm_page_t p;
+ vm_page_t p;
- register int count;
+ int count;
if (object == VM_OBJECT_NULL)
return;
diff --git a/vm/vm_object.h b/vm/vm_object.h
index 4e4c9498..3bfc67ab 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -45,7 +45,7 @@
#include <kern/lock.h>
#include <kern/assert.h>
#include <kern/debug.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <vm/pmap.h>
#include <ipc/ipc_types.h>
@@ -233,8 +233,6 @@ extern void vm_object_page_map(
vm_offset_t (*)(void *, vm_offset_t),
void *);
-extern void vm_object_print(vm_object_t);
-
extern vm_object_t vm_object_request_object(struct ipc_port *);
extern boolean_t vm_object_coalesce(
@@ -247,6 +245,16 @@ extern boolean_t vm_object_coalesce(
extern void vm_object_pager_wakeup(ipc_port_t pager);
+void memory_object_release(
+ ipc_port_t pager,
+ pager_request_t pager_request,
+ ipc_port_t pager_name);
+
+void vm_object_deactivate_pages(vm_object_t);
+
+vm_object_t vm_object_copy_delayed(
+ vm_object_t src_object);
+
/*
* Event waiting handling
*/
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 4536d1c5..e6a8c497 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -42,7 +42,7 @@
#include <kern/queue.h>
#include <kern/lock.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <kern/sched_prim.h> /* definitions of wait/wakeup */
#if MACH_VM_DEBUG
@@ -84,7 +84,7 @@ struct vm_page {
vm_object_t object; /* which object am I in (O,P) */
vm_offset_t offset; /* offset into that object (O,P) */
- unsigned int wire_count:16, /* how many wired down maps use me?
+ unsigned int wire_count:15, /* how many wired down maps use me?
(O&P) */
/* boolean_t */ inactive:1, /* page is in inactive list (P) */
active:1, /* page is in active list (P) */
@@ -92,14 +92,8 @@ struct vm_page {
free:1, /* page is on free list (P) */
reference:1, /* page has been used (P) */
external:1, /* page considered external (P) */
- extcounted:1, /* page counted in ext counts (P) */
- :0; /* (force to 'long' boundary) */
-#ifdef ns32000
- int pad; /* extra space for ns32000 bit ops */
-#endif /* ns32000 */
-
- unsigned int
- /* boolean_t */ busy:1, /* page is in transit (O) */
+ extcounted:1, /* page counted in ext counts (P) */
+ busy:1, /* page is in transit (O) */
wanted:1, /* someone is waiting for page (O) */
tabled:1, /* page is in VP table (O) */
fictitious:1, /* Physical page doesn't exist (O) */
@@ -112,10 +106,9 @@ struct vm_page {
dirty:1, /* Page must be cleaned (O) */
precious:1, /* Page is precious; data must be
* returned even if clean (O) */
- overwriting:1, /* Request to unlock has been made
+ overwriting:1; /* Request to unlock has been made
* without having data. (O)
* [See vm_object_overwrite] */
- :0;
vm_offset_t phys_addr; /* Physical address of page, passed
* to pmap_enter (read-only) */
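Shrinking wire_count from 16 to 15 bits (so the former second word of
flags fits alongside it) halves the maximum per-page wire count. A
hypothetical compile-time guard, not in the patch, makes the new
ceiling explicit:

	#define VM_PAGE_MAX_WIRE_COUNT ((1U << 15) - 1)	/* 32767 */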
@@ -156,11 +149,6 @@ extern
queue_head_t vm_page_queue_inactive; /* inactive memory queue */
extern
-vm_offset_t first_phys_addr; /* physical address for first_page */
-extern
-vm_offset_t last_phys_addr; /* physical address for last_page */
-
-extern
int vm_page_free_count; /* How many pages are free? */
extern
int vm_page_fictitious_count;/* How many fictitious pages are free? */
@@ -247,8 +235,6 @@ extern void vm_page_copy(vm_page_t src_m, vm_page_t dest_m);
extern void vm_page_wire(vm_page_t);
extern void vm_page_unwire(vm_page_t);
-extern void vm_set_page_size(void);
-
#if MACH_VM_DEBUG
extern unsigned int vm_page_info(
hash_info_bucket_t *info,
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index 661675f0..51a6a0d4 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -52,7 +52,6 @@
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <machine/locore.h>
-#include <machine/vm_tuning.h>
@@ -98,7 +97,7 @@
*/
#ifndef VM_PAGE_FREE_TARGET
-#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
+#define VM_PAGE_FREE_TARGET(free) (150 + (free) * 10 / 100)
#endif /* VM_PAGE_FREE_TARGET */
/*
@@ -107,7 +106,7 @@
*/
#ifndef VM_PAGE_FREE_MIN
-#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
+#define VM_PAGE_FREE_MIN(free) (100 + (free) * 8 / 100)
#endif /* VM_PAGE_FREE_MIN */
/* When vm_page_external_count exceeds vm_page_external_limit,
@@ -133,7 +132,7 @@
* operation by dipping into the reserved pool of pages. */
#ifndef VM_PAGE_FREE_RESERVED
-#define VM_PAGE_FREE_RESERVED 50
+#define VM_PAGE_FREE_RESERVED 500
#endif /* VM_PAGE_FREE_RESERVED */
/*
@@ -145,7 +144,7 @@
*/
#ifndef VM_PAGEOUT_RESERVED_INTERNAL
-#define VM_PAGEOUT_RESERVED_INTERNAL(reserve) ((reserve) - 25)
+#define VM_PAGEOUT_RESERVED_INTERNAL(reserve) ((reserve) - 250)
#endif /* VM_PAGEOUT_RESERVED_INTERNAL */
/*
@@ -157,12 +156,9 @@
*/
#ifndef VM_PAGEOUT_RESERVED_REALLY
-#define VM_PAGEOUT_RESERVED_REALLY(reserve) ((reserve) - 40)
+#define VM_PAGEOUT_RESERVED_REALLY(reserve) ((reserve) - 400)
#endif /* VM_PAGEOUT_RESERVED_REALLY */
-extern void vm_pageout_continue();
-extern void vm_pageout_scan_continue();
-
unsigned int vm_pageout_reserved_internal = 0;
unsigned int vm_pageout_reserved_really = 0;
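The retuned thresholds scale the old values by roughly a factor of
ten. A worked example, assuming a pool of 10000 managed pages:

	/* Illustrative arithmetic only (values in pages):
	 *   VM_PAGE_FREE_TARGET(10000)        = 150 + 10000*10/100 = 1150
	 *   VM_PAGE_FREE_MIN(10000)           = 100 + 10000*8/100  =  900
	 *   VM_PAGE_FREE_RESERVED             =                       500
	 *   VM_PAGEOUT_RESERVED_INTERNAL(500) = 500 - 250          =  250
	 *   VM_PAGEOUT_RESERVED_REALLY(500)   = 500 - 400          =  100
	 */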
@@ -230,16 +226,16 @@ unsigned int vm_pageout_inactive_cleaned_external = 0;
* not busy on exit.
*/
vm_page_t
-vm_pageout_setup(m, paging_offset, new_object, new_offset, flush)
- register vm_page_t m;
- vm_offset_t paging_offset;
- register vm_object_t new_object;
- vm_offset_t new_offset;
- boolean_t flush;
+vm_pageout_setup(
+ vm_page_t m,
+ vm_offset_t paging_offset,
+ vm_object_t new_object,
+ vm_offset_t new_offset,
+ boolean_t flush)
{
- register vm_object_t old_object = m->object;
- register vm_page_t holding_page = 0; /*'=0'to quiet gcc warnings*/
- register vm_page_t new_m;
+ vm_object_t old_object = m->object;
+ vm_page_t holding_page = 0; /* '=0' to quiet gcc warnings */
+ vm_page_t new_m;
assert(m->busy && !m->absent && !m->fictitious);
@@ -417,15 +413,15 @@ vm_pageout_setup(m, paging_offset, new_object, new_offset, flush)
* copy to a new page in a new object, if not.
*/
void
-vm_pageout_page(m, initial, flush)
- register vm_page_t m;
- boolean_t initial;
- boolean_t flush;
+vm_pageout_page(
+ vm_page_t m,
+ boolean_t initial,
+ boolean_t flush)
{
vm_map_copy_t copy;
- register vm_object_t old_object;
- register vm_object_t new_object;
- register vm_page_t holding_page;
+ vm_object_t old_object;
+ vm_object_t new_object;
+ vm_page_t holding_page;
vm_offset_t paging_offset;
kern_return_t rc;
boolean_t precious_clean;
@@ -511,7 +507,7 @@ vm_pageout_page(m, initial, flush)
* vm_page_free_wanted == 0.
*/
-void vm_pageout_scan()
+void vm_pageout_scan(void)
{
unsigned int burst_count;
unsigned int want_pages;
@@ -555,12 +551,14 @@ void vm_pageout_scan()
stack_collect();
net_kmsg_collect();
consider_task_collect();
+ if (0) /* XXX: pcb_collect doesn't do anything yet, so it is
+ pointless to call consider_thread_collect. */
consider_thread_collect();
slab_collect();
for (burst_count = 0;;) {
- register vm_page_t m;
- register vm_object_t object;
+ vm_page_t m;
+ vm_object_t object;
unsigned int free_count;
/*
@@ -578,7 +576,7 @@ void vm_pageout_scan()
while ((vm_page_inactive_count < vm_page_inactive_target) &&
!queue_empty(&vm_page_queue_active)) {
- register vm_object_t obj;
+ vm_object_t obj;
vm_pageout_active++;
m = (vm_page_t) queue_first(&vm_page_queue_active);
@@ -695,7 +693,7 @@ void vm_pageout_scan()
if (want_pages || m->external)
break;
- m = (vm_page_t) queue_next (m);
+ m = (vm_page_t) queue_next (&m->pageq);
if (!m)
goto pause;
}
@@ -857,7 +855,7 @@ void vm_pageout_scan()
}
}
-void vm_pageout_scan_continue()
+void vm_pageout_scan_continue(void)
{
/*
* We just paused to let the pagers catch up.
@@ -888,7 +886,7 @@ void vm_pageout_scan_continue()
* vm_pageout is the high level pageout daemon.
*/
-void vm_pageout_continue()
+void vm_pageout_continue(void)
{
/*
* The pageout daemon is never done, so loop forever.
@@ -910,12 +908,13 @@ void vm_pageout_continue()
}
}
-void vm_pageout()
+void vm_pageout(void)
{
int free_after_reserve;
current_thread()->vm_privilege = TRUE;
stack_privilege(current_thread());
+ thread_set_own_priority(0);
/*
* Initialize some paging parameters.
diff --git a/vm/vm_pageout.h b/vm/vm_pageout.h
index d41ee30a..ea6cfaf4 100644
--- a/vm/vm_pageout.h
+++ b/vm/vm_pageout.h
@@ -44,4 +44,10 @@ extern vm_page_t vm_pageout_setup(vm_page_t, vm_offset_t, vm_object_t,
vm_offset_t, boolean_t);
extern void vm_pageout_page(vm_page_t, boolean_t, boolean_t);
+extern void vm_pageout(void) __attribute__((noreturn));
+
+extern void vm_pageout_continue(void) __attribute__((noreturn));
+
+extern void vm_pageout_scan_continue(void) __attribute__((noreturn));
+
#endif /* _VM_VM_PAGEOUT_H_ */
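The noreturn attribute lets the compiler check that nothing follows a
call to these daemons. A minimal sketch with a hypothetical caller:

	void example_start_pageout(void)	/* hypothetical */
	{
		vm_pageout();	/* never returns; statements after this
				   point would be flagged unreachable */
	}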
diff --git a/vm/vm_print.h b/vm/vm_print.h
new file mode 100644
index 00000000..eab534eb
--- /dev/null
+++ b/vm/vm_print.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef VM_PRINT_H
+#define VM_PRINT_H
+
+#include <vm/vm_map.h>
+#include <machine/db_machdep.h>
+
+/* Debugging: print a map */
+extern void vm_map_print(vm_map_t);
+
+/* Pretty-print a copy object for ddb. */
+extern void vm_map_copy_print(const vm_map_copy_t);
+
+#include <vm/vm_object.h>
+
+extern void vm_object_print(vm_object_t);
+
+#include <vm/vm_page.h>
+
+extern void vm_page_print(const vm_page_t);
+
+#endif /* VM_PRINT_H */
+
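These declarations only matter under MACH_KDB; a sketch of a consumer
(the function name is hypothetical):

	#if MACH_KDB
	#include <vm/vm_print.h>

	void db_example_show_map(vm_map_t map)	/* hypothetical */
	{
		vm_map_print(map);	/* moved here from vm/vm_map.h */
	}
	#endif /* MACH_KDB */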
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index 7cf4fb16..c70fa734 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -27,7 +27,7 @@
* the rights to redistribute these changes.
*/
/*
- * File: vm/vm_page.c
+ * File: vm/vm_resident.c
* Author: Avadis Tevanian, Jr., Michael Wayne Young
*
* Resident memory management module.
@@ -60,11 +60,12 @@
#if MACH_KDB
#include <ddb/db_output.h>
+#include <vm/vm_print.h>
#endif /* MACH_KDB */
/*
- * Associated with eacn page of user-allocatable memory is a
+ * Associated with each page of user-allocatable memory is a
* page structure.
*/
@@ -191,7 +192,7 @@ void vm_page_bootstrap(
vm_offset_t *startp,
vm_offset_t *endp)
{
- register vm_page_t m;
+ vm_page_t m;
int i;
/*
@@ -273,7 +274,7 @@ void vm_page_bootstrap(
sizeof(vm_page_bucket_t));
for (i = 0; i < vm_page_bucket_count; i++) {
- register vm_page_bucket_t *bucket = &vm_page_buckets[i];
+ vm_page_bucket_t *bucket = &vm_page_buckets[i];
bucket->pages = VM_PAGE_NULL;
simple_lock_init(&bucket->lock);
@@ -395,7 +396,7 @@ void pmap_startup(
while (pmap_next_page(&paddr))
i++;
if (i)
- printf("%d memory page(s) left away\n", i);
+ printf("%u memory page(s) left away\n", i);
/*
* Release pages in reverse order so that physical pages
@@ -482,11 +483,11 @@ void vm_page_create(
*/
void vm_page_insert(
- register vm_page_t mem,
- register vm_object_t object,
- register vm_offset_t offset)
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset)
{
- register vm_page_bucket_t *bucket;
+ vm_page_bucket_t *bucket;
VM_PAGE_CHECK(mem);
@@ -554,11 +555,11 @@ void vm_page_insert(
*/
void vm_page_replace(
- register vm_page_t mem,
- register vm_object_t object,
- register vm_offset_t offset)
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset)
{
- register vm_page_bucket_t *bucket;
+ vm_page_bucket_t *bucket;
VM_PAGE_CHECK(mem);
@@ -581,7 +582,7 @@ void vm_page_replace(
simple_lock(&bucket->lock);
if (bucket->pages) {
vm_page_t *mp = &bucket->pages;
- register vm_page_t m = *mp;
+ vm_page_t m = *mp;
do {
if (m->object == object && m->offset == offset) {
/*
@@ -645,10 +646,10 @@ void vm_page_replace(
*/
void vm_page_remove(
- register vm_page_t mem)
+ vm_page_t mem)
{
- register vm_page_bucket_t *bucket;
- register vm_page_t this;
+ vm_page_bucket_t *bucket;
+ vm_page_t this;
assert(mem->tabled);
VM_PAGE_CHECK(mem);
@@ -664,7 +665,7 @@ void vm_page_remove(
bucket->pages = mem->next;
} else {
- register vm_page_t *prev;
+ vm_page_t *prev;
for (prev = &this->next;
(this = *prev) != mem;
@@ -703,11 +704,11 @@ void vm_page_remove(
*/
vm_page_t vm_page_lookup(
- register vm_object_t object,
- register vm_offset_t offset)
+ vm_object_t object,
+ vm_offset_t offset)
{
- register vm_page_t mem;
- register vm_page_bucket_t *bucket;
+ vm_page_t mem;
+ vm_page_bucket_t *bucket;
/*
* Search the hash table for this object/offset pair
@@ -734,9 +735,9 @@ vm_page_t vm_page_lookup(
* The object must be locked.
*/
void vm_page_rename(
- register vm_page_t mem,
- register vm_object_t new_object,
- vm_offset_t new_offset)
+ vm_page_t mem,
+ vm_object_t new_object,
+ vm_offset_t new_offset)
{
/*
* Changes to mem->object require the page lock because
@@ -773,7 +774,7 @@ void vm_page_init(
vm_page_t vm_page_grab_fictitious(void)
{
- register vm_page_t m;
+ vm_page_t m;
simple_lock(&vm_page_queue_free_lock);
m = vm_page_queue_fictitious;
@@ -794,7 +795,7 @@ vm_page_t vm_page_grab_fictitious(void)
*/
void vm_page_release_fictitious(
- register vm_page_t m)
+ vm_page_t m)
{
simple_lock(&vm_page_queue_free_lock);
if (m->free)
@@ -817,7 +818,7 @@ int vm_page_fictitious_quantum = 5;
void vm_page_more_fictitious(void)
{
- register vm_page_t m;
+ vm_page_t m;
int i;
for (i = 0; i < vm_page_fictitious_quantum; i++) {
@@ -838,10 +839,10 @@ void vm_page_more_fictitious(void)
*/
boolean_t vm_page_convert(
- register vm_page_t m,
+ vm_page_t m,
boolean_t external)
{
- register vm_page_t real_m;
+ vm_page_t real_m;
real_m = vm_page_grab(external);
if (real_m == VM_PAGE_NULL)
@@ -867,7 +868,7 @@ boolean_t vm_page_convert(
vm_page_t vm_page_grab(
boolean_t external)
{
- register vm_page_t mem;
+ vm_page_t mem;
simple_lock(&vm_page_queue_free_lock);
@@ -917,7 +918,7 @@ vm_page_t vm_page_grab(
return mem;
}
-vm_offset_t vm_page_grab_phys_addr()
+vm_offset_t vm_page_grab_phys_addr(void)
{
vm_page_t p = vm_page_grab(FALSE);
if (p == VM_PAGE_NULL)
@@ -948,7 +949,7 @@ vm_page_grab_contiguous_pages(
natural_t *bits,
boolean_t external)
{
- register int first_set;
+ int first_set;
int size, alloc_size;
kern_return_t ret;
vm_page_t mem, *prevmemp;
@@ -1005,7 +1006,7 @@ vm_page_grab_contiguous_pages(
*/
mem = vm_page_queue_free;
while (mem) {
- register int word_index, bit_index;
+ int word_index, bit_index;
bit_index = (mem->phys_addr >> PAGE_SHIFT);
word_index = bit_index / NBPEL;
@@ -1022,14 +1023,14 @@ vm_page_grab_contiguous_pages(
* the free list.
*/
{
- register int bits_so_far = 0, i;
+ int bits_so_far = 0, i;
first_set = 0;
for (i = 0; i < size; i += sizeof(natural_t)) {
- register natural_t v = bits[i / sizeof(natural_t)];
- register int bitpos;
+ natural_t v = bits[i / sizeof(natural_t)];
+ int bitpos;
/*
* Bitscan this one word
@@ -1098,7 +1099,7 @@ found_em:
if (external)
vm_page_external_count += npages;
{
- register vm_offset_t first_phys, last_phys;
+ vm_offset_t first_phys, last_phys;
/* cache values for compare */
first_phys = first_set << PAGE_SHIFT;
@@ -1110,7 +1111,7 @@ found_em:
while (mem) {
- register vm_offset_t addr;
+ vm_offset_t addr;
addr = mem->phys_addr;
@@ -1164,8 +1165,8 @@ out:
*/
void vm_page_release(
- register vm_page_t mem,
- boolean_t external)
+ vm_page_t mem,
+ boolean_t external)
{
simple_lock(&vm_page_queue_free_lock);
if (mem->free)
@@ -1256,7 +1257,7 @@ vm_page_t vm_page_alloc(
vm_object_t object,
vm_offset_t offset)
{
- register vm_page_t mem;
+ vm_page_t mem;
mem = vm_page_grab(!object->internal);
if (mem == VM_PAGE_NULL)
@@ -1278,7 +1279,7 @@ vm_page_t vm_page_alloc(
* Object and page queues must be locked prior to entry.
*/
void vm_page_free(
- register vm_page_t mem)
+ vm_page_t mem)
{
if (mem->free)
panic("vm_page_free");
@@ -1329,7 +1330,7 @@ void vm_page_free(
* The page's object and the page queues must be locked.
*/
void vm_page_wire(
- register vm_page_t mem)
+ vm_page_t mem)
{
VM_PAGE_CHECK(mem);
@@ -1350,7 +1351,7 @@ void vm_page_wire(
* The page's object and the page queues must be locked.
*/
void vm_page_unwire(
- register vm_page_t mem)
+ vm_page_t mem)
{
VM_PAGE_CHECK(mem);
@@ -1373,7 +1374,7 @@ void vm_page_unwire(
* The page queues must be locked.
*/
void vm_page_deactivate(
- register vm_page_t m)
+ vm_page_t m)
{
VM_PAGE_CHECK(m);
@@ -1407,7 +1408,7 @@ void vm_page_deactivate(
*/
void vm_page_activate(
- register vm_page_t m)
+ vm_page_t m)
{
VM_PAGE_CHECK(m);
@@ -1504,10 +1505,10 @@ vm_page_info(
* Routine: vm_page_print [exported]
*/
void vm_page_print(p)
- vm_page_t p;
+ const vm_page_t p;
{
iprintf("Page 0x%X: object 0x%X,", (vm_offset_t) p, (vm_offset_t) p->object);
- printf(" offset 0x%X", (vm_offset_t) p->offset);
+ printf(" offset 0x%X", p->offset);
printf("wire_count %d,", p->wire_count);
printf(" %s",
(p->active ? "active" : (p->inactive ? "inactive" : "loose")));
@@ -1532,7 +1533,7 @@ void vm_page_print(p)
printf("%s,",
(p->tabled ? "" : "not_tabled"));
printf("phys_addr = 0x%X, lock = 0x%X, unlock_request = 0x%X\n",
- (vm_offset_t) p->phys_addr,
+ p->phys_addr,
(vm_offset_t) p->page_lock,
(vm_offset_t) p->unlock_request);
}
diff --git a/vm/vm_resident.h b/vm/vm_resident.h
index 67f1807f..e8bf6818 100644
--- a/vm/vm_resident.h
+++ b/vm/vm_resident.h
@@ -38,8 +38,8 @@
* The object and page must be locked.
*/
extern void vm_page_replace (
- register vm_page_t mem,
- register vm_object_t object,
- register vm_offset_t offset);
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset);
#endif /* _VM_RESIDENT_H_ */
diff --git a/vm/vm_user.c b/vm/vm_user.c
index 6fe398e0..8c7a5d8f 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -56,11 +56,11 @@ vm_statistics_data_t vm_stat;
* vm_allocate allocates "zero fill" memory in the specified
* map.
*/
-kern_return_t vm_allocate(map, addr, size, anywhere)
- register vm_map_t map;
- register vm_offset_t *addr;
- register vm_size_t size;
- boolean_t anywhere;
+kern_return_t vm_allocate(
+ vm_map_t map,
+ vm_offset_t *addr,
+ vm_size_t size,
+ boolean_t anywhere)
{
kern_return_t result;
@@ -97,10 +97,10 @@ kern_return_t vm_allocate(map, addr, size, anywhere)
* vm_deallocate deallocates the specified range of addresses in the
* specified address map.
*/
-kern_return_t vm_deallocate(map, start, size)
- register vm_map_t map;
- vm_offset_t start;
- vm_size_t size;
+kern_return_t vm_deallocate(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size)
{
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -115,11 +115,11 @@ kern_return_t vm_deallocate(map, start, size)
* vm_inherit sets the inheritance of the specified range in the
* specified map.
*/
-kern_return_t vm_inherit(map, start, size, new_inheritance)
- register vm_map_t map;
- vm_offset_t start;
- vm_size_t size;
- vm_inherit_t new_inheritance;
+kern_return_t vm_inherit(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size,
+ vm_inherit_t new_inheritance)
{
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -149,12 +149,12 @@ kern_return_t vm_inherit(map, start, size, new_inheritance)
* specified map.
*/
-kern_return_t vm_protect(map, start, size, set_maximum, new_protection)
- register vm_map_t map;
- vm_offset_t start;
- vm_size_t size;
- boolean_t set_maximum;
- vm_prot_t new_protection;
+kern_return_t vm_protect(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size,
+ boolean_t set_maximum,
+ vm_prot_t new_protection)
{
if ((map == VM_MAP_NULL) ||
(new_protection & ~(VM_PROT_ALL|VM_PROT_NOTIFY)))
@@ -172,9 +172,9 @@ kern_return_t vm_protect(map, start, size, set_maximum, new_protection)
set_maximum));
}
-kern_return_t vm_statistics(map, stat)
- vm_map_t map;
- vm_statistics_data_t *stat;
+kern_return_t vm_statistics(
+ vm_map_t map,
+ vm_statistics_data_t *stat)
{
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -217,15 +217,13 @@ kern_return_t vm_cache_statistics(
* Handle machine-specific attributes for a mapping, such
* as cachability, migrability, etc.
*/
-kern_return_t vm_machine_attribute(map, address, size, attribute, value)
- vm_map_t map;
- vm_address_t address;
- vm_size_t size;
- vm_machine_attribute_t attribute;
- vm_machine_attribute_val_t* value; /* IN/OUT */
+kern_return_t vm_machine_attribute(
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ vm_machine_attribute_t attribute,
+ vm_machine_attribute_val_t* value) /* IN/OUT */
{
- extern kern_return_t vm_map_machine_attribute();
-
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -237,12 +235,12 @@ kern_return_t vm_machine_attribute(map, address, size, attribute, value)
return vm_map_machine_attribute(map, address, size, attribute, value);
}
-kern_return_t vm_read(map, address, size, data, data_size)
- vm_map_t map;
- vm_address_t address;
- vm_size_t size;
- pointer_t *data;
- vm_size_t *data_size;
+kern_return_t vm_read(
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ pointer_t *data,
+ vm_size_t *data_size)
{
kern_return_t error;
vm_map_copy_t ipc_address;
@@ -261,11 +259,11 @@ kern_return_t vm_read(map, address, size, data, data_size)
return(error);
}
-kern_return_t vm_write(map, address, data, size)
- vm_map_t map;
- vm_address_t address;
- pointer_t data;
- vm_size_t size;
+kern_return_t vm_write(
+ vm_map_t map,
+ vm_address_t address,
+ pointer_t data,
+ vm_size_t size)
{
if (map == VM_MAP_NULL)
return KERN_INVALID_ARGUMENT;
@@ -274,11 +272,11 @@ kern_return_t vm_write(map, address, data, size)
FALSE /* interruptible XXX */);
}
-kern_return_t vm_copy(map, source_address, size, dest_address)
- vm_map_t map;
- vm_address_t source_address;
- vm_size_t size;
- vm_address_t dest_address;
+kern_return_t vm_copy(
+ vm_map_t map,
+ vm_address_t source_address,
+ vm_size_t size,
+ vm_address_t dest_address)
{
vm_map_copy_t copy;
kern_return_t kr;
@@ -306,26 +304,19 @@ kern_return_t vm_copy(map, source_address, size, dest_address)
* Routine: vm_map
*/
kern_return_t vm_map(
- target_map,
- address, size, mask, anywhere,
- memory_object, offset,
- copy,
- cur_protection, max_protection, inheritance)
- vm_map_t target_map;
- vm_offset_t *address;
- vm_size_t size;
- vm_offset_t mask;
- boolean_t anywhere;
- ipc_port_t memory_object;
- vm_offset_t offset;
- boolean_t copy;
- vm_prot_t cur_protection;
- vm_prot_t max_protection;
- vm_inherit_t inheritance;
+ vm_map_t target_map,
+ vm_offset_t *address,
+ vm_size_t size,
+ vm_offset_t mask,
+ boolean_t anywhere,
+ ipc_port_t memory_object,
+ vm_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
{
- register
vm_object_t object;
- register
kern_return_t result;
if ((target_map == VM_MAP_NULL) ||
@@ -414,15 +405,32 @@ kern_return_t vm_map(
*
* [ To unwire the pages, specify VM_PROT_NONE. ]
*/
-kern_return_t vm_wire(host, map, start, size, access)
- host_t host;
- register vm_map_t map;
+kern_return_t vm_wire(port, map, start, size, access)
+ const ipc_port_t port;
+ vm_map_t map;
vm_offset_t start;
vm_size_t size;
vm_prot_t access;
{
- if (host == HOST_NULL)
+ host_t host;
+ boolean_t priv;
+
+ if (!IP_VALID(port))
+ return KERN_INVALID_HOST;
+
+ ip_lock(port);
+ if (!ip_active(port) ||
+ (ip_kotype(port) != IKOT_HOST_PRIV
+ && ip_kotype(port) != IKOT_HOST))
+ {
+ ip_unlock(port);
return KERN_INVALID_HOST;
+ }
+
+ priv = ip_kotype(port) == IKOT_HOST_PRIV;
+ ip_unlock(port);
+
+ host = (host_t) port->ip_kobject;
if (map == VM_MAP_NULL)
return KERN_INVALID_TASK;
@@ -435,6 +443,10 @@ kern_return_t vm_wire(host, map, start, size, access)
if (projected_buffer_in_range(map, start, start+size))
return(KERN_INVALID_ARGUMENT);
+ /* TODO: make it tunable */
+ if (!priv && access != VM_PROT_NONE && map->user_wired + size > 65536)
+ return KERN_NO_ACCESS;
+
return vm_map_pageable_user(map,
trunc_page(start),
round_page(start+size),
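A worked example of the new unprivileged limit: with map->user_wired
already at 61440 bytes (60 KiB), a request to wire a further 8192
bytes fails, since 61440 + 8192 = 69632 > 65536, and vm_wire() returns
KERN_NO_ACCESS; the same request through a host-priv port (priv ==
TRUE) bypasses the check.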