Diffstat (limited to 'vm')
-rw-r--r--  vm/memory_object.c        | 161
-rw-r--r--  vm/memory_object_proxy.c  |  10
-rw-r--r--  vm/memory_object_proxy.h  |  15
-rw-r--r--  vm/pmap.h                 |  39
-rw-r--r--  vm/vm_debug.c             |  35
-rw-r--r--  vm/vm_external.c          |  28
-rw-r--r--  vm/vm_external.h          |   5
-rw-r--r--  vm/vm_fault.c             | 160
-rw-r--r--  vm/vm_fault.h             |   6
-rw-r--r--  vm/vm_init.c              |   5
-rw-r--r--  vm/vm_init.h              |  25
-rw-r--r--  vm/vm_kern.c              | 319
-rw-r--r--  vm/vm_kern.h              |  16
-rw-r--r--  vm/vm_map.c               | 586
-rw-r--r--  vm/vm_map.h               |  37
-rw-r--r--  vm/vm_object.c            | 181
-rw-r--r--  vm/vm_object.h            |  14
-rw-r--r--  vm/vm_page.c              | 782
-rw-r--r--  vm/vm_page.h              | 275
-rw-r--r--  vm/vm_pageout.c           |  85
-rw-r--r--  vm/vm_pageout.h           |   6
-rw-r--r--  vm/vm_print.h             |  25
-rw-r--r--  vm/vm_resident.c          | 663
-rw-r--r--  vm/vm_resident.h          |   6
-rw-r--r--  vm/vm_user.c              | 149
25 files changed, 2114 insertions(+), 1519 deletions(-)
diff --git a/vm/memory_object.c b/vm/memory_object.c
index e281c6a3..097ed23d 100644
--- a/vm/memory_object.c
+++ b/vm/memory_object.c
@@ -82,24 +82,19 @@ decl_simple_lock_data(,memory_manager_default_lock)
* argument conversion. Explicit deallocation is necessary.
*/
-kern_return_t memory_object_data_supply(object, offset, data_copy, data_cnt,
- lock_value, precious, reply_to, reply_to_type)
- register
- vm_object_t object;
- register
- vm_offset_t offset;
- vm_map_copy_t data_copy;
- unsigned int data_cnt;
- vm_prot_t lock_value;
- boolean_t precious;
- ipc_port_t reply_to;
- mach_msg_type_name_t reply_to_type;
+kern_return_t memory_object_data_supply(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_map_copy_t data_copy,
+ unsigned int data_cnt,
+ vm_prot_t lock_value,
+ boolean_t precious,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
{
kern_return_t result = KERN_SUCCESS;
vm_offset_t error_offset = 0;
- register
vm_page_t m;
- register
vm_page_t data_m;
vm_size_t original_length;
vm_offset_t original_offset;
@@ -307,29 +302,26 @@ retry_lookup:
return(result);
}
-
/*
* If successful, destroys the map copy object.
*/
-kern_return_t memory_object_data_provided(object, offset, data, data_cnt,
- lock_value)
- vm_object_t object;
- vm_offset_t offset;
- pointer_t data;
- unsigned int data_cnt;
- vm_prot_t lock_value;
+kern_return_t memory_object_data_provided(
+ vm_object_t object,
+ vm_offset_t offset,
+ pointer_t data,
+ unsigned int data_cnt,
+ vm_prot_t lock_value)
{
return memory_object_data_supply(object, offset, (vm_map_copy_t) data,
data_cnt, lock_value, FALSE, IP_NULL,
0);
}
-
-kern_return_t memory_object_data_error(object, offset, size, error_value)
- vm_object_t object;
- vm_offset_t offset;
- vm_size_t size;
- kern_return_t error_value;
+kern_return_t memory_object_data_error(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ kern_return_t error_value)
{
if (object == VM_OBJECT_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -337,16 +329,11 @@ kern_return_t memory_object_data_error(object, offset, size, error_value)
if (size != round_page(size))
return(KERN_INVALID_ARGUMENT);
-#ifdef lint
- /* Error value is ignored at this time */
- error_value++;
-#endif
-
vm_object_lock(object);
offset -= object->paging_offset;
while (size != 0) {
- register vm_page_t m;
+ vm_page_t m;
m = vm_page_lookup(object, offset);
if ((m != VM_PAGE_NULL) && m->busy && m->absent) {
@@ -370,10 +357,10 @@ kern_return_t memory_object_data_error(object, offset, size, error_value)
return(KERN_SUCCESS);
}
-kern_return_t memory_object_data_unavailable(object, offset, size)
- vm_object_t object;
- vm_offset_t offset;
- vm_size_t size;
+kern_return_t memory_object_data_unavailable(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size)
{
#if MACH_PAGEMAP
vm_external_t existence_info = VM_EXTERNAL_NULL;
@@ -406,7 +393,7 @@ kern_return_t memory_object_data_unavailable(object, offset, size)
offset -= object->paging_offset;
while (size != 0) {
- register vm_page_t m;
+ vm_page_t m;
/*
* We're looking for pages that are both busy and
@@ -453,12 +440,11 @@ kern_return_t memory_object_data_unavailable(object, offset, size)
#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
-memory_object_lock_result_t memory_object_lock_page(m, should_return,
- should_flush, prot)
- vm_page_t m;
- memory_object_return_t should_return;
- boolean_t should_flush;
- vm_prot_t prot;
+memory_object_lock_result_t memory_object_lock_page(
+ vm_page_t m,
+ memory_object_return_t should_return,
+ boolean_t should_flush,
+ vm_prot_t prot)
{
/*
* Don't worry about pages for which the kernel
@@ -656,19 +642,17 @@ memory_object_lock_result_t memory_object_lock_page(m, should_return,
*/
kern_return_t
-memory_object_lock_request(object, offset, size,
- should_return, should_flush, prot,
- reply_to, reply_to_type)
- register vm_object_t object;
- register vm_offset_t offset;
- register vm_size_t size;
- memory_object_return_t should_return;
- boolean_t should_flush;
- vm_prot_t prot;
- ipc_port_t reply_to;
- mach_msg_type_name_t reply_to_type;
+memory_object_lock_request(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ memory_object_return_t should_return,
+ boolean_t should_flush,
+ vm_prot_t prot,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
{
- register vm_page_t m;
+ vm_page_t m;
vm_offset_t original_offset = offset;
vm_size_t original_size = size;
vm_offset_t paging_offset = 0;
@@ -720,8 +704,8 @@ memory_object_lock_request(object, offset, size,
#define PAGEOUT_PAGES \
MACRO_BEGIN \
vm_map_copy_t copy; \
- register int i; \
- register vm_page_t hp; \
+ int i; \
+ vm_page_t hp; \
\
vm_object_unlock(object); \
\
@@ -892,13 +876,12 @@ MACRO_END
}
kern_return_t
-memory_object_set_attributes_common(object, object_ready, may_cache,
- copy_strategy, use_old_pageout)
- vm_object_t object;
- boolean_t object_ready;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
- boolean_t use_old_pageout;
+memory_object_set_attributes_common(
+ vm_object_t object,
+ boolean_t object_ready,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy,
+ boolean_t use_old_pageout)
{
if (object == VM_OBJECT_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -959,13 +942,12 @@ memory_object_set_attributes_common(object, object_ready, may_cache,
* XXX stub that made change_attributes an RPC. Need investigation.
*/
-kern_return_t memory_object_change_attributes(object, may_cache,
- copy_strategy, reply_to, reply_to_type)
- vm_object_t object;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
- ipc_port_t reply_to;
- mach_msg_type_name_t reply_to_type;
+kern_return_t memory_object_change_attributes(
+ vm_object_t object,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
{
kern_return_t result;
@@ -995,33 +977,32 @@ kern_return_t memory_object_change_attributes(object, may_cache,
}
kern_return_t
-memory_object_set_attributes(object, object_ready, may_cache, copy_strategy)
- vm_object_t object;
- boolean_t object_ready;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
+memory_object_set_attributes(
+ vm_object_t object,
+ boolean_t object_ready,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy)
{
return memory_object_set_attributes_common(object, object_ready,
may_cache, copy_strategy,
TRUE);
}
-kern_return_t memory_object_ready(object, may_cache, copy_strategy)
- vm_object_t object;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
+kern_return_t memory_object_ready(
+ vm_object_t object,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy)
{
return memory_object_set_attributes_common(object, TRUE,
may_cache, copy_strategy,
FALSE);
}
-kern_return_t memory_object_get_attributes(object, object_ready,
- may_cache, copy_strategy)
- vm_object_t object;
- boolean_t *object_ready;
- boolean_t *may_cache;
- memory_object_copy_strategy_t *copy_strategy;
+kern_return_t memory_object_get_attributes(
+ vm_object_t object,
+ boolean_t *object_ready,
+ boolean_t *may_cache,
+ memory_object_copy_strategy_t *copy_strategy)
{
if (object == VM_OBJECT_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -1041,7 +1022,7 @@ kern_return_t memory_object_get_attributes(object, object_ready,
* If successful, consumes the supplied naked send right.
*/
kern_return_t vm_set_default_memory_manager(host, default_manager)
- host_t host;
+ const host_t host;
ipc_port_t *default_manager;
{
ipc_port_t current_manager;
@@ -1123,7 +1104,7 @@ ipc_port_t memory_manager_default_reference(void)
*/
boolean_t memory_manager_default_port(port)
- ipc_port_t port;
+ const ipc_port_t port;
{
ipc_port_t current;
boolean_t result;
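
Nearly every hunk in this file follows one mechanical pattern: old-style (K&R) definitions, where parameter types are declared between the declarator and the body, become ANSI C prototypes, and the obsolete `register` storage-class hints are dropped. A minimal before/after sketch of the pattern, on a hypothetical function rather than kernel code:

    /* Before: K&R style. Parameter types sit outside the parameter list,
     * so call sites get no argument type-checking from the compiler. */
    int scale(value, factor)
        register int value;
        int factor;
    {
        return value * factor;
    }

    /* After: ANSI prototype. Types appear in the parameter list, every
     * call is checked, and register allocation is left to the compiler. */
    int scale(int value, int factor)
    {
        return value * factor;
    }

The prototype form is what allows the compiler to diagnose mismatched arguments at the many call sites of these routines.
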
diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c
index 4fed312e..01bce2a5 100644
--- a/vm/memory_object_proxy.c
+++ b/vm/memory_object_proxy.c
@@ -64,7 +64,7 @@ void
memory_object_proxy_init (void)
{
kmem_cache_init (&memory_object_proxy_cache, "memory_object_proxy",
- sizeof (struct memory_object_proxy), 0, NULL, NULL, NULL, 0);
+ sizeof (struct memory_object_proxy), 0, NULL, 0);
}
/* Lookup a proxy memory object by its port. */
@@ -115,11 +115,11 @@ memory_object_proxy_notify (mach_msg_header_t *msg)
given OBJECT at OFFSET in the new object with the maximum
protection MAX_PROTECTION and return it in *PORT. */
kern_return_t
-memory_object_create_proxy (ipc_space_t space, vm_prot_t max_protection,
+memory_object_create_proxy (const ipc_space_t space, vm_prot_t max_protection,
ipc_port_t *object, natural_t object_count,
- vm_offset_t *offset, natural_t offset_count,
- vm_offset_t *start, natural_t start_count,
- vm_offset_t *len, natural_t len_count,
+ const vm_offset_t *offset, natural_t offset_count,
+ const vm_offset_t *start, natural_t start_count,
+ const vm_offset_t *len, natural_t len_count,
ipc_port_t *port)
{
memory_object_proxy_t proxy;
diff --git a/vm/memory_object_proxy.h b/vm/memory_object_proxy.h
index f4be0d0d..dc0ea747 100644
--- a/vm/memory_object_proxy.h
+++ b/vm/memory_object_proxy.h
@@ -19,7 +19,7 @@
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
#ifndef _VM_MEMORY_OBJECT_PROXY_H_
-#define _VM_MEMORY_OBJECT_PROXT_H_
+#define _VM_MEMORY_OBJECT_PROXY_H_
#include <ipc/ipc_types.h>
#include <mach/boolean.h>
@@ -30,19 +30,8 @@
extern void memory_object_proxy_init (void);
extern boolean_t memory_object_proxy_notify (mach_msg_header_t *msg);
-extern kern_return_t memory_object_create_proxy (ipc_space_t space,
- vm_prot_t max_protection,
- ipc_port_t *object,
- natural_t object_count,
- vm_offset_t *offset,
- natural_t offset_count,
- vm_offset_t *start,
- natural_t start_count,
- vm_offset_t *len,
- natural_t len_count,
- ipc_port_t *port);
extern kern_return_t memory_object_proxy_lookup (ipc_port_t port,
ipc_port_t *object,
vm_prot_t *max_protection);
-#endif /* _VM_MEMORY_OBJECT_PROXT_H_ */
+#endif /* _VM_MEMORY_OBJECT_PROXY_H_ */
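
The guard fix above deserves a note: because the `#define` (and the closing `#endif` comment) misspelled PROXY as PROXT, the macro tested by `#ifndef` was never defined, so the guard never actually prevented double inclusion. The correct shape, shown on a hypothetical header:

    #ifndef _VM_EXAMPLE_H_
    #define _VM_EXAMPLE_H_    /* must spell the #ifndef macro exactly */

    /* declarations ... */

    #endif /* _VM_EXAMPLE_H_ */
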
diff --git a/vm/pmap.h b/vm/pmap.h
index 59fd03ab..9bbcdc32 100644
--- a/vm/pmap.h
+++ b/vm/pmap.h
@@ -67,9 +67,6 @@
extern vm_offset_t pmap_steal_memory(vm_size_t);
/* During VM initialization, report remaining unused physical pages. */
extern unsigned int pmap_free_pages(void);
-/* During VM initialization, use remaining physical pages to allocate page
- * frames. */
-extern void pmap_startup(vm_offset_t *, vm_offset_t *);
/* Initialization, after kernel runs in virtual memory. */
extern void pmap_init(void);
@@ -80,18 +77,14 @@ extern void pmap_init(void);
* Otherwise, it must implement
* pmap_free_pages
* pmap_virtual_space
- * pmap_next_page
* pmap_init
- * and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
- * using pmap_free_pages, pmap_next_page, pmap_virtual_space,
- * and pmap_enter. pmap_free_pages may over-estimate the number
- * of unused physical pages, and pmap_next_page may return FALSE
- * to indicate that there are no more unused pages to return.
+ * and vm/vm_resident.c implements pmap_steal_memory using
+ * pmap_free_pages, pmap_virtual_space, and pmap_enter.
+ *
+ * pmap_free_pages may over-estimate the number of unused physical pages.
* However, for best performance pmap_free_pages should be accurate.
*/
-/* During VM initialization, return the next unused physical page. */
-extern boolean_t pmap_next_page(vm_offset_t *);
/* During VM initialization, report virtual space available for the kernel. */
extern void pmap_virtual_space(vm_offset_t *, vm_offset_t *);
#endif /* MACHINE_PAGES */
@@ -163,38 +156,16 @@ void pmap_clear_modify(vm_offset_t pa);
/* Return modify bit */
boolean_t pmap_is_modified(vm_offset_t pa);
-
-/*
- * Statistics routines
- */
-
-#ifndef pmap_resident_count
-extern int pmap_resident_count();
-#endif /* pmap_resident_count */
-
/*
* Sundry required routines
*/
/* Return a virtual-to-physical mapping, if possible. */
extern vm_offset_t pmap_extract(pmap_t, vm_offset_t);
-/* Is virtual address valid? */
-extern boolean_t pmap_access();
/* Perform garbage collection, if any. */
extern void pmap_collect(pmap_t);
/* Specify pageability. */
extern void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
-#ifndef pmap_phys_address
-/* Transform address returned by device driver mapping function to physical
- * address known to this module. */
-extern vm_offset_t pmap_phys_address();
-#endif /* pmap_phys_address */
-#ifndef pmap_phys_to_frame
-/* Inverse of pmap_phys_address, for use by device driver mapping function in
- * machine-independent pseudo-devices. */
-extern int pmap_phys_to_frame();
-#endif /* pmap_phys_to_frame */
-
/*
* Optional routines
*/
@@ -205,7 +176,7 @@ extern void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t,
#endif /* pmap_copy */
#ifndef pmap_attribute
/* Get/Set special memory attributes. */
-extern kern_return_t pmap_attribute();
+extern kern_return_t pmap_attribute(void);
#endif /* pmap_attribute */
/*
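
The `pmap_attribute` change rests on a C subtlety worth making explicit: in a declaration, an empty parameter list `()` means "parameters unspecified", so the compiler accepts a call with any arguments, whereas `(void)` declares a function taking none and makes stray arguments a hard error. Illustrated with hypothetical declarations:

    extern int probe();         /* unspecified: probe(1, 2) compiles silently  */
    extern int probe_v(void);   /* no parameters: probe_v(1) fails to compile  */
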
diff --git a/vm/vm_debug.c b/vm/vm_debug.c
index 0af58b69..227090e6 100644
--- a/vm/vm_debug.c
+++ b/vm/vm_debug.c
@@ -65,8 +65,7 @@
*/
ipc_port_t
-vm_object_real_name(object)
- vm_object_t object;
+vm_object_real_name(vm_object_t object)
{
ipc_port_t port = IP_NULL;
@@ -94,11 +93,11 @@ vm_object_real_name(object)
*/
kern_return_t
-mach_vm_region_info(map, address, regionp, portp)
- vm_map_t map;
- vm_offset_t address;
- vm_region_info_t *regionp;
- ipc_port_t *portp;
+mach_vm_region_info(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_region_info_t *regionp,
+ ipc_port_t *portp)
{
vm_map_t cmap; /* current map in traversal */
vm_map_t nmap; /* next map to look at */
@@ -184,11 +183,11 @@ mach_vm_region_info(map, address, regionp, portp)
*/
kern_return_t
-mach_vm_object_info(object, infop, shadowp, copyp)
- vm_object_t object;
- vm_object_info_t *infop;
- ipc_port_t *shadowp;
- ipc_port_t *copyp;
+mach_vm_object_info(
+ vm_object_t object,
+ vm_object_info_t *infop,
+ ipc_port_t *shadowp,
+ ipc_port_t *copyp)
{
vm_object_info_t info;
vm_object_info_state_t state;
@@ -278,10 +277,10 @@ mach_vm_object_info(object, infop, shadowp, copyp)
*/
kern_return_t
-mach_vm_object_pages(object, pagesp, countp)
- vm_object_t object;
- vm_page_info_array_t *pagesp;
- natural_t *countp;
+mach_vm_object_pages(
+ vm_object_t object,
+ vm_page_info_array_t *pagesp,
+ natural_t *countp)
{
vm_size_t size;
vm_offset_t addr;
@@ -404,7 +403,7 @@ mach_vm_object_pages(object, pagesp, countp)
addr + rsize_used, size - rsize_used);
if (size_used != rsize_used)
- memset((char *) (addr + size_used), 0,
+ memset((void *) (addr + size_used), 0,
rsize_used - size_used);
kr = vm_map_copyin(ipc_kernel_map, addr, rsize_used,
@@ -434,7 +433,7 @@ mach_vm_object_pages(object, pagesp, countp)
kern_return_t
host_virtual_physical_table_info(host, infop, countp)
- host_t host;
+ const host_t host;
hash_info_bucket_array_t *infop;
natural_t *countp;
{
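
Several handle parameters in this file gain a `const` qualifier (`const host_t host` here, `const ipc_port_t port` in memory_object.c above). Since these Mach types are pointer typedefs, the qualifier binds to the pointer itself rather than to the pointee, something the spelled-out form `const struct host *` would not express. A sketch with a hypothetical typedef:

    struct widget { int refs; };
    typedef struct widget *widget_t;

    void take(const widget_t w)     /* w has type: struct widget *const */
    {
        w->refs++;                  /* allowed: the pointee is not const   */
        /* w = 0; */                /* error: the pointer itself is const  */
    }
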
diff --git a/vm/vm_external.c b/vm/vm_external.c
index e9643ffc..3b1a2879 100644
--- a/vm/vm_external.c
+++ b/vm/vm_external.c
@@ -35,6 +35,7 @@
#include <vm/vm_external.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
+#include <string.h>
@@ -56,8 +57,7 @@ struct kmem_cache vm_object_small_existence_map_cache;
struct kmem_cache vm_object_large_existence_map_cache;
-vm_external_t vm_external_create(size)
- vm_offset_t size;
+vm_external_t vm_external_create(vm_offset_t size)
{
vm_external_t result;
vm_size_t bytes;
@@ -70,16 +70,16 @@ vm_external_t vm_external_create(size)
result->existence_map =
(char *) kmem_cache_alloc(&vm_object_small_existence_map_cache);
result->existence_size = SMALL_SIZE;
- } else if (bytes <= LARGE_SIZE) {
+ } else {
result->existence_map =
(char *) kmem_cache_alloc(&vm_object_large_existence_map_cache);
result->existence_size = LARGE_SIZE;
}
+ memset (result->existence_map, 0, result->existence_size);
return(result);
}
-void vm_external_destroy(e)
- vm_external_t e;
+void vm_external_destroy(vm_external_t e)
{
if (e == VM_EXTERNAL_NULL)
return;
@@ -97,8 +97,8 @@ void vm_external_destroy(e)
}
vm_external_state_t _vm_external_state_get(e, offset)
- vm_external_t e;
- vm_offset_t offset;
+ const vm_external_t e;
+ vm_offset_t offset;
{
unsigned
int bit, byte;
@@ -115,10 +115,10 @@ vm_external_state_t _vm_external_state_get(e, offset)
VM_EXTERNAL_STATE_EXISTS : VM_EXTERNAL_STATE_ABSENT );
}
-void vm_external_state_set(e, offset, state)
- vm_external_t e;
- vm_offset_t offset;
- vm_external_state_t state;
+void vm_external_state_set(
+ vm_external_t e,
+ vm_offset_t offset,
+ vm_external_state_t state)
{
unsigned
int bit, byte;
@@ -140,13 +140,13 @@ void vm_external_module_initialize(void)
vm_size_t size = (vm_size_t) sizeof(struct vm_external);
kmem_cache_init(&vm_external_cache, "vm_external", size, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
kmem_cache_init(&vm_object_small_existence_map_cache,
"small_existence_map", SMALL_SIZE, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
kmem_cache_init(&vm_object_large_existence_map_cache,
"large_existence_map", LARGE_SIZE, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
}
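
An existence map is a bitmap with one bit per page of backing storage, and the memset added to vm_external_create is what now zeroes it: the shortened kmem_cache_init signature in this commit no longer takes constructor callbacks, so the slab cache hands back uninitialized memory. A self-contained sketch of the byte/bit arithmetic, assuming 4 KiB pages:

    #include <string.h>

    #define PAGE_SIZE 4096                  /* assumed page size     */
    #define MAP_BYTES 128                   /* covers 128 * 8 pages  */

    static unsigned char exist_map[MAP_BYTES];

    void map_init(void)
    {
        memset(exist_map, 0, sizeof exist_map);   /* all pages "absent" */
    }

    void state_set(unsigned long offset)    /* mark a page as written */
    {
        unsigned long bit = offset / PAGE_SIZE;
        exist_map[bit / 8] |= 1u << (bit % 8);
    }

    int state_get(unsigned long offset)     /* 1: EXISTS, 0: ABSENT */
    {
        unsigned long bit = offset / PAGE_SIZE;
        return (exist_map[bit / 8] >> (bit % 8)) & 1;
    }
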
diff --git a/vm/vm_external.h b/vm/vm_external.h
index 55c9e48d..4e44ddf7 100644
--- a/vm/vm_external.h
+++ b/vm/vm_external.h
@@ -46,9 +46,14 @@ typedef struct vm_external {
* been written to backing
* storage.
*/
+#if 0
+ /* XXX: Currently, existence_count is not used. I guess it
+ could be useful to get rid of the map if the count drops to
+ zero. */
int existence_count;/* Number of bits turned on in
* existence_map.
*/
+#endif
} *vm_external_t;
#define VM_EXTERNAL_NULL ((vm_external_t) 0)
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 7e849616..09e2c54d 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -51,7 +51,7 @@
#include <mach/memory_object.h>
#include <vm/memory_object_user.user.h>
/* For memory_object_data_{request,unlock} */
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <kern/slab.h>
#if MACH_PCSAMPLE
@@ -88,8 +88,6 @@ struct kmem_cache vm_fault_state_cache;
int vm_object_absent_max = 50;
-int vm_fault_debug = 0;
-
boolean_t vm_fault_dirty_handling = FALSE;
boolean_t vm_fault_interruptible = TRUE;
@@ -107,7 +105,7 @@ extern struct db_watchpoint *db_watchpoint_list;
void vm_fault_init(void)
{
kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
- sizeof(vm_fault_state_t), 0, NULL, NULL, NULL, 0);
+ sizeof(vm_fault_state_t), 0, NULL, 0);
}
/*
@@ -125,9 +123,9 @@ void vm_fault_init(void)
* "object" must be locked.
*/
void
-vm_fault_cleanup(object, top_page)
- register vm_object_t object;
- register vm_page_t top_page;
+vm_fault_cleanup(
+ vm_object_t object,
+ vm_page_t top_page)
{
vm_object_paging_end(object);
vm_object_unlock(object);
@@ -204,33 +202,26 @@ vm_fault_cleanup(object, top_page)
* The "result_page" is also left busy. It is not removed
* from the pageout queues.
*/
-vm_fault_return_t vm_fault_page(first_object, first_offset,
- fault_type, must_be_resident, interruptible,
- protection,
- result_page, top_page,
- resume, continuation)
+vm_fault_return_t vm_fault_page(
/* Arguments: */
- vm_object_t first_object; /* Object to begin search */
- vm_offset_t first_offset; /* Offset into object */
- vm_prot_t fault_type; /* What access is requested */
- boolean_t must_be_resident;/* Must page be resident? */
- boolean_t interruptible; /* May fault be interrupted? */
+ vm_object_t first_object, /* Object to begin search */
+ vm_offset_t first_offset, /* Offset into object */
+ vm_prot_t fault_type, /* What access is requested */
+ boolean_t must_be_resident,/* Must page be resident? */
+ boolean_t interruptible, /* May fault be interrupted? */
/* Modifies in place: */
- vm_prot_t *protection; /* Protection for mapping */
+ vm_prot_t *protection, /* Protection for mapping */
/* Returns: */
- vm_page_t *result_page; /* Page found, if successful */
- vm_page_t *top_page; /* Page in top object, if
+ vm_page_t *result_page, /* Page found, if successful */
+ vm_page_t *top_page, /* Page in top object, if
* not result_page.
*/
/* More arguments: */
- boolean_t resume; /* We are restarting. */
- void (*continuation)(); /* Continuation for blocking. */
+ boolean_t resume, /* We are restarting. */
+ void (*continuation)()) /* Continuation for blocking. */
{
- register
vm_page_t m;
- register
vm_object_t object;
- register
vm_offset_t offset;
vm_page_t first_m;
vm_object_t next_object;
@@ -239,7 +230,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
vm_prot_t access_required;
if (resume) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
if (state->vmfp_backoff)
@@ -357,7 +348,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
PAGE_ASSERT_WAIT(m, interruptible);
vm_object_unlock(object);
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -616,7 +607,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
* won't block for pages.
*/
- if (m->fictitious && !vm_page_convert(m, FALSE)) {
+ if (m->fictitious && !vm_page_convert(&m, FALSE)) {
VM_PAGE_FREE(m);
vm_fault_cleanup(object, first_m);
return(VM_FAULT_MEMORY_SHORTAGE);
@@ -734,7 +725,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
assert(m->object == object);
first_m = VM_PAGE_NULL;
- if (m->fictitious && !vm_page_convert(m, !object->internal)) {
+ if (m->fictitious && !vm_page_convert(&m, !object->internal)) {
VM_PAGE_FREE(m);
vm_fault_cleanup(object, VM_PAGE_NULL);
return(VM_FAULT_MEMORY_SHORTAGE);
@@ -777,12 +768,10 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
* objects.
*/
-#if EXTRA_ASSERTIONS
assert(m->busy && !m->absent);
assert((first_m == VM_PAGE_NULL) ||
(first_m->busy && !first_m->absent &&
!first_m->active && !first_m->inactive));
-#endif /* EXTRA_ASSERTIONS */
/*
* If the page is being written, but isn't
@@ -1094,7 +1083,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
vm_fault_cleanup(object, first_m);
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -1141,9 +1130,9 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
*/
void
-vm_fault_continue()
+vm_fault_continue(void)
{
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
(void) vm_fault(state->vmf_map,
@@ -1154,14 +1143,13 @@ vm_fault_continue()
/*NOTREACHED*/
}
-kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
- resume, continuation)
- vm_map_t map;
- vm_offset_t vaddr;
- vm_prot_t fault_type;
- boolean_t change_wiring;
- boolean_t resume;
- void (*continuation)();
+kern_return_t vm_fault(
+ vm_map_t map,
+ vm_offset_t vaddr,
+ vm_prot_t fault_type,
+ boolean_t change_wiring,
+ boolean_t resume,
+ void (*continuation)())
{
vm_map_version_t version; /* Map version for verificiation */
boolean_t wired; /* Should mapping be wired down? */
@@ -1173,11 +1161,10 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
vm_page_t top_page; /* Placeholder page */
kern_return_t kr;
- register
vm_page_t m; /* Fast access to result_page */
if (resume) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -1253,7 +1240,7 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
vm_object_paging_begin(object);
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -1307,7 +1294,7 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
goto done;
case VM_FAULT_MEMORY_SHORTAGE:
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -1490,7 +1477,7 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
done:
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
kmem_cache_free(&vm_fault_state_cache, (vm_offset_t) state);
@@ -1501,21 +1488,19 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
return(kr);
}
-kern_return_t vm_fault_wire_fast();
-
/*
* vm_fault_wire:
*
* Wire down a range of virtual addresses in a map.
*/
-void vm_fault_wire(map, entry)
- vm_map_t map;
- vm_map_entry_t entry;
+void vm_fault_wire(
+ vm_map_t map,
+ vm_map_entry_t entry)
{
- register vm_offset_t va;
- register pmap_t pmap;
- register vm_offset_t end_addr = entry->vme_end;
+ vm_offset_t va;
+ pmap_t pmap;
+ vm_offset_t end_addr = entry->vme_end;
pmap = vm_map_pmap(map);
@@ -1544,14 +1529,14 @@ void vm_fault_wire(map, entry)
*
* Unwire a range of virtual addresses in a map.
*/
-void vm_fault_unwire(map, entry)
- vm_map_t map;
- vm_map_entry_t entry;
+void vm_fault_unwire(
+ vm_map_t map,
+ vm_map_entry_t entry)
{
- register vm_offset_t va;
- register pmap_t pmap;
- register vm_offset_t end_addr = entry->vme_end;
- vm_object_t object;
+ vm_offset_t va;
+ pmap_t pmap;
+ vm_offset_t end_addr = entry->vme_end;
+ vm_object_t object;
pmap = vm_map_pmap(map);
@@ -1633,14 +1618,14 @@ void vm_fault_unwire(map, entry)
* other than the common case will return KERN_FAILURE, and the caller
* is expected to call vm_fault().
*/
-kern_return_t vm_fault_wire_fast(map, va, entry)
- vm_map_t map;
- vm_offset_t va;
- vm_map_entry_t entry;
+kern_return_t vm_fault_wire_fast(
+ vm_map_t map,
+ vm_offset_t va,
+ vm_map_entry_t entry)
{
vm_object_t object;
vm_offset_t offset;
- register vm_page_t m;
+ vm_page_t m;
vm_prot_t prot;
vm_stat.faults++; /* needs lock XXX */
@@ -1782,9 +1767,9 @@ kern_return_t vm_fault_wire_fast(map, va, entry)
* Release a page used by vm_fault_copy.
*/
-void vm_fault_copy_cleanup(page, top_page)
- vm_page_t page;
- vm_page_t top_page;
+void vm_fault_copy_cleanup(
+ vm_page_t page,
+ vm_page_t top_page)
{
vm_object_t object = page->object;
@@ -1825,23 +1810,14 @@ void vm_fault_copy_cleanup(page, top_page)
* requested.
*/
kern_return_t vm_fault_copy(
- src_object,
- src_offset,
- src_size,
- dst_object,
- dst_offset,
- dst_map,
- dst_version,
- interruptible
- )
- vm_object_t src_object;
- vm_offset_t src_offset;
- vm_size_t *src_size; /* INOUT */
- vm_object_t dst_object;
- vm_offset_t dst_offset;
- vm_map_t dst_map;
- vm_map_version_t *dst_version;
- boolean_t interruptible;
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t *src_size, /* INOUT */
+ vm_object_t dst_object,
+ vm_offset_t dst_offset,
+ vm_map_t dst_map,
+ vm_map_version_t *dst_version,
+ boolean_t interruptible)
{
vm_page_t result_page;
vm_prot_t prot;
@@ -2022,13 +1998,11 @@ kern_return_t vm_fault_copy(
* XXX Untested. Also unused. Eventually, this technology
* could be used in vm_fault_copy() to advantage.
*/
-vm_fault_return_t vm_fault_page_overwrite(dst_object, dst_offset, result_page)
- register
- vm_object_t dst_object;
- vm_offset_t dst_offset;
- vm_page_t *result_page; /* OUT */
+vm_fault_return_t vm_fault_page_overwrite(
+ vm_object_t dst_object,
+ vm_offset_t dst_offset,
+ vm_page_t *result_page) /* OUT */
{
- register
vm_page_t dst_page;
#define interruptible FALSE /* XXX */
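
The two `vm_page_convert(m, ...)` calls becoming `vm_page_convert(&m, ...)` reflect an interface change: converting a fictitious page can substitute a different vm_page structure for the one the caller holds, and only a pointer-to-pointer parameter lets the callee update the caller's local variable. The pattern in isolation, with hypothetical names:

    struct page { int fictitious; };

    static struct page real_page;   /* stands in for a freshly grabbed real page */

    /* On success, *pp may point at a different page than before. */
    int convert(struct page **pp)
    {
        if ((*pp)->fictitious)
            *pp = &real_page;       /* caller's variable now tracks the new page */
        return 1;                   /* 0 would mean: out of real pages */
    }

Passing plain `m` could only mutate the page the caller already sees; it could never redirect `m` itself, which is exactly what conversion needs to do.
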
diff --git a/vm/vm_fault.h b/vm/vm_fault.h
index 0492ccf4..7fdbc417 100644
--- a/vm/vm_fault.h
+++ b/vm/vm_fault.h
@@ -69,4 +69,10 @@ extern void vm_fault_unwire(vm_map_t, vm_map_entry_t);
extern kern_return_t vm_fault_copy(vm_object_t, vm_offset_t, vm_size_t *,
vm_object_t, vm_offset_t, vm_map_t,
vm_map_version_t *, boolean_t);
+
+kern_return_t vm_fault_wire_fast(
+ vm_map_t map,
+ vm_offset_t va,
+ vm_map_entry_t entry);
+
#endif /* _VM_VM_FAULT_H_ */
diff --git a/vm/vm_init.c b/vm/vm_init.c
index 89eb0984..23d5d46e 100644
--- a/vm/vm_init.c
+++ b/vm/vm_init.c
@@ -51,7 +51,7 @@
* This is done only by the first cpu up.
*/
-void vm_mem_bootstrap()
+void vm_mem_bootstrap(void)
{
vm_offset_t start, end;
@@ -79,8 +79,9 @@ void vm_mem_bootstrap()
memory_manager_default_init();
}
-void vm_mem_init()
+void vm_mem_init(void)
{
vm_object_init();
memory_object_proxy_init();
+ vm_page_info_all();
}
diff --git a/vm/vm_init.h b/vm/vm_init.h
new file mode 100644
index 00000000..42ef48b2
--- /dev/null
+++ b/vm/vm_init.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _VM_VM_INIT_H_
+#define _VM_VM_INIT_H_
+
+extern void vm_mem_init(void);
+extern void vm_mem_bootstrap(void);
+
+#endif /* _VM_VM_INIT_H_ */
diff --git a/vm/vm_kern.c b/vm/vm_kern.c
index fd46e982..9c0a20b7 100644
--- a/vm/vm_kern.c
+++ b/vm/vm_kern.c
@@ -42,6 +42,7 @@
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/lock.h>
+#include <kern/slab.h>
#include <kern/thread.h>
#include <kern/printf.h>
#include <vm/pmap.h>
@@ -62,9 +63,6 @@ static struct vm_map kernel_map_store;
vm_map_t kernel_map = &kernel_map_store;
vm_map_t kernel_pageable_map;
-extern void kmem_alloc_pages();
-extern void kmem_remap_pages();
-
/*
* projected_buffer_allocate
*
@@ -82,15 +80,14 @@ extern void kmem_remap_pages();
*/
kern_return_t
-projected_buffer_allocate(map, size, persistence, kernel_p,
- user_p, protection, inheritance)
- vm_map_t map;
- vm_size_t size;
- int persistence;
- vm_offset_t *kernel_p;
- vm_offset_t *user_p;
- vm_prot_t protection;
- vm_inherit_t inheritance; /*Currently only VM_INHERIT_NONE supported*/
+projected_buffer_allocate(
+ vm_map_t map,
+ vm_size_t size,
+ int persistence,
+ vm_offset_t *kernel_p,
+ vm_offset_t *user_p,
+ vm_prot_t protection,
+ vm_inherit_t inheritance) /*Currently only VM_INHERIT_NONE supported*/
{
vm_object_t object;
vm_map_entry_t u_entry, k_entry;
@@ -180,13 +177,13 @@ projected_buffer_allocate(map, size, persistence, kernel_p,
*/
kern_return_t
-projected_buffer_map(map, kernel_addr, size, user_p, protection, inheritance)
- vm_map_t map;
- vm_offset_t kernel_addr;
- vm_size_t size;
- vm_offset_t *user_p;
- vm_prot_t protection;
- vm_inherit_t inheritance; /*Currently only VM_INHERIT_NONE supported*/
+projected_buffer_map(
+ vm_map_t map,
+ vm_offset_t kernel_addr,
+ vm_size_t size,
+ vm_offset_t *user_p,
+ vm_prot_t protection,
+ vm_inherit_t inheritance) /*Currently only VM_INHERIT_NONE supported*/
{
vm_map_entry_t u_entry, k_entry;
vm_offset_t physical_addr, user_addr;
@@ -253,15 +250,18 @@ projected_buffer_map(map, kernel_addr, size, user_p, protection, inheritance)
*/
kern_return_t
-projected_buffer_deallocate(map, start, end)
- vm_map_t map;
- vm_offset_t start, end;
+projected_buffer_deallocate(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_entry_t entry, k_entry;
+ if (map == VM_MAP_NULL || map == kernel_map)
+ return KERN_INVALID_ARGUMENT;
+
vm_map_lock(map);
- if (map == VM_MAP_NULL || map == kernel_map ||
- !vm_map_lookup_entry(map, start, &entry) ||
+ if (!vm_map_lookup_entry(map, start, &entry) ||
end > entry->vme_end ||
/*Check corresponding kernel entry*/
(k_entry = entry->projected_on) == 0) {
@@ -303,8 +303,7 @@ projected_buffer_deallocate(map, start, end)
*/
kern_return_t
-projected_buffer_collect(map)
- vm_map_t map;
+projected_buffer_collect(vm_map_t map)
{
vm_map_entry_t entry, next;
@@ -330,9 +329,10 @@ projected_buffer_collect(map)
*/
boolean_t
-projected_buffer_in_range(map, start, end)
- vm_map_t map;
- vm_offset_t start, end;
+projected_buffer_in_range(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_entry_t entry;
@@ -359,14 +359,15 @@ projected_buffer_in_range(map, start, end)
*/
kern_return_t
-kmem_alloc(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_object_t object;
vm_map_entry_t entry;
vm_offset_t addr;
+ unsigned int attempts;
kern_return_t kr;
/*
@@ -385,12 +386,22 @@ kmem_alloc(map, addrp, size)
size = round_page(size);
object = vm_object_allocate(size);
+ attempts = 0;
+
+retry:
vm_map_lock(map);
kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
VM_OBJECT_NULL, &entry);
if (kr != KERN_SUCCESS) {
- printf_once("no more room for kmem_alloc in %p\n", map);
vm_map_unlock(map);
+
+ if (attempts == 0) {
+ attempts++;
+ slab_collect();
+ goto retry;
+ }
+
+ printf_once("no more room for kmem_alloc in %p\n", map);
vm_object_deallocate(object);
return kr;
}
@@ -420,113 +431,25 @@ kmem_alloc(map, addrp, size)
}
/*
- * kmem_realloc:
- *
- * Reallocate wired-down memory in the kernel's address map
- * or a submap. Newly allocated pages are not zeroed.
- * This can only be used on regions allocated with kmem_alloc.
- *
- * If successful, the pages in the old region are mapped twice.
- * The old region is unchanged. Use kmem_free to get rid of it.
- */
-kern_return_t kmem_realloc(map, oldaddr, oldsize, newaddrp, newsize)
- vm_map_t map;
- vm_offset_t oldaddr;
- vm_size_t oldsize;
- vm_offset_t *newaddrp;
- vm_size_t newsize;
-{
- vm_offset_t oldmin, oldmax;
- vm_offset_t newaddr;
- vm_object_t object;
- vm_map_entry_t oldentry, newentry;
- kern_return_t kr;
-
- oldmin = trunc_page(oldaddr);
- oldmax = round_page(oldaddr + oldsize);
- oldsize = oldmax - oldmin;
- newsize = round_page(newsize);
-
- /*
- * Find space for the new region.
- */
-
- vm_map_lock(map);
- kr = vm_map_find_entry(map, &newaddr, newsize, (vm_offset_t) 0,
- VM_OBJECT_NULL, &newentry);
- if (kr != KERN_SUCCESS) {
- vm_map_unlock(map);
- printf_once("no more room for kmem_realloc in %p\n", map);
- return kr;
- }
-
- /*
- * Find the VM object backing the old region.
- */
-
- if (!vm_map_lookup_entry(map, oldmin, &oldentry))
- panic("kmem_realloc");
- object = oldentry->object.vm_object;
-
- /*
- * Increase the size of the object and
- * fill in the new region.
- */
-
- vm_object_reference(object);
- vm_object_lock(object);
- if (object->size != oldsize)
- panic("kmem_realloc");
- object->size = newsize;
- vm_object_unlock(object);
-
- newentry->object.vm_object = object;
- newentry->offset = 0;
-
- /*
- * Since we have not given out this address yet,
- * it is safe to unlock the map. We are trusting
- * that nobody will play with either region.
- */
-
- vm_map_unlock(map);
-
- /*
- * Remap the pages in the old region and
- * allocate more pages for the new region.
- */
-
- kmem_remap_pages(object, 0,
- newaddr, newaddr + oldsize,
- VM_PROT_DEFAULT);
- kmem_alloc_pages(object, oldsize,
- newaddr + oldsize, newaddr + newsize,
- VM_PROT_DEFAULT);
-
- *newaddrp = newaddr;
- return KERN_SUCCESS;
-}
-
-/*
* kmem_alloc_wired:
*
* Allocate wired-down memory in the kernel's address map
* or a submap. The memory is not zero-filled.
*
* The memory is allocated in the kernel_object.
- * It may not be copied with vm_map_copy, and
- * it may not be reallocated with kmem_realloc.
+ * It may not be copied with vm_map_copy.
*/
kern_return_t
-kmem_alloc_wired(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_wired(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_map_entry_t entry;
vm_offset_t offset;
vm_offset_t addr;
+ unsigned int attempts;
kern_return_t kr;
/*
@@ -537,12 +460,22 @@ kmem_alloc_wired(map, addrp, size)
*/
size = round_page(size);
+ attempts = 0;
+
+retry:
vm_map_lock(map);
kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
kernel_object, &entry);
if (kr != KERN_SUCCESS) {
- printf_once("no more room for kmem_alloc_wired in %p\n", map);
vm_map_unlock(map);
+
+ if (attempts == 0) {
+ attempts++;
+ slab_collect();
+ goto retry;
+ }
+
+ printf_once("no more room for kmem_alloc_wired in %p\n", map);
return kr;
}
@@ -591,14 +524,15 @@ kmem_alloc_wired(map, addrp, size)
*/
kern_return_t
-kmem_alloc_aligned(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_aligned(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_map_entry_t entry;
vm_offset_t offset;
vm_offset_t addr;
+ unsigned int attempts;
kern_return_t kr;
if ((size & (size - 1)) != 0)
@@ -612,12 +546,22 @@ kmem_alloc_aligned(map, addrp, size)
*/
size = round_page(size);
+ attempts = 0;
+
+retry:
vm_map_lock(map);
kr = vm_map_find_entry(map, &addr, size, size - 1,
kernel_object, &entry);
if (kr != KERN_SUCCESS) {
- printf_once("no more rooom for kmem_alloc_aligned in %p\n", map);
vm_map_unlock(map);
+
+ if (attempts == 0) {
+ attempts++;
+ slab_collect();
+ goto retry;
+ }
+
+ printf_once("no more room for kmem_alloc_aligned in %p\n", map);
return kr;
}
@@ -665,10 +609,10 @@ kmem_alloc_aligned(map, addrp, size)
*/
kern_return_t
-kmem_alloc_pageable(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_pageable(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_offset_t addr;
kern_return_t kr;
@@ -696,10 +640,10 @@ kmem_alloc_pageable(map, addrp, size)
*/
void
-kmem_free(map, addr, size)
- vm_map_t map;
- vm_offset_t addr;
- vm_size_t size;
+kmem_free(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size)
{
kern_return_t kr;
@@ -714,11 +658,12 @@ kmem_free(map, addr, size)
* a submap.
*/
void
-kmem_alloc_pages(object, offset, start, end, protection)
- register vm_object_t object;
- register vm_offset_t offset;
- register vm_offset_t start, end;
- vm_prot_t protection;
+kmem_alloc_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection)
{
/*
* Mark the pmap region as not pageable.
@@ -726,7 +671,7 @@ kmem_alloc_pages(object, offset, start, end, protection)
pmap_pageable(kernel_pmap, start, end, FALSE);
while (start < end) {
- register vm_page_t mem;
+ vm_page_t mem;
vm_object_lock(object);
@@ -769,11 +714,12 @@ kmem_alloc_pages(object, offset, start, end, protection)
* a submap.
*/
void
-kmem_remap_pages(object, offset, start, end, protection)
- register vm_object_t object;
- register vm_offset_t offset;
- register vm_offset_t start, end;
- vm_prot_t protection;
+kmem_remap_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection)
{
/*
* Mark the pmap region as not pageable.
@@ -781,7 +727,7 @@ kmem_remap_pages(object, offset, start, end, protection)
pmap_pageable(kernel_pmap, start, end, FALSE);
while (start < end) {
- register vm_page_t mem;
+ vm_page_t mem;
vm_object_lock(object);
@@ -827,11 +773,13 @@ kmem_remap_pages(object, offset, start, end, protection)
*/
void
-kmem_submap(map, parent, min, max, size, pageable)
- vm_map_t map, parent;
- vm_offset_t *min, *max;
- vm_size_t size;
- boolean_t pageable;
+kmem_submap(
+ vm_map_t map,
+ vm_map_t parent,
+ vm_offset_t *min,
+ vm_offset_t *max,
+ vm_size_t size,
+ boolean_t pageable)
{
vm_offset_t addr;
kern_return_t kr;
@@ -845,7 +793,7 @@ kmem_submap(map, parent, min, max, size, pageable)
*/
vm_object_reference(vm_submap_object);
- addr = (vm_offset_t) vm_map_min(parent);
+ addr = vm_map_min(parent);
kr = vm_map_enter(parent, &addr, size,
(vm_offset_t) 0, TRUE,
vm_submap_object, (vm_offset_t) 0, FALSE,
@@ -869,9 +817,9 @@ kmem_submap(map, parent, min, max, size, pageable)
* Initialize the kernel's virtual memory map, taking
* into account all memory allocated up to this time.
*/
-void kmem_init(start, end)
- vm_offset_t start;
- vm_offset_t end;
+void kmem_init(
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_setup(kernel_map, pmap_kernel(), VM_MIN_KERNEL_ADDRESS, end,
FALSE);
@@ -879,7 +827,6 @@ void kmem_init(start, end)
/*
* Reserve virtual memory allocated up to this time.
*/
-
if (start != VM_MIN_KERNEL_ADDRESS) {
kern_return_t rc;
vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;
@@ -890,7 +837,7 @@ void kmem_init(start, end)
VM_PROT_DEFAULT, VM_PROT_ALL,
VM_INHERIT_DEFAULT);
if (rc)
- panic("%s:%d: vm_map_enter failed (%d)\n", rc);
+ panic("vm_map_enter failed (%d)\n", rc);
}
}
@@ -907,21 +854,19 @@ void kmem_init(start, end)
*/
kern_return_t
-kmem_io_map_copyout(map, addr, alloc_addr, alloc_size, copy, min_size)
- vm_map_t map;
- vm_offset_t *addr; /* actual addr of data */
- vm_offset_t *alloc_addr; /* page aligned addr */
- vm_size_t *alloc_size; /* size allocated */
- vm_map_copy_t copy;
- vm_size_t min_size; /* Do at least this much */
+kmem_io_map_copyout(
+ vm_map_t map,
+ vm_offset_t *addr, /* actual addr of data */
+ vm_offset_t *alloc_addr, /* page aligned addr */
+ vm_size_t *alloc_size, /* size allocated */
+ vm_map_copy_t copy,
+ vm_size_t min_size) /* Do at least this much */
{
vm_offset_t myaddr, offset;
vm_size_t mysize, copy_size;
kern_return_t ret;
- register
vm_page_t *page_list;
vm_map_copy_t new_copy;
- register
int i;
assert(copy->type == VM_MAP_COPY_PAGE_LIST);
@@ -1013,10 +958,10 @@ kmem_io_map_copyout(map, addr, alloc_addr, alloc_size, copy, min_size)
*/
void
-kmem_io_map_deallocate(map, addr, size)
- vm_map_t map;
- vm_offset_t addr;
- vm_size_t size;
+kmem_io_map_deallocate(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size)
{
/*
* Remove the mappings. The pmap_remove is needed.
@@ -1035,10 +980,11 @@ kmem_io_map_deallocate(map, addr, size)
* and the kernel map/submaps.
*/
-int copyinmap(map, fromaddr, toaddr, length)
- vm_map_t map;
- char *fromaddr, *toaddr;
- int length;
+int copyinmap(
+ vm_map_t map,
+ char *fromaddr,
+ char *toaddr,
+ int length)
{
if (vm_map_pmap(map) == kernel_pmap) {
/* assume a correct copy */
@@ -1061,10 +1007,11 @@ int copyinmap(map, fromaddr, toaddr, length)
* and the kernel map/submaps.
*/
-int copyoutmap(map, fromaddr, toaddr, length)
- vm_map_t map;
- char *fromaddr, *toaddr;
- int length;
+int copyoutmap(
+ vm_map_t map,
+ char *fromaddr,
+ char *toaddr,
+ int length)
{
if (vm_map_pmap(map) == kernel_pmap) {
/* assume a correct copy */
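
The kmem_alloc variants above now share a single out-of-space policy: when vm_map_find_entry fails, drop the map lock, reclaim cached slab memory with slab_collect() (hence the new <kern/slab.h> include), and retry exactly once before reporting failure. Distilled into a sketch, assuming the Mach kernel environment; try_allocate is a hypothetical stand-in for the locked map search:

    kern_return_t alloc_with_reclaim(vm_size_t size, vm_offset_t *addrp)
    {
        unsigned int attempts = 0;
        kern_return_t kr;

    retry:
        kr = try_allocate(size, addrp);     /* hypothetical stand-in */
        if (kr != KERN_SUCCESS) {
            if (attempts == 0) {
                attempts++;
                slab_collect();             /* free unused cached slabs */
                goto retry;
            }
            return kr;                      /* still no room: give up */
        }
        return KERN_SUCCESS;
    }

Retrying only once keeps the failure path bounded while still recovering when address space can be reclaimed by trimming the slab caches.
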
diff --git a/vm/vm_kern.h b/vm/vm_kern.h
index 22b7c123..fb8ac7f8 100644
--- a/vm/vm_kern.h
+++ b/vm/vm_kern.h
@@ -54,8 +54,6 @@ extern kern_return_t kmem_alloc_pageable(vm_map_t, vm_offset_t *,
vm_size_t);
extern kern_return_t kmem_alloc_wired(vm_map_t, vm_offset_t *, vm_size_t);
extern kern_return_t kmem_alloc_aligned(vm_map_t, vm_offset_t *, vm_size_t);
-extern kern_return_t kmem_realloc(vm_map_t, vm_offset_t, vm_size_t,
- vm_offset_t *, vm_size_t);
extern void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
extern void kmem_submap(vm_map_t, vm_map_t, vm_offset_t *,
@@ -82,4 +80,18 @@ extern boolean_t projected_buffer_in_range(
vm_offset_t start,
vm_offset_t end);
+extern void kmem_alloc_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection);
+
+extern void kmem_remap_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection);
+
#endif /* _VM_VM_KERN_H_ */
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 2be71471..89a2b382 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -58,20 +58,6 @@
#include <vm/vm_print.h>
#endif /* MACH_KDB */
-
-/* Forward declarations */
-kern_return_t vm_map_delete(
- vm_map_t map,
- vm_offset_t start,
- vm_offset_t end);
-
-kern_return_t vm_map_copyout_page_list(
- vm_map_t dst_map,
- vm_offset_t *dst_addr, /* OUT */
- vm_map_copy_t copy);
-
-void vm_map_copy_page_discard (vm_map_copy_t copy);
-
/*
* Macros to copy a vm_map_entry. We must be careful to correctly
* manage the wired page count. vm_map_entry_copy() creates a new
@@ -140,11 +126,8 @@ MACRO_END
struct kmem_cache vm_map_cache; /* cache for vm_map structures */
struct kmem_cache vm_map_entry_cache; /* cache for vm_map_entry structures */
-struct kmem_cache vm_map_kentry_cache; /* cache for kernel entry structures */
struct kmem_cache vm_map_copy_cache; /* cache for vm_map_copy structures */
-boolean_t vm_map_lookup_entry(); /* forward declaration */
-
/*
* Placeholder object for submap operations. This object is dropped
* into the range by a call to vm_map_find, and removed when
@@ -163,58 +146,36 @@ vm_object_t vm_submap_object = &vm_submap_object_store;
* Map and entry structures are allocated from caches -- we must
* initialize those caches.
*
- * There are three caches of interest:
+ * There are two caches of interest:
*
* vm_map_cache: used to allocate maps.
* vm_map_entry_cache: used to allocate map entries.
- * vm_map_kentry_cache: used to allocate map entries for the kernel.
*
- * Kernel map entries are allocated from a special cache, using a custom
- * page allocation function to avoid recursion. It would be difficult
- * (perhaps impossible) for the kernel to allocate more memory to an entry
- * cache when it became empty since the very act of allocating memory
- * implies the creation of a new entry.
+ * We make sure the map entry cache allocates memory directly from the
+ * physical allocator to avoid recursion with this module.
*/
-vm_offset_t kentry_data;
-vm_size_t kentry_data_size = KENTRY_DATA_SIZE;
-
-static vm_offset_t kentry_pagealloc(vm_size_t size)
-{
- vm_offset_t result;
-
- if (size > kentry_data_size)
- panic("vm_map: kentry memory exhausted");
-
- result = kentry_data;
- kentry_data += size;
- kentry_data_size -= size;
- return result;
-}
-
void vm_map_init(void)
{
kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
- sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);
- kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
- sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc,
- NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB
- | KMEM_CACHE_NORECLAIM);
+ sizeof(struct vm_map_entry), 0, NULL,
+ KMEM_CACHE_NOOFFSLAB | KMEM_CACHE_PHYSMEM);
kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
- sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0);
+ sizeof(struct vm_map_copy), 0, NULL, 0);
/*
* Submap object is initialized by vm_object_init.
*/
}
-void vm_map_setup(map, pmap, min, max, pageable)
- vm_map_t map;
- pmap_t pmap;
- vm_offset_t min, max;
- boolean_t pageable;
+void vm_map_setup(
+ vm_map_t map,
+ pmap_t pmap,
+ vm_offset_t min,
+ vm_offset_t max,
+ boolean_t pageable)
{
vm_map_first_entry(map) = vm_map_to_entry(map);
vm_map_last_entry(map) = vm_map_to_entry(map);
@@ -223,6 +184,7 @@ void vm_map_setup(map, pmap, min, max, pageable)
rbtree_init(&map->hdr.tree);
map->size = 0;
+ map->user_wired = 0;
map->ref_count = 1;
map->pmap = pmap;
map->min_offset = min;
@@ -243,12 +205,13 @@ void vm_map_setup(map, pmap, min, max, pageable)
* the given physical map structure, and having
* the given lower and upper address bounds.
*/
-vm_map_t vm_map_create(pmap, min, max, pageable)
- pmap_t pmap;
- vm_offset_t min, max;
- boolean_t pageable;
+vm_map_t vm_map_create(
+ pmap_t pmap,
+ vm_offset_t min,
+ vm_offset_t max,
+ boolean_t pageable)
{
- register vm_map_t result;
+ vm_map_t result;
result = (vm_map_t) kmem_cache_alloc(&vm_map_cache);
if (result == VM_MAP_NULL)
@@ -272,17 +235,11 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
_vm_map_entry_create(&(copy)->cpy_hdr)
vm_map_entry_t _vm_map_entry_create(map_header)
- register struct vm_map_header *map_header;
+ const struct vm_map_header *map_header;
{
- register kmem_cache_t cache;
- register vm_map_entry_t entry;
-
- if (map_header->entries_pageable)
- cache = &vm_map_entry_cache;
- else
- cache = &vm_map_kentry_cache;
+ vm_map_entry_t entry;
- entry = (vm_map_entry_t) kmem_cache_alloc(cache);
+ entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
if (entry == VM_MAP_ENTRY_NULL)
panic("vm_map_entry_create");
@@ -301,17 +258,12 @@ vm_map_entry_t _vm_map_entry_create(map_header)
_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))
void _vm_map_entry_dispose(map_header, entry)
- register struct vm_map_header *map_header;
- register vm_map_entry_t entry;
+ const struct vm_map_header *map_header;
+ vm_map_entry_t entry;
{
- register kmem_cache_t cache;
-
- if (map_header->entries_pageable)
- cache = &vm_map_entry_cache;
- else
- cache = &vm_map_kentry_cache;
+ (void)map_header;
- kmem_cache_free(cache, (vm_offset_t) entry);
+ kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry);
}
/*
@@ -386,8 +338,7 @@ static inline int vm_map_entry_cmp_insert(const struct rbtree_node *a,
* Creates another valid reference to the given map.
*
*/
-void vm_map_reference(map)
- register vm_map_t map;
+void vm_map_reference(vm_map_t map)
{
if (map == VM_MAP_NULL)
return;
@@ -404,10 +355,9 @@ void vm_map_reference(map)
* destroying it if no references remain.
* The map should not be locked.
*/
-void vm_map_deallocate(map)
- register vm_map_t map;
+void vm_map_deallocate(vm_map_t map)
{
- register int c;
+ int c;
if (map == VM_MAP_NULL)
return;
@@ -449,13 +399,13 @@ void vm_map_deallocate(map)
* result indicates whether the address is
* actually contained in the map.
*/
-boolean_t vm_map_lookup_entry(map, address, entry)
- register vm_map_t map;
- register vm_offset_t address;
- vm_map_entry_t *entry; /* OUT */
+boolean_t vm_map_lookup_entry(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_map_entry_t *entry) /* OUT */
{
- register struct rbtree_node *node;
- register vm_map_entry_t hint;
+ struct rbtree_node *node;
+ vm_map_entry_t hint;
/*
* First, make a quick check to see if we are already
@@ -506,10 +456,11 @@ boolean_t vm_map_lookup_entry(map, address, entry)
*/
boolean_t
-invalid_user_access(map, start, end, prot)
- vm_map_t map;
- vm_offset_t start, end;
- vm_prot_t prot;
+invalid_user_access(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t prot)
{
vm_map_entry_t entry;
@@ -533,17 +484,17 @@ invalid_user_access(map, start, end, prot)
* are initialized to zero. If an object is supplied,
* then an existing entry may be extended.
*/
-kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
- register vm_map_t map;
- vm_offset_t *address; /* OUT */
- vm_size_t size;
- vm_offset_t mask;
- vm_object_t object;
- vm_map_entry_t *o_entry; /* OUT */
+kern_return_t vm_map_find_entry(
+ vm_map_t map,
+ vm_offset_t *address, /* OUT */
+ vm_size_t size,
+ vm_offset_t mask,
+ vm_object_t object,
+ vm_map_entry_t *o_entry) /* OUT */
{
- register vm_map_entry_t entry, new_entry;
- register vm_offset_t start;
- register vm_offset_t end;
+ vm_map_entry_t entry, new_entry;
+ vm_offset_t start;
+ vm_offset_t end;
/*
* Look for the first possible address;
@@ -562,7 +513,7 @@ kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
*/
while (TRUE) {
- register vm_map_entry_t next;
+ vm_map_entry_t next;
/*
* Find the end of the proposed new region.
@@ -687,8 +638,8 @@ kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
return(KERN_SUCCESS);
}
-int vm_map_pmap_enter_print = FALSE;
-int vm_map_pmap_enter_enable = FALSE;
+boolean_t vm_map_pmap_enter_print = FALSE;
+boolean_t vm_map_pmap_enter_enable = FALSE;
/*
* Routine: vm_map_pmap_enter
@@ -705,19 +656,16 @@ int vm_map_pmap_enter_enable = FALSE;
* The source map should not be locked on entry.
*/
void
-vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
- vm_map_t map;
- register
- vm_offset_t addr;
- register
- vm_offset_t end_addr;
- register
- vm_object_t object;
- vm_offset_t offset;
- vm_prot_t protection;
+vm_map_pmap_enter(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_offset_t end_addr,
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_prot_t protection)
{
while (addr < end_addr) {
- register vm_page_t m;
+ vm_page_t m;
vm_object_lock(object);
vm_object_paging_begin(object);
@@ -766,27 +714,22 @@ vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
* Arguments are as defined in the vm_map call.
*/
kern_return_t vm_map_enter(
- map,
- address, size, mask, anywhere,
- object, offset, needs_copy,
- cur_protection, max_protection, inheritance)
- register
- vm_map_t map;
- vm_offset_t *address; /* IN/OUT */
- vm_size_t size;
- vm_offset_t mask;
- boolean_t anywhere;
- vm_object_t object;
- vm_offset_t offset;
- boolean_t needs_copy;
- vm_prot_t cur_protection;
- vm_prot_t max_protection;
- vm_inherit_t inheritance;
+ vm_map_t map,
+ vm_offset_t *address, /* IN/OUT */
+ vm_size_t size,
+ vm_offset_t mask,
+ boolean_t anywhere,
+ vm_object_t object,
+ vm_offset_t offset,
+ boolean_t needs_copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
{
- register vm_map_entry_t entry;
- register vm_offset_t start;
- register vm_offset_t end;
- kern_return_t result = KERN_SUCCESS;
+ vm_map_entry_t entry;
+ vm_offset_t start;
+ vm_offset_t end;
+ kern_return_t result = KERN_SUCCESS;
#define RETURN(value) { result = value; goto BailOut; }
@@ -832,7 +775,7 @@ kern_return_t vm_map_enter(
*/
while (TRUE) {
- register vm_map_entry_t next;
+ vm_map_entry_t next;
/*
* Find the end of the proposed new region.
@@ -980,7 +923,7 @@ kern_return_t vm_map_enter(
*/
/**/ {
- register vm_map_entry_t new_entry;
+ vm_map_entry_t new_entry;
new_entry = vm_map_entry_create(map);
@@ -1051,14 +994,12 @@ kern_return_t vm_map_enter(
* the specified address; if necessary,
* it splits the entry into two.
*/
-void _vm_map_clip_start();
#define vm_map_clip_start(map, entry, startaddr) \
MACRO_BEGIN \
if ((startaddr) > (entry)->vme_start) \
_vm_map_clip_start(&(map)->hdr,(entry),(startaddr)); \
MACRO_END
-void _vm_map_copy_clip_start();
#define vm_map_copy_clip_start(copy, entry, startaddr) \
MACRO_BEGIN \
if ((startaddr) > (entry)->vme_start) \
@@ -1069,12 +1010,12 @@ void _vm_map_copy_clip_start();
* This routine is called only when it is known that
* the entry must be split.
*/
-void _vm_map_clip_start(map_header, entry, start)
- register struct vm_map_header *map_header;
- register vm_map_entry_t entry;
- register vm_offset_t start;
+void _vm_map_clip_start(
+ struct vm_map_header *map_header,
+ vm_map_entry_t entry,
+ vm_offset_t start)
{
- register vm_map_entry_t new_entry;
+ vm_map_entry_t new_entry;
/*
* Split off the front portion --
@@ -1106,14 +1047,12 @@ void _vm_map_clip_start(map_header, entry, start)
* the specified address; if necessary,
* it splits the entry into two.
*/
-void _vm_map_clip_end();
#define vm_map_clip_end(map, entry, endaddr) \
MACRO_BEGIN \
if ((endaddr) < (entry)->vme_end) \
_vm_map_clip_end(&(map)->hdr,(entry),(endaddr)); \
MACRO_END
-void _vm_map_copy_clip_end();
#define vm_map_copy_clip_end(copy, entry, endaddr) \
MACRO_BEGIN \
if ((endaddr) < (entry)->vme_end) \
@@ -1124,12 +1063,12 @@ void _vm_map_copy_clip_end();
* This routine is called only when it is known that
* the entry must be split.
*/
-void _vm_map_clip_end(map_header, entry, end)
- register struct vm_map_header *map_header;
- register vm_map_entry_t entry;
- register vm_offset_t end;
+void _vm_map_clip_end(
+ struct vm_map_header *map_header,
+ vm_map_entry_t entry,
+ vm_offset_t end)
{
- register vm_map_entry_t new_entry;
+ vm_map_entry_t new_entry;
/*
* Create a new entry and insert it
@@ -1184,15 +1123,15 @@ void _vm_map_clip_end(map_header, entry, end)
* range from the superior map, and then destroy the
* submap (if desired). [Better yet, don't try it.]
*/
-kern_return_t vm_map_submap(map, start, end, submap)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
- vm_map_t submap;
+kern_return_t vm_map_submap(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_map_t submap)
{
vm_map_entry_t entry;
- register kern_return_t result = KERN_INVALID_ARGUMENT;
- register vm_object_t object;
+ kern_return_t result = KERN_INVALID_ARGUMENT;
+ vm_object_t object;
vm_map_lock(map);
@@ -1232,15 +1171,15 @@ kern_return_t vm_map_submap(map, start, end, submap)
* specified, the maximum protection is to be set;
* otherwise, only the current protection is affected.
*/
-kern_return_t vm_map_protect(map, start, end, new_prot, set_max)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
- register vm_prot_t new_prot;
- register boolean_t set_max;
+kern_return_t vm_map_protect(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t new_prot,
+ boolean_t set_max)
{
- register vm_map_entry_t current;
- vm_map_entry_t entry;
+ vm_map_entry_t current;
+ vm_map_entry_t entry;
vm_map_lock(map);
@@ -1320,13 +1259,13 @@ kern_return_t vm_map_protect(map, start, end, new_prot, set_max)
* affects how the map will be shared with
* child maps at the time of vm_map_fork.
*/
-kern_return_t vm_map_inherit(map, start, end, new_inheritance)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
- register vm_inherit_t new_inheritance;
+kern_return_t vm_map_inherit(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_inherit_t new_inheritance)
{
- register vm_map_entry_t entry;
+ vm_map_entry_t entry;
vm_map_entry_t temp_entry;
vm_map_lock(map);
@@ -1369,14 +1308,14 @@ kern_return_t vm_map_inherit(map, start, end, new_inheritance)
* Callers should use macros in vm/vm_map.h (i.e. vm_map_pageable,
* or vm_map_pageable_user); don't call vm_map_pageable directly.
*/
-kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
- register vm_prot_t access_type;
- boolean_t user_wire;
+kern_return_t vm_map_pageable_common(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t access_type,
+ boolean_t user_wire)
{
- register vm_map_entry_t entry;
+ vm_map_entry_t entry;
vm_map_entry_t start_entry;
vm_map_lock(map);
@@ -1436,7 +1375,10 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
if (user_wire) {
if (--(entry->user_wired_count) == 0)
+ {
+ map->user_wired -= entry->vme_end - entry->vme_start;
entry->wired_count--;
+ }
}
else {
entry->wired_count--;
@@ -1513,7 +1455,10 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
if (user_wire) {
if ((entry->user_wired_count)++ == 0)
+ {
+ map->user_wired += entry->vme_end - entry->vme_start;
entry->wired_count++;
+ }
}
else {
entry->wired_count++;
@@ -1539,7 +1484,10 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
(entry->vme_end > start)) {
if (user_wire) {
if (--(entry->user_wired_count) == 0)
+ {
+ map->user_wired -= entry->vme_end - entry->vme_start;
entry->wired_count--;
+ }
}
else {
entry->wired_count--;
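The user_wired accounting added in this hunk and the ones below follows a single pattern: map->user_wired changes only when an entry's user_wired_count crosses zero, so it always equals the total size of the entries wired at user request. A minimal sketch of that invariant, with hypothetical helper names (the patch open-codes the test at each wire/unwire site; the types are those of vm/vm_map.h):

	/*
	 * Illustrative only: the patch does not introduce these helpers.
	 * Invariant: map->user_wired == sum of (vme_end - vme_start)
	 * over all entries with user_wired_count > 0.
	 */
	static void
	user_wire_entry(vm_map_t map, vm_map_entry_t entry)
	{
		if (entry->user_wired_count++ == 0) {
			map->user_wired += entry->vme_end - entry->vme_start;
			entry->wired_count++;
		}
	}

	static void
	user_unwire_entry(vm_map_t map, vm_map_entry_t entry)
	{
		if (--entry->user_wired_count == 0) {
			map->user_wired -= entry->vme_end - entry->vme_start;
			entry->wired_count--;
		}
	}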
@@ -1618,12 +1566,12 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
*
* Deallocate the given entry from the target map.
*/
-void vm_map_entry_delete(map, entry)
- register vm_map_t map;
- register vm_map_entry_t entry;
+void vm_map_entry_delete(
+ vm_map_t map,
+ vm_map_entry_t entry)
{
- register vm_offset_t s, e;
- register vm_object_t object;
+ vm_offset_t s, e;
+ vm_object_t object;
extern vm_object_t kernel_object;
s = entry->vme_start;
@@ -1654,6 +1602,8 @@ void vm_map_entry_delete(map, entry)
if (entry->wired_count != 0) {
vm_fault_unwire(map, entry);
entry->wired_count = 0;
+ if (entry->user_wired_count)
+ map->user_wired -= entry->vme_end - entry->vme_start;
entry->user_wired_count = 0;
}
@@ -1702,10 +1652,10 @@ void vm_map_entry_delete(map, entry)
* map.
*/
-kern_return_t vm_map_delete(map, start, end)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
+kern_return_t vm_map_delete(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_entry_t entry;
vm_map_entry_t first_entry;
@@ -1785,12 +1735,12 @@ kern_return_t vm_map_delete(map, start, end)
* Remove the given address range from the target map.
* This is the exported form of vm_map_delete.
*/
-kern_return_t vm_map_remove(map, start, end)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
+kern_return_t vm_map_remove(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
- register kern_return_t result;
+ kern_return_t result;
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
@@ -1808,12 +1758,11 @@ kern_return_t vm_map_remove(map, start, end)
* that have not already been stolen.
*/
void
-vm_map_copy_steal_pages(copy)
-vm_map_copy_t copy;
+vm_map_copy_steal_pages(vm_map_copy_t copy)
{
- register vm_page_t m, new_m;
- register int i;
- vm_object_t object;
+ vm_page_t m, new_m;
+ int i;
+ vm_object_t object;
for (i = 0; i < copy->cpy_npages; i++) {
@@ -1855,8 +1804,7 @@ vm_map_copy_t copy;
* stolen, they are freed. If the pages are not stolen, they
* are unbusied, and associated state is cleaned up.
*/
-void vm_map_copy_page_discard(copy)
-vm_map_copy_t copy;
+void vm_map_copy_page_discard(vm_map_copy_t copy)
{
while (copy->cpy_npages > 0) {
vm_page_t m;
@@ -1901,8 +1849,7 @@ vm_map_copy_t copy;
* vm_map_copyin).
*/
void
-vm_map_copy_discard(copy)
- vm_map_copy_t copy;
+vm_map_copy_discard(vm_map_copy_t copy)
{
free_next_copy:
if (copy == VM_MAP_COPY_NULL)
@@ -1943,7 +1890,7 @@ free_next_copy:
* here to avoid tail recursion.
*/
if (copy->cpy_cont == vm_map_copy_discard_cont) {
- register vm_map_copy_t new_copy;
+ vm_map_copy_t new_copy;
new_copy = (vm_map_copy_t) copy->cpy_cont_args;
kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
@@ -1978,8 +1925,7 @@ free_next_copy:
* deallocation will not fail.
*/
vm_map_copy_t
-vm_map_copy_copy(copy)
- vm_map_copy_t copy;
+vm_map_copy_copy(vm_map_copy_t copy)
{
vm_map_copy_t new_copy;
@@ -2025,9 +1971,9 @@ vm_map_copy_copy(copy)
* A version of vm_map_copy_discard that can be called
* as a continuation from a vm_map_copy page list.
*/
-kern_return_t vm_map_copy_discard_cont(cont_args, copy_result)
-vm_map_copyin_args_t cont_args;
-vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copy_discard_cont(
+	vm_map_copyin_args_t cont_args,
+	vm_map_copy_t *copy_result)	/* OUT */
{
vm_map_copy_discard((vm_map_copy_t) cont_args);
if (copy_result != (vm_map_copy_t *)0)
@@ -2082,11 +2028,11 @@ vm_map_copy_t *copy_result; /* OUT */
* atomically and interruptibly, an error indication is
* returned.
*/
-kern_return_t vm_map_copy_overwrite(dst_map, dst_addr, copy, interruptible)
- vm_map_t dst_map;
- vm_offset_t dst_addr;
- vm_map_copy_t copy;
- boolean_t interruptible;
+kern_return_t vm_map_copy_overwrite(
+ vm_map_t dst_map,
+ vm_offset_t dst_addr,
+ vm_map_copy_t copy,
+ boolean_t interruptible)
{
vm_size_t size;
vm_offset_t start;
@@ -2305,6 +2251,8 @@ start_pass_1:
entry->offset = copy_entry->offset;
entry->needs_copy = copy_entry->needs_copy;
entry->wired_count = 0;
+ if (entry->user_wired_count)
+ dst_map->user_wired -= entry->vme_end - entry->vme_start;
entry->user_wired_count = 0;
vm_map_copy_entry_unlink(copy, copy_entry);
@@ -2459,19 +2407,16 @@ start_pass_1:
* If successful, consumes the copy object.
* Otherwise, the caller is responsible for it.
*/
-kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
- register
- vm_map_t dst_map;
- vm_offset_t *dst_addr; /* OUT */
- register
- vm_map_copy_t copy;
+kern_return_t vm_map_copyout(
+ vm_map_t dst_map,
+ vm_offset_t *dst_addr, /* OUT */
+ vm_map_copy_t copy)
{
vm_size_t size;
vm_size_t adjustment;
vm_offset_t start;
vm_offset_t vm_copy_start;
vm_map_entry_t last;
- register
vm_map_entry_t entry;
/*
@@ -2559,15 +2504,8 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
* Mismatches occur when dealing with the default
* pager.
*/
- kmem_cache_t old_cache;
vm_map_entry_t next, new;
- /*
- * Find the cache that the copies were allocated from
- */
- old_cache = (copy->cpy_hdr.entries_pageable)
- ? &vm_map_entry_cache
- : &vm_map_kentry_cache;
entry = vm_map_copy_first_entry(copy);
/*
@@ -2576,6 +2514,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
*/
copy->cpy_hdr.nentries = 0;
copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
+ rbtree_init(&copy->cpy_hdr.tree);
vm_map_copy_first_entry(copy) =
vm_map_copy_last_entry(copy) =
vm_map_copy_to_entry(copy);
@@ -2590,7 +2529,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
vm_map_copy_last_entry(copy),
new);
next = entry->vme_next;
- kmem_cache_free(old_cache, (vm_offset_t) entry);
+ kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry);
entry = next;
}
}
@@ -2617,9 +2556,9 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
* map the pages into the destination map.
*/
if (entry->wired_count != 0) {
- register vm_offset_t va;
- vm_offset_t offset;
- register vm_object_t object;
+ vm_offset_t va;
+ vm_offset_t offset;
+ vm_object_t object;
object = entry->object.vm_object;
offset = entry->offset;
@@ -2631,7 +2570,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
TRUE);
while (va < entry->vme_end) {
- register vm_page_t m;
+ vm_page_t m;
/*
* Look up the page in the object.
@@ -2716,19 +2655,16 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
* Version of vm_map_copyout() for page list vm map copies.
*
*/
-kern_return_t vm_map_copyout_page_list(dst_map, dst_addr, copy)
- register
- vm_map_t dst_map;
- vm_offset_t *dst_addr; /* OUT */
- register
- vm_map_copy_t copy;
+kern_return_t vm_map_copyout_page_list(
+ vm_map_t dst_map,
+ vm_offset_t *dst_addr, /* OUT */
+ vm_map_copy_t copy)
{
vm_size_t size;
vm_offset_t start;
vm_offset_t end;
vm_offset_t offset;
vm_map_entry_t last;
- register
vm_object_t object;
vm_page_t *page_list, m;
vm_map_entry_t entry;
@@ -2906,6 +2842,7 @@ create_object:
if (must_wire) {
entry->wired_count = 1;
+ dst_map->user_wired += entry->vme_end - entry->vme_start;
entry->user_wired_count = 1;
} else {
entry->wired_count = 0;
@@ -3106,12 +3043,12 @@ error:
* In/out conditions:
* The source map should not be locked on entry.
*/
-kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
- vm_map_t src_map;
- vm_offset_t src_addr;
- vm_size_t len;
- boolean_t src_destroy;
- vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copyin(
+ vm_map_t src_map,
+ vm_offset_t src_addr,
+ vm_size_t len,
+ boolean_t src_destroy,
+ vm_map_copy_t *copy_result) /* OUT */
{
vm_map_entry_t tmp_entry; /* Result of last map lookup --
* in multi-level lookup, this
@@ -3125,7 +3062,6 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
vm_offset_t src_end; /* End of entire region to be
* copied */
- register
vm_map_copy_t copy; /* Resulting copy */
/*
@@ -3192,14 +3128,12 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
*/
while (TRUE) {
- register
vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */
vm_size_t src_size; /* Size of source
* map entry (in both
* maps)
*/
- register
vm_object_t src_object; /* Object to copy */
vm_offset_t src_offset;
@@ -3208,7 +3142,6 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
* for copy-on-write?
*/
- register
vm_map_entry_t new_entry; /* Map entry for copy */
boolean_t new_entry_needs_copy; /* Will new entry be COW? */
@@ -3472,11 +3405,11 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
* Our caller donates an object reference.
*/
-kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
- vm_object_t object;
- vm_offset_t offset; /* offset of region in object */
- vm_size_t size; /* size of region in object */
- vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copyin_object(
+ vm_object_t object,
+ vm_offset_t offset, /* offset of region in object */
+ vm_size_t size, /* size of region in object */
+ vm_map_copy_t *copy_result) /* OUT */
{
vm_map_copy_t copy; /* Resulting copy */
@@ -3517,12 +3450,12 @@ kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
* the scheduler.
*/
-kern_return_t vm_map_copyin_page_list_cont(cont_args, copy_result)
-vm_map_copyin_args_t cont_args;
-vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copyin_page_list_cont(
+ vm_map_copyin_args_t cont_args,
+ vm_map_copy_t *copy_result) /* OUT */
{
kern_return_t result = 0; /* '=0' to quiet gcc warnings */
- register boolean_t do_abort, src_destroy, src_destroy_only;
+ boolean_t do_abort, src_destroy, src_destroy_only;
/*
* Check for cases that only require memory destruction.
@@ -3573,27 +3506,23 @@ vm_map_copy_t *copy_result; /* OUT */
* the recipient of this copy_result must be prepared to deal with it.
*/
-kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy,
- steal_pages, copy_result, is_cont)
- vm_map_t src_map;
- vm_offset_t src_addr;
- vm_size_t len;
- boolean_t src_destroy;
- boolean_t steal_pages;
- vm_map_copy_t *copy_result; /* OUT */
- boolean_t is_cont;
+kern_return_t vm_map_copyin_page_list(
+ vm_map_t src_map,
+ vm_offset_t src_addr,
+ vm_size_t len,
+ boolean_t src_destroy,
+ boolean_t steal_pages,
+ vm_map_copy_t *copy_result, /* OUT */
+ boolean_t is_cont)
{
vm_map_entry_t src_entry;
vm_page_t m;
vm_offset_t src_start;
vm_offset_t src_end;
vm_size_t src_size;
- register
vm_object_t src_object;
- register
vm_offset_t src_offset;
vm_offset_t src_last_offset;
- register
vm_map_copy_t copy; /* Resulting copy */
kern_return_t result = KERN_SUCCESS;
boolean_t need_map_lookup;
@@ -3927,7 +3856,7 @@ retry:
*/
src_start = trunc_page(src_addr);
if (steal_pages) {
- register int i;
+ int i;
vm_offset_t unwire_end;
unwire_end = src_start;
@@ -3999,6 +3928,8 @@ retry:
assert(src_entry->wired_count > 0);
src_entry->wired_count = 0;
+ if (src_entry->user_wired_count)
+ src_map->user_wired -= src_entry->vme_end - src_entry->vme_start;
src_entry->user_wired_count = 0;
unwire_end = src_entry->vme_end;
pmap_pageable(vm_map_pmap(src_map),
@@ -4104,18 +4035,14 @@ error:
*
* The source map must not be locked.
*/
-vm_map_t vm_map_fork(old_map)
- vm_map_t old_map;
+vm_map_t vm_map_fork(vm_map_t old_map)
{
vm_map_t new_map;
- register
vm_map_entry_t old_entry;
- register
vm_map_entry_t new_entry;
pmap_t new_pmap = pmap_create((vm_size_t) 0);
vm_size_t new_size = 0;
vm_size_t entry_size;
- register
vm_object_t object;
vm_map_lock(old_map);
@@ -4378,21 +4305,20 @@ vm_map_t vm_map_fork(old_map)
* copying operations, although the data referenced will
* remain the same.
*/
-kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
- object, offset, out_prot, wired)
- vm_map_t *var_map; /* IN/OUT */
- register vm_offset_t vaddr;
- register vm_prot_t fault_type;
-
- vm_map_version_t *out_version; /* OUT */
- vm_object_t *object; /* OUT */
- vm_offset_t *offset; /* OUT */
- vm_prot_t *out_prot; /* OUT */
- boolean_t *wired; /* OUT */
+kern_return_t vm_map_lookup(
+ vm_map_t *var_map, /* IN/OUT */
+ vm_offset_t vaddr,
+ vm_prot_t fault_type,
+
+ vm_map_version_t *out_version, /* OUT */
+ vm_object_t *object, /* OUT */
+ vm_offset_t *offset, /* OUT */
+ vm_prot_t *out_prot, /* OUT */
+ boolean_t *wired) /* OUT */
{
- register vm_map_entry_t entry;
- register vm_map_t map = *var_map;
- register vm_prot_t prot;
+ vm_map_entry_t entry;
+ vm_map_t map = *var_map;
+ vm_prot_t prot;
RetryLookup: ;
@@ -4560,11 +4486,9 @@ kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
* since the given version. If successful, the map
* will not change until vm_map_verify_done() is called.
*/
-boolean_t vm_map_verify(map, version)
- register
- vm_map_t map;
- register
- vm_map_version_t *version; /* REF */
+boolean_t vm_map_verify(
+ vm_map_t map,
+ vm_map_version_t *version) /* REF */
{
boolean_t result;
@@ -4593,24 +4517,19 @@ boolean_t vm_map_verify(map, version)
* a task's address map.
*/
-kern_return_t vm_region(map, address, size,
- protection, max_protection,
- inheritance, is_shared,
- object_name, offset_in_object)
- vm_map_t map;
- vm_offset_t *address; /* IN/OUT */
- vm_size_t *size; /* OUT */
- vm_prot_t *protection; /* OUT */
- vm_prot_t *max_protection; /* OUT */
- vm_inherit_t *inheritance; /* OUT */
- boolean_t *is_shared; /* OUT */
- ipc_port_t *object_name; /* OUT */
- vm_offset_t *offset_in_object; /* OUT */
+kern_return_t vm_region(
+ vm_map_t map,
+ vm_offset_t *address, /* IN/OUT */
+ vm_size_t *size, /* OUT */
+ vm_prot_t *protection, /* OUT */
+ vm_prot_t *max_protection, /* OUT */
+ vm_inherit_t *inheritance, /* OUT */
+ boolean_t *is_shared, /* OUT */
+ ipc_port_t *object_name, /* OUT */
+ vm_offset_t *offset_in_object) /* OUT */
{
vm_map_entry_t tmp_entry;
- register
vm_map_entry_t entry;
- register
vm_offset_t tmp_offset;
vm_offset_t start;
@@ -4667,9 +4586,9 @@ kern_return_t vm_region(map, address, size,
* at allocation time because the adjacent entry
* is often wired down.
*/
-void vm_map_simplify(map, start)
- vm_map_t map;
- vm_offset_t start;
+void vm_map_simplify(
+ vm_map_t map,
+ vm_offset_t start)
{
vm_map_entry_t this_entry;
vm_map_entry_t prev_entry;
@@ -4728,12 +4647,12 @@ void vm_map_simplify(map, start)
* it itself. [This assumes that attributes do not
* need to be inherited, which seems ok to me]
*/
-kern_return_t vm_map_machine_attribute(map, address, size, attribute, value)
- vm_map_t map;
- vm_offset_t address;
- vm_size_t size;
- vm_machine_attribute_t attribute;
- vm_machine_attribute_val_t* value; /* IN/OUT */
+kern_return_t vm_map_machine_attribute(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_size_t size,
+ vm_machine_attribute_t attribute,
+ vm_machine_attribute_val_t* value) /* IN/OUT */
{
kern_return_t ret;
@@ -4758,25 +4677,30 @@ kern_return_t vm_map_machine_attribute(map, address, size, attribute, value)
/*
* vm_map_print: [ debug ]
*/
-void vm_map_print(map)
- register vm_map_t map;
+void vm_map_print(db_expr_t addr, boolean_t have_addr, db_expr_t count, const char *modif)
{
- register vm_map_entry_t entry;
+ vm_map_t map;
+ vm_map_entry_t entry;
+
+ if (!have_addr)
+ map = current_thread()->task->map;
+ else
+ map = (vm_map_t)addr;
- iprintf("Task map 0x%X: pmap=0x%X,",
+ iprintf("Map 0x%X: pmap=0x%X,",
(vm_offset_t) map, (vm_offset_t) (map->pmap));
printf("ref=%d,nentries=%d,", map->ref_count, map->hdr.nentries);
printf("version=%d\n", map->timestamp);
- indent += 2;
+ indent += 1;
for (entry = vm_map_first_entry(map);
entry != vm_map_to_entry(map);
entry = entry->vme_next) {
static char *inheritance_name[3] = { "share", "copy", "none"};
iprintf("map entry 0x%X: ", (vm_offset_t) entry);
- printf("start=0x%X, end=0x%X, ",
+ printf("start=0x%X, end=0x%X\n",
(vm_offset_t) entry->vme_start, (vm_offset_t) entry->vme_end);
- printf("prot=%X/%X/%s, ",
+ iprintf("prot=%X/%X/%s, ",
entry->protection,
entry->max_protection,
inheritance_name[entry->inheritance]);
@@ -4811,13 +4735,13 @@ void vm_map_print(map)
if ((entry->vme_prev == vm_map_to_entry(map)) ||
(entry->vme_prev->object.vm_object != entry->object.vm_object)) {
- indent += 2;
+ indent += 1;
vm_object_print(entry->object.vm_object);
- indent -= 2;
+ indent -= 1;
}
}
}
- indent -= 2;
+ indent -= 1;
}
/*
@@ -4827,13 +4751,13 @@ void vm_map_print(map)
*/
void vm_map_copy_print(copy)
- vm_map_copy_t copy;
+ const vm_map_copy_t copy;
{
int i, npages;
printf("copy object 0x%x\n", copy);
- indent += 2;
+ indent += 1;
iprintf("type=%d", copy->type);
switch (copy->type) {
@@ -4887,6 +4811,6 @@ void vm_map_copy_print(copy)
break;
}
- indent -=2;
+ indent -= 1;
}
#endif /* MACH_KDB */
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 5fdac4e6..b4ba7c7b 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -52,10 +52,10 @@
#include <vm/vm_types.h>
#include <kern/lock.h>
#include <kern/rbtree.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
/* TODO: make it dynamic */
-#define KENTRY_DATA_SIZE (64*PAGE_SIZE)
+#define KENTRY_DATA_SIZE (256*PAGE_SIZE)
/*
* Types defined:
@@ -170,14 +170,18 @@ struct vm_map {
#define max_offset hdr.links.end /* end of range */
pmap_t pmap; /* Physical map */
vm_size_t size; /* virtual size */
+ vm_size_t user_wired; /* wired by user size */
int ref_count; /* Reference count */
decl_simple_lock_data(, ref_lock) /* Lock for ref_count field */
vm_map_entry_t hint; /* hint for quick lookups */
decl_simple_lock_data(, hint_lock) /* lock for hint storage */
vm_map_entry_t first_free; /* First free space hint */
- boolean_t wait_for_space; /* Should callers wait
+
+ /* Flags */
+ unsigned int wait_for_space:1, /* Should callers wait
for space? */
- boolean_t wiring_required;/* All memory wired? */
+ /* boolean_t */ wiring_required:1; /* All memory wired? */
+
unsigned int timestamp; /* Version number */
};
@@ -359,9 +363,6 @@ MACRO_END
* Exported procedures that operate on vm_map_t.
*/
-extern vm_offset_t kentry_data;
-extern vm_size_t kentry_data_size;
-extern int kentry_count;
/* Initialize the module */
extern void vm_map_init(void);
@@ -437,6 +438,23 @@ extern kern_return_t vm_map_machine_attribute(vm_map_t, vm_offset_t,
/* Delete entry from map */
extern void vm_map_entry_delete(vm_map_t, vm_map_entry_t);
+kern_return_t vm_map_delete(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end);
+
+kern_return_t vm_map_copyout_page_list(
+ vm_map_t dst_map,
+ vm_offset_t *dst_addr, /* OUT */
+ vm_map_copy_t copy);
+
+void vm_map_copy_page_discard(vm_map_copy_t copy);
+
+boolean_t vm_map_lookup_entry(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_map_entry_t *entry); /* OUT */
+
/*
* Functions implemented as macros
*/
@@ -538,6 +556,9 @@ extern void _vm_map_clip_start(
* the specified address; if necessary,
* it splits the entry into two.
*/
-void _vm_map_clip_end();
+void _vm_map_clip_end(
+ struct vm_map_header *map_header,
+ vm_map_entry_t entry,
+ vm_offset_t end);
#endif /* _VM_VM_MAP_H_ */
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 526b6f33..bc301288 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -59,7 +59,6 @@
#include <ddb/db_output.h>
#endif /* MACH_KDB */
-
void memory_object_release(
ipc_port_t pager,
pager_request_t pager_request,
@@ -231,9 +230,11 @@ static void _vm_object_setup(
vm_object_t _vm_object_allocate(
vm_size_t size)
{
- register vm_object_t object;
+ vm_object_t object;
object = (vm_object_t) kmem_cache_alloc(&vm_object_cache);
+ if (!object)
+ return 0;
_vm_object_setup(object, size);
@@ -243,10 +244,12 @@ vm_object_t _vm_object_allocate(
vm_object_t vm_object_allocate(
vm_size_t size)
{
- register vm_object_t object;
- register ipc_port_t port;
+ vm_object_t object;
+ ipc_port_t port;
object = _vm_object_allocate(size);
+ if (object == 0)
+ panic("vm_object_allocate");
port = ipc_port_alloc_kernel();
if (port == IP_NULL)
panic("vm_object_allocate");
@@ -264,7 +267,7 @@ vm_object_t vm_object_allocate(
void vm_object_bootstrap(void)
{
kmem_cache_init(&vm_object_cache, "vm_object",
- sizeof(struct vm_object), 0, NULL, NULL, NULL, 0);
+ sizeof(struct vm_object), 0, NULL, 0);
queue_init(&vm_object_cached_list);
simple_lock_init(&vm_object_cached_lock_data);
@@ -406,7 +409,7 @@ void vm_object_collect(
* Gets another reference to the given object.
*/
void vm_object_reference(
- register vm_object_t object)
+ vm_object_t object)
{
if (object == VM_OBJECT_NULL)
return;
@@ -429,7 +432,7 @@ void vm_object_reference(
* No object may be locked.
*/
void vm_object_deallocate(
- register vm_object_t object)
+ vm_object_t object)
{
vm_object_t temp;
@@ -525,10 +528,10 @@ void vm_object_deallocate(
* object will cease to exist.
*/
void vm_object_terminate(
- register vm_object_t object)
+ vm_object_t object)
{
- register vm_page_t p;
- vm_object_t shadow_object;
+ vm_page_t p;
+ vm_object_t shadow_object;
/*
* Make sure the object isn't already being terminated
@@ -577,10 +580,6 @@ void vm_object_terminate(
VM_PAGE_CHECK(p);
- if (p->busy && !p->absent)
- panic("vm_object_terminate.2 0x%x 0x%x",
- object, p);
-
VM_PAGE_FREE(p);
}
} else while (!queue_empty(&object->memq)) {
@@ -588,9 +587,6 @@ void vm_object_terminate(
VM_PAGE_CHECK(p);
- if (p->busy && !p->absent)
- panic("vm_object_terminate.3 0x%x 0x%x", object, p);
-
vm_page_lock_queues();
VM_PAGE_QUEUES_REMOVE(p);
vm_page_unlock_queues();
@@ -608,9 +604,6 @@ void vm_object_terminate(
goto free_page;
}
- if (p->fictitious)
- panic("vm_object_terminate.4 0x%x 0x%x", object, p);
-
if (!p->dirty)
p->dirty = pmap_is_modified(p->phys_addr);
@@ -732,7 +725,6 @@ void memory_object_release(
void vm_object_abort_activity(
vm_object_t object)
{
- register
vm_page_t p;
vm_page_t next;
@@ -786,17 +778,12 @@ void vm_object_abort_activity(
* or from port destruction handling (via vm_object_destroy).
*/
kern_return_t memory_object_destroy(
- register
vm_object_t object,
kern_return_t reason)
{
ipc_port_t old_object, old_name;
pager_request_t old_control;
-#ifdef lint
- reason++;
-#endif /* lint */
-
if (object == VM_OBJECT_NULL)
return KERN_SUCCESS;
@@ -889,8 +876,8 @@ kern_return_t memory_object_destroy(
boolean_t vm_object_pmap_protect_by_page = FALSE;
void vm_object_pmap_protect(
- register vm_object_t object,
- register vm_offset_t offset,
+ vm_object_t object,
+ vm_offset_t offset,
vm_size_t size,
pmap_t pmap,
vm_offset_t pmap_start,
@@ -912,8 +899,8 @@ void vm_object_pmap_protect(
}
{
- register vm_page_t p;
- register vm_offset_t end;
+ vm_page_t p;
+ vm_offset_t end;
end = offset + size;
@@ -944,7 +931,7 @@ void vm_object_pmap_protect(
* Must follow shadow chain to remove access
* to pages in shadowed objects.
*/
- register vm_object_t next_object;
+ vm_object_t next_object;
next_object = object->shadow;
if (next_object != VM_OBJECT_NULL) {
@@ -981,11 +968,11 @@ void vm_object_pmap_protect(
* The object must *not* be locked.
*/
void vm_object_pmap_remove(
- register vm_object_t object,
- register vm_offset_t start,
- register vm_offset_t end)
+ vm_object_t object,
+ vm_offset_t start,
+ vm_offset_t end)
{
- register vm_page_t p;
+ vm_page_t p;
if (object == VM_OBJECT_NULL)
return;
@@ -1031,7 +1018,6 @@ void vm_object_pmap_remove(
* VM_OBJECT_NULL.
*/
kern_return_t vm_object_copy_slowly(
- register
vm_object_t src_object,
vm_offset_t src_offset,
vm_size_t size,
@@ -1085,7 +1071,6 @@ kern_return_t vm_object_copy_slowly(
vm_prot_t prot = VM_PROT_READ;
vm_page_t _result_page;
vm_page_t top_page;
- register
vm_page_t result_page;
vm_object_lock(src_object);
@@ -1205,8 +1190,6 @@ kern_return_t vm_object_copy_slowly(
* The object should be unlocked on entry and exit.
*/
-vm_object_t vm_object_copy_delayed(); /* forward declaration */
-
boolean_t vm_object_copy_temporary(
vm_object_t *_object, /* INOUT */
vm_offset_t *_offset, /* INOUT */
@@ -1215,10 +1198,6 @@ boolean_t vm_object_copy_temporary(
{
vm_object_t object = *_object;
-#ifdef lint
- ++*_offset;
-#endif /* lint */
-
if (object == VM_OBJECT_NULL) {
*_src_needs_copy = FALSE;
*_dst_needs_copy = FALSE;
@@ -1318,16 +1297,6 @@ kern_return_t vm_object_copy_call(
vm_page_t p;
/*
- * Set the backing object for the new
- * temporary object.
- */
-
- assert(src_object->ref_count > 0);
- src_object->ref_count++;
- vm_object_paging_begin(src_object);
- vm_object_unlock(src_object);
-
- /*
* Create a memory object port to be associated
* with this new vm_object.
*
@@ -1340,10 +1309,18 @@ kern_return_t vm_object_copy_call(
*/
new_memory_object = ipc_port_alloc_kernel();
- if (new_memory_object == IP_NULL) {
- panic("vm_object_copy_call: allocate memory object port");
- /* XXX Shouldn't panic here. */
- }
+ if (new_memory_object == IP_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ /*
+ * Set the backing object for the new
+ * temporary object.
+ */
+
+ assert(src_object->ref_count > 0);
+ src_object->ref_count++;
+ vm_object_paging_begin(src_object);
+ vm_object_unlock(src_object);
/* we hold a naked receive right for new_memory_object */
(void) ipc_port_make_send(new_memory_object);
@@ -1448,7 +1425,7 @@ vm_object_t vm_object_copy_delayed(
* synchronization required in the "push"
* operation described above.
*
- * The copy-on-write is said to be assymetric because
+ * The copy-on-write is said to be asymmetric because
* the original object is *not* marked copy-on-write.
* A copied page is pushed to the copy object, regardless
* which party attempted to modify the page.
@@ -1581,7 +1558,6 @@ vm_object_t vm_object_copy_delayed(
* and may be interrupted.
*/
kern_return_t vm_object_copy_strategically(
- register
vm_object_t src_object,
vm_offset_t src_offset,
vm_size_t size,
@@ -1694,8 +1670,8 @@ void vm_object_shadow(
vm_offset_t *offset, /* IN/OUT */
vm_size_t length)
{
- register vm_object_t source;
- register vm_object_t result;
+ vm_object_t source;
+ vm_object_t result;
source = *object;
@@ -1955,7 +1931,6 @@ vm_object_t vm_object_enter(
vm_size_t size,
boolean_t internal)
{
- register
vm_object_t object;
vm_object_t new_object;
boolean_t must_init;
@@ -2169,7 +2144,6 @@ restart:
* daemon will be using this routine.
*/
void vm_object_pager_create(
- register
vm_object_t object)
{
ipc_port_t pager;
@@ -2314,14 +2288,14 @@ boolean_t vm_object_collapse_bypass_allowed = TRUE;
* so the caller should hold a reference for the object.
*/
void vm_object_collapse(
- register vm_object_t object)
+ vm_object_t object)
{
- register vm_object_t backing_object;
- register vm_offset_t backing_offset;
- register vm_size_t size;
- register vm_offset_t new_offset;
- register vm_page_t p, pp;
- ipc_port_t old_name_port;
+ vm_object_t backing_object;
+ vm_offset_t backing_offset;
+ vm_size_t size;
+ vm_offset_t new_offset;
+ vm_page_t p, pp;
+ ipc_port_t old_name_port;
if (!vm_object_collapse_allowed)
return;
@@ -2446,34 +2420,9 @@ void vm_object_collapse(
VM_PAGE_FREE(p);
}
else {
- if (pp != VM_PAGE_NULL) {
- /*
- * Parent has an absent page...
- * it's not being paged in, so
- * it must really be missing from
- * the parent.
- *
- * Throw out the absent page...
- * any faults looking for that
- * page will restart with the new
- * one.
- */
-
- /*
- * This should never happen -- the
- * parent cannot have ever had an
- * external memory object, and thus
- * cannot have absent pages.
- */
- panic("vm_object_collapse: bad case");
-
- VM_PAGE_FREE(pp);
-
- /*
- * Fall through to move the backing
- * object's page up.
- */
- }
+			assert(pp == VM_PAGE_NULL ||
+			       !"vm_object_collapse: bad case");
+
/*
* Parent now has no page.
* Move the backing object's page up.
@@ -2692,11 +2641,11 @@ unsigned int vm_object_page_remove_lookup = 0;
unsigned int vm_object_page_remove_iterate = 0;
void vm_object_page_remove(
- register vm_object_t object,
- register vm_offset_t start,
- register vm_offset_t end)
+ vm_object_t object,
+ vm_offset_t start,
+ vm_offset_t end)
{
- register vm_page_t p, next;
+ vm_page_t p, next;
/*
* One and two page removals are most popular.
@@ -2757,7 +2706,7 @@ void vm_object_page_remove(
*/
boolean_t vm_object_coalesce(
- register vm_object_t prev_object,
+ vm_object_t prev_object,
vm_object_t next_object,
vm_offset_t prev_offset,
vm_offset_t next_offset,
@@ -2766,10 +2715,6 @@ boolean_t vm_object_coalesce(
{
vm_size_t newsize;
-#ifdef lint
- next_offset++;
-#endif /* lint */
-
if (next_object != VM_OBJECT_NULL) {
return FALSE;
}
@@ -2898,7 +2843,8 @@ vm_object_page_map(
VM_PAGE_FREE(old_page);
}
- vm_page_init(m, addr);
+ vm_page_init(m);
+ m->phys_addr = addr;
m->private = TRUE; /* don`t free page */
m->wire_count = 1;
vm_page_lock_queues();
@@ -2923,20 +2869,21 @@ boolean_t vm_object_print_pages = FALSE;
void vm_object_print(
vm_object_t object)
{
- register vm_page_t p;
+ vm_page_t p;
- register int count;
+ int count;
if (object == VM_OBJECT_NULL)
return;
- iprintf("Object 0x%X: size=0x%X",
- (vm_offset_t) object, (vm_offset_t) object->size);
- printf(", %d references, %lu resident pages,", object->ref_count,
- object->resident_page_count);
+ iprintf("Object 0x%X: size=0x%X, %d references",
+ (vm_offset_t) object, (vm_offset_t) object->size,
+ object->ref_count);
+ printf("\n");
+ iprintf("%lu resident pages,", object->resident_page_count);
printf(" %d absent pages,", object->absent_count);
printf(" %d paging ops\n", object->paging_in_progress);
- indent += 2;
+ indent += 1;
iprintf("memory object=0x%X (offset=0x%X),",
(vm_offset_t) object->pager, (vm_offset_t) object->paging_offset);
printf("control=0x%X, name=0x%X\n",
@@ -2955,7 +2902,7 @@ void vm_object_print(
(vm_offset_t) object->shadow, (vm_offset_t) object->shadow_offset);
printf("copy=0x%X\n", (vm_offset_t) object->copy);
- indent += 2;
+ indent += 1;
if (vm_object_print_pages) {
count = 0;
@@ -2972,7 +2919,7 @@ void vm_object_print(
if (count != 0)
printf("\n");
}
- indent -= 4;
+ indent -= 2;
}
#endif /* MACH_KDB */
diff --git a/vm/vm_object.h b/vm/vm_object.h
index 6b9f0bcf..eb8a0c28 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -45,7 +45,7 @@
#include <kern/lock.h>
#include <kern/assert.h>
#include <kern/debug.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <vm/pmap.h>
#include <ipc/ipc_types.h>
@@ -62,7 +62,7 @@ typedef struct ipc_port * pager_request_t;
*/
struct vm_object {
- queue_chain_t memq; /* Resident memory */
+ queue_head_t memq; /* Resident memory */
decl_simple_lock_data(, Lock) /* Synchronization */
#if VM_OBJECT_DEBUG
thread_t LockHolder; /* Thread holding Lock */
@@ -247,6 +247,16 @@ extern boolean_t vm_object_coalesce(
extern void vm_object_pager_wakeup(ipc_port_t pager);
+void memory_object_release(
+ ipc_port_t pager,
+ pager_request_t pager_request,
+ ipc_port_t pager_name);
+
+void vm_object_deactivate_pages(vm_object_t);
+
+vm_object_t vm_object_copy_delayed(
+ vm_object_t src_object);
+
/*
* Event waiting handling
*/
diff --git a/vm/vm_page.c b/vm/vm_page.c
new file mode 100644
index 00000000..a868fce8
--- /dev/null
+++ b/vm/vm_page.c
@@ -0,0 +1,782 @@
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * This implementation uses the binary buddy system to manage its heap.
+ * Descriptions of the buddy system can be found in the following works:
+ * - "UNIX Internals: The New Frontiers", by Uresh Vahalia.
+ * - "Dynamic Storage Allocation: A Survey and Critical Review",
+ * by Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles.
+ *
+ * In addition, this allocator uses per-CPU pools of pages for order 0
+ * (i.e. single page) allocations. These pools act as caches (they are named
+ * differently to avoid confusion with CPU caches) and reduce contention on
+ * multiprocessor systems. When a pool is empty and cannot provide a page,
+ * it is filled by transferring multiple pages from the backend buddy system.
+ * The symmetric case is handled likewise.
+ */
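Before the code, a standalone illustration of the buddy arithmetic this allocator relies on (an editorial sketch, not part of the patch, assuming 4 KiB pages; the kernel expresses the same computation as pa ^ vm_page_ptoa(1 << order)):

	#include <assert.h>
	#include <stdint.h>

	#define EX_PAGE_SHIFT 12	/* assumed 4 KiB pages */

	/* Physical address of the buddy of the order-n block at pa. */
	static inline uint64_t
	buddy_of(uint64_t pa, unsigned int order)
	{
		return pa ^ ((uint64_t)1 << (EX_PAGE_SHIFT + order));
	}

	int main(void)
	{
		/* Order-1 blocks cover two pages (0x2000 bytes): 0x4000
		 * and 0x6000 are buddies, and merging them yields the
		 * order-2 block at 0x4000. */
		assert(buddy_of(0x4000, 1) == 0x6000);
		assert(buddy_of(0x6000, 1) == 0x4000);
		return 0;
	}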
+
+#include <string.h>
+#include <kern/assert.h>
+#include <kern/cpu_number.h>
+#include <kern/debug.h>
+#include <kern/list.h>
+#include <kern/lock.h>
+#include <kern/macros.h>
+#include <kern/printf.h>
+#include <kern/thread.h>
+#include <mach/vm_param.h>
+#include <machine/pmap.h>
+#include <sys/types.h>
+#include <vm/vm_page.h>
+
+#define __init
+#define __initdata
+#define __read_mostly
+
+#define thread_pin()
+#define thread_unpin()
+
+/*
+ * Number of free block lists per segment.
+ */
+#define VM_PAGE_NR_FREE_LISTS 11
+
+/*
+ * The size of a CPU pool is computed by dividing the number of pages in its
+ * containing segment by this value.
+ */
+#define VM_PAGE_CPU_POOL_RATIO 1024
+
+/*
+ * Maximum number of pages in a CPU pool.
+ */
+#define VM_PAGE_CPU_POOL_MAX_SIZE 128
+
+/*
+ * The transfer size of a CPU pool is computed by dividing the pool size by
+ * this value.
+ */
+#define VM_PAGE_CPU_POOL_TRANSFER_RATIO 2
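As a worked example of the sizing rules above (a sketch, not part of the patch, assuming 4 KiB pages): a 512 MiB segment holds 131072 pages, so its pools hit the 128-page cap and transfer 64 pages at a time:

	#include <stdio.h>

	int main(void)
	{
		unsigned long seg_pages = (512UL << 20) >> 12; /* 512 MiB / 4 KiB */
		int size = seg_pages / 1024;	/* VM_PAGE_CPU_POOL_RATIO */

		if (size == 0)
			size = 1;
		else if (size > 128)		/* VM_PAGE_CPU_POOL_MAX_SIZE */
			size = 128;

		/* VM_PAGE_CPU_POOL_TRANSFER_RATIO is 2, rounded up. */
		int transfer = (size + 2 - 1) / 2;

		printf("pool size: %d pages, transfer size: %d pages\n",
		       size, transfer);	/* prints 128 and 64 */
		return 0;
	}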
+
+/*
+ * Per-processor cache of pages.
+ */
+struct vm_page_cpu_pool {
+ simple_lock_data_t lock;
+ int size;
+ int transfer_size;
+ int nr_pages;
+ struct list pages;
+} __aligned(CPU_L1_SIZE);
+
+/*
+ * Special order value for pages that aren't in a free list. Such pages are
+ * either allocated, or part of a free block of pages but not the head page.
+ */
+#define VM_PAGE_ORDER_UNLISTED ((unsigned short)-1)
+
+/*
+ * Doubly-linked list of free blocks.
+ */
+struct vm_page_free_list {
+ unsigned long size;
+ struct list blocks;
+};
+
+/*
+ * Segment name buffer size.
+ */
+#define VM_PAGE_NAME_SIZE 16
+
+/*
+ * Segment of contiguous memory.
+ */
+struct vm_page_seg {
+ struct vm_page_cpu_pool cpu_pools[NCPUS];
+
+ phys_addr_t start;
+ phys_addr_t end;
+ struct vm_page *pages;
+ struct vm_page *pages_end;
+ simple_lock_data_t lock;
+ struct vm_page_free_list free_lists[VM_PAGE_NR_FREE_LISTS];
+ unsigned long nr_free_pages;
+};
+
+/*
+ * Bootstrap information about a segment.
+ */
+struct vm_page_boot_seg {
+ phys_addr_t start;
+ phys_addr_t end;
+ phys_addr_t avail_start;
+ phys_addr_t avail_end;
+};
+
+static int vm_page_is_ready __read_mostly;
+
+/*
+ * Segment table.
+ *
+ * The system supports a maximum of 4 segments:
+ * - DMA: suitable for DMA
+ * - DMA32: suitable for DMA when devices support 32-bits addressing
+ * - DIRECTMAP: direct physical mapping, allows direct access from
+ * the kernel with a simple offset translation
+ * - HIGHMEM: must be mapped before it can be accessed
+ *
+ * Segments are ordered by priority, 0 being the lowest priority. Their
+ * relative priorities are DMA < DMA32 < DIRECTMAP < HIGHMEM. Some segments
+ * may actually be aliases for others, e.g. if DMA is always possible from
+ * the direct physical mapping, DMA and DMA32 are aliases for DIRECTMAP,
+ * in which case the segment table contains DIRECTMAP and HIGHMEM only.
+ */
+static struct vm_page_seg vm_page_segs[VM_PAGE_MAX_SEGS];
+
+/*
+ * Bootstrap segment table.
+ */
+static struct vm_page_boot_seg vm_page_boot_segs[VM_PAGE_MAX_SEGS] __initdata;
+
+/*
+ * Number of loaded segments.
+ */
+static unsigned int vm_page_segs_size __read_mostly;
+
+static void __init
+vm_page_init_pa(struct vm_page *page, unsigned short seg_index, phys_addr_t pa)
+{
+ memset(page, 0, sizeof(*page));
+ vm_page_init(page); /* vm_resident members */
+ page->type = VM_PT_RESERVED;
+ page->seg_index = seg_index;
+ page->order = VM_PAGE_ORDER_UNLISTED;
+ page->priv = NULL;
+ page->phys_addr = pa;
+}
+
+void
+vm_page_set_type(struct vm_page *page, unsigned int order, unsigned short type)
+{
+ unsigned int i, nr_pages;
+
+ nr_pages = 1 << order;
+
+ for (i = 0; i < nr_pages; i++)
+ page[i].type = type;
+}
+
+static void __init
+vm_page_free_list_init(struct vm_page_free_list *free_list)
+{
+ free_list->size = 0;
+ list_init(&free_list->blocks);
+}
+
+static inline void
+vm_page_free_list_insert(struct vm_page_free_list *free_list,
+ struct vm_page *page)
+{
+ assert(page->order == VM_PAGE_ORDER_UNLISTED);
+
+ free_list->size++;
+ list_insert_head(&free_list->blocks, &page->node);
+}
+
+static inline void
+vm_page_free_list_remove(struct vm_page_free_list *free_list,
+ struct vm_page *page)
+{
+ assert(page->order != VM_PAGE_ORDER_UNLISTED);
+
+ free_list->size--;
+ list_remove(&page->node);
+}
+
+static struct vm_page *
+vm_page_seg_alloc_from_buddy(struct vm_page_seg *seg, unsigned int order)
+{
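+	/* Self-initialization quiets a spurious "may be used
+	 * uninitialized" warning from GCC; the assert below guarantees
+	 * at least one loop iteration, which always sets free_list. */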
+ struct vm_page_free_list *free_list = free_list;
+ struct vm_page *page, *buddy;
+ unsigned int i;
+
+ assert(order < VM_PAGE_NR_FREE_LISTS);
+
+ for (i = order; i < VM_PAGE_NR_FREE_LISTS; i++) {
+ free_list = &seg->free_lists[i];
+
+ if (free_list->size != 0)
+ break;
+ }
+
+ if (i == VM_PAGE_NR_FREE_LISTS)
+ return NULL;
+
+ page = list_first_entry(&free_list->blocks, struct vm_page, node);
+ vm_page_free_list_remove(free_list, page);
+ page->order = VM_PAGE_ORDER_UNLISTED;
+
+ while (i > order) {
+ i--;
+ buddy = &page[1 << i];
+ vm_page_free_list_insert(&seg->free_lists[i], buddy);
+ buddy->order = i;
+ }
+
+ seg->nr_free_pages -= (1 << order);
+ return page;
+}
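An illustrative trace of the split loop above (not part of the patch): serving an order-0 request from an order-3 block re-lists the upper half at each order on the way down, leaving the head page for the caller:

	#include <stdio.h>

	int main(void)
	{
		unsigned int order = 0;	/* requested order */
		unsigned int i = 3;	/* order of the block actually found */
		unsigned int base = 0;	/* page index of the block's head */

		while (i > order) {
			i--;
			/* The upper half becomes a free block of order i. */
			printf("page %u re-listed at order %u\n",
			       base + (1u << i), i);
		}
		printf("page %u returned to the caller\n", base);
		/* Output: pages 4, 2 and 1 go to free lists 2, 1 and 0. */
		return 0;
	}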
+
+static void
+vm_page_seg_free_to_buddy(struct vm_page_seg *seg, struct vm_page *page,
+ unsigned int order)
+{
+ struct vm_page *buddy;
+ phys_addr_t pa, buddy_pa;
+ unsigned int nr_pages;
+
+ assert(page >= seg->pages);
+ assert(page < seg->pages_end);
+ assert(page->order == VM_PAGE_ORDER_UNLISTED);
+ assert(order < VM_PAGE_NR_FREE_LISTS);
+
+ nr_pages = (1 << order);
+ pa = page->phys_addr;
+
+ while (order < (VM_PAGE_NR_FREE_LISTS - 1)) {
+ buddy_pa = pa ^ vm_page_ptoa(1 << order);
+
+ if ((buddy_pa < seg->start) || (buddy_pa >= seg->end))
+ break;
+
+ buddy = &seg->pages[vm_page_atop(buddy_pa - seg->start)];
+
+ if (buddy->order != order)
+ break;
+
+ vm_page_free_list_remove(&seg->free_lists[order], buddy);
+ buddy->order = VM_PAGE_ORDER_UNLISTED;
+ order++;
+ pa &= -vm_page_ptoa(1 << order);
+ page = &seg->pages[vm_page_atop(pa - seg->start)];
+ }
+
+ vm_page_free_list_insert(&seg->free_lists[order], page);
+ page->order = order;
+ seg->nr_free_pages += nr_pages;
+}
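Freeing reverses the split, ascending as long as the buddy is free at the same order. A standalone sketch of one merge step, assuming 4 KiB pages (the kernel writes the same arithmetic with vm_page_ptoa()):

	#include <assert.h>
	#include <stdint.h>

	#define EX_PAGE_SHIFT 12	/* assumed 4 KiB pages */
	#define EX_PTOA(x) ((uint64_t)(x) << EX_PAGE_SHIFT)

	int main(void)
	{
		/* Freeing the order-1 block at 0x6000 whose buddy 0x4000
		 * is free: ascend to order 2 and re-align to the merged
		 * block. */
		uint64_t pa = 0x6000;
		unsigned int order = 1;

		assert((pa ^ EX_PTOA(1 << order)) == 0x4000); /* buddy found */
		order++;
		pa &= -EX_PTOA(1 << order);	/* align to 4-page boundary */
		assert(pa == 0x4000);
		return 0;
	}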
+
+static void __init
+vm_page_cpu_pool_init(struct vm_page_cpu_pool *cpu_pool, int size)
+{
+ simple_lock_init(&cpu_pool->lock);
+ cpu_pool->size = size;
+ cpu_pool->transfer_size = (size + VM_PAGE_CPU_POOL_TRANSFER_RATIO - 1)
+ / VM_PAGE_CPU_POOL_TRANSFER_RATIO;
+ cpu_pool->nr_pages = 0;
+ list_init(&cpu_pool->pages);
+}
+
+static inline struct vm_page_cpu_pool *
+vm_page_cpu_pool_get(struct vm_page_seg *seg)
+{
+ return &seg->cpu_pools[cpu_number()];
+}
+
+static inline struct vm_page *
+vm_page_cpu_pool_pop(struct vm_page_cpu_pool *cpu_pool)
+{
+ struct vm_page *page;
+
+ assert(cpu_pool->nr_pages != 0);
+ cpu_pool->nr_pages--;
+ page = list_first_entry(&cpu_pool->pages, struct vm_page, node);
+ list_remove(&page->node);
+ return page;
+}
+
+static inline void
+vm_page_cpu_pool_push(struct vm_page_cpu_pool *cpu_pool, struct vm_page *page)
+{
+ assert(cpu_pool->nr_pages < cpu_pool->size);
+ cpu_pool->nr_pages++;
+ list_insert_head(&cpu_pool->pages, &page->node);
+}
+
+static int
+vm_page_cpu_pool_fill(struct vm_page_cpu_pool *cpu_pool,
+ struct vm_page_seg *seg)
+{
+ struct vm_page *page;
+ int i;
+
+ assert(cpu_pool->nr_pages == 0);
+
+ simple_lock(&seg->lock);
+
+ for (i = 0; i < cpu_pool->transfer_size; i++) {
+ page = vm_page_seg_alloc_from_buddy(seg, 0);
+
+ if (page == NULL)
+ break;
+
+ vm_page_cpu_pool_push(cpu_pool, page);
+ }
+
+ simple_unlock(&seg->lock);
+
+ return i;
+}
+
+static void
+vm_page_cpu_pool_drain(struct vm_page_cpu_pool *cpu_pool,
+ struct vm_page_seg *seg)
+{
+ struct vm_page *page;
+ int i;
+
+ assert(cpu_pool->nr_pages == cpu_pool->size);
+
+ simple_lock(&seg->lock);
+
+ for (i = cpu_pool->transfer_size; i > 0; i--) {
+ page = vm_page_cpu_pool_pop(cpu_pool);
+ vm_page_seg_free_to_buddy(seg, page, 0);
+ }
+
+ simple_unlock(&seg->lock);
+}
+
+static phys_addr_t __init
+vm_page_seg_size(struct vm_page_seg *seg)
+{
+ return seg->end - seg->start;
+}
+
+static int __init
+vm_page_seg_compute_pool_size(struct vm_page_seg *seg)
+{
+ phys_addr_t size;
+
+ size = vm_page_atop(vm_page_seg_size(seg)) / VM_PAGE_CPU_POOL_RATIO;
+
+ if (size == 0)
+ size = 1;
+ else if (size > VM_PAGE_CPU_POOL_MAX_SIZE)
+ size = VM_PAGE_CPU_POOL_MAX_SIZE;
+
+ return size;
+}
+
+static void __init
+vm_page_seg_init(struct vm_page_seg *seg, phys_addr_t start, phys_addr_t end,
+ struct vm_page *pages)
+{
+ phys_addr_t pa;
+ int pool_size;
+ unsigned int i;
+
+ seg->start = start;
+ seg->end = end;
+ pool_size = vm_page_seg_compute_pool_size(seg);
+
+ for (i = 0; i < ARRAY_SIZE(seg->cpu_pools); i++)
+ vm_page_cpu_pool_init(&seg->cpu_pools[i], pool_size);
+
+ seg->pages = pages;
+ seg->pages_end = pages + vm_page_atop(vm_page_seg_size(seg));
+ simple_lock_init(&seg->lock);
+
+ for (i = 0; i < ARRAY_SIZE(seg->free_lists); i++)
+ vm_page_free_list_init(&seg->free_lists[i]);
+
+ seg->nr_free_pages = 0;
+ i = seg - vm_page_segs;
+
+ for (pa = seg->start; pa < seg->end; pa += PAGE_SIZE)
+ vm_page_init_pa(&pages[vm_page_atop(pa - seg->start)], i, pa);
+}
+
+static struct vm_page *
+vm_page_seg_alloc(struct vm_page_seg *seg, unsigned int order,
+ unsigned short type)
+{
+ struct vm_page_cpu_pool *cpu_pool;
+ struct vm_page *page;
+ int filled;
+
+ assert(order < VM_PAGE_NR_FREE_LISTS);
+
+ if (order == 0) {
+ thread_pin();
+ cpu_pool = vm_page_cpu_pool_get(seg);
+ simple_lock(&cpu_pool->lock);
+
+ if (cpu_pool->nr_pages == 0) {
+ filled = vm_page_cpu_pool_fill(cpu_pool, seg);
+
+ if (!filled) {
+ simple_unlock(&cpu_pool->lock);
+ thread_unpin();
+ return NULL;
+ }
+ }
+
+ page = vm_page_cpu_pool_pop(cpu_pool);
+ simple_unlock(&cpu_pool->lock);
+ thread_unpin();
+ } else {
+ simple_lock(&seg->lock);
+ page = vm_page_seg_alloc_from_buddy(seg, order);
+ simple_unlock(&seg->lock);
+
+ if (page == NULL)
+ return NULL;
+ }
+
+ assert(page->type == VM_PT_FREE);
+ vm_page_set_type(page, order, type);
+ return page;
+}
+
+static void
+vm_page_seg_free(struct vm_page_seg *seg, struct vm_page *page,
+ unsigned int order)
+{
+ struct vm_page_cpu_pool *cpu_pool;
+
+ assert(page->type != VM_PT_FREE);
+ assert(order < VM_PAGE_NR_FREE_LISTS);
+
+ vm_page_set_type(page, order, VM_PT_FREE);
+
+ if (order == 0) {
+ thread_pin();
+ cpu_pool = vm_page_cpu_pool_get(seg);
+ simple_lock(&cpu_pool->lock);
+
+ if (cpu_pool->nr_pages == cpu_pool->size)
+ vm_page_cpu_pool_drain(cpu_pool, seg);
+
+ vm_page_cpu_pool_push(cpu_pool, page);
+ simple_unlock(&cpu_pool->lock);
+ thread_unpin();
+ } else {
+ simple_lock(&seg->lock);
+ vm_page_seg_free_to_buddy(seg, page, order);
+ simple_unlock(&seg->lock);
+ }
+}
+
+void __init
+vm_page_load(unsigned int seg_index, phys_addr_t start, phys_addr_t end,
+ phys_addr_t avail_start, phys_addr_t avail_end)
+{
+ struct vm_page_boot_seg *seg;
+
+ assert(seg_index < ARRAY_SIZE(vm_page_boot_segs));
+ assert(vm_page_aligned(start));
+ assert(vm_page_aligned(end));
+ assert(vm_page_aligned(avail_start));
+ assert(vm_page_aligned(avail_end));
+ assert(start < end);
+ assert(start <= avail_start);
+ assert(avail_end <= end);
+ assert(vm_page_segs_size < ARRAY_SIZE(vm_page_boot_segs));
+
+ seg = &vm_page_boot_segs[seg_index];
+ seg->start = start;
+ seg->end = end;
+ seg->avail_start = avail_start;
+ seg->avail_end = avail_end;
+ vm_page_segs_size++;
+}
+
+int
+vm_page_ready(void)
+{
+ return vm_page_is_ready;
+}
+
+static unsigned int
+vm_page_select_alloc_seg(unsigned int selector)
+{
+ unsigned int seg_index;
+
+ switch (selector) {
+ case VM_PAGE_SEL_DMA:
+ seg_index = VM_PAGE_SEG_DMA;
+ break;
+ case VM_PAGE_SEL_DMA32:
+ seg_index = VM_PAGE_SEG_DMA32;
+ break;
+ case VM_PAGE_SEL_DIRECTMAP:
+ seg_index = VM_PAGE_SEG_DIRECTMAP;
+ break;
+ case VM_PAGE_SEL_HIGHMEM:
+ seg_index = VM_PAGE_SEG_HIGHMEM;
+ break;
+ default:
+ panic("vm_page: invalid selector");
+ }
+
+ return MIN(vm_page_segs_size - 1, seg_index);
+}
+
+static int __init
+vm_page_boot_seg_loaded(const struct vm_page_boot_seg *seg)
+{
+ return (seg->end != 0);
+}
+
+static void __init
+vm_page_check_boot_segs(void)
+{
+ unsigned int i;
+ int expect_loaded;
+
+ if (vm_page_segs_size == 0)
+ panic("vm_page: no physical memory loaded");
+
+ for (i = 0; i < ARRAY_SIZE(vm_page_boot_segs); i++) {
+ expect_loaded = (i < vm_page_segs_size);
+
+ if (vm_page_boot_seg_loaded(&vm_page_boot_segs[i]) == expect_loaded)
+ continue;
+
+ panic("vm_page: invalid boot segment table");
+ }
+}
+
+static phys_addr_t __init
+vm_page_boot_seg_size(struct vm_page_boot_seg *seg)
+{
+ return seg->end - seg->start;
+}
+
+static phys_addr_t __init
+vm_page_boot_seg_avail_size(struct vm_page_boot_seg *seg)
+{
+ return seg->avail_end - seg->avail_start;
+}
+
+unsigned long __init
+vm_page_bootalloc(size_t size)
+{
+ struct vm_page_boot_seg *seg;
+ phys_addr_t pa;
+ unsigned int i;
+
+ for (i = vm_page_select_alloc_seg(VM_PAGE_SEL_DIRECTMAP);
+ i < vm_page_segs_size;
+ i--) {
+ seg = &vm_page_boot_segs[i];
+
+ if (size <= vm_page_boot_seg_avail_size(seg)) {
+ pa = seg->avail_start;
+ seg->avail_start += vm_page_round(size);
+ return pa;
+ }
+ }
+
+ panic("vm_page: no physical memory available");
+}
+
+void __init
+vm_page_setup(void)
+{
+ struct vm_page_boot_seg *boot_seg;
+ struct vm_page_seg *seg;
+ struct vm_page *table, *page, *end;
+ size_t nr_pages, table_size;
+ unsigned long va;
+ unsigned int i;
+ phys_addr_t pa;
+
+ vm_page_check_boot_segs();
+
+ /*
+ * Compute the page table size.
+ */
+ nr_pages = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++)
+ nr_pages += vm_page_atop(vm_page_boot_seg_size(&vm_page_boot_segs[i]));
+
+ table_size = vm_page_round(nr_pages * sizeof(struct vm_page));
+ printf("vm_page: page table size: %lu entries (%luk)\n", nr_pages,
+ table_size >> 10);
+ table = (struct vm_page *)pmap_steal_memory(table_size);
+ va = (unsigned long)table;
+
+ /*
+ * Initialize the segments, associating them to the page table. When
+ * the segments are initialized, all their pages are set allocated.
+ * Pages are then released, which populates the free lists.
+ */
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
+ boot_seg = &vm_page_boot_segs[i];
+ vm_page_seg_init(seg, boot_seg->start, boot_seg->end, table);
+ page = seg->pages + vm_page_atop(boot_seg->avail_start
+ - boot_seg->start);
+ end = seg->pages + vm_page_atop(boot_seg->avail_end
+ - boot_seg->start);
+
+ while (page < end) {
+ page->type = VM_PT_FREE;
+ vm_page_seg_free_to_buddy(seg, page, 0);
+ page++;
+ }
+
+ table += vm_page_atop(vm_page_seg_size(seg));
+ }
+
+ while (va < (unsigned long)table) {
+ pa = pmap_extract(kernel_pmap, va);
+ page = vm_page_lookup_pa(pa);
+ assert((page != NULL) && (page->type == VM_PT_RESERVED));
+ page->type = VM_PT_TABLE;
+ va += PAGE_SIZE;
+ }
+
+ vm_page_is_ready = 1;
+}
+
+void __init
+vm_page_manage(struct vm_page *page)
+{
+ assert(page->seg_index < ARRAY_SIZE(vm_page_segs));
+ assert(page->type == VM_PT_RESERVED);
+
+ vm_page_set_type(page, 0, VM_PT_FREE);
+ vm_page_seg_free_to_buddy(&vm_page_segs[page->seg_index], page, 0);
+}
+
+struct vm_page *
+vm_page_lookup_pa(phys_addr_t pa)
+{
+ struct vm_page_seg *seg;
+ unsigned int i;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
+
+ if ((pa >= seg->start) && (pa < seg->end))
+ return &seg->pages[vm_page_atop(pa - seg->start)];
+ }
+
+ return NULL;
+}
+
+struct vm_page *
+vm_page_alloc_pa(unsigned int order, unsigned int selector, unsigned short type)
+{
+ struct vm_page *page;
+ unsigned int i;
+
+ for (i = vm_page_select_alloc_seg(selector); i < vm_page_segs_size; i--) {
+ page = vm_page_seg_alloc(&vm_page_segs[i], order, type);
+
+ if (page != NULL)
+ return page;
+ }
+
+ if (type == VM_PT_PMAP)
+ panic("vm_page: unable to allocate pmap page");
+
+ return NULL;
+}
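Both vm_page_bootalloc() above and vm_page_alloc_pa() walk the segment table downward from the preferred segment using an unsigned counter; decrementing past 0 wraps to a huge value that fails the i < vm_page_segs_size test, which is what terminates the loop. A standalone sketch of the idiom:

	#include <stdio.h>

	int main(void)
	{
		unsigned int nr_segs = 3;
		unsigned int i;

		/* Try segment 2, then 1, then 0; the wraparound after 0
		 * ends the loop. */
		for (i = 2; i < nr_segs; i--)
			printf("trying segment %u\n", i);
		return 0;
	}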
+
+void
+vm_page_free_pa(struct vm_page *page, unsigned int order)
+{
+ assert(page != NULL);
+ assert(page->seg_index < ARRAY_SIZE(vm_page_segs));
+
+ vm_page_seg_free(&vm_page_segs[page->seg_index], page, order);
+}
+
+const char *
+vm_page_seg_name(unsigned int seg_index)
+{
+ /* Don't use a switch statement since segments can be aliased */
+ if (seg_index == VM_PAGE_SEG_HIGHMEM)
+ return "HIGHMEM";
+ else if (seg_index == VM_PAGE_SEG_DIRECTMAP)
+ return "DIRECTMAP";
+ else if (seg_index == VM_PAGE_SEG_DMA32)
+ return "DMA32";
+ else if (seg_index == VM_PAGE_SEG_DMA)
+ return "DMA";
+ else
+ panic("vm_page: invalid segment index");
+}
+
+void
+vm_page_info_all(void)
+{
+ struct vm_page_seg *seg;
+ unsigned long pages;
+ unsigned int i;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
+ pages = (unsigned long)(seg->pages_end - seg->pages);
+ printf("vm_page: %s: pages: %lu (%luM), free: %lu (%luM)\n",
+ vm_page_seg_name(i), pages, pages >> (20 - PAGE_SHIFT),
+ seg->nr_free_pages, seg->nr_free_pages >> (20 - PAGE_SHIFT));
+ }
+}
+
+phys_addr_t
+vm_page_mem_size(void)
+{
+ phys_addr_t total;
+ unsigned int i;
+
+ total = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ /* XXX */
+ if (i > VM_PAGE_SEG_DIRECTMAP)
+ continue;
+
+ total += vm_page_seg_size(&vm_page_segs[i]);
+ }
+
+ return total;
+}
+
+unsigned long
+vm_page_mem_free(void)
+{
+ unsigned long total;
+ unsigned int i;
+
+ total = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ /* XXX */
+ if (i > VM_PAGE_SEG_DIRECTMAP)
+ continue;
+
+ total += vm_page_segs[i].nr_free_pages;
+ }
+
+ return total;
+}
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 4536d1c5..f2e20a78 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -36,13 +36,14 @@
#include <mach/boolean.h>
#include <mach/vm_prot.h>
-#include <mach/vm_param.h>
+#include <machine/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_types.h>
#include <kern/queue.h>
#include <kern/lock.h>
+#include <kern/log2.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <kern/sched_prim.h> /* definitions of wait/wakeup */
#if MACH_VM_DEBUG
@@ -76,6 +77,23 @@
*/
struct vm_page {
+ /* Members used in the vm_page module only */
+ struct list node;
+ unsigned short type;
+ unsigned short seg_index;
+ unsigned short order;
+ void *priv;
+
+ /*
+ * This member is used throughout the code and may only change for
+ * fictitious pages.
+ */
+ phys_addr_t phys_addr;
+
+ /* We use an empty struct as the delimiter. */
+ struct {} vm_page_header;
+#define VM_PAGE_HEADER_SIZE offsetof(struct vm_page, vm_page_header)
+
queue_chain_t pageq; /* queue info for FIFO
* queue or free list (P) */
queue_chain_t listq; /* all pages in same object (O) */
@@ -84,7 +102,7 @@ struct vm_page {
vm_object_t object; /* which object am I in (O,P) */
vm_offset_t offset; /* offset into that object (O,P) */
- unsigned int wire_count:16, /* how many wired down maps use me?
+ unsigned int wire_count:15, /* how many wired down maps use me?
(O&P) */
/* boolean_t */ inactive:1, /* page is in inactive list (P) */
active:1, /* page is in active list (P) */
@@ -92,14 +110,8 @@ struct vm_page {
free:1, /* page is on free list (P) */
reference:1, /* page has been used (P) */
external:1, /* page considered external (P) */
- extcounted:1, /* page counted in ext counts (P) */
- :0; /* (force to 'long' boundary) */
-#ifdef ns32000
- int pad; /* extra space for ns32000 bit ops */
-#endif /* ns32000 */
-
- unsigned int
- /* boolean_t */ busy:1, /* page is in transit (O) */
+ extcounted:1, /* page counted in ext counts (P) */
+ busy:1, /* page is in transit (O) */
wanted:1, /* someone is waiting for page (O) */
tabled:1, /* page is in VP table (O) */
fictitious:1, /* Physical page doesn't exist (O) */
@@ -112,13 +124,10 @@ struct vm_page {
dirty:1, /* Page must be cleaned (O) */
precious:1, /* Page is precious; data must be
* returned even if clean (O) */
- overwriting:1, /* Request to unlock has been made
+ overwriting:1; /* Request to unlock has been made
* without having data. (O)
* [See vm_object_overwrite] */
- :0;
- vm_offset_t phys_addr; /* Physical address of page, passed
- * to pmap_enter (read-only) */
vm_prot_t page_lock; /* Uses prohibited by data manager (O) */
vm_prot_t unlock_request; /* Outstanding unlock request (O) */
};
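The empty vm_page_header member exists only for its offset: members before it belong to the physical page allocator and must survive reinitialization, while members after it are the traditional VM-level state. A hypothetical sketch of how such a delimiter can be used (the names and the reset function are illustrative, not the patch's actual code):

	#include <stddef.h>
	#include <string.h>

	struct page {
		unsigned short order;	/* allocator-private, must survive */
		struct {} header;	/* delimiter (GNU C empty struct) */
	#define PAGE_HEADER_SIZE offsetof(struct page, header)
		int busy;		/* VM-level state, may be reset */
	};

	/* Reset only the VM-level members, preserving allocator state. */
	static void
	page_reset(struct page *p)
	{
		memset((char *)p + PAGE_HEADER_SIZE, 0,
		       sizeof(*p) - PAGE_HEADER_SIZE);
	}

	int main(void)
	{
		struct page p = { .order = 3, .busy = 1 };

		page_reset(&p);
		return (p.order == 3 && p.busy == 0) ? 0 : 1;
	}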
@@ -147,8 +156,6 @@ struct vm_page {
*/
extern
-vm_page_t vm_page_queue_free; /* memory free queue */
-extern
vm_page_t vm_page_queue_fictitious; /* fictitious free queue */
extern
queue_head_t vm_page_queue_active; /* active memory queue */
@@ -156,13 +163,6 @@ extern
queue_head_t vm_page_queue_inactive; /* inactive memory queue */
extern
-vm_offset_t first_phys_addr; /* physical address for first_page */
-extern
-vm_offset_t last_phys_addr; /* physical address for last_page */
-
-extern
-int vm_page_free_count; /* How many pages are free? */
-extern
int vm_page_fictitious_count;/* How many fictitious pages are free? */
extern
int vm_page_active_count; /* How many pages are active? */
@@ -208,25 +208,21 @@ extern void vm_page_bootstrap(
vm_offset_t *endp);
extern void vm_page_module_init(void);
-extern void vm_page_create(
- vm_offset_t start,
- vm_offset_t end);
extern vm_page_t vm_page_lookup(
vm_object_t object,
vm_offset_t offset);
extern vm_page_t vm_page_grab_fictitious(void);
-extern void vm_page_release_fictitious(vm_page_t);
-extern boolean_t vm_page_convert(vm_page_t, boolean_t);
+extern boolean_t vm_page_convert(vm_page_t *, boolean_t);
extern void vm_page_more_fictitious(void);
extern vm_page_t vm_page_grab(boolean_t);
-extern void vm_page_release(vm_page_t, boolean_t);
+extern vm_page_t vm_page_grab_contig(vm_size_t, unsigned int);
+extern void vm_page_free_contig(vm_page_t, vm_size_t);
extern void vm_page_wait(void (*)(void));
extern vm_page_t vm_page_alloc(
vm_object_t object,
vm_offset_t offset);
extern void vm_page_init(
- vm_page_t mem,
- vm_offset_t phys_addr);
+ vm_page_t mem);
extern void vm_page_free(vm_page_t);
extern void vm_page_activate(vm_page_t);
extern void vm_page_deactivate(vm_page_t);
@@ -247,8 +243,6 @@ extern void vm_page_copy(vm_page_t src_m, vm_page_t dest_m);
extern void vm_page_wire(vm_page_t);
extern void vm_page_unwire(vm_page_t);
-extern void vm_set_page_size(void);
-
#if MACH_VM_DEBUG
extern unsigned int vm_page_info(
hash_info_bucket_t *info,
@@ -326,4 +320,217 @@ extern unsigned int vm_page_info(
} \
MACRO_END
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Physical page management.
+ */
+
+/*
+ * Address/page conversion and rounding macros (macros rather than inline
+ * functions so they can be used on both virtual and physical addresses,
+ * whose types may differ in size).
+ */
+#define vm_page_atop(addr) ((addr) >> PAGE_SHIFT)
+#define vm_page_ptoa(page) ((page) << PAGE_SHIFT)
+#define vm_page_trunc(addr) P2ALIGN(addr, PAGE_SIZE)
+#define vm_page_round(addr) P2ROUND(addr, PAGE_SIZE)
+#define vm_page_aligned(addr) P2ALIGNED(addr, PAGE_SIZE)
+
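
A quick numeric illustration of these macros, as an editorial sketch assuming the usual PAGE_SIZE of 4096 bytes (PAGE_SHIFT == 12):

    /*
     * vm_page_atop(0x5234)  == 0x5       -- address to page frame number
     * vm_page_ptoa(0x5)     == 0x5000    -- page frame number to address
     * vm_page_trunc(0x5234) == 0x5000    -- round down to a page boundary
     * vm_page_round(0x5234) == 0x6000    -- round up to a page boundary
     * vm_page_aligned(0x5234) is false; vm_page_aligned(0x5000) is true
     */
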
+/*
+ * Segment selectors.
+ *
+ * Selector-to-segment-list translation table:
+ * DMA DMA
+ * DMA32 DMA32 DMA
+ * DIRECTMAP DIRECTMAP DMA32 DMA
+ * HIGHMEM HIGHMEM DIRECTMAP DMA32 DMA
+ */
+#define VM_PAGE_SEL_DMA 0
+#define VM_PAGE_SEL_DMA32 1
+#define VM_PAGE_SEL_DIRECTMAP 2
+#define VM_PAGE_SEL_HIGHMEM 3
+
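
Read each row of the table above as an ordered fallback list: allocation is first attempted in the selector's own segment, then in progressively more constrained ones. A minimal sketch of how such a table could be encoded (illustrative only; the real lookup lives inside the vm_page allocator, and the struct below is invented):

    struct vm_page_seg_list {
        unsigned int nr_segs;
        unsigned int segs[4];
    };

    static const struct vm_page_seg_list vm_page_seg_lists[] = {
        [VM_PAGE_SEL_DMA]       = { 1, { VM_PAGE_SEL_DMA } },
        [VM_PAGE_SEL_DMA32]     = { 2, { VM_PAGE_SEL_DMA32,
                                         VM_PAGE_SEL_DMA } },
        [VM_PAGE_SEL_DIRECTMAP] = { 3, { VM_PAGE_SEL_DIRECTMAP,
                                         VM_PAGE_SEL_DMA32,
                                         VM_PAGE_SEL_DMA } },
        [VM_PAGE_SEL_HIGHMEM]   = { 4, { VM_PAGE_SEL_HIGHMEM,
                                         VM_PAGE_SEL_DIRECTMAP,
                                         VM_PAGE_SEL_DMA32,
                                         VM_PAGE_SEL_DMA } },
    };
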
+/*
+ * Page usage types.
+ *
+ * Failing to allocate pmap pages will cause a kernel panic.
+ * TODO Obviously, this needs to be addressed, e.g. with a reserved pool of
+ * pages.
+ */
+#define VM_PT_FREE 0 /* Page unused */
+#define VM_PT_RESERVED 1 /* Page reserved at boot time */
+#define VM_PT_TABLE 2 /* Page is part of the page table */
+#define VM_PT_PMAP 3 /* Page stores pmap-specific data */
+#define VM_PT_KMEM 4 /* Page is part of a kmem slab */
+#define VM_PT_STACK 5 /* Type for kernel stack allocations */
+#define VM_PT_KERNEL 6 /* Type for generic kernel allocations */
+
+static inline unsigned short
+vm_page_type(const struct vm_page *page)
+{
+ return page->type;
+}
+
+void vm_page_set_type(struct vm_page *page, unsigned int order,
+ unsigned short type);
+
+static inline unsigned int
+vm_page_order(size_t size)
+{
+ return iorder2(vm_page_atop(vm_page_round(size)));
+}
+
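
For instance, a 10000-byte request with 4 KiB pages works out as follows (editorial worked example, reading iorder2() as the ceiling of log2, as its use here implies):

    /*
     * vm_page_round(10000) == 12288   -- rounded up to 3 pages
     * vm_page_atop(12288)  == 3
     * iorder2(3)           == 2       -- smallest order with 2^order >= 3
     *
     * so vm_page_order(10000) == 2, i.e. a block of 4 pages.
     */
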
+static inline phys_addr_t
+vm_page_to_pa(const struct vm_page *page)
+{
+ return page->phys_addr;
+}
+
+#if 0
+static inline unsigned long
+vm_page_direct_va(phys_addr_t pa)
+{
+ assert(pa < VM_PAGE_DIRECTMAP_LIMIT);
+ return ((unsigned long)pa + VM_MIN_DIRECTMAP_ADDRESS);
+}
+
+static inline phys_addr_t
+vm_page_direct_pa(unsigned long va)
+{
+ assert(va >= VM_MIN_DIRECTMAP_ADDRESS);
+ assert(va < VM_MAX_DIRECTMAP_ADDRESS);
+ return (va - VM_MIN_DIRECTMAP_ADDRESS);
+}
+
+static inline void *
+vm_page_direct_ptr(const struct vm_page *page)
+{
+ return (void *)vm_page_direct_va(vm_page_to_pa(page));
+}
+#endif
+
+/*
+ * Associate private data with a page.
+ */
+static inline void
+vm_page_set_priv(struct vm_page *page, void *priv)
+{
+ page->priv = priv;
+}
+
+static inline void *
+vm_page_get_priv(const struct vm_page *page)
+{
+ return page->priv;
+}
+
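
As an illustration of the private-data hook, a hypothetical client (every name below is invented, not part of the patch) might stash per-page bookkeeping like this:

    struct dma_frame_info {
        unsigned int refs;
    };

    static void
    dma_frame_track(struct vm_page *page, struct dma_frame_info *info)
    {
        vm_page_set_priv(page, info);           /* associate data */
    }

    static unsigned int
    dma_frame_refs(const struct vm_page *page)
    {
        struct dma_frame_info *info = vm_page_get_priv(page);

        return (info != NULL) ? info->refs : 0; /* read it back */
    }
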
+/*
+ * Load physical memory into the vm_page module at boot time.
+ *
+ * The avail_start and avail_end parameters are used to maintain a simple
+ * heap for bootstrap allocations.
+ *
+ * All addresses must be page-aligned. Segments can be loaded in any order.
+ */
+void vm_page_load(unsigned int seg_index, phys_addr_t start, phys_addr_t end,
+ phys_addr_t avail_start, phys_addr_t avail_end);
+
+/*
+ * Return true if the vm_page module is completely initialized, false
+ * otherwise, in which case only vm_page_bootalloc() can be used for
+ * allocations.
+ */
+int vm_page_ready(void);
+
+/*
+ * Early allocation function.
+ *
+ * This function is used by the vm_resident module to implement
+ * pmap_steal_memory. It can be used after physical segments have been loaded
+ * and before the vm_page module is initialized.
+ */
+unsigned long vm_page_bootalloc(size_t size);
+
+/*
+ * Set up the vm_page module.
+ *
+ * Architecture-specific code must have loaded segments before calling this
+ * function. Segments must comply with the selector-to-segment-list table,
+ * e.g. HIGHMEM is loaded if and only if DIRECTMAP, DMA32 and DMA are loaded,
+ * notwithstanding segment aliasing.
+ *
+ * Once this function returns, the vm_page module is ready, and normal
+ * allocation functions can be used.
+ */
+void vm_page_setup(void);
+
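
Taken together, the boot-time interface might be driven roughly as follows. This is an editorial sketch: the segment indices are assumed to coincide with the selector constants, the function name and all addresses are invented, and a real machine would derive the ranges from its memory map.

    /* Hypothetical layout: 16 MiB of DMA memory, the rest direct-mapped;
     * the first 1 MiB is firmware/kernel memory, so it is excluded from
     * the available (bootstrap heap) range of the DMA segment. */
    void machine_vm_init(void)
    {
        vm_page_load(VM_PAGE_SEL_DMA, 0x0, 0x1000000,
                     0x100000, 0x1000000);
        vm_page_load(VM_PAGE_SEL_DMA32, 0x1000000, 0x20000000,
                     0x1000000, 0x20000000);
        vm_page_load(VM_PAGE_SEL_DIRECTMAP, 0x20000000, 0x38000000,
                     0x20000000, 0x38000000);

        vm_page_setup();    /* normal allocations work from here on */
    }
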
+/*
+ * Make the given page managed by the vm_page module.
+ *
+ * If additional memory can be made usable after the VM system is initialized,
+ * it should be reported through this function.
+ */
+void vm_page_manage(struct vm_page *page);
+
+/*
+ * Return the page descriptor for the given physical address.
+ */
+struct vm_page * vm_page_lookup_pa(phys_addr_t pa);
+
+/*
+ * Allocate a block of 2^order physical pages.
+ *
+ * The selector is used to determine the segments from which allocation can
+ * be attempted.
+ *
+ * This function should only be used by the vm_resident module.
+ */
+struct vm_page * vm_page_alloc_pa(unsigned int order, unsigned int selector,
+ unsigned short type);
+
+/*
+ * Release a block of 2^order physical pages.
+ *
+ * This function should only be used by the vm_resident module.
+ */
+void vm_page_free_pa(struct vm_page *page, unsigned int order);
+
+/*
+ * Return the name of the given segment.
+ */
+const char * vm_page_seg_name(unsigned int seg_index);
+
+/*
+ * Display internal information about the module.
+ */
+void vm_page_info_all(void);
+
+/*
+ * Return the total amount of physical memory.
+ */
+phys_addr_t vm_page_mem_size(void);
+
+/*
+ * Return the number of free (unused) pages.
+ *
+ * XXX This currently relies on the kernel being non-preemptible and
+ * uniprocessor.
+ */
+unsigned long vm_page_mem_free(void);
+
#endif /* _VM_VM_PAGE_H_ */
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index eb75b975..72f96cbf 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -52,7 +52,6 @@
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <machine/locore.h>
-#include <machine/vm_tuning.h>
@@ -83,7 +82,7 @@
* of active+inactive pages that should be inactive.
* The pageout daemon uses it to update vm_page_inactive_target.
*
- * If vm_page_free_count falls below vm_page_free_target and
+ * If the number of free pages falls below vm_page_free_target and
* vm_page_inactive_count is below vm_page_inactive_target,
* then the pageout daemon starts running.
*/
@@ -94,20 +93,20 @@
/*
* Once the pageout daemon starts running, it keeps going
- * until vm_page_free_count meets or exceeds vm_page_free_target.
+ * until the number of free pages meets or exceeds vm_page_free_target.
*/
#ifndef VM_PAGE_FREE_TARGET
-#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
+#define VM_PAGE_FREE_TARGET(free) (150 + (free) * 10 / 100)
#endif /* VM_PAGE_FREE_TARGET */
/*
- * The pageout daemon always starts running once vm_page_free_count
+ * The pageout daemon always starts running once the number of free pages
* falls below vm_page_free_min.
*/
#ifndef VM_PAGE_FREE_MIN
-#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
+#define VM_PAGE_FREE_MIN(free) (100 + (free) * 8 / 100)
#endif /* VM_PAGE_FREE_MIN */
/* When vm_page_external_count exceeds vm_page_external_limit,
@@ -126,18 +125,18 @@
#endif /* VM_PAGE_EXTERNAL_TARGET */
/*
- * When vm_page_free_count falls below vm_page_free_reserved,
+ * When the number of free pages falls below vm_page_free_reserved,
* only vm-privileged threads can allocate pages. vm-privilege
* allows the pageout daemon and default pager (and any other
* associated threads needed for default pageout) to continue
* operation by dipping into the reserved pool of pages. */
#ifndef VM_PAGE_FREE_RESERVED
-#define VM_PAGE_FREE_RESERVED 50
+#define VM_PAGE_FREE_RESERVED 500
#endif /* VM_PAGE_FREE_RESERVED */
/*
- * When vm_page_free_count falls below vm_pageout_reserved_internal,
+ * When the number of free pages falls below vm_pageout_reserved_internal,
* the pageout daemon no longer trusts external pagers to clean pages.
* External pagers are probably all wedged waiting for a free page.
* It forcibly double-pages dirty pages belonging to external objects,
@@ -145,11 +144,11 @@
*/
#ifndef VM_PAGEOUT_RESERVED_INTERNAL
-#define VM_PAGEOUT_RESERVED_INTERNAL(reserve) ((reserve) - 25)
+#define VM_PAGEOUT_RESERVED_INTERNAL(reserve) ((reserve) - 250)
#endif /* VM_PAGEOUT_RESERVED_INTERNAL */
/*
- * When vm_page_free_count falls below vm_pageout_reserved_really,
+ * When the number of free pages falls below vm_pageout_reserved_really,
* the pageout daemon stops work entirely to let the default pager
* catch up (assuming the default pager has pages to clean).
* Beyond this point, it is too dangerous to consume memory
@@ -157,12 +156,9 @@
*/
#ifndef VM_PAGEOUT_RESERVED_REALLY
-#define VM_PAGEOUT_RESERVED_REALLY(reserve) ((reserve) - 40)
+#define VM_PAGEOUT_RESERVED_REALLY(reserve) ((reserve) - 400)
#endif /* VM_PAGEOUT_RESERVED_REALLY */
-extern void vm_pageout_continue();
-extern void vm_pageout_scan_continue();
-
unsigned int vm_pageout_reserved_internal = 0;
unsigned int vm_pageout_reserved_really = 0;
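
To make the rescaling concrete, here is the arithmetic for a machine with 32768 free pages (128 MiB at 4 KiB per page), assuming the FREE_MIN/FREE_TARGET macros are applied to free_after_reserve as in vm_pageout() further down (editorial worked example):

    /*
     * vm_page_free_reserved        = 500
     * vm_pageout_reserved_internal = 500 - 250 = 250
     * vm_pageout_reserved_really   = 500 - 400 = 100
     *
     * free_after_reserve           = 32768 - 500 = 32268
     * VM_PAGE_FREE_MIN(32268)      = 100 + 32268 * 8 / 100  = 2681
     * VM_PAGE_FREE_TARGET(32268)   = 150 + 32268 * 10 / 100 = 3376
     *
     * versus 332 and 418 with the old divisors: the new settings keep
     * roughly 8-10% of memory free instead of about 1%.
     */
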
@@ -230,16 +226,16 @@ unsigned int vm_pageout_inactive_cleaned_external = 0;
* not busy on exit.
*/
vm_page_t
-vm_pageout_setup(m, paging_offset, new_object, new_offset, flush)
- register vm_page_t m;
- vm_offset_t paging_offset;
- register vm_object_t new_object;
- vm_offset_t new_offset;
- boolean_t flush;
+vm_pageout_setup(
+ vm_page_t m,
+ vm_offset_t paging_offset,
+ vm_object_t new_object,
+ vm_offset_t new_offset,
+ boolean_t flush)
{
- register vm_object_t old_object = m->object;
- register vm_page_t holding_page = 0; /*'=0'to quiet gcc warnings*/
- register vm_page_t new_m;
+ vm_object_t old_object = m->object;
+ vm_page_t holding_page = 0; /* '=0' to quiet gcc warnings */
+ vm_page_t new_m;
assert(m->busy && !m->absent && !m->fictitious);
@@ -417,15 +413,15 @@ vm_pageout_setup(m, paging_offset, new_object, new_offset, flush)
* copy to a new page in a new object, if not.
*/
void
-vm_pageout_page(m, initial, flush)
- register vm_page_t m;
- boolean_t initial;
- boolean_t flush;
+vm_pageout_page(
+ vm_page_t m,
+ boolean_t initial,
+ boolean_t flush)
{
vm_map_copy_t copy;
- register vm_object_t old_object;
- register vm_object_t new_object;
- register vm_page_t holding_page;
+ vm_object_t old_object;
+ vm_object_t new_object;
+ vm_page_t holding_page;
vm_offset_t paging_offset;
kern_return_t rc;
boolean_t precious_clean;
@@ -511,7 +507,7 @@ vm_pageout_page(m, initial, flush)
* vm_page_free_wanted == 0.
*/
-void vm_pageout_scan()
+void vm_pageout_scan(void)
{
unsigned int burst_count;
unsigned int want_pages;
@@ -555,13 +551,15 @@ void vm_pageout_scan()
stack_collect();
net_kmsg_collect();
consider_task_collect();
+ if (0) /* XXX: pcb_collect doesn't do anything yet, so it is
+ pointless to call consider_thread_collect. */
consider_thread_collect();
slab_collect();
for (burst_count = 0;;) {
- register vm_page_t m;
- register vm_object_t object;
- unsigned int free_count;
+ vm_page_t m;
+ vm_object_t object;
+ unsigned long free_count;
/*
* Recalculate vm_page_inactivate_target.
@@ -578,7 +576,7 @@ void vm_pageout_scan()
while ((vm_page_inactive_count < vm_page_inactive_target) &&
!queue_empty(&vm_page_queue_active)) {
- register vm_object_t obj;
+ vm_object_t obj;
vm_pageout_active++;
m = (vm_page_t) queue_first(&vm_page_queue_active);
@@ -632,7 +630,7 @@ void vm_pageout_scan()
*/
simple_lock(&vm_page_queue_free_lock);
- free_count = vm_page_free_count;
+ free_count = vm_page_mem_free();
if ((free_count >= vm_page_free_target) &&
(vm_page_external_count <= vm_page_external_target) &&
(vm_page_free_wanted == 0)) {
@@ -695,7 +693,7 @@ void vm_pageout_scan()
if (want_pages || m->external)
break;
- m = (vm_page_t) queue_next (m);
+ m = (vm_page_t) queue_next (&m->pageq);
if (!m)
goto pause;
}
@@ -862,7 +860,7 @@ void vm_pageout_scan()
}
}
-void vm_pageout_scan_continue()
+void vm_pageout_scan_continue(void)
{
/*
* We just paused to let the pagers catch up.
@@ -893,7 +891,7 @@ void vm_pageout_scan_continue()
* vm_pageout is the high level pageout daemon.
*/
-void vm_pageout_continue()
+void vm_pageout_continue(void)
{
/*
* The pageout daemon is never done, so loop forever.
@@ -915,12 +913,13 @@ void vm_pageout_continue()
}
}
-void vm_pageout()
+void vm_pageout(void)
{
- int free_after_reserve;
+ unsigned long free_after_reserve;
current_thread()->vm_privilege = TRUE;
stack_privilege(current_thread());
+ thread_set_own_priority(0);
/*
* Initialize some paging parameters.
@@ -952,7 +951,7 @@ void vm_pageout()
vm_pageout_reserved_really =
VM_PAGEOUT_RESERVED_REALLY(vm_page_free_reserved);
- free_after_reserve = vm_page_free_count - vm_page_free_reserved;
+ free_after_reserve = vm_page_mem_free() - vm_page_free_reserved;
if (vm_page_external_limit == 0)
vm_page_external_limit =
diff --git a/vm/vm_pageout.h b/vm/vm_pageout.h
index d41ee30a..ea6cfaf4 100644
--- a/vm/vm_pageout.h
+++ b/vm/vm_pageout.h
@@ -44,4 +44,10 @@ extern vm_page_t vm_pageout_setup(vm_page_t, vm_offset_t, vm_object_t,
vm_offset_t, boolean_t);
extern void vm_pageout_page(vm_page_t, boolean_t, boolean_t);
+extern void vm_pageout(void) __attribute__((noreturn));
+
+extern void vm_pageout_continue(void) __attribute__((noreturn));
+
+extern void vm_pageout_scan_continue(void) __attribute__((noreturn));
+
#endif /* _VM_VM_PAGEOUT_H_ */
diff --git a/vm/vm_print.h b/vm/vm_print.h
index 69a20ba3..8a36d75e 100644
--- a/vm/vm_print.h
+++ b/vm/vm_print.h
@@ -1,3 +1,21 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
#ifndef VM_PRINT_H
#define VM_PRINT_H
@@ -5,10 +23,11 @@
#include <machine/db_machdep.h>
/* Debugging: print a map */
-extern void vm_map_print(vm_map_t);
+extern void vm_map_print(db_expr_t addr, boolean_t have_addr,
+ db_expr_t count, const char *modif);
/* Pretty-print a copy object for ddb. */
-extern void vm_map_copy_print(vm_map_copy_t);
+extern void vm_map_copy_print(const vm_map_copy_t);
#include <vm/vm_object.h>
@@ -16,7 +35,7 @@ extern void vm_object_print(vm_object_t);
#include <vm/vm_page.h>
-extern void vm_page_print(vm_page_t);
+extern void vm_page_print(const vm_page_t);
#endif /* VM_PRINT_H */
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index 66ab51f0..fa7a337b 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -27,7 +27,7 @@
* the rights to redistribute these changes.
*/
/*
- * File: vm/vm_page.c
+ * File: vm/vm_resident.c
* Author: Avadis Tevanian, Jr., Michael Wayne Young
*
* Resident memory management module.
@@ -65,14 +65,14 @@
/*
- * Associated with eacn page of user-allocatable memory is a
+ * Associated with each page of user-allocatable memory is a
* page structure.
*/
/*
* These variables record the values returned by vm_page_bootstrap,
* for debugging purposes. The implementation of pmap_steal_memory
- * and pmap_startup here also uses them internally.
+ * here also uses them internally.
*/
vm_offset_t virtual_space_start;
@@ -95,29 +95,18 @@ vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
unsigned int vm_page_bucket_count = 0; /* How big is array? */
unsigned int vm_page_hash_mask; /* Mask for hash function */
-/*
- * Resident page structures are initialized from
- * a template (see vm_page_alloc).
- *
- * When adding a new field to the virtual memory
- * object structure, be sure to add initialization
- * (see vm_page_bootstrap).
- */
-struct vm_page vm_page_template;
-
-/*
- * Resident pages that represent real memory
- * are allocated from a free list.
- */
-vm_page_t vm_page_queue_free;
vm_page_t vm_page_queue_fictitious;
decl_simple_lock_data(,vm_page_queue_free_lock)
unsigned int vm_page_free_wanted;
-int vm_page_free_count;
int vm_page_fictitious_count;
int vm_page_external_count;
-unsigned int vm_page_free_count_minimum; /* debugging */
+/*
+ * This variable isn't directly used. It's merely a placeholder for the
+ * address used to synchronize threads waiting for pages to become
+ * available. The real value is returned by vm_page_mem_free().
+ */
+unsigned int vm_page_free_avail;
/*
* Occasionally, the virtual memory system uses
@@ -192,48 +181,15 @@ void vm_page_bootstrap(
vm_offset_t *startp,
vm_offset_t *endp)
{
- register vm_page_t m;
int i;
/*
- * Initialize the vm_page template.
- */
-
- m = &vm_page_template;
- m->object = VM_OBJECT_NULL; /* reset later */
- m->offset = 0; /* reset later */
- m->wire_count = 0;
-
- m->inactive = FALSE;
- m->active = FALSE;
- m->laundry = FALSE;
- m->free = FALSE;
- m->external = FALSE;
-
- m->busy = TRUE;
- m->wanted = FALSE;
- m->tabled = FALSE;
- m->fictitious = FALSE;
- m->private = FALSE;
- m->absent = FALSE;
- m->error = FALSE;
- m->dirty = FALSE;
- m->precious = FALSE;
- m->reference = FALSE;
-
- m->phys_addr = 0; /* reset later */
-
- m->page_lock = VM_PROT_NONE;
- m->unlock_request = VM_PROT_NONE;
-
- /*
* Initialize the page queues.
*/
simple_lock_init(&vm_page_queue_free_lock);
simple_lock_init(&vm_page_queue_lock);
- vm_page_queue_free = VM_PAGE_NULL;
vm_page_queue_fictitious = VM_PAGE_NULL;
queue_init(&vm_page_queue_active);
queue_init(&vm_page_queue_inactive);
@@ -241,12 +197,6 @@ void vm_page_bootstrap(
vm_page_free_wanted = 0;
/*
- * Steal memory for the kernel map entries.
- */
-
- kentry_data = pmap_steal_memory(kentry_data_size);
-
- /*
* Allocate (and initialize) the virtual-to-physical
* table hash buckets.
*
@@ -274,35 +224,25 @@ void vm_page_bootstrap(
sizeof(vm_page_bucket_t));
for (i = 0; i < vm_page_bucket_count; i++) {
- register vm_page_bucket_t *bucket = &vm_page_buckets[i];
+ vm_page_bucket_t *bucket = &vm_page_buckets[i];
bucket->pages = VM_PAGE_NULL;
simple_lock_init(&bucket->lock);
}
- /*
- * Machine-dependent code allocates the resident page table.
- * It uses vm_page_init to initialize the page frames.
- * The code also returns to us the virtual space available
- * to the kernel. We don't trust the pmap module
- * to get the alignment right.
- */
+ vm_page_setup();
- pmap_startup(&virtual_space_start, &virtual_space_end);
virtual_space_start = round_page(virtual_space_start);
virtual_space_end = trunc_page(virtual_space_end);
*startp = virtual_space_start;
*endp = virtual_space_end;
-
- /* printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);*/
- vm_page_free_count_minimum = vm_page_free_count;
}
#ifndef MACHINE_PAGES
/*
- * We implement pmap_steal_memory and pmap_startup with the help
- * of two simpler functions, pmap_virtual_space and pmap_next_page.
+ * We implement pmap_steal_memory with the help
+ * of two simpler functions, pmap_virtual_space and vm_page_bootalloc.
*/
vm_offset_t pmap_steal_memory(
@@ -310,11 +250,7 @@ vm_offset_t pmap_steal_memory(
{
vm_offset_t addr, vaddr, paddr;
- /*
- * We round the size to an integer multiple.
- */
-
- size = (size + 3) &~ 3;
+ size = round_page(size);
/*
* If this is the first call to pmap_steal_memory,
@@ -347,8 +283,7 @@ vm_offset_t pmap_steal_memory(
for (vaddr = round_page(addr);
vaddr < addr + size;
vaddr += PAGE_SIZE) {
- if (!pmap_next_page(&paddr))
- panic("pmap_steal_memory");
+ paddr = vm_page_bootalloc(PAGE_SIZE);
/*
* XXX Logically, these mappings should be wired,
@@ -361,64 +296,6 @@ vm_offset_t pmap_steal_memory(
return addr;
}
-
-void pmap_startup(
- vm_offset_t *startp,
- vm_offset_t *endp)
-{
- unsigned int i, npages, pages_initialized;
- vm_page_t pages;
- vm_offset_t paddr;
-
- /*
- * We calculate how many page frames we will have
- * and then allocate the page structures in one chunk.
- */
-
- npages = ((PAGE_SIZE * pmap_free_pages() +
- (round_page(virtual_space_start) - virtual_space_start)) /
- (PAGE_SIZE + sizeof *pages));
-
- pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);
-
- /*
- * Initialize the page frames.
- */
-
- for (i = 0, pages_initialized = 0; i < npages; i++) {
- if (!pmap_next_page(&paddr))
- break;
-
- vm_page_init(&pages[i], paddr);
- pages_initialized++;
- }
- i = 0;
- while (pmap_next_page(&paddr))
- i++;
- if (i)
- printf("%u memory page(s) left away\n", i);
-
- /*
- * Release pages in reverse order so that physical pages
- * initially get allocated in ascending addresses. This keeps
- * the devices (which must address physical memory) happy if
- * they require several consecutive pages.
- */
-
- for (i = pages_initialized; i > 0; i--) {
- vm_page_release(&pages[i - 1], FALSE);
- }
-
- /*
- * We have to re-align virtual_space_start,
- * because pmap_steal_memory has been using it.
- */
-
- virtual_space_start = round_page(virtual_space_start);
-
- *startp = virtual_space_start;
- *endp = virtual_space_end;
-}
#endif /* MACHINE_PAGES */
/*
@@ -430,35 +307,7 @@ void pmap_startup(
void vm_page_module_init(void)
{
kmem_cache_init(&vm_page_cache, "vm_page", sizeof(struct vm_page), 0,
- NULL, NULL, NULL, 0);
-}
-
-/*
- * Routine: vm_page_create
- * Purpose:
- * After the VM system is up, machine-dependent code
- * may stumble across more physical memory. For example,
- * memory that it was reserving for a frame buffer.
- * vm_page_create turns this memory into available pages.
- */
-
-void vm_page_create(
- vm_offset_t start,
- vm_offset_t end)
-{
- vm_offset_t paddr;
- vm_page_t m;
-
- for (paddr = round_page(start);
- paddr < trunc_page(end);
- paddr += PAGE_SIZE) {
- m = (vm_page_t) kmem_cache_alloc(&vm_page_cache);
- if (m == VM_PAGE_NULL)
- panic("vm_page_create");
-
- vm_page_init(m, paddr);
- vm_page_release(m, FALSE);
- }
+ NULL, 0);
}
/*
@@ -483,11 +332,11 @@ void vm_page_create(
*/
void vm_page_insert(
- register vm_page_t mem,
- register vm_object_t object,
- register vm_offset_t offset)
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset)
{
- register vm_page_bucket_t *bucket;
+ vm_page_bucket_t *bucket;
VM_PAGE_CHECK(mem);
@@ -555,11 +404,11 @@ void vm_page_insert(
*/
void vm_page_replace(
- register vm_page_t mem,
- register vm_object_t object,
- register vm_offset_t offset)
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset)
{
- register vm_page_bucket_t *bucket;
+ vm_page_bucket_t *bucket;
VM_PAGE_CHECK(mem);
@@ -582,7 +431,7 @@ void vm_page_replace(
simple_lock(&bucket->lock);
if (bucket->pages) {
vm_page_t *mp = &bucket->pages;
- register vm_page_t m = *mp;
+ vm_page_t m = *mp;
do {
if (m->object == object && m->offset == offset) {
/*
@@ -646,10 +495,10 @@ void vm_page_replace(
*/
void vm_page_remove(
- register vm_page_t mem)
+ vm_page_t mem)
{
- register vm_page_bucket_t *bucket;
- register vm_page_t this;
+ vm_page_bucket_t *bucket;
+ vm_page_t this;
assert(mem->tabled);
VM_PAGE_CHECK(mem);
@@ -665,7 +514,7 @@ void vm_page_remove(
bucket->pages = mem->next;
} else {
- register vm_page_t *prev;
+ vm_page_t *prev;
for (prev = &this->next;
(this = *prev) != mem;
@@ -704,11 +553,11 @@ void vm_page_remove(
*/
vm_page_t vm_page_lookup(
- register vm_object_t object,
- register vm_offset_t offset)
+ vm_object_t object,
+ vm_offset_t offset)
{
- register vm_page_t mem;
- register vm_page_bucket_t *bucket;
+ vm_page_t mem;
+ vm_page_bucket_t *bucket;
/*
* Search the hash table for this object/offset pair
@@ -735,9 +584,9 @@ vm_page_t vm_page_lookup(
* The object must be locked.
*/
void vm_page_rename(
- register vm_page_t mem,
- register vm_object_t new_object,
- vm_offset_t new_offset)
+ vm_page_t mem,
+ vm_object_t new_object,
+ vm_offset_t new_offset)
{
/*
* Changes to mem->object require the page lock because
@@ -750,6 +599,33 @@ void vm_page_rename(
vm_page_unlock_queues();
}
+static void vm_page_init_template(vm_page_t m)
+{
+ m->object = VM_OBJECT_NULL; /* reset later */
+ m->offset = 0; /* reset later */
+ m->wire_count = 0;
+
+ m->inactive = FALSE;
+ m->active = FALSE;
+ m->laundry = FALSE;
+ m->free = FALSE;
+ m->external = FALSE;
+
+ m->busy = TRUE;
+ m->wanted = FALSE;
+ m->tabled = FALSE;
+ m->fictitious = FALSE;
+ m->private = FALSE;
+ m->absent = FALSE;
+ m->error = FALSE;
+ m->dirty = FALSE;
+ m->precious = FALSE;
+ m->reference = FALSE;
+
+ m->page_lock = VM_PROT_NONE;
+ m->unlock_request = VM_PROT_NONE;
+}
+
/*
* vm_page_init:
*
@@ -758,11 +634,9 @@ void vm_page_rename(
* so that it can be given to vm_page_release or vm_page_insert.
*/
void vm_page_init(
- vm_page_t mem,
- vm_offset_t phys_addr)
+ vm_page_t mem)
{
- *mem = vm_page_template;
- mem->phys_addr = phys_addr;
+ vm_page_init_template(mem);
}
/*
@@ -774,7 +648,7 @@ void vm_page_init(
vm_page_t vm_page_grab_fictitious(void)
{
- register vm_page_t m;
+ vm_page_t m;
simple_lock(&vm_page_queue_free_lock);
m = vm_page_queue_fictitious;
@@ -794,8 +668,8 @@ vm_page_t vm_page_grab_fictitious(void)
* Release a fictitious page to the free list.
*/
-void vm_page_release_fictitious(
- register vm_page_t m)
+static void vm_page_release_fictitious(
+ vm_page_t m)
{
simple_lock(&vm_page_queue_free_lock);
if (m->free)
@@ -818,7 +692,7 @@ int vm_page_fictitious_quantum = 5;
void vm_page_more_fictitious(void)
{
- register vm_page_t m;
+ vm_page_t m;
int i;
for (i = 0; i < vm_page_fictitious_quantum; i++) {
@@ -826,7 +700,8 @@ void vm_page_more_fictitious(void)
if (m == VM_PAGE_NULL)
panic("vm_page_more_fictitious");
- vm_page_init(m, vm_page_fictitious_addr);
+ vm_page_init(m);
+ m->phys_addr = vm_page_fictitious_addr;
m->fictitious = TRUE;
vm_page_release_fictitious(m);
}
@@ -836,25 +711,46 @@ void vm_page_more_fictitious(void)
* vm_page_convert:
*
* Attempt to convert a fictitious page into a real page.
+ *
+ * The object referenced by *MP must be locked.
*/
boolean_t vm_page_convert(
- register vm_page_t m,
+ struct vm_page **mp,
boolean_t external)
{
- register vm_page_t real_m;
+ struct vm_page *real_m, *fict_m;
+ vm_object_t object;
+ vm_offset_t offset;
+
+ fict_m = *mp;
+
+ assert(fict_m->fictitious);
+ assert(fict_m->phys_addr == vm_page_fictitious_addr);
+ assert(!fict_m->active);
+ assert(!fict_m->inactive);
real_m = vm_page_grab(external);
if (real_m == VM_PAGE_NULL)
return FALSE;
- m->phys_addr = real_m->phys_addr;
- m->fictitious = FALSE;
+ object = fict_m->object;
+ offset = fict_m->offset;
+ vm_page_remove(fict_m);
- real_m->phys_addr = vm_page_fictitious_addr;
- real_m->fictitious = TRUE;
+ memcpy(&real_m->vm_page_header,
+ &fict_m->vm_page_header,
+ sizeof(*fict_m) - VM_PAGE_HEADER_SIZE);
+ real_m->fictitious = FALSE;
- vm_page_release_fictitious(real_m);
+ vm_page_insert(real_m, object, offset);
+
+ assert(real_m->phys_addr != vm_page_fictitious_addr);
+ assert(fict_m->fictitious);
+ assert(fict_m->phys_addr == vm_page_fictitious_addr);
+
+ vm_page_release_fictitious(fict_m);
+ *mp = real_m;
return TRUE;
}
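
Since the real page replaces the fictitious one inside the object, vm_page_convert now takes a pointer to the caller's page pointer and updates it on success. A hedged caller sketch (example_make_real is invented for illustration; the object lock is assumed held, as the function requires):

    static kern_return_t
    example_make_real(vm_object_t object, vm_offset_t offset)
    {
        vm_page_t m;

        m = vm_page_lookup(object, offset);

        if (m != VM_PAGE_NULL && m->fictitious) {
            /* On success, m now names the real page sitting at
             * (object, offset); the fictitious page has been
             * returned to its free list. */
            if (!vm_page_convert(&m, !object->internal))
                return KERN_RESOURCE_SHORTAGE;
        }

        return KERN_SUCCESS;
    }
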
@@ -868,7 +764,7 @@ boolean_t vm_page_convert(
vm_page_t vm_page_grab(
boolean_t external)
{
- register vm_page_t mem;
+ vm_page_t mem;
simple_lock(&vm_page_queue_free_lock);
@@ -878,7 +774,7 @@ vm_page_t vm_page_grab(
* for externally-managed pages.
*/
- if (((vm_page_free_count < vm_page_free_reserved)
+ if (((vm_page_mem_free() < vm_page_free_reserved)
|| (external
&& (vm_page_external_count > vm_page_external_limit)))
&& !current_thread()->vm_privilege) {
@@ -886,15 +782,16 @@ vm_page_t vm_page_grab(
return VM_PAGE_NULL;
}
- if (vm_page_queue_free == VM_PAGE_NULL)
- panic("vm_page_grab");
+ mem = vm_page_alloc_pa(0, VM_PAGE_SEL_DIRECTMAP, VM_PT_KERNEL);
+
+ if (mem == NULL) {
+ simple_unlock(&vm_page_queue_free_lock);
+ return NULL;
+ }
- if (--vm_page_free_count < vm_page_free_count_minimum)
- vm_page_free_count_minimum = vm_page_free_count;
if (external)
vm_page_external_count++;
- mem = vm_page_queue_free;
- vm_page_queue_free = (vm_page_t) mem->pageq.next;
+
mem->free = FALSE;
mem->extcounted = mem->external = external;
simple_unlock(&vm_page_queue_free_lock);
@@ -910,15 +807,15 @@ vm_page_t vm_page_grab(
* it doesn't really matter.
*/
- if ((vm_page_free_count < vm_page_free_min) ||
- ((vm_page_free_count < vm_page_free_target) &&
+ if ((vm_page_mem_free() < vm_page_free_min) ||
+ ((vm_page_mem_free() < vm_page_free_target) &&
(vm_page_inactive_count < vm_page_inactive_target)))
thread_wakeup((event_t) &vm_page_free_wanted);
return mem;
}
-vm_offset_t vm_page_grab_phys_addr()
+vm_offset_t vm_page_grab_phys_addr(void)
{
vm_page_t p = vm_page_grab(FALSE);
if (p == VM_PAGE_NULL)
@@ -928,208 +825,92 @@ vm_offset_t vm_page_grab_phys_addr()
}
/*
- * vm_page_grab_contiguous_pages:
- *
- * Take N pages off the free list, the pages should
- * cover a contiguous range of physical addresses.
- * [Used by device drivers to cope with DMA limitations]
+ * vm_page_release:
*
- * Returns the page descriptors in ascending order, or
- * Returns KERN_RESOURCE_SHORTAGE if it could not.
+ * Return a page to the free list.
*/
-/* Biggest phys page number for the pages we handle in VM */
-
-vm_size_t vm_page_big_pagenum = 0; /* Set this before call! */
-
-kern_return_t
-vm_page_grab_contiguous_pages(
- int npages,
- vm_page_t pages[],
- natural_t *bits,
- boolean_t external)
+static void vm_page_release(
+ vm_page_t mem,
+ boolean_t external)
{
- register int first_set;
- int size, alloc_size;
- kern_return_t ret;
- vm_page_t mem, *prevmemp;
-
-#ifndef NBBY
-#define NBBY 8 /* size in bits of sizeof()`s unity */
-#endif
-
-#define NBPEL (sizeof(natural_t)*NBBY)
-
- size = (vm_page_big_pagenum + NBPEL - 1)
- & ~(NBPEL - 1); /* in bits */
-
- size = size / NBBY; /* in bytes */
-
- /*
- * If we are called before the VM system is fully functional
- * the invoker must provide us with the work space. [one bit
- * per page starting at phys 0 and up to vm_page_big_pagenum]
- */
- if (bits == 0) {
- alloc_size = round_page(size);
- if (kmem_alloc_wired(kernel_map,
- (vm_offset_t *)&bits,
- alloc_size)
- != KERN_SUCCESS)
- return KERN_RESOURCE_SHORTAGE;
- } else
- alloc_size = 0;
-
- memset(bits, 0, size);
-
- /*
- * A very large granularity call, its rare so that is ok
- */
simple_lock(&vm_page_queue_free_lock);
+ if (mem->free)
+ panic("vm_page_release");
+ mem->free = TRUE;
+ vm_page_free_pa(mem, 0);
+ if (external)
+ vm_page_external_count--;
/*
- * Do not dip into the reserved pool.
- */
-
- if ((vm_page_free_count < vm_page_free_reserved)
- || (vm_page_external_count >= vm_page_external_limit)) {
- printf_once("no more room for vm_page_grab_contiguous_pages");
- simple_unlock(&vm_page_queue_free_lock);
- return KERN_RESOURCE_SHORTAGE;
- }
-
- /*
- * First pass through, build a big bit-array of
- * the pages that are free. It is not going to
- * be too large anyways, in 4k we can fit info
- * for 32k pages.
+ * Check if we should wake up someone waiting for a page.
+ * But don't bother waking them unless they can allocate.
+ *
+ * We wake up only one thread, to prevent starvation.
+ * Because the scheduling system handles wait queues FIFO,
+ * if we wake up all waiting threads, one greedy thread
+ * can starve multiple nice-guy threads. When the threads
+ * all wake up, the greedy thread runs first, grabs the page,
+ * and waits for another page. It will be the first to run
+ * when the next page is freed.
+ *
+ * However, there is a slight danger here.
+ * The thread we wake might not use the free page.
+ * Then the other threads could wait indefinitely
+ * while the page goes unused. To forestall this,
+ * the pageout daemon will keep making free pages
+ * as long as vm_page_free_wanted is non-zero.
*/
- mem = vm_page_queue_free;
- while (mem) {
- register int word_index, bit_index;
-
- bit_index = (mem->phys_addr >> PAGE_SHIFT);
- word_index = bit_index / NBPEL;
- bit_index = bit_index - (word_index * NBPEL);
- bits[word_index] |= 1 << bit_index;
- mem = (vm_page_t) mem->pageq.next;
+ if ((vm_page_free_wanted > 0) &&
+ (vm_page_mem_free() >= vm_page_free_reserved)) {
+ vm_page_free_wanted--;
+ thread_wakeup_one((event_t) &vm_page_free_avail);
}
- /*
- * Second loop. Scan the bit array for NPAGES
- * contiguous bits. That gives us, if any,
- * the range of pages we will be grabbing off
- * the free list.
- */
- {
- register int bits_so_far = 0, i;
+ simple_unlock(&vm_page_queue_free_lock);
+}
- first_set = 0;
+/*
+ * vm_page_grab_contig:
+ *
+ * Remove a block of contiguous pages from the free list.
+ * Returns VM_PAGE_NULL if the request fails.
+ */
- for (i = 0; i < size; i += sizeof(natural_t)) {
+vm_page_t vm_page_grab_contig(
+ vm_size_t size,
+ unsigned int selector)
+{
+ unsigned int i, order, nr_pages;
+ vm_page_t mem;
- register natural_t v = bits[i / sizeof(natural_t)];
- register int bitpos;
+ order = vm_page_order(size);
+ nr_pages = 1 << order;
- /*
- * Bitscan this one word
- */
- if (v) {
- /*
- * keep counting them beans ?
- */
- bitpos = 0;
+ simple_lock(&vm_page_queue_free_lock);
- if (bits_so_far) {
-count_ones:
- while (v & 1) {
- bitpos++;
- /*
- * got enough beans ?
- */
- if (++bits_so_far == npages)
- goto found_em;
- v >>= 1;
- }
- /* if we are being lucky, roll again */
- if (bitpos == NBPEL)
- continue;
- }
+ /*
+ * Only let privileged threads (involved in pageout)
+ * dip into the reserved pool or exceed the limit
+ * for externally-managed pages.
+ */
- /*
- * search for beans here
- */
- bits_so_far = 0;
- while ((bitpos < NBPEL) && ((v & 1) == 0)) {
- bitpos++;
- v >>= 1;
- }
- if (v & 1) {
- first_set = (i * NBBY) + bitpos;
- goto count_ones;
- }
- }
- /*
- * No luck
- */
- bits_so_far = 0;
- }
+ if (((vm_page_mem_free() - nr_pages) <= vm_page_free_reserved)
+ && !current_thread()->vm_privilege) {
+ simple_unlock(&vm_page_queue_free_lock);
+ return VM_PAGE_NULL;
}
- /*
- * We could not find enough contiguous pages.
- */
- simple_unlock(&vm_page_queue_free_lock);
+ /* TODO Allow caller to pass type */
+ mem = vm_page_alloc_pa(order, selector, VM_PT_KERNEL);
- printf_once("no contiguous room for vm_page_grab_contiguous_pages");
- ret = KERN_RESOURCE_SHORTAGE;
- goto out;
+ if (mem == NULL)
+ panic("vm_page_grab_contig");
- /*
- * Final pass. Now we know which pages we want.
- * Scan the list until we find them all, grab
- * pages as we go. FIRST_SET tells us where
- * in the bit-array our pages start.
- */
-found_em:
- vm_page_free_count -= npages;
- if (vm_page_free_count < vm_page_free_count_minimum)
- vm_page_free_count_minimum = vm_page_free_count;
- if (external)
- vm_page_external_count += npages;
- {
- register vm_offset_t first_phys, last_phys;
-
- /* cache values for compare */
- first_phys = first_set << PAGE_SHIFT;
- last_phys = first_phys + (npages << PAGE_SHIFT);/* not included */
-
- /* running pointers */
- mem = vm_page_queue_free;
- prevmemp = &vm_page_queue_free;
-
- while (mem) {
-
- register vm_offset_t addr;
-
- addr = mem->phys_addr;
-
- if ((addr >= first_phys) &&
- (addr < last_phys)) {
- *prevmemp = (vm_page_t) mem->pageq.next;
- pages[(addr - first_phys) >> PAGE_SHIFT] = mem;
- mem->free = FALSE;
- mem->extcounted = mem->external = external;
- /*
- * Got them all ?
- */
- if (--npages == 0) break;
- } else
- prevmemp = (vm_page_t *) &mem->pageq.next;
-
- mem = (vm_page_t) mem->pageq.next;
- }
+ for (i = 0; i < nr_pages; i++) {
+ mem[i].free = FALSE;
+ mem[i].extcounted = mem[i].external = 0;
}
simple_unlock(&vm_page_queue_free_lock);
@@ -1145,63 +926,42 @@ found_em:
* it doesn't really matter.
*/
- if ((vm_page_free_count < vm_page_free_min) ||
- ((vm_page_free_count < vm_page_free_target) &&
+ if ((vm_page_mem_free() < vm_page_free_min) ||
+ ((vm_page_mem_free() < vm_page_free_target) &&
(vm_page_inactive_count < vm_page_inactive_target)))
- thread_wakeup(&vm_page_free_wanted);
-
- ret = KERN_SUCCESS;
-out:
- if (alloc_size)
- kmem_free(kernel_map, (vm_offset_t) bits, alloc_size);
+ thread_wakeup((event_t) &vm_page_free_wanted);
- return ret;
+ return mem;
}
/*
- * vm_page_release:
+ * vm_page_free_contig:
*
- * Return a page to the free list.
+ * Return a block of contiguous pages to the free list.
*/
-void vm_page_release(
- register vm_page_t mem,
- boolean_t external)
+void vm_page_free_contig(vm_page_t mem, vm_size_t size)
{
+ unsigned int i, order, nr_pages;
+
+ order = vm_page_order(size);
+ nr_pages = 1 << order;
+
simple_lock(&vm_page_queue_free_lock);
- if (mem->free)
- panic("vm_page_release");
- mem->free = TRUE;
- mem->pageq.next = (queue_entry_t) vm_page_queue_free;
- vm_page_queue_free = mem;
- vm_page_free_count++;
- if (external)
- vm_page_external_count--;
- /*
- * Check if we should wake up someone waiting for page.
- * But don't bother waking them unless they can allocate.
- *
- * We wakeup only one thread, to prevent starvation.
- * Because the scheduling system handles wait queues FIFO,
- * if we wakeup all waiting threads, one greedy thread
- * can starve multiple niceguy threads. When the threads
- * all wakeup, the greedy threads runs first, grabs the page,
- * and waits for another page. It will be the first to run
- * when the next page is freed.
- *
- * However, there is a slight danger here.
- * The thread we wake might not use the free page.
- * Then the other threads could wait indefinitely
- * while the page goes unused. To forestall this,
- * the pageout daemon will keep making free pages
- * as long as vm_page_free_wanted is non-zero.
- */
+ for (i = 0; i < nr_pages; i++) {
+ if (mem[i].free)
+ panic("vm_page_free_contig");
+
+ mem[i].free = TRUE;
+ }
+
+ vm_page_free_pa(mem, order);
if ((vm_page_free_wanted > 0) &&
- (vm_page_free_count >= vm_page_free_reserved)) {
+ (vm_page_mem_free() >= vm_page_free_reserved)) {
vm_page_free_wanted--;
- thread_wakeup_one((event_t) &vm_page_free_count);
+ thread_wakeup_one((event_t) &vm_page_free_avail);
}
simple_unlock(&vm_page_queue_free_lock);
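
A hypothetical consumer of the contiguous-block interface (all names below are invented; a real driver would also map or wire the pages as needed):

    static vm_page_t
    example_dma_alloc(phys_addr_t *pap)
    {
        vm_page_t pages;

        /* 64 KiB, physically contiguous, from the DMA segment. */
        pages = vm_page_grab_contig(64 * 1024, VM_PAGE_SEL_DMA);

        if (pages == VM_PAGE_NULL)
            return VM_PAGE_NULL;

        *pap = vm_page_to_pa(pages);    /* physical base of the block */
        return pages;
    }

    static void
    example_dma_free(vm_page_t pages)
    {
        vm_page_free_contig(pages, 64 * 1024);
    }
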
@@ -1227,11 +987,11 @@ void vm_page_wait(
*/
simple_lock(&vm_page_queue_free_lock);
- if ((vm_page_free_count < vm_page_free_target)
+ if ((vm_page_mem_free() < vm_page_free_target)
|| (vm_page_external_count > vm_page_external_limit)) {
if (vm_page_free_wanted++ == 0)
thread_wakeup((event_t)&vm_page_free_wanted);
- assert_wait((event_t)&vm_page_free_count, FALSE);
+ assert_wait((event_t)&vm_page_free_avail, FALSE);
simple_unlock(&vm_page_queue_free_lock);
if (continuation != 0) {
counter(c_vm_page_wait_block_user++);
@@ -1257,7 +1017,7 @@ vm_page_t vm_page_alloc(
vm_object_t object,
vm_offset_t offset)
{
- register vm_page_t mem;
+ vm_page_t mem;
mem = vm_page_grab(!object->internal);
if (mem == VM_PAGE_NULL)
@@ -1279,7 +1039,7 @@ vm_page_t vm_page_alloc(
* Object and page queues must be locked prior to entry.
*/
void vm_page_free(
- register vm_page_t mem)
+ vm_page_t mem)
{
if (mem->free)
panic("vm_page_free");
@@ -1310,12 +1070,13 @@ void vm_page_free(
*/
if (mem->private || mem->fictitious) {
- vm_page_init(mem, vm_page_fictitious_addr);
+ vm_page_init(mem);
+ mem->phys_addr = vm_page_fictitious_addr;
mem->fictitious = TRUE;
vm_page_release_fictitious(mem);
} else {
int external = mem->external && mem->extcounted;
- vm_page_init(mem, mem->phys_addr);
+ vm_page_init(mem);
vm_page_release(mem, external);
}
}
@@ -1330,7 +1091,7 @@ void vm_page_free(
* The page's object and the page queues must be locked.
*/
void vm_page_wire(
- register vm_page_t mem)
+ vm_page_t mem)
{
VM_PAGE_CHECK(mem);
@@ -1351,7 +1112,7 @@ void vm_page_wire(
* The page's object and the page queues must be locked.
*/
void vm_page_unwire(
- register vm_page_t mem)
+ vm_page_t mem)
{
VM_PAGE_CHECK(mem);
@@ -1374,7 +1135,7 @@ void vm_page_unwire(
* The page queues must be locked.
*/
void vm_page_deactivate(
- register vm_page_t m)
+ vm_page_t m)
{
VM_PAGE_CHECK(m);
@@ -1408,7 +1169,7 @@ void vm_page_deactivate(
*/
void vm_page_activate(
- register vm_page_t m)
+ vm_page_t m)
{
VM_PAGE_CHECK(m);
@@ -1505,10 +1266,10 @@ vm_page_info(
* Routine: vm_page_print [exported]
*/
void vm_page_print(p)
- vm_page_t p;
+ const vm_page_t p;
{
iprintf("Page 0x%X: object 0x%X,", (vm_offset_t) p, (vm_offset_t) p->object);
- printf(" offset 0x%X", (vm_offset_t) p->offset);
+ printf(" offset 0x%X", p->offset);
printf("wire_count %d,", p->wire_count);
printf(" %s",
(p->active ? "active" : (p->inactive ? "inactive" : "loose")));
@@ -1533,7 +1294,7 @@ void vm_page_print(p)
printf("%s,",
(p->tabled ? "" : "not_tabled"));
printf("phys_addr = 0x%X, lock = 0x%X, unlock_request = 0x%X\n",
- (vm_offset_t) p->phys_addr,
+ p->phys_addr,
(vm_offset_t) p->page_lock,
(vm_offset_t) p->unlock_request);
}
diff --git a/vm/vm_resident.h b/vm/vm_resident.h
index 67f1807f..e8bf6818 100644
--- a/vm/vm_resident.h
+++ b/vm/vm_resident.h
@@ -38,8 +38,8 @@
* The object and page must be locked.
*/
extern void vm_page_replace (
- register vm_page_t mem,
- register vm_object_t object,
- register vm_offset_t offset);
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset);
#endif /* _VM_RESIDENT_H_ */
diff --git a/vm/vm_user.c b/vm/vm_user.c
index 6fe398e0..e65f6d5f 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -56,11 +56,11 @@ vm_statistics_data_t vm_stat;
 * vm_allocate allocates "zero fill" memory in the specified
* map.
*/
-kern_return_t vm_allocate(map, addr, size, anywhere)
- register vm_map_t map;
- register vm_offset_t *addr;
- register vm_size_t size;
- boolean_t anywhere;
+kern_return_t vm_allocate(
+ vm_map_t map,
+ vm_offset_t *addr,
+ vm_size_t size,
+ boolean_t anywhere)
{
kern_return_t result;
@@ -97,10 +97,10 @@ kern_return_t vm_allocate(map, addr, size, anywhere)
* vm_deallocate deallocates the specified range of addresses in the
* specified address map.
*/
-kern_return_t vm_deallocate(map, start, size)
- register vm_map_t map;
- vm_offset_t start;
- vm_size_t size;
+kern_return_t vm_deallocate(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size)
{
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -115,11 +115,11 @@ kern_return_t vm_deallocate(map, start, size)
* vm_inherit sets the inheritance of the specified range in the
* specified map.
*/
-kern_return_t vm_inherit(map, start, size, new_inheritance)
- register vm_map_t map;
- vm_offset_t start;
- vm_size_t size;
- vm_inherit_t new_inheritance;
+kern_return_t vm_inherit(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size,
+ vm_inherit_t new_inheritance)
{
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -149,12 +149,12 @@ kern_return_t vm_inherit(map, start, size, new_inheritance)
* specified map.
*/
-kern_return_t vm_protect(map, start, size, set_maximum, new_protection)
- register vm_map_t map;
- vm_offset_t start;
- vm_size_t size;
- boolean_t set_maximum;
- vm_prot_t new_protection;
+kern_return_t vm_protect(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size,
+ boolean_t set_maximum,
+ vm_prot_t new_protection)
{
if ((map == VM_MAP_NULL) ||
(new_protection & ~(VM_PROT_ALL|VM_PROT_NOTIFY)))
@@ -172,9 +172,9 @@ kern_return_t vm_protect(map, start, size, set_maximum, new_protection)
set_maximum));
}
-kern_return_t vm_statistics(map, stat)
- vm_map_t map;
- vm_statistics_data_t *stat;
+kern_return_t vm_statistics(
+ vm_map_t map,
+ vm_statistics_data_t *stat)
{
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -182,7 +182,7 @@ kern_return_t vm_statistics(map, stat)
*stat = vm_stat;
stat->pagesize = PAGE_SIZE;
- stat->free_count = vm_page_free_count;
+ stat->free_count = vm_page_mem_free();
stat->active_count = vm_page_active_count;
stat->inactive_count = vm_page_inactive_count;
stat->wire_count = vm_page_wire_count;
@@ -217,15 +217,13 @@ kern_return_t vm_cache_statistics(
* Handle machine-specific attributes for a mapping, such
* as cachability, migrability, etc.
*/
-kern_return_t vm_machine_attribute(map, address, size, attribute, value)
- vm_map_t map;
- vm_address_t address;
- vm_size_t size;
- vm_machine_attribute_t attribute;
- vm_machine_attribute_val_t* value; /* IN/OUT */
+kern_return_t vm_machine_attribute(
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ vm_machine_attribute_t attribute,
+ vm_machine_attribute_val_t* value) /* IN/OUT */
{
- extern kern_return_t vm_map_machine_attribute();
-
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -237,12 +235,12 @@ kern_return_t vm_machine_attribute(map, address, size, attribute, value)
return vm_map_machine_attribute(map, address, size, attribute, value);
}
-kern_return_t vm_read(map, address, size, data, data_size)
- vm_map_t map;
- vm_address_t address;
- vm_size_t size;
- pointer_t *data;
- vm_size_t *data_size;
+kern_return_t vm_read(
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ pointer_t *data,
+ vm_size_t *data_size)
{
kern_return_t error;
vm_map_copy_t ipc_address;
@@ -261,11 +259,11 @@ kern_return_t vm_read(map, address, size, data, data_size)
return(error);
}
-kern_return_t vm_write(map, address, data, size)
- vm_map_t map;
- vm_address_t address;
- pointer_t data;
- vm_size_t size;
+kern_return_t vm_write(
+ vm_map_t map,
+ vm_address_t address,
+ pointer_t data,
+ vm_size_t size)
{
if (map == VM_MAP_NULL)
return KERN_INVALID_ARGUMENT;
@@ -274,11 +272,11 @@ kern_return_t vm_write(map, address, data, size)
FALSE /* interruptible XXX */);
}
-kern_return_t vm_copy(map, source_address, size, dest_address)
- vm_map_t map;
- vm_address_t source_address;
- vm_size_t size;
- vm_address_t dest_address;
+kern_return_t vm_copy(
+ vm_map_t map,
+ vm_address_t source_address,
+ vm_size_t size,
+ vm_address_t dest_address)
{
vm_map_copy_t copy;
kern_return_t kr;
@@ -306,26 +304,19 @@ kern_return_t vm_copy(map, source_address, size, dest_address)
* Routine: vm_map
*/
kern_return_t vm_map(
- target_map,
- address, size, mask, anywhere,
- memory_object, offset,
- copy,
- cur_protection, max_protection, inheritance)
- vm_map_t target_map;
- vm_offset_t *address;
- vm_size_t size;
- vm_offset_t mask;
- boolean_t anywhere;
- ipc_port_t memory_object;
- vm_offset_t offset;
- boolean_t copy;
- vm_prot_t cur_protection;
- vm_prot_t max_protection;
- vm_inherit_t inheritance;
+ vm_map_t target_map,
+ vm_offset_t *address,
+ vm_size_t size,
+ vm_offset_t mask,
+ boolean_t anywhere,
+ ipc_port_t memory_object,
+ vm_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
{
- register
vm_object_t object;
- register
kern_return_t result;
if ((target_map == VM_MAP_NULL) ||
@@ -414,15 +405,29 @@ kern_return_t vm_map(
*
* [ To unwire the pages, specify VM_PROT_NONE. ]
*/
-kern_return_t vm_wire(host, map, start, size, access)
- host_t host;
- register vm_map_t map;
+kern_return_t vm_wire(port, map, start, size, access)
+ const ipc_port_t port;
+ vm_map_t map;
vm_offset_t start;
vm_size_t size;
vm_prot_t access;
{
- if (host == HOST_NULL)
+ boolean_t priv;
+
+ if (!IP_VALID(port))
+ return KERN_INVALID_HOST;
+
+ ip_lock(port);
+ if (!ip_active(port) ||
+ (ip_kotype(port) != IKOT_HOST_PRIV
+ && ip_kotype(port) != IKOT_HOST))
+ {
+ ip_unlock(port);
return KERN_INVALID_HOST;
+ }
+
+ priv = ip_kotype(port) == IKOT_HOST_PRIV;
+ ip_unlock(port);
if (map == VM_MAP_NULL)
return KERN_INVALID_TASK;
@@ -435,6 +440,10 @@ kern_return_t vm_wire(host, map, start, size, access)
if (projected_buffer_in_range(map, start, start+size))
return(KERN_INVALID_ARGUMENT);
+ /* TODO: make it tunable */
+ if (!priv && access != VM_PROT_NONE && map->user_wired + size > 65536)
+ return KERN_NO_ACCESS;
+
return vm_map_pageable_user(map,
trunc_page(start),
round_page(start+size),