summary refs log tree commit diff
path: root/vm
diff options
context:
space:
mode:
Diffstat (limited to 'vm')
-rw-r--r--  vm/vm_anon.c    12
-rw-r--r--  vm/vm_map.c     12
-rw-r--r--  vm/vm_object.c  36
-rw-r--r--  vm/vm_object.h   6
4 files changed, 57 insertions(+), 9 deletions(-)
diff --git a/vm/vm_anon.c b/vm/vm_anon.c
index e6d1385f..ebee5fcc 100644
--- a/vm/vm_anon.c
+++ b/vm/vm_anon.c
@@ -113,16 +113,22 @@ vm_anon_get(struct vm_object *object, uint64_t offset,
struct vm_page **pagep)
{
struct vm_page *page;
+ int error;
page = vm_phys_alloc(0);
if (page == NULL)
return ERROR_NOMEM;
- /* TODO Insert page in object */
- (void)object;
- (void)offset;
+ error = vm_object_add(object, offset, page);
+
+ if (error)
+ goto error_object;
*pagep = page;
return 0;
+
+error_object:
+ vm_phys_free(page, 0);
+ return error;
}
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 50e9c361..6b43a985 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -786,6 +786,10 @@ vm_map_enter(struct vm_map *map, struct vm_object *object, uint64_t offset,
struct vm_map_request request;
int error;
+ /* XXX For now, prevent managed mappings in the kernel map */
+ if ((map == kernel_map) && (object != NULL))
+ return ERROR_INVAL;
+
mutex_lock(&map->lock);
error = vm_map_prepare(map, object, offset, *startp, size, align, flags,
@@ -922,6 +926,8 @@ vm_map_fault(struct vm_map *map, unsigned long addr, int access)
uint64_t offset;
int error, prot;
+ assert(map != kernel_map);
+
addr = vm_page_trunc(addr);
mutex_lock(&map->lock);
@@ -944,7 +950,10 @@ vm_map_fault(struct vm_map *map, unsigned long addr, int access)
offset = entry->offset + (addr - entry->start);
page = vm_object_get(object, offset);
- if (page == NULL) {
+ if (page != NULL)
+ printk("vm_map: fault: cache hit\n");
+ else {
+ printk("vm_map: fault: cache miss\n");
/* TODO Get neighbor pages */
error = object->pager->get(object, offset, &page);
@@ -960,7 +969,6 @@ vm_map_fault(struct vm_map *map, unsigned long addr, int access)
panic("vm_map: unable to create physical mapping");
pmap_update(map->pmap, addr, addr + PAGE_SIZE);
- error = 0;
out:
mutex_unlock(&map->lock);
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 2fa2caae..9e91aad9 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -15,19 +15,47 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <kern/assert.h>
+#include <kern/error.h>
+#include <kern/llsync.h>
+#include <kern/mutex.h>
+#include <kern/param.h>
#include <kern/rdxtree.h>
#include <kern/stddef.h>
#include <kern/stdint.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
+int
+vm_object_add(struct vm_object *object, uint64_t offset, struct vm_page *page)
+{
+ int error;
+
+ mutex_lock(&object->lock);
+
+ error = rdxtree_insert(&object->pages, offset >> PAGE_SHIFT, page);
+
+ if (!error)
+ object->nr_pages++;
+
+ mutex_unlock(&object->lock);
+
+ assert(!error || (error == ERROR_NOMEM));
+
+ return error;
+}
+
struct vm_page *
vm_object_get(const struct vm_object *object, uint64_t offset)
{
- (void)object;
- (void)offset;
+ struct vm_page *page;
+
+ llsync_read_enter();
+
+ /* TODO Handle page state changes */
+ page = rdxtree_lookup(&object->pages, offset >> PAGE_SHIFT);
- /* TODO Bump radix tree key size to 64-bits */
+ llsync_read_leave();
- return NULL;
+ return page;
}
diff --git a/vm/vm_object.h b/vm/vm_object.h
index 578720ee..5796846c 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -64,6 +64,12 @@ vm_object_init(struct vm_object *object, struct vm_object_pager *pager)
}
/*
+ * Add a page at offset inside an object.
+ */
+int vm_object_add(struct vm_object *object, uint64_t offset,
+ struct vm_page *page);
+
+/*
* Get the page at offset inside an object, or NULL if none is found.
*/
struct vm_page * vm_object_get(const struct vm_object *object, uint64_t offset);