author    Richard Braun <rbraun@sceen.net>    2017-01-11 22:20:49 +0100
committer Richard Braun <rbraun@sceen.net>    2017-01-11 23:14:22 +0100
commit    436cf120107a79f0cd27d5ea4929a0a16dd8177f (patch)
tree      219765b359cd8104dcd902fb4c25ab684f4b5613
parent    135f428f0a50eb9988f0b40a60357dfedbcc7f18 (diff)
vm/vm_page: provide accessors to private data
Make per-page private data generic, provide accessors, and make the slab allocator use them.
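For illustration, a minimal sketch of how a client other than the slab allocator might use the new generic accessors; only vm_page_set_priv() and vm_page_get_priv() come from this change, while struct my_desc and its helpers are hypothetical, and the include path simply mirrors the tree layout shown below.

/*
 * Illustrative only: a hypothetical client stashing its own descriptor
 * in the generic per-page private pointer, mirroring what the slab
 * allocator does with its struct kmem_slab in kern/kmem.c below.
 */
#include <vm/vm_page.h>

struct my_desc {
    unsigned long refs;    /* hypothetical per-page bookkeeping */
};

static void
my_desc_attach(struct vm_page *page, struct my_desc *desc)
{
    /*
     * kern/kmem.c asserts the pointer is still NULL before setting it,
     * i.e. a page carries at most one piece of private data at a time.
     */
    vm_page_set_priv(page, desc);
}

static struct my_desc *
my_desc_lookup(const struct vm_page *page)
{
    /* vm_page_get_priv() returns void *, so no cast is needed in C. */
    return vm_page_get_priv(page);
}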
-rw-r--r--   kern/kmem.c      6
-rw-r--r--   vm/vm_page.h    17
2 files changed, 19 insertions(+), 4 deletions(-)
diff --git a/kern/kmem.c b/kern/kmem.c
index bda5dcbf..0f3b4962 100644
--- a/kern/kmem.c
+++ b/kern/kmem.c
@@ -667,8 +667,8 @@ kmem_cache_register(struct kmem_cache *cache, struct kmem_slab *slab)
assert(page != NULL);
assert((virtual && vm_page_type(page) == VM_PAGE_KERNEL)
|| (!virtual && vm_page_type(page) == VM_PAGE_KMEM));
- assert(page->slab_priv == NULL);
- page->slab_priv = slab;
+ assert(vm_page_get_priv(page) == NULL);
+ vm_page_set_priv(page, slab);
}
}
@@ -708,7 +708,7 @@ kmem_cache_lookup(struct kmem_cache *cache, void *buf)
return NULL;
}
- slab = page->slab_priv;
+ slab = vm_page_get_priv(page);
assert((unsigned long)buf >= kmem_slab_buf(slab));
assert((unsigned long)buf < (kmem_slab_buf(slab) + cache->slab_size));
return slab;
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 097bcc6b..1ce172f2 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -76,7 +76,7 @@ struct vm_page {
unsigned short seg_index;
unsigned short order;
phys_addr_t phys_addr;
- void *slab_priv;
+ void *priv;
};
static inline unsigned short
@@ -122,6 +122,21 @@ vm_page_direct_ptr(const struct vm_page *page)
}
/*
+ * Associate private data with a page.
+ */
+static inline void
+vm_page_set_priv(struct vm_page *page, void *priv)
+{
+ page->priv = priv;
+}
+
+static inline void *
+vm_page_get_priv(const struct vm_page *page)
+{
+ return page->priv;
+}
+
+/*
* Load physical memory into the vm_page module at boot time.
*
* All addresses must be page-aligned. Segments can be loaded in any order.