author     Richard Braun <rbraun@sceen.net>    2017-01-11 21:31:53 +0100
committer  Richard Braun <rbraun@sceen.net>    2017-01-11 21:31:53 +0100
commit     135f428f0a50eb9988f0b40a60357dfedbcc7f18 (patch)
tree       38eb76350879b55227295a2fa31c7bdaffae08f9 /kern/kmem_i.h
parent     0a7bb2b9e2441cd0610a0687f39a38b5c66a6f46 (diff)
kern/kmem: rework slab allocation
Allocating slabs only from the page allocator is likely to cause fragmentation. Instead, allocate larger-than-page slabs from kernel virtual memory, and page-sized slabs from the page allocator.
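As a rough sketch of the policy described above (not the actual kern/kmem code: the helper names kmem_pagealloc_page() and kmem_pagealloc_virtual(), their signatures, and the PAGE_SIZE value are assumptions), the slab backing-store choice can be pictured like this:

    /*
     * Sketch only: helper names, signatures and PAGE_SIZE are assumed,
     * not taken from kern/kmem.c.
     */
    #include <stddef.h>

    #define PAGE_SIZE 4096 /* assumed page size */

    /* Assumed helper: allocate one page from the page allocator. */
    void *kmem_pagealloc_page(void);

    /* Assumed helper: allocate a multi-page range from kernel virtual memory. */
    void *kmem_pagealloc_virtual(size_t size);

    /*
     * Choose the backing store for a new slab: page-sized slabs come from
     * the page allocator, larger-than-page slabs from kernel virtual
     * memory, avoiding the fragmentation caused by serving all slabs from
     * the page allocator alone.
     */
    static void *
    kmem_slab_alloc_pages(size_t slab_size)
    {
        if (slab_size <= PAGE_SIZE) {
            return kmem_pagealloc_page();
        }

        return kmem_pagealloc_virtual(slab_size);
    }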
Diffstat (limited to 'kern/kmem_i.h')
-rw-r--r--    kern/kmem_i.h    4
1 file changed, 1 insertion, 3 deletions
diff --git a/kern/kmem_i.h b/kern/kmem_i.h
index 9a0973ba..08b11c54 100644
--- a/kern/kmem_i.h
+++ b/kern/kmem_i.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2014 Richard Braun.
+ * Copyright (c) 2010-2017 Richard Braun.
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -160,7 +160,6 @@ struct kmem_slab {
  */
 #define KMEM_CF_SLAB_EXTERNAL 0x1 /* Slab data is off slab */
 #define KMEM_CF_VERIFY 0x2 /* Debugging facilities enabled */
-#define KMEM_CF_DIRECT 0x4 /* Quick buf-to-slab lookup */

 /*
  * Cache of objects.
@@ -182,7 +181,6 @@ struct kmem_cache {
     size_t align;
     size_t buf_size; /* Aligned object size */
     size_t bufctl_dist; /* Distance from buffer to bufctl */
-    unsigned int slab_order;
     size_t slab_size;
     size_t color;
     size_t color_max;