author    Richard Braun <rbraun@sceen.net>    2013-05-16 00:11:51 +0200
committer Richard Braun <rbraun@sceen.net>    2013-05-16 00:11:51 +0200
commit    b7b82ee44ed8c4c2b0097a9f9fa22b0f017ad50a (patch)
tree      8eb86a97de29915042d35901ee530a9e575dc5f8 /kern/kmem.c
parent    2b89ca9ed9d8188a5fdff8ceaabc07fe86b43ad3 (diff)
kern/kmem: reduce fragmentation
This reverts a change introduced when reworking slab list handling, which made the allocator store slabs in LIFO order regardless of their reference count. While that is fine for free slabs, it actually increased fragmentation for partial slabs.
Diffstat (limited to 'kern/kmem.c')
-rw-r--r--  kern/kmem.c | 7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/kern/kmem.c b/kern/kmem.c
index 46c9c10b..09870574 100644
--- a/kern/kmem.c
+++ b/kern/kmem.c
@@ -686,9 +686,12 @@ kmem_cache_alloc_from_slab(struct kmem_cache *cache)
         if (slab->nr_refs == 1)
             cache->nr_free_slabs--;
     } else if (slab->nr_refs == 1) {
-        /* The slab has become partial */
+        /*
+         * The slab has become partial. Insert the new slab at the end of
+         * the list to reduce fragmentation.
+         */
         list_remove(&slab->node);
-        list_insert_head(&cache->partial_slabs, &slab->node);
+        list_insert_tail(&cache->partial_slabs, &slab->node);
         cache->nr_free_slabs--;
     }