author    Richard Braun <rbraun@sceen.net>    2013-04-20 18:46:19 +0200
committer Richard Braun <rbraun@sceen.net>    2013-04-21 00:20:16 +0200
commit    b106a4816e4d61a1b14e1fe29e0e988986c3b06c (patch)
tree      0f08f973201081e8e1dd09c8321a177b2203ee7e
parent    504501c3eb077ceec4884da2153083154c1ef146 (diff)
kern/kmem: rework slab lists handling
Don't enforce strong ordering of partial slabs. Separating partial slabs from free slabs is already effective against fragmentation, and sorting would sometimes cause pathological scalability issues. In addition, store new slabs (whether free or partial) in LIFO order for better cache usage.
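To make the new policy concrete, below is a minimal, self-contained sketch of the reworked bookkeeping. It uses hypothetical stand-in types (struct slab, struct cache) rather than the x15 sources, omits the statistics counters (nr_free_slabs and friends), and assumes, as the patch implies, that list_insert() places a node at the head of a list:

#include <assert.h>

/* Minimal intrusive doubly linked list in the style of the kernel's list API. */
struct list {
    struct list *prev, *next;
};

static void
list_init(struct list *list)
{
    list->prev = list->next = list;
}

/* Head insertion: the most recently inserted node is found first (LIFO). */
static void
list_insert(struct list *head, struct list *node)
{
    node->prev = head;
    node->next = head->next;
    head->next->prev = node;
    head->next = node;
}

static void
list_remove(struct list *node)
{
    node->prev->next = node->next;
    node->next->prev = node->prev;
}

/* Hypothetical stand-ins for struct kmem_slab and struct kmem_cache. */
struct slab {
    struct list node;
    unsigned int nr_refs;      /* buffers currently allocated from this slab */
};

struct cache {
    struct list free_slabs;    /* nr_refs == 0, unsorted */
    struct list partial_slabs; /* 0 < nr_refs < bufs_per_slab, unsorted */
    unsigned int bufs_per_slab;
};

/*
 * Allocation side: only the free -> partial and partial -> complete
 * transitions touch the lists. A slab that remains partial stays put;
 * there is no sorting pass.
 */
static void
slab_ref(struct cache *cache, struct slab *slab)
{
    assert(slab->nr_refs < cache->bufs_per_slab);
    slab->nr_refs++;

    if (slab->nr_refs == cache->bufs_per_slab) {
        list_remove(&slab->node);   /* complete slabs live on no list */
    } else if (slab->nr_refs == 1) {
        list_remove(&slab->node);
        list_insert(&cache->partial_slabs, &slab->node);
    }
}

/*
 * Free side: a slab becoming free or partial is pushed at the head of
 * its new list, so it is reused first while its lines are still hot.
 */
static void
slab_unref(struct cache *cache, struct slab *slab)
{
    assert(slab->nr_refs != 0);
    slab->nr_refs--;

    if (slab->nr_refs == 0) {
        /* A single-buffer slab was complete, hence on no list */
        if (cache->bufs_per_slab != 1)
            list_remove(&slab->node);

        list_insert(&cache->free_slabs, &slab->node);
    } else if (slab->nr_refs == cache->bufs_per_slab - 1) {
        list_insert(&cache->partial_slabs, &slab->node);
    }
}

int
main(void)
{
    struct cache cache = { .bufs_per_slab = 2 };
    struct slab s0 = { .nr_refs = 0 }, s1 = { .nr_refs = 0 };

    list_init(&cache.free_slabs);
    list_init(&cache.partial_slabs);
    list_insert(&cache.free_slabs, &s0.node);
    list_insert(&cache.free_slabs, &s1.node);

    slab_ref(&cache, &s0);      /* s0: free -> partial */
    slab_ref(&cache, &s1);      /* s1: free -> partial, inserted at head */

    /* LIFO order: s1, touched last, is first on the partial list */
    assert(cache.partial_slabs.next == &s1.node);
    return 0;
}

Note how both paths reduce to at most one list_remove()/list_insert() pair per transition; the sorted-insertion walk removed by this commit has no counterpart here.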
-rw-r--r--    kern/kmem.c    84
-rw-r--r--    kern/kmem_i.h    7
2 files changed, 9 insertions, 82 deletions
diff --git a/kern/kmem.c b/kern/kmem.c
index 57985119..ea75d7d1 100644
--- a/kern/kmem.c
+++ b/kern/kmem.c
@@ -635,7 +635,7 @@ kmem_cache_grow(struct kmem_cache *cache)
    mutex_lock(&cache->lock);

    if (slab != NULL) {
-        list_insert_tail(&cache->free_slabs, &slab->node);
+        list_insert(&cache->free_slabs, &slab->node);
        cache->nr_bufs += cache->bufs_per_slab;
        cache->nr_slabs++;
        cache->nr_free_slabs++;
@@ -679,49 +679,17 @@ kmem_cache_alloc_from_slab(struct kmem_cache *cache)
    slab->nr_refs++;
    cache->nr_objs++;

-    /*
-     * The slab has become complete.
-     */
    if (slab->nr_refs == cache->bufs_per_slab) {
+        /* The slab has become complete */
        list_remove(&slab->node);

        if (slab->nr_refs == 1)
            cache->nr_free_slabs--;
    } else if (slab->nr_refs == 1) {
-        /*
-         * The slab has become partial.
-         */
+        /* The slab has become partial */
        list_remove(&slab->node);
-        list_insert_tail(&cache->partial_slabs, &slab->node);
+        list_insert(&cache->partial_slabs, &slab->node);
        cache->nr_free_slabs--;
-    } else if (!list_singular(&cache->partial_slabs)) {
-        struct list *node;
-        struct kmem_slab *tmp;
-
-        /*
-         * The slab remains partial. If there are more than one partial slabs,
-         * maintain the list sorted.
-         */
-
-        assert(slab->nr_refs > 1);
-
-        for (node = list_prev(&slab->node);
-             !list_end(&cache->partial_slabs, node);
-             node = list_prev(node)) {
-            tmp = list_entry(node, struct kmem_slab, node);
-
-            if (tmp->nr_refs >= slab->nr_refs)
-                break;
-        }
-
-        /*
-         * If the direct neighbor was found, the list is already sorted.
-         * If no slab was found, the slab is inserted at the head of the list.
-         */
-        if (node != list_prev(&slab->node)) {
-            list_remove(&slab->node);
-            list_insert_after(node, &slab->node);
-        }
    }

    return kmem_bufctl_to_buf(bufctl, cache);
@@ -761,51 +729,17 @@ kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
    slab->nr_refs--;
    cache->nr_objs--;

-    /*
-     * The slab has become free.
-     */
    if (slab->nr_refs == 0) {
-        /*
-         * The slab was partial.
-         */
-        if (cache->bufs_per_slab > 1)
+        /* The slab has become free */
+
+        if (cache->bufs_per_slab != 1)
            list_remove(&slab->node);

-        list_insert_tail(&cache->free_slabs, &slab->node);
+        list_insert(&cache->free_slabs, &slab->node);
        cache->nr_free_slabs++;
    } else if (slab->nr_refs == (cache->bufs_per_slab - 1)) {
-        /*
-         * The slab has become partial.
-         */
+        /* The slab has become partial */
        list_insert(&cache->partial_slabs, &slab->node);
-    } else if (!list_singular(&cache->partial_slabs)) {
-        struct list *node;
-        struct kmem_slab *tmp;
-
-        /*
-         * The slab remains partial. If there are more than one partial slabs,
-         * maintain the list sorted.
-         */
-
-        assert(slab->nr_refs > 0);
-
-        for (node = list_next(&slab->node);
-             !list_end(&cache->partial_slabs, node);
-             node = list_next(node)) {
-            tmp = list_entry(node, struct kmem_slab, node);
-
-            if (tmp->nr_refs <= slab->nr_refs)
-                break;
-        }
-
-        /*
-         * If the direct neighbor was found, the list is already sorted.
-         * If no slab was found, the slab is inserted at the tail of the list.
-         */
-        if (node != list_next(&slab->node)) {
-            list_remove(&slab->node);
-            list_insert_before(node, &slab->node);
-        }
    }
}
diff --git a/kern/kmem_i.h b/kern/kmem_i.h
index f52e1152..8ccbe6c7 100644
--- a/kern/kmem_i.h
+++ b/kern/kmem_i.h
@@ -167,13 +167,6 @@ struct kmem_slab {
 * Cache of objects.
 *
 * Locking order : cpu_pool -> cache. CPU pools locking is ordered by CPU ID.
- *
- * The partial slabs list is sorted by slab references. Slabs with a high
- * number of references are placed first on the list to reduce fragmentation.
- * Sorting occurs at insertion/removal of buffers in a slab. As the list
- * is maintained sorted, and the number of references only changes by one,
- * this is a very cheap operation in the average case and the worst (linear)
- * case is very unlikely.
 */
struct kmem_cache {
    /* CPU pool layer */