author    Richard Braun <rbraun@sceen.net>  2018-06-24 10:24:01 +0200
committer Richard Braun <rbraun@sceen.net>  2018-06-24 10:24:01 +0200
commit    381fbc8fb55a20eb1b9793e4934c837d27222c01 (patch)
tree      9822b4b1640564da836acb6294b81a7677fc2dc4
parent    7615ab33c6d9e0b858aa3f183095a83f7a1d0620 (diff)
kern/kmem: add option to disable the CPU layer
-rw-r--r--  kern/Kconfig   | 19
-rw-r--r--  kern/kmem.c    | 86
-rw-r--r--  kern/kmem_i.h  | 12
3 files changed, 90 insertions, 27 deletions
diff --git a/kern/Kconfig b/kern/Kconfig
index fced67c2..7dd04a6a 100644
--- a/kern/Kconfig
+++ b/kern/Kconfig
@@ -104,6 +104,25 @@ config KMEM_DEBUG
---help---
Enable the debugging of the kernel allocator.
+config KMEM_NO_CPU_LAYER
+ bool "Disable the CPU layer of the kernel allocator"
+ depends on SMP
+ default n
+ ---help---
+ The kernel allocator is made up of two layers named
+ the CPU and slab layers. The CPU layer is responsible
+ for caching objects locally on a processor to reduce
+ contention and cache misses. When reporting kernel
+ memory usage, the CPU layer is ignored, and objects
+ stored in it are accounted as allocated, which can
+ cause confusion when checking the state of the system.
+
+ This option disables the CPU layer entirely, so that
+ the reported usage stats match the actual allocator
+ state.
+
+ If unsure, disable (leave the CPU layer enabled).
+
config INIT_DEBUG
bool "Initialization debugging"
default n
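As a sketch (not part of the patch itself) of how this option reaches the code: following the usual Kconfig convention, the symbol above becomes CONFIG_KMEM_NO_CPU_LAYER, which the kern/kmem_i.h hunk further below folds together with CONFIG_SMP into a single internal guard, so that everything related to the CPU layer is compiled conditionally:

/* Sketch of the guard introduced in kern/kmem_i.h: the CPU layer is
 * built only on SMP kernels that have not opted out of it. */
#if defined(CONFIG_SMP) && !defined(CONFIG_KMEM_NO_CPU_LAYER)
#define KMEM_USE_CPU_LAYER
#endif

#ifdef KMEM_USE_CPU_LAYER
/* Per-CPU pool types, array caches and fast paths live here. */
#endif /* KMEM_USE_CPU_LAYER */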
diff --git a/kern/kmem.c b/kern/kmem.c
index b87f4b1c..8856292b 100644
--- a/kern/kmem.c
+++ b/kern/kmem.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010-2017 Richard Braun.
+ * Copyright (c) 2010-2018 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -120,6 +120,8 @@
#define KMEM_ERR_MODIFIED 3 /* Buffer modified while free */
#define KMEM_ERR_REDZONE 4 /* Redzone violation */
+#ifdef KMEM_USE_CPU_LAYER
+
/*
* Available CPU pool types.
*
@@ -140,6 +142,8 @@ static struct kmem_cpu_pool_type kmem_cpu_pool_types[] __read_mostly = {
*/
static struct kmem_cache kmem_cpu_array_caches[ARRAY_SIZE(kmem_cpu_pool_types)];
+#endif /* KMEM_USE_CPU_LAYER */
+
/*
* Cache for off slab data.
*/
@@ -348,6 +352,8 @@ kmem_slab_buf(const struct kmem_slab *slab)
return P2ALIGN((uintptr_t)slab->addr, PAGE_SIZE);
}
+#ifdef KMEM_USE_CPU_LAYER
+
static void
kmem_cpu_pool_init(struct kmem_cpu_pool *cpu_pool, struct kmem_cache *cache)
{
@@ -436,6 +442,8 @@ kmem_cpu_pool_drain(struct kmem_cpu_pool *cpu_pool, struct kmem_cache *cache)
mutex_unlock(&cache->lock);
}
+#endif /* KMEM_USE_CPU_LAYER */
+
static void
kmem_cache_error(struct kmem_cache *cache, void *buf, int error, void *arg)
{
@@ -547,8 +555,7 @@ void
kmem_cache_init(struct kmem_cache *cache, const char *name, size_t obj_size,
size_t align, kmem_ctor_fn_t ctor, int flags)
{
- struct kmem_cpu_pool_type *cpu_pool_type;
- size_t i, buf_size;
+ size_t buf_size;
#ifdef CONFIG_KMEM_DEBUG
cache->flags = KMEM_CF_VERIFY;
@@ -599,15 +606,15 @@ kmem_cache_init(struct kmem_cache *cache, const char *name, size_t obj_size,
kmem_cache_compute_properties(cache, flags);
- for (cpu_pool_type = kmem_cpu_pool_types;
- buf_size <= cpu_pool_type->buf_size;
- cpu_pool_type++);
-
- cache->cpu_pool_type = cpu_pool_type;
+#ifdef KMEM_USE_CPU_LAYER
+ for (cache->cpu_pool_type = kmem_cpu_pool_types;
+ buf_size <= cache->cpu_pool_type->buf_size;
+ cache->cpu_pool_type++);
- for (i = 0; i < ARRAY_SIZE(cache->cpu_pools); i++) {
+ for (size_t i = 0; i < ARRAY_SIZE(cache->cpu_pools); i++) {
kmem_cpu_pool_init(&cache->cpu_pools[i], cache);
}
+#endif /* KMEM_USE_CPU_LAYER */
mutex_lock(&kmem_cache_list_lock);
list_insert_tail(&kmem_cache_list, &cache->node);
@@ -885,10 +892,12 @@ kmem_cache_alloc_verify(struct kmem_cache *cache, void *buf, int construct)
void *
kmem_cache_alloc(struct kmem_cache *cache)
{
- struct kmem_cpu_pool *cpu_pool;
- int filled, verify;
+ bool filled;
void *buf;
+#ifdef KMEM_USE_CPU_LAYER
+ struct kmem_cpu_pool *cpu_pool;
+
thread_pin();
cpu_pool = kmem_cpu_pool_get(cache);
@@ -896,6 +905,8 @@ kmem_cache_alloc(struct kmem_cache *cache)
fast_alloc:
if (likely(cpu_pool->nr_objs > 0)) {
+ bool verify;
+
buf = kmem_cpu_pool_pop(cpu_pool);
verify = (cpu_pool->flags & KMEM_CF_VERIFY);
mutex_unlock(&cpu_pool->lock);
@@ -931,6 +942,7 @@ fast_alloc:
mutex_unlock(&cpu_pool->lock);
thread_unpin();
+#endif /* KMEM_USE_CPU_LAYER */
slab_alloc:
mutex_lock(&cache->lock);
@@ -1024,6 +1036,7 @@ kmem_cache_free_verify(struct kmem_cache *cache, void *buf)
void
kmem_cache_free(struct kmem_cache *cache, void *obj)
{
+#ifdef KMEM_USE_CPU_LAYER
struct kmem_cpu_pool *cpu_pool;
void **array;
@@ -1082,6 +1095,11 @@ fast_free:
}
thread_unpin();
+#else /* KMEM_USE_CPU_LAYER */
+ if (cache->flags & KMEM_CF_VERIFY) {
+ kmem_cache_free_verify(cache, obj);
+ }
+#endif /* KMEM_USE_CPU_LAYER */
mutex_lock(&cache->lock);
kmem_cache_free_to_slab(cache, obj);
@@ -1112,13 +1130,16 @@ kmem_cache_info(struct kmem_cache *cache)
"kmem: nr_slabs: %lu\n"
"kmem: nr_free_slabs: %lu\n"
"kmem: buftag_dist: %zu\n"
- "kmem: redzone_pad: %zu\n"
- "kmem: cpu_pool_size: %d\n", cache->flags, flags_str, cache->obj_size,
+ "kmem: redzone_pad: %zu\n",
+ cache->flags, flags_str, cache->obj_size,
cache->align, cache->buf_size, cache->bufctl_dist,
cache->slab_size, cache->color_max, cache->bufs_per_slab,
cache->nr_objs, cache->nr_bufs, cache->nr_slabs,
- cache->nr_free_slabs, cache->buftag_dist, cache->redzone_pad,
- cache->cpu_pool_type->array_size);
+ cache->nr_free_slabs, cache->buftag_dist, cache->redzone_pad);
+
+#ifdef KMEM_USE_CPU_LAYER
+ printf("kmem: cpu_pool_size: %d\n", cache->cpu_pool_type->array_size);
+#endif /* KMEM_USE_CPU_LAYER */
mutex_unlock(&cache->lock);
}
@@ -1186,20 +1207,15 @@ INIT_OP_DEFINE(kmem_setup_shell,
#endif /* CONFIG_SHELL */
-static int __init
-kmem_bootstrap(void)
+#ifdef KMEM_USE_CPU_LAYER
+static void
+kmem_bootstrap_cpu(void)
{
struct kmem_cpu_pool_type *cpu_pool_type;
char name[KMEM_NAME_SIZE];
- size_t i, size;
+ size_t size;
- /* Make sure a bufctl can always be stored in a buffer */
- assert(sizeof(union kmem_bufctl) <= KMEM_ALIGN_MIN);
-
- list_init(&kmem_cache_list);
- mutex_init(&kmem_cache_list_lock);
-
- for (i = 0; i < ARRAY_SIZE(kmem_cpu_pool_types); i++) {
+ for (size_t i = 0; i < ARRAY_SIZE(kmem_cpu_pool_types); i++) {
cpu_pool_type = &kmem_cpu_pool_types[i];
cpu_pool_type->array_cache = &kmem_cpu_array_caches[i];
sprintf(name, "kmem_cpu_array_%d", cpu_pool_type->array_size);
@@ -1207,6 +1223,24 @@ kmem_bootstrap(void)
kmem_cache_init(cpu_pool_type->array_cache, name, size,
cpu_pool_type->array_align, NULL, 0);
}
+}
+#endif /* KMEM_USE_CPU_LAYER */
+
+static int __init
+kmem_bootstrap(void)
+{
+ char name[KMEM_NAME_SIZE];
+ size_t size;
+
+ /* Make sure a bufctl can always be stored in a buffer */
+ assert(sizeof(union kmem_bufctl) <= KMEM_ALIGN_MIN);
+
+ list_init(&kmem_cache_list);
+ mutex_init(&kmem_cache_list_lock);
+
+#ifdef KMEM_USE_CPU_LAYER
+ kmem_bootstrap_cpu();
+#endif /* KMEM_USE_CPU_LAYER */
/*
* Prevent off slab data for the slab cache to avoid infinite recursion.
@@ -1216,7 +1250,7 @@ kmem_bootstrap(void)
size = 1 << KMEM_CACHES_FIRST_ORDER;
- for (i = 0; i < ARRAY_SIZE(kmem_caches); i++) {
+ for (size_t i = 0; i < ARRAY_SIZE(kmem_caches); i++) {
sprintf(name, "kmem_%zu", size);
kmem_cache_init(&kmem_caches[i], name, size, 0, NULL, 0);
size <<= 1;
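A simplified sketch (not part of the patch itself) of the allocation path after this change, written as it would sit inside kern/kmem.c, which already includes the needed headers. Locking, KMEM_CF_VERIFY handling and CPU pool refill are omitted, and kmem_cache_alloc_from_slab stands in for the slab-layer allocation taken under the cache lock. With the CPU layer compiled out, the fast path disappears and every allocation goes straight to the slab layer:

/* Illustration only: condensed control flow of kmem_cache_alloc(). */
void *
kmem_cache_alloc_sketch(struct kmem_cache *cache)
{
#ifdef KMEM_USE_CPU_LAYER
    struct kmem_cpu_pool *cpu_pool;

    /* Fast path: pop a pre-constructed object from this CPU's pool. */
    thread_pin();
    cpu_pool = kmem_cpu_pool_get(cache);

    if (cpu_pool->nr_objs > 0) {
        void *buf = kmem_cpu_pool_pop(cpu_pool);
        thread_unpin();
        return buf;
    }

    thread_unpin();
#endif /* KMEM_USE_CPU_LAYER */

    /* Slow path, and the only path when the CPU layer is disabled:
     * allocate directly from a slab. */
    return kmem_cache_alloc_from_slab(cache);
}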
diff --git a/kern/kmem_i.h b/kern/kmem_i.h
index f1705965..64da686a 100644
--- a/kern/kmem_i.h
+++ b/kern/kmem_i.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010-2017 Richard Braun.
+ * Copyright (c) 2010-2018 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,6 +25,12 @@
#include <kern/mutex.h>
#include <machine/cpu.h>
+#if defined(CONFIG_SMP) && !defined(CONFIG_KMEM_NO_CPU_LAYER)
+#define KMEM_USE_CPU_LAYER
+#endif
+
+#ifdef KMEM_USE_CPU_LAYER
+
/*
* Per-processor cache of pre-constructed objects.
*
@@ -52,6 +58,8 @@ struct kmem_cpu_pool_type {
struct kmem_cache *array_cache;
};
+#endif /* KMEM_USE_CPU_LAYER */
+
/*
* Buffer descriptor.
*
@@ -169,9 +177,11 @@ struct kmem_slab {
* Locking order : cpu_pool -> cache. CPU pools locking is ordered by CPU ID.
*/
struct kmem_cache {
+#ifdef KMEM_USE_CPU_LAYER
/* CPU pool layer */
struct kmem_cpu_pool cpu_pools[CONFIG_MAX_CPUS];
struct kmem_cpu_pool_type *cpu_pool_type;
+#endif /* KMEM_USE_CPU_LAYER */
/* Slab layer */
struct mutex lock;
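For readability, a reconstruction (not part of the patch itself) of the head of struct kmem_cache with the hunk above applied; the remaining slab-layer members are unchanged and elided:

/* Post-patch view: the per-CPU pool members only exist when the
 * CPU layer is compiled in. */
struct kmem_cache {
#ifdef KMEM_USE_CPU_LAYER
    /* CPU pool layer */
    struct kmem_cpu_pool cpu_pools[CONFIG_MAX_CPUS];
    struct kmem_cpu_pool_type *cpu_pool_type;
#endif /* KMEM_USE_CPU_LAYER */

    /* Slab layer */
    struct mutex lock;
    /* ... */
};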