Diffstat (limited to 'kern')
-rw-r--r--  kern/Kconfig        83
-rw-r--r--  kern/Makefile       42
-rw-r--r--  kern/clock.h         2
-rw-r--r--  kern/cpumap.h       28
-rw-r--r--  kern/kernel.c        4
-rw-r--r--  kern/kernel.h        4
-rw-r--r--  kern/kmem.c          4
-rw-r--r--  kern/kmem_i.h        2
-rw-r--r--  kern/log.c           4
-rw-r--r--  kern/mutex.h         6
-rw-r--r--  kern/mutex_types.h   6
-rw-r--r--  kern/percpu.c        4
-rw-r--r--  kern/percpu.h        4
-rw-r--r--  kern/shell.h         6
-rw-r--r--  kern/shutdown.c      4
-rw-r--r--  kern/spinlock.c      2
-rw-r--r--  kern/syscnt.c        4
-rw-r--r--  kern/task.c          4
-rw-r--r--  kern/thread.c       22
-rw-r--r--  kern/thread.h        3
-rw-r--r--  kern/work.c          2
-rw-r--r--  kern/xcall.c         2
22 files changed, 184 insertions, 58 deletions
diff --git a/kern/Kconfig b/kern/Kconfig
new file mode 100644
index 00000000..df0cbacb
--- /dev/null
+++ b/kern/Kconfig
@@ -0,0 +1,83 @@
+menu "General setup"
+
+config MULTIPROCESSOR
+ bool "Multiprocessor support"
+ default y
+ ---help---
+ Enable support for machines with multiple processors.
+
+config MAX_CPUS
+ int "Maximum number of supported CPUs" if MULTIPROCESSOR
+ range 2 512 if MULTIPROCESSOR
+ default "1" if !MULTIPROCESSOR
+ default "128" if MULTIPROCESSOR
+ ---help---
+ Maximum number of supported processors.
+
+config CLOCK_FREQ
+ int "Low resolution clock frequency"
+ range 100 1000
+ default 200
+ ---help---
+ The low resolution clock frequency determines how often low
+ resolution clocks interrupt processors. These clocks drive
+ the timer system. Low values improve throughput but increase
+ latencies, whereas high values reduce latencies at the cost of
+ throughput.
+
+ The value must evenly divide 1000, leaving no remainder.
+
+ Recommended values are 100 for throughput, 1000 for low
+ latencies, and 200 or 250 for a good balance between throughput
+ and latencies.
+
+choice
+ prompt "Mutex implementation"
+ default MUTEX_PLAIN
+ ---help---
+ A mutex is a sleeping synchronization object used throughout the
+ kernel and available to kernel applications. As a result, this
+ option affects all mutex users.
+
+ If in doubt, choose the plain implementation.
+
+config MUTEX_ADAPTIVE
+ bool "Adaptive spinning mutex"
+ ---help---
+ An adaptive spinning mutex spins instead of sleeping while the owner
+ is running, in the hope that the critical section is short and the
+ mutex will be unlocked soon, avoiding expensive sleep/wakeup
+ operations.
+ This implementation should improve overall performance at the cost
+ of increased latencies.
+
+config MUTEX_PI
+ bool "Mutex with priority inheritance"
+ ---help---
+ Real-time mutex with priority inheritance. This implementation
+ should improve latencies at the cost of overall performance.
+
+config MUTEX_PLAIN
+ bool "Plain mutex"
+ ---help---
+ Default implementation, immediately sleeping on contention.
+
+endchoice
+
+config SHELL
+ bool "Embedded shell"
+ default n
+ ---help---
+ Enable the embedded shell.
+
+ The embedded shell is mostly used for diagnostics.
+
+config THREAD_STACK_GUARD
+ bool "Thread stack guard pages"
+ ---help---
+ Enable the use of guard pages around kernel thread stacks to catch
+ overflows. Note that this feature wastes precious kernel virtual
+ memory and has some overhead during thread creation and destruction.
+
+ If unsure, disable.
+
+endmenu
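
The options above are consumed throughout the rest of the diff as CONFIG_* preprocessor macros. Below is a minimal sketch of the generated configuration header this is assumed to rely on (the header's name and the generation step are not part of this commit); disabled boolean options are simply left undefined, which is what the #ifdef CONFIG_SHELL and #ifdef CONFIG_THREAD_STACK_GUARD tests in later hunks depend on.

    #include <stdio.h>

    /* Hypothetical generated configuration header content: each enabled
     * Kconfig option becomes a CONFIG_* macro, disabled booleans stay
     * undefined. Values below match the defaults declared above. */
    #define CONFIG_MULTIPROCESSOR 1
    #define CONFIG_MAX_CPUS 128
    #define CONFIG_CLOCK_FREQ 200
    #define CONFIG_MUTEX_PLAIN 1
    /* CONFIG_SHELL left undefined: the embedded shell is compiled out. */

    int main(void)
    {
    #ifdef CONFIG_SHELL
        printf("shell built in\n");
    #else
        printf("shell compiled out, %d CPUs max, clock at %d Hz\n",
               CONFIG_MAX_CPUS, CONFIG_CLOCK_FREQ);
    #endif
        return 0;
    }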
diff --git a/kern/Makefile b/kern/Makefile
new file mode 100644
index 00000000..0aa96fc3
--- /dev/null
+++ b/kern/Makefile
@@ -0,0 +1,42 @@
+x15_SOURCES-y += \
+ kern/arg.c \
+ kern/bitmap.c \
+ kern/cbuf.c \
+ kern/clock.c \
+ kern/condition.c \
+ kern/console.c \
+ kern/cpumap.c \
+ kern/error.c \
+ kern/fmt.c \
+ kern/init.c \
+ kern/intr.c \
+ kern/kernel.c \
+ kern/kmem.c \
+ kern/llsync.c \
+ kern/log.c \
+ kern/mutex.c \
+ kern/panic.c \
+ kern/percpu.c \
+ kern/plist.c \
+ kern/printf.c \
+ kern/rbtree.c \
+ kern/rdxtree.c \
+ kern/rtmutex.c \
+ kern/semaphore.c \
+ kern/shutdown.c \
+ kern/sleepq.c \
+ kern/spinlock.c \
+ kern/sref.c \
+ kern/string.c \
+ kern/syscnt.c \
+ kern/task.c \
+ kern/thread.c \
+ kern/timer.c \
+ kern/turnstile.c \
+ kern/work.c \
+ kern/xcall.c
+
+x15_SOURCES-$(CONFIG_SHELL) += kern/shell.c
+
+x15_SOURCES-$(CONFIG_MUTEX_ADAPTIVE) += kern/mutex/mutex_adaptive.c
+x15_SOURCES-$(CONFIG_MUTEX_PLAIN) += kern/mutex/mutex_plain.c
diff --git a/kern/clock.h b/kern/clock.h
index fa48a477..854c146f 100644
--- a/kern/clock.h
+++ b/kern/clock.h
@@ -32,7 +32,7 @@
/*
* Clock frequency.
*/
-#define CLOCK_FREQ X15_CLOCK_FREQ
+#define CLOCK_FREQ CONFIG_CLOCK_FREQ
#if (CLOCK_FREQ < 100) || (CLOCK_FREQ > 1000) || (1000 % CLOCK_FREQ) != 0
#error "invalid clock frequency"
diff --git a/kern/cpumap.h b/kern/cpumap.h
index fd07afc1..89873b52 100644
--- a/kern/cpumap.h
+++ b/kern/cpumap.h
@@ -30,31 +30,31 @@
#include <kern/init.h>
struct cpumap {
- BITMAP_DECLARE(cpus, X15_MAX_CPUS);
+ BITMAP_DECLARE(cpus, CONFIG_MAX_CPUS);
};
static inline void
cpumap_zero(struct cpumap *cpumap)
{
- bitmap_zero(cpumap->cpus, X15_MAX_CPUS);
+ bitmap_zero(cpumap->cpus, CONFIG_MAX_CPUS);
}
static inline void
cpumap_fill(struct cpumap *cpumap)
{
- bitmap_fill(cpumap->cpus, X15_MAX_CPUS);
+ bitmap_fill(cpumap->cpus, CONFIG_MAX_CPUS);
}
static inline void
cpumap_copy(struct cpumap *dest, const struct cpumap *src)
{
- bitmap_copy(dest->cpus, src->cpus, X15_MAX_CPUS);
+ bitmap_copy(dest->cpus, src->cpus, CONFIG_MAX_CPUS);
}
static inline int
cpumap_cmp(const struct cpumap *a, const struct cpumap *b)
{
- return bitmap_cmp(a->cpus, b->cpus, X15_MAX_CPUS);
+ return bitmap_cmp(a->cpus, b->cpus, CONFIG_MAX_CPUS);
}
static inline void
@@ -90,50 +90,50 @@ cpumap_test(const struct cpumap *cpumap, int index)
static inline void
cpumap_and(struct cpumap *a, const struct cpumap *b)
{
- bitmap_and(a->cpus, b->cpus, X15_MAX_CPUS);
+ bitmap_and(a->cpus, b->cpus, CONFIG_MAX_CPUS);
}
static inline void
cpumap_or(struct cpumap *a, const struct cpumap *b)
{
- bitmap_or(a->cpus, b->cpus, X15_MAX_CPUS);
+ bitmap_or(a->cpus, b->cpus, CONFIG_MAX_CPUS);
}
static inline void
cpumap_xor(struct cpumap *a, const struct cpumap *b)
{
- bitmap_xor(a->cpus, b->cpus, X15_MAX_CPUS);
+ bitmap_xor(a->cpus, b->cpus, CONFIG_MAX_CPUS);
}
static inline int
cpumap_find_next(const struct cpumap *cpumap, int index)
{
- return bitmap_find_next(cpumap->cpus, X15_MAX_CPUS, index);
+ return bitmap_find_next(cpumap->cpus, CONFIG_MAX_CPUS, index);
}
static inline int
cpumap_find_first(const struct cpumap *cpumap)
{
- return bitmap_find_first(cpumap->cpus, X15_MAX_CPUS);
+ return bitmap_find_first(cpumap->cpus, CONFIG_MAX_CPUS);
}
static inline int
cpumap_find_next_zero(const struct cpumap *cpumap, int index)
{
- return bitmap_find_next_zero(cpumap->cpus, X15_MAX_CPUS, index);
+ return bitmap_find_next_zero(cpumap->cpus, CONFIG_MAX_CPUS, index);
}
static inline int
cpumap_find_first_zero(const struct cpumap *cpumap)
{
- return bitmap_find_first_zero(cpumap->cpus, X15_MAX_CPUS);
+ return bitmap_find_first_zero(cpumap->cpus, CONFIG_MAX_CPUS);
}
#define cpumap_for_each(cpumap, index) \
- bitmap_for_each((cpumap)->cpus, X15_MAX_CPUS, index)
+ bitmap_for_each((cpumap)->cpus, CONFIG_MAX_CPUS, index)
#define cpumap_for_each_zero(cpumap, index) \
- bitmap_for_each_zero((cpumap)->cpus, X15_MAX_CPUS, index)
+ bitmap_for_each_zero((cpumap)->cpus, CONFIG_MAX_CPUS, index)
/*
* Return a cpumap representing all active processors.
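
Every cpumap operation above simply delegates to the bitmap layer with CONFIG_MAX_CPUS as the bit count, i.e. one bit per possible processor. The following self-contained approximation shows the underlying idea with plain C bit arrays; it deliberately avoids the kernel's bitmap_* API, and MAX_CPUS is only a stand-in for CONFIG_MAX_CPUS.

    #include <limits.h>
    #include <stdio.h>

    #define MAX_CPUS 128    /* stand-in for CONFIG_MAX_CPUS */

    #define BITS_PER_LONG   (CHAR_BIT * sizeof(long))
    #define BITMAP_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Minimal stand-in for the BITMAP_DECLARE()-based cpumap. */
    struct cpumap {
        unsigned long cpus[BITMAP_LONGS(MAX_CPUS)];
    };

    static void cpumap_set(struct cpumap *map, int cpu)
    {
        map->cpus[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
    }

    static int cpumap_test(const struct cpumap *map, int cpu)
    {
        return (map->cpus[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
        struct cpumap map = { { 0 } };

        cpumap_set(&map, 0);
        cpumap_set(&map, 65);

        for (int cpu = 0; cpu < MAX_CPUS; cpu++) {
            if (cpumap_test(&map, cpu)) {
                printf("cpu %d set\n", cpu);
            }
        }

        return 0;
    }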
diff --git a/kern/kernel.c b/kern/kernel.c
index 755f42e6..418793d7 100644
--- a/kern/kernel.c
+++ b/kern/kernel.c
@@ -30,9 +30,9 @@ kernel_main(void)
init_setup();
vm_page_log_info();
-#ifdef X15_RUN_TEST_MODULE
+#ifdef CONFIG_TEST_MODULE
test_setup();
-#endif /* X15_RUN_TEST_MODULE */
+#endif /* CONFIG_TEST_MODULE */
/*
* Enabling application processors is done late in the boot process for
diff --git a/kern/kernel.h b/kern/kernel.h
index 22cae43b..f14b95b5 100644
--- a/kern/kernel.h
+++ b/kern/kernel.h
@@ -23,8 +23,8 @@
/*
* Kernel properties.
*/
-#define KERNEL_NAME PACKAGE_NAME
-#define KERNEL_VERSION PACKAGE_VERSION
+#define KERNEL_NAME "x15"
+#define KERNEL_VERSION CONFIG_KERNEL_VERSION
/*
* Machine-independent entry point.
diff --git a/kern/kmem.c b/kern/kmem.c
index ab33a86a..99664fc1 100644
--- a/kern/kmem.c
+++ b/kern/kmem.c
@@ -1123,7 +1123,7 @@ kmem_cache_info(struct kmem_cache *cache)
mutex_unlock(&cache->lock);
}
-#ifdef X15_ENABLE_SHELL
+#ifdef CONFIG_SHELL
static struct kmem_cache *
kmem_lookup_cache(const char *name)
@@ -1184,7 +1184,7 @@ INIT_OP_DEFINE(kmem_setup_shell,
INIT_OP_DEP(shell_setup, true),
INIT_OP_DEP(thread_setup, true));
-#endif /* X15_ENABLE_SHELL */
+#endif /* CONFIG_SHELL */
static int __init
kmem_bootstrap(void)
diff --git a/kern/kmem_i.h b/kern/kmem_i.h
index beae6c45..0c0afd30 100644
--- a/kern/kmem_i.h
+++ b/kern/kmem_i.h
@@ -170,7 +170,7 @@ struct kmem_slab {
*/
struct kmem_cache {
/* CPU pool layer */
- struct kmem_cpu_pool cpu_pools[X15_MAX_CPUS];
+ struct kmem_cpu_pool cpu_pools[CONFIG_MAX_CPUS];
struct kmem_cpu_pool_type *cpu_pool_type;
/* Slab layer */
diff --git a/kern/log.c b/kern/log.c
index 83562206..3b1336e3 100644
--- a/kern/log.c
+++ b/kern/log.c
@@ -334,7 +334,7 @@ log_run(void *arg)
}
}
-#ifdef X15_ENABLE_SHELL
+#ifdef CONFIG_SHELL
static void
log_dump(unsigned int level)
@@ -412,7 +412,7 @@ INIT_OP_DEFINE(log_setup_shell,
INIT_OP_DEP(log_setup, true),
INIT_OP_DEP(shell_setup, true));
-#endif /* X15_ENABLE_SHELL */
+#endif /* CONFIG_SHELL */
static int __init
log_setup(void)
diff --git a/kern/mutex.h b/kern/mutex.h
index c0a2c6e5..f152d317 100644
--- a/kern/mutex.h
+++ b/kern/mutex.h
@@ -25,11 +25,11 @@
#include <stdint.h>
-#if defined(X15_USE_MUTEX_ADAPTIVE)
+#if defined(CONFIG_MUTEX_ADAPTIVE)
#include <kern/mutex/mutex_adaptive_i.h>
-#elif defined(X15_USE_MUTEX_PI)
+#elif defined(CONFIG_MUTEX_PI)
#include <kern/mutex/mutex_pi_i.h>
-#elif defined(X15_USE_MUTEX_PLAIN)
+#elif defined(CONFIG_MUTEX_PLAIN)
#include <kern/mutex/mutex_plain_i.h>
#else
#error "unknown mutex implementation"
diff --git a/kern/mutex_types.h b/kern/mutex_types.h
index f0f8240c..574f4759 100644
--- a/kern/mutex_types.h
+++ b/kern/mutex_types.h
@@ -21,11 +21,11 @@
#ifndef _KERN_MUTEX_TYPES_H
#define _KERN_MUTEX_TYPES_H
-#if defined(X15_USE_MUTEX_ADAPTIVE)
+#if defined(CONFIG_MUTEX_ADAPTIVE)
#include <kern/mutex/mutex_adaptive_types.h>
-#elif defined(X15_USE_MUTEX_PI)
+#elif defined(CONFIG_MUTEX_PI)
#include <kern/mutex/mutex_pi_types.h>
-#elif defined(X15_USE_MUTEX_PLAIN)
+#elif defined(CONFIG_MUTEX_PLAIN)
#include <kern/mutex/mutex_plain_types.h>
#else
#error "unknown mutex implementation"
diff --git a/kern/percpu.c b/kern/percpu.c
index 7621bb29..62c3d22e 100644
--- a/kern/percpu.c
+++ b/kern/percpu.c
@@ -30,7 +30,7 @@
#include <vm/vm_kmem.h>
#include <vm/vm_page.h>
-void *percpu_areas[X15_MAX_CPUS] __read_mostly;
+void *percpu_areas[CONFIG_MAX_CPUS] __read_mostly;
static void *percpu_area_content __initdata;
static size_t percpu_area_size __initdata;
@@ -52,7 +52,7 @@ percpu_setup(void)
unsigned int order;
percpu_area_size = &_percpu_end - &_percpu;
- log_info("percpu: max_cpus: %u, section size: %zuk", X15_MAX_CPUS,
+ log_info("percpu: max_cpus: %u, section size: %zuk", CONFIG_MAX_CPUS,
percpu_area_size >> 10);
assert(vm_page_aligned(percpu_area_size));
diff --git a/kern/percpu.h b/kern/percpu.h
index 385930a1..87d14703 100644
--- a/kern/percpu.h
+++ b/kern/percpu.h
@@ -86,10 +86,10 @@ extern char _percpu_end;
static inline void *
percpu_area(unsigned int cpu)
{
- extern void *percpu_areas[X15_MAX_CPUS];
+ extern void *percpu_areas[CONFIG_MAX_CPUS];
void *area;
- assert(cpu < X15_MAX_CPUS);
+ assert(cpu < CONFIG_MAX_CPUS);
area = percpu_areas[cpu];
assert(area != NULL);
return area;
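
percpu_areas[] holds one pointer per possible processor, and percpu_area() bounds-checks the CPU id against CONFIG_MAX_CPUS before returning that processor's private copy of the percpu section. A userland sketch of the same scheme follows; heap allocation stands in for the kernel's duplication of the percpu section, and all sizes are illustrative.

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_CPUS 4              /* stand-in for CONFIG_MAX_CPUS */
    #define PERCPU_AREA_SIZE 4096   /* stand-in for the percpu section size */

    /* One private area per processor, reached through an array indexed
     * by CPU id, like percpu_areas[CONFIG_MAX_CPUS] in percpu.c. */
    static void *percpu_areas[MAX_CPUS];

    static void *percpu_area(unsigned int cpu)
    {
        assert(cpu < MAX_CPUS);
        assert(percpu_areas[cpu] != NULL);
        return percpu_areas[cpu];
    }

    int main(void)
    {
        for (unsigned int cpu = 0; cpu < MAX_CPUS; cpu++) {
            percpu_areas[cpu] = calloc(1, PERCPU_AREA_SIZE);
            assert(percpu_areas[cpu] != NULL);
        }

        /* The same offset in each area refers to a distinct per-CPU copy. */
        unsigned int *counter = percpu_area(1);
        *counter += 42;
        printf("cpu1 counter: %u, cpu0 counter: %u\n",
               *counter, *(unsigned int *)percpu_area(0));

        for (unsigned int cpu = 0; cpu < MAX_CPUS; cpu++) {
            free(percpu_areas[cpu]);
        }

        return 0;
    }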
diff --git a/kern/shell.h b/kern/shell.h
index f6377f07..ee56856e 100644
--- a/kern/shell.h
+++ b/kern/shell.h
@@ -25,7 +25,7 @@
#include <kern/error.h>
#include <kern/macros.h>
-#ifdef X15_ENABLE_SHELL
+#ifdef CONFIG_SHELL
#define SHELL_REGISTER_CMDS(cmds) \
MACRO_BEGIN \
@@ -81,11 +81,11 @@ void shell_start(void);
*/
int shell_cmd_register(struct shell_cmd *cmd);
-#else /* X15_ENABLE_SHELL */
+#else /* CONFIG_SHELL */
#define SHELL_REGISTER_CMDS(cmds)
#define shell_setup()
#define shell_start()
-#endif /* X15_ENABLE_SHELL */
+#endif /* CONFIG_SHELL */
/*
* This init operation provides :
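
When CONFIG_SHELL is disabled, the shell entry points above collapse to empty macros so that call sites build unchanged, with no #ifdef at every use. Below is a minimal, runnable illustration of that stub-macro pattern; FEATURE_TRACE and trace_report() are hypothetical names, not part of the shell API.

    #include <stdio.h>

    /* Toggle the hypothetical feature by defining FEATURE_TRACE. */
    /* #define FEATURE_TRACE 1 */

    #ifdef FEATURE_TRACE
    static void trace_report(const char *what)
    {
        printf("trace: %s\n", what);
    }
    #else
    /* Feature disabled: the call site compiles to nothing. */
    #define trace_report(what) ((void)(what))
    #endif

    int main(void)
    {
        trace_report("boot");   /* builds whether or not FEATURE_TRACE is set */
        printf("done\n");
        return 0;
    }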
diff --git a/kern/shutdown.c b/kern/shutdown.c
index b85cb1cb..510911fe 100644
--- a/kern/shutdown.c
+++ b/kern/shutdown.c
@@ -26,7 +26,7 @@
static struct plist shutdown_ops_list;
-#ifdef X15_ENABLE_SHELL
+#ifdef CONFIG_SHELL
static void
shutdown_shell_halt(int argc, char **argv)
@@ -66,7 +66,7 @@ INIT_OP_DEFINE(shutdown_setup_shell,
INIT_OP_DEP(shell_setup, true),
INIT_OP_DEP(shutdown_setup, true));
-#endif /* X15_ENABLE_SHELL */
+#endif /* CONFIG_SHELL */
static int __init
shutdown_bootstrap(void)
diff --git a/kern/spinlock.c b/kern/spinlock.c
index fcb7c7b6..a591c61d 100644
--- a/kern/spinlock.c
+++ b/kern/spinlock.c
@@ -106,7 +106,7 @@
#error "spinlock qid too large"
#endif
-#if X15_MAX_CPUS > (1 << SPINLOCK_QID_CPU_BITS)
+#if CONFIG_MAX_CPUS > (1 << SPINLOCK_QID_CPU_BITS)
#error "maximum number of supported processors too large"
#endif
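
This guard ties the new option to an existing invariant: a spinlock queue id packs the CPU number into a fixed-width field, so CONFIG_MAX_CPUS must fit in it. A small sketch of the same compile-time check with illustrative values; the real SPINLOCK_QID_CPU_BITS value is not shown in this hunk.

    #include <stdio.h>

    #define MAX_CPUS 128        /* stand-in for CONFIG_MAX_CPUS */
    #define QID_CPU_BITS 9      /* illustrative width of the CPU field */

    #if MAX_CPUS > (1 << QID_CPU_BITS)
    #error "maximum number of supported processors too large"
    #endif

    int main(void)
    {
        printf("%d CPUs fit in a %d-bit field (up to %d)\n",
               MAX_CPUS, QID_CPU_BITS, 1 << QID_CPU_BITS);
        return 0;
    }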
diff --git a/kern/syscnt.c b/kern/syscnt.c
index f1cc95a9..cd13a398 100644
--- a/kern/syscnt.c
+++ b/kern/syscnt.c
@@ -33,7 +33,7 @@
static struct list syscnt_list;
static struct mutex syscnt_lock;
-#ifdef X15_ENABLE_SHELL
+#ifdef CONFIG_SHELL
static void
syscnt_shell_info(int argc, char **argv)
@@ -61,7 +61,7 @@ INIT_OP_DEFINE(syscnt_setup_shell,
INIT_OP_DEP(shell_setup, true),
INIT_OP_DEP(syscnt_setup, true));
-#endif /* X15_ENABLE_SHELL */
+#endif /* CONFIG_SHELL */
static int __init
syscnt_setup(void)
diff --git a/kern/task.c b/kern/task.c
index 35220d3e..7039b426 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -59,7 +59,7 @@ task_init(struct task *task, const char *name, struct vm_map *map)
strlcpy(task->name, name, sizeof(task->name));
}
-#ifdef X15_ENABLE_SHELL
+#ifdef CONFIG_SHELL
static void
task_shell_info(int argc, char *argv[])
@@ -107,7 +107,7 @@ INIT_OP_DEFINE(task_setup_shell,
INIT_OP_DEP(task_setup, true),
INIT_OP_DEP(thread_setup, true));
-#endif /* X15_ENABLE_SHELL */
+#endif /* CONFIG_SHELL */
static int __init
task_setup(void)
diff --git a/kern/thread.c b/kern/thread.c
index f9c22742..21736cd0 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -285,13 +285,13 @@ static struct thread_runq thread_runq __percpu;
* Statically allocated fake threads that provide thread context to processors
* during bootstrap.
*/
-static struct thread thread_booters[X15_MAX_CPUS] __initdata;
+static struct thread thread_booters[CONFIG_MAX_CPUS] __initdata;
static struct kmem_cache thread_cache;
-#ifndef X15_ENABLE_THREAD_STACK_GUARD
+#ifndef CONFIG_THREAD_STACK_GUARD
static struct kmem_cache thread_stack_cache;
-#endif /* X15_ENABLE_THREAD_STACK_GUARD */
+#endif /* CONFIG_THREAD_STACK_GUARD */
static const unsigned char thread_policy_table[THREAD_NR_SCHED_POLICIES] = {
[THREAD_SCHED_POLICY_FIFO] = THREAD_SCHED_CLASS_RT,
@@ -1877,7 +1877,7 @@ thread_unlock_runq(struct thread_runq *runq, unsigned long flags)
spinlock_unlock_intr_restore(&runq->lock, flags);
}
-#ifdef X15_ENABLE_THREAD_STACK_GUARD
+#ifdef CONFIG_THREAD_STACK_GUARD
#include <machine/pmap.h>
#include <vm/vm_kmem.h>
@@ -1939,7 +1939,7 @@ thread_free_stack(void *stack)
vm_kmem_free(va, (PAGE_SIZE * 2) + stack_size);
}
-#else /* X15_ENABLE_THREAD_STACK_GUARD */
+#else /* CONFIG_THREAD_STACK_GUARD */
static void *
thread_alloc_stack(void)
@@ -1953,7 +1953,7 @@ thread_free_stack(void *stack)
kmem_cache_free(&thread_stack_cache, stack);
}
-#endif /* X15_ENABLE_THREAD_STACK_GUARD */
+#endif /* CONFIG_THREAD_STACK_GUARD */
static void
thread_destroy(struct thread *thread)
@@ -2190,7 +2190,7 @@ thread_setup_runq(struct thread_runq *runq)
thread_setup_idler(runq);
}
-#ifdef X15_ENABLE_SHELL
+#ifdef CONFIG_SHELL
/*
* This function is meant for debugging only. As a result, it uses a weak
@@ -2266,7 +2266,7 @@ INIT_OP_DEFINE(thread_setup_shell,
INIT_OP_DEP(task_setup, true),
INIT_OP_DEP(thread_setup, true));
-#endif /* X15_ENABLE_SHELL */
+#endif /* CONFIG_SHELL */
static void __init
thread_setup_common(unsigned int cpu)
@@ -2288,10 +2288,10 @@ thread_setup(void)
kmem_cache_init(&thread_cache, "thread", sizeof(struct thread),
CPU_L1_SIZE, NULL, 0);
-#ifndef X15_ENABLE_THREAD_STACK_GUARD
+#ifndef CONFIG_THREAD_STACK_GUARD
kmem_cache_init(&thread_stack_cache, "thread_stack", TCB_STACK_SIZE,
CPU_DATA_ALIGN, NULL, 0);
-#endif /* X15_ENABLE_THREAD_STACK_GUARD */
+#endif /* CONFIG_THREAD_STACK_GUARD */
cpumap_for_each(&thread_active_runqs, cpu) {
thread_setup_runq(percpu_ptr(thread_runq, cpu));
@@ -2308,7 +2308,7 @@ INIT_OP_DEFINE(thread_setup,
INIT_OP_DEP(task_setup, true),
INIT_OP_DEP(thread_bootstrap, true),
INIT_OP_DEP(turnstile_setup, true),
-#ifdef X15_ENABLE_THREAD_STACK_GUARD
+#ifdef CONFIG_THREAD_STACK_GUARD
INIT_OP_DEP(vm_kmem_setup, true),
INIT_OP_DEP(vm_map_setup, true),
INIT_OP_DEP(vm_page_setup, true),
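
With CONFIG_THREAD_STACK_GUARD enabled, each kernel stack is mapped with an inaccessible page on both sides, which is why the code above frees (PAGE_SIZE * 2) + stack_size. The following is a userland illustration of the guard-page idea using mmap/mprotect; the kernel itself goes through vm_kmem and pmap, not these calls.

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
        size_t stack_size = 16 * page_size;
        size_t total = stack_size + 2 * page_size;

        char *area = mmap(NULL, total, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (area == MAP_FAILED) {
            return 1;
        }

        /* Revoke access to the first and last page: these are the guards. */
        if ((mprotect(area, page_size, PROT_NONE) != 0)
            || (mprotect(area + page_size + stack_size, page_size,
                         PROT_NONE) != 0)) {
            return 1;
        }

        char *stack = area + page_size;
        printf("usable stack: [%p, %p)\n",
               (void *)stack, (void *)(stack + stack_size));

        /* An overflow into a guard page (e.g. stack[-1] = 0) now faults
         * instead of silently corrupting adjacent memory. */
        munmap(area, total);
        return 0;
    }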
diff --git a/kern/thread.h b/kern/thread.h
index 164eed79..29c2dfe5 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -43,6 +43,7 @@
#include <kern/init.h>
#include <kern/condition.h>
#include <kern/cpumap.h>
+#include <kern/kernel.h>
#include <kern/macros.h>
#include <kern/spinlock_types.h>
#include <kern/turnstile_types.h>
@@ -72,7 +73,7 @@ struct thread_sched_data {
#include <kern/thread_i.h>
-#define THREAD_KERNEL_PREFIX PACKAGE "_"
+#define THREAD_KERNEL_PREFIX KERNEL_NAME "_"
/*
* Scheduling policies.
diff --git a/kern/work.c b/kern/work.c
index ea3eb1f4..365ca30d 100644
--- a/kern/work.c
+++ b/kern/work.c
@@ -52,7 +52,7 @@
*/
#define WORK_THREADS_RATIO 4
#define WORK_THREADS_THRESHOLD 512
-#define WORK_MAX_THREADS MAX(X15_MAX_CPUS, WORK_THREADS_THRESHOLD)
+#define WORK_MAX_THREADS MAX(CONFIG_MAX_CPUS, WORK_THREADS_THRESHOLD)
/*
* Work pool flags.
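
The visible context caps the global number of worker threads at MAX(CONFIG_MAX_CPUS, WORK_THREADS_THRESHOLD), so the limit only grows beyond the fixed threshold once the configured CPU count exceeds it. A minimal sketch of that computation with stand-in values:

    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    #define MAX_CPUS 128                /* stand-in for CONFIG_MAX_CPUS */
    #define WORK_THREADS_THRESHOLD 512
    #define WORK_MAX_THREADS MAX(MAX_CPUS, WORK_THREADS_THRESHOLD)

    int main(void)
    {
        /* 512 here: the threshold dominates until MAX_CPUS exceeds it. */
        printf("work thread cap: %d\n", WORK_MAX_THREADS);
        return 0;
    }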
diff --git a/kern/xcall.c b/kern/xcall.c
index 1251f28c..5a431b2f 100644
--- a/kern/xcall.c
+++ b/kern/xcall.c
@@ -50,7 +50,7 @@ struct xcall {
* between multiple cross-calls.
*/
struct xcall_cpu_data {
- alignas(CPU_L1_SIZE) struct xcall send_calls[X15_MAX_CPUS];
+ alignas(CPU_L1_SIZE) struct xcall send_calls[CONFIG_MAX_CPUS];
struct xcall *recv_call;
struct spinlock lock;
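
The per-CPU cross-call data aligns its send_calls[] array to the L1 cache line size, a common layout to keep hot per-processor slots from sharing cache lines. Below is a self-contained sketch of the idea; padding each slot to a full line, the sizes, and the member layout are illustrative assumptions beyond what the hunk itself shows.

    #include <stdalign.h>
    #include <stdio.h>

    #define MAX_CPUS 4          /* stand-in for CONFIG_MAX_CPUS */
    #define CACHE_LINE_SIZE 64  /* stand-in for CPU_L1_SIZE */

    /* One slot per possible processor; each slot padded and aligned to a
     * cache line so concurrent writers never touch the same line. */
    struct xcall {
        alignas(CACHE_LINE_SIZE) void (*fn)(void *);
        void *arg;
    };

    struct xcall_cpu_data {
        alignas(CACHE_LINE_SIZE) struct xcall send_calls[MAX_CPUS];
    };

    int main(void)
    {
        printf("sizeof(struct xcall) = %zu (multiple of %d)\n",
               sizeof(struct xcall), CACHE_LINE_SIZE);
        printf("sizeof(struct xcall_cpu_data) = %zu\n",
               sizeof(struct xcall_cpu_data));
        return 0;
    }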