author     Richard Braun <rbraun@sceen.net>    2017-09-21 01:23:37 +0200
committer  Richard Braun <rbraun@sceen.net>    2017-09-21 01:26:09 +0200
commit     b0b860e6cd8949c05da380dbe64b7c7c6ec75e7c (patch)
tree       a1c7d98eb2a370975bd82c6d3dc16349636ddddf /kern/thread.c
parent     833316f34266a5fab2668f34c03e45db64d0ce76 (diff)
New build system
The new build system, called xbuild, is a minimalistic kbuild-like, make-based build system that also uses kconfig for scalable configuration.
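
In a kbuild/kconfig-style build, the selected options normally reach C code as CONFIG_* preprocessor macros (via a generated header or -D compiler flags), which is what the rename from X15_* to CONFIG_* macros in this diff reflects. The sketch below is only illustrative and not taken from this commit: CONFIG_MAX_CPUS stands in for the option that sizes thread_booters[] in kern/thread.c, while the fallback value and the other names are hypothetical.

/*
 * Minimal standalone sketch (not from this commit): how a kconfig-selected
 * option typically surfaces in C code as a CONFIG_* macro.
 */
#include <stdio.h>

#ifndef CONFIG_MAX_CPUS
#define CONFIG_MAX_CPUS 1 /* hypothetical fallback so the sketch builds on its own */
#endif

/* Mirrors the idea of one statically allocated bootstrap slot per CPU. */
static int example_booters[CONFIG_MAX_CPUS];

int
main(void)
{
    /* e.g. build with: cc -DCONFIG_MAX_CPUS=4 sketch.c */
    printf("configured for %d CPU(s), %zu booter slot(s)\n",
           CONFIG_MAX_CPUS,
           sizeof(example_booters) / sizeof(example_booters[0]));
    return 0;
}
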
Diffstat (limited to 'kern/thread.c')
-rw-r--r--    kern/thread.c    22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/kern/thread.c b/kern/thread.c
index f9c2274..21736cd 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -285,13 +285,13 @@ static struct thread_runq thread_runq __percpu;
* Statically allocated fake threads that provide thread context to processors
* during bootstrap.
*/
-static struct thread thread_booters[X15_MAX_CPUS] __initdata;
+static struct thread thread_booters[CONFIG_MAX_CPUS] __initdata;
static struct kmem_cache thread_cache;
-#ifndef X15_ENABLE_THREAD_STACK_GUARD
+#ifndef CONFIG_THREAD_STACK_GUARD
static struct kmem_cache thread_stack_cache;
-#endif /* X15_ENABLE_THREAD_STACK_GUARD */
+#endif /* CONFIG_THREAD_STACK_GUARD */
static const unsigned char thread_policy_table[THREAD_NR_SCHED_POLICIES] = {
[THREAD_SCHED_POLICY_FIFO] = THREAD_SCHED_CLASS_RT,
@@ -1877,7 +1877,7 @@ thread_unlock_runq(struct thread_runq *runq, unsigned long flags)
spinlock_unlock_intr_restore(&runq->lock, flags);
}
-#ifdef X15_ENABLE_THREAD_STACK_GUARD
+#ifdef CONFIG_THREAD_STACK_GUARD
#include <machine/pmap.h>
#include <vm/vm_kmem.h>
@@ -1939,7 +1939,7 @@ thread_free_stack(void *stack)
vm_kmem_free(va, (PAGE_SIZE * 2) + stack_size);
}
-#else /* X15_ENABLE_THREAD_STACK_GUARD */
+#else /* CONFIG_THREAD_STACK_GUARD */
static void *
thread_alloc_stack(void)
@@ -1953,7 +1953,7 @@ thread_free_stack(void *stack)
kmem_cache_free(&thread_stack_cache, stack);
}
-#endif /* X15_ENABLE_THREAD_STACK_GUARD */
+#endif /* CONFIG_THREAD_STACK_GUARD */
static void
thread_destroy(struct thread *thread)
@@ -2190,7 +2190,7 @@ thread_setup_runq(struct thread_runq *runq)
thread_setup_idler(runq);
}
-#ifdef X15_ENABLE_SHELL
+#ifdef CONFIG_SHELL
/*
* This function is meant for debugging only. As a result, it uses a weak
@@ -2266,7 +2266,7 @@ INIT_OP_DEFINE(thread_setup_shell,
INIT_OP_DEP(task_setup, true),
INIT_OP_DEP(thread_setup, true));
-#endif /* X15_ENABLE_SHELL */
+#endif /* CONFIG_SHELL */
static void __init
thread_setup_common(unsigned int cpu)
@@ -2288,10 +2288,10 @@ thread_setup(void)
kmem_cache_init(&thread_cache, "thread", sizeof(struct thread),
CPU_L1_SIZE, NULL, 0);
-#ifndef X15_ENABLE_THREAD_STACK_GUARD
+#ifndef CONFIG_THREAD_STACK_GUARD
kmem_cache_init(&thread_stack_cache, "thread_stack", TCB_STACK_SIZE,
CPU_DATA_ALIGN, NULL, 0);
-#endif /* X15_ENABLE_THREAD_STACK_GUARD */
+#endif /* CONFIG_THREAD_STACK_GUARD */
cpumap_for_each(&thread_active_runqs, cpu) {
thread_setup_runq(percpu_ptr(thread_runq, cpu));
@@ -2308,7 +2308,7 @@ INIT_OP_DEFINE(thread_setup,
INIT_OP_DEP(task_setup, true),
INIT_OP_DEP(thread_bootstrap, true),
INIT_OP_DEP(turnstile_setup, true),
-#ifdef X15_ENABLE_THREAD_STACK_GUARD
+#ifdef CONFIG_THREAD_STACK_GUARD
INIT_OP_DEP(vm_kmem_setup, true),
INIT_OP_DEP(vm_map_setup, true),
INIT_OP_DEP(vm_page_setup, true),