summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2017-09-07 20:59:07 +0200
committerRichard Braun <rbraun@sceen.net>2017-09-07 20:59:07 +0200
commit888853dc07f1cdb9701f3612dea7828cc96b84cc (patch)
treeb255b28e240c2ccc9c43c10b8429766d8548fa53
parent4febbe1c657c026a33bdb16b51a0f317217b8d5a (diff)
Use accessors when referring to global kernel objects
The kernel_map/kernel_pmap/kernel_task/etc... names were reused as they were in the Mach source code. They've been a (mostly harmless) long-standing violation of the coding rules.
-rw-r--r--arch/x86/machine/pmap.c41
-rw-r--r--arch/x86/machine/pmap.h11
-rw-r--r--kern/task.c12
-rw-r--r--kern/task.h11
-rw-r--r--kern/thread.c6
-rw-r--r--test/test_vm_page_fill.c10
-rw-r--r--vm/vm_kmem.c31
-rw-r--r--vm/vm_kmem.h5
-rw-r--r--vm/vm_map.c6
-rw-r--r--vm/vm_map.h8
-rw-r--r--vm/vm_object.c2
-rw-r--r--vm/vm_object.h8
12 files changed, 95 insertions, 56 deletions
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index fb251b0..07b8a70 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -93,15 +93,14 @@ struct pmap {
typedef void (*pmap_walk_fn_t)(phys_addr_t pa, unsigned int index,
unsigned int level);
-static struct pmap kernel_pmap_store __read_mostly;
-struct pmap *kernel_pmap __read_mostly = &kernel_pmap_store;
-
/*
* The kernel per-CPU page tables are used early enough during bootstrap
* that using a percpu variable would actually become ugly. This array
* is rather small anyway.
*/
-static struct pmap_cpu_table kernel_pmap_cpu_tables[X15_MAX_CPUS] __read_mostly;
+static struct pmap_cpu_table pmap_kernel_cpu_tables[X15_MAX_CPUS] __read_mostly;
+
+struct pmap pmap_kernel_pmap;
struct pmap *pmap_current_ptr __percpu;
@@ -478,7 +477,7 @@ pmap_setup_paging(void)
}
#endif /* __LP64__ */
- cpu_table = (void *)BOOT_VTOP((uintptr_t)&kernel_pmap_cpu_tables[0]);
+ cpu_table = (void *)BOOT_VTOP((uintptr_t)&pmap_kernel_cpu_tables[0]);
cpu_table->root_ptp_pa = (uintptr_t)root_ptp;
return root_ptp;
@@ -494,7 +493,7 @@ pmap_ap_setup_paging(void)
pgsize = pmap_boot_get_pgsize();
pmap_boot_enable_pgext(pgsize);
- pmap = (void *)BOOT_VTOP((uintptr_t)&kernel_pmap_store);
+ pmap = (void *)BOOT_VTOP((uintptr_t)&pmap_kernel_pmap);
cpu_table = (void *)BOOT_VTOP((uintptr_t)pmap->cpu_tables[boot_ap_id]);
#ifdef X15_X86_PAE
@@ -513,7 +512,7 @@ MACRO_BEGIN \
assert(((end) <= PMAP_START_DIRECTMAP_ADDRESS) \
|| ((start) >= PMAP_END_DIRECTMAP_ADDRESS)); \
\
- if ((pmap) == kernel_pmap) { \
+ if ((pmap) == pmap_get_kernel_pmap()) { \
assert(((start) >= PMAP_START_KMEM_ADDRESS) \
&& ((end) <= PMAP_END_KMEM_ADDRESS)); \
} else { \
@@ -588,7 +587,7 @@ pmap_walk_vas(uintptr_t start, uintptr_t end, pmap_walk_fn_t walk_fn)
#endif /* __LP64__ */
va = start;
- root_ptp_pa = kernel_pmap->cpu_tables[cpu_id()]->root_ptp_pa;
+ root_ptp_pa = pmap_get_kernel_pmap()->cpu_tables[cpu_id()]->root_ptp_pa;
do {
#ifdef __LP64__
@@ -830,12 +829,12 @@ pmap_bootstrap(void)
struct pmap_cpu_table *cpu_table;
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(kernel_pmap->cpu_tables); i++) {
- cpu_table = &kernel_pmap_cpu_tables[i];
- kernel_pmap->cpu_tables[i] = cpu_table;
+ for (i = 0; i < ARRAY_SIZE(pmap_get_kernel_pmap()->cpu_tables); i++) {
+ cpu_table = &pmap_kernel_cpu_tables[i];
+ pmap_get_kernel_pmap()->cpu_tables[i] = cpu_table;
}
- cpu_local_assign(pmap_current_ptr, kernel_pmap);
+ cpu_local_assign(pmap_current_ptr, pmap_get_kernel_pmap());
pmap_prot_table[VM_PROT_NONE] = 0;
pmap_prot_table[VM_PROT_READ] = 0;
@@ -918,7 +917,7 @@ INIT_OP_DEFINE(pmap_setup,
void __init
pmap_ap_setup(void)
{
- cpu_local_assign(pmap_current_ptr, kernel_pmap);
+ cpu_local_assign(pmap_current_ptr, pmap_get_kernel_pmap());
if (cpu_has_global_pages()) {
cpu_enable_global_pages();
@@ -993,14 +992,16 @@ static void __init
pmap_copy_cpu_table(unsigned int cpu)
{
struct pmap_cpu_table *cpu_table;
+ struct pmap *kernel_pmap;
unsigned int level;
const pmap_pte_t *sptp;
pmap_pte_t *dptp;
assert(cpu != 0);
- cpu_table = kernel_pmap->cpu_tables[cpu];
+ cpu_table = pmap_get_kernel_pmap()->cpu_tables[cpu];
level = PMAP_NR_LEVELS - 1;
+ kernel_pmap = pmap_get_kernel_pmap();
sptp = pmap_ptp_from_pa(kernel_pmap->cpu_tables[cpu_id()]->root_ptp_pa);
#ifdef X15_X86_PAE
@@ -1107,10 +1108,12 @@ int
pmap_kextract(uintptr_t va, phys_addr_t *pap)
{
const struct pmap_pt_level *pt_level;
+ struct pmap *kernel_pmap;
pmap_pte_t *ptp, *pte;
unsigned int level;
level = PMAP_NR_LEVELS - 1;
+ kernel_pmap = pmap_get_kernel_pmap();
ptp = pmap_ptp_from_pa(kernel_pmap->cpu_tables[cpu_id()]->root_ptp_pa);
for (;;) {
@@ -1168,7 +1171,7 @@ pmap_enter_local(struct pmap *pmap, uintptr_t va, phys_addr_t pa,
pte_bits = PMAP_PTE_RW;
- if (pmap != kernel_pmap) {
+ if (pmap != pmap_get_kernel_pmap()) {
pte_bits |= PMAP_PTE_US;
}
@@ -1203,7 +1206,7 @@ pmap_enter_local(struct pmap *pmap, uintptr_t va, phys_addr_t pa,
}
assert(!pmap_pte_valid(*pte));
- pte_bits = ((pmap == kernel_pmap) ? PMAP_PTE_G : PMAP_PTE_US)
+ pte_bits = ((pmap == pmap_get_kernel_pmap()) ? PMAP_PTE_G : PMAP_PTE_US)
| pmap_prot_table[prot & VM_PROT_ALL];
pmap_pte_set(pte, pa, pte_bits, pt_level);
return 0;
@@ -1377,7 +1380,7 @@ pmap_protect(struct pmap *pmap, uintptr_t va, int prot,
static void
pmap_flush_tlb(struct pmap *pmap, uintptr_t start, uintptr_t end)
{
- if ((pmap != pmap_current()) && (pmap != kernel_pmap)) {
+ if ((pmap != pmap_current()) && (pmap != pmap_get_kernel_pmap())) {
return;
}
@@ -1390,11 +1393,11 @@ pmap_flush_tlb(struct pmap *pmap, uintptr_t start, uintptr_t end)
static void
pmap_flush_tlb_all(struct pmap *pmap)
{
- if ((pmap != pmap_current()) && (pmap != kernel_pmap)) {
+ if ((pmap != pmap_current()) && (pmap != pmap_get_kernel_pmap())) {
return;
}
- if (pmap == kernel_pmap) {
+ if (pmap == pmap_get_kernel_pmap()) {
cpu_tlb_flush_all();
} else {
cpu_tlb_flush();
diff --git a/arch/x86/machine/pmap.h b/arch/x86/machine/pmap.h
index 96fc21e..d8c2455 100644
--- a/arch/x86/machine/pmap.h
+++ b/arch/x86/machine/pmap.h
@@ -183,10 +183,13 @@ typedef phys_addr_t pmap_pte_t;
*/
struct pmap;
-/*
- * The kernel pmap.
- */
-extern struct pmap *kernel_pmap;
+static inline struct pmap *
+pmap_get_kernel_pmap(void)
+{
+ extern struct pmap pmap_kernel_pmap;
+
+ return &pmap_kernel_pmap;
+}
/*
* Early initialization of the MMU.
diff --git a/kern/task.c b/kern/task.c
index 1621056..35220d3 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -28,7 +28,6 @@
#include <kern/spinlock.h>
#include <kern/task.h>
#include <kern/thread.h>
-#include <vm/vm_kmem.h>
#include <vm/vm_map.h>
#ifdef __LP64__
@@ -37,11 +36,7 @@
#define TASK_INFO_ADDR_FMT "%08lx"
#endif /* __LP64__ */
-/*
- * Kernel task and storage.
- */
-static struct task kernel_task_store;
-struct task *kernel_task __read_mostly = &kernel_task_store;
+struct task task_kernel_task;
/*
* Cache for allocated tasks.
@@ -117,10 +112,13 @@ INIT_OP_DEFINE(task_setup_shell,
static int __init
task_setup(void)
{
+ struct task *kernel_task;
+
+ kernel_task = task_get_kernel_task();
kmem_cache_init(&task_cache, "task", sizeof(struct task), 0, NULL, 0);
list_init(&task_list);
spinlock_init(&task_list_lock);
- task_init(kernel_task, "x15", kernel_map);
+ task_init(kernel_task, "x15", vm_map_get_kernel_map());
list_insert_head(&task_list, &kernel_task->node);
return 0;
}
diff --git a/kern/task.h b/kern/task.h
index 149ff49..4573979 100644
--- a/kern/task.h
+++ b/kern/task.h
@@ -43,10 +43,13 @@ struct task {
char name[TASK_NAME_SIZE];
};
-/*
- * The kernel task.
- */
-extern struct task *kernel_task;
+static inline struct task *
+task_get_kernel_task(void)
+{
+ extern struct task task_kernel_task;
+
+ return &task_kernel_task;
+}
static inline void
task_ref(struct task *task)
diff --git a/kern/thread.c b/kern/thread.c
index 64df194..f9c2274 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -116,6 +116,7 @@
#include <machine/page.h>
#include <machine/pmap.h>
#include <machine/tcb.h>
+#include <vm/vm_kmem.h>
#include <vm/vm_map.h>
/*
@@ -1693,7 +1694,7 @@ thread_init_booter(unsigned int cpu)
thread_set_user_priority(booter, 0);
thread_reset_real_priority(booter);
memset(booter->tsd, 0, sizeof(booter->tsd));
- booter->task = kernel_task;
+ booter->task = task_get_kernel_task();
snprintf(booter->name, sizeof(booter->name),
THREAD_KERNEL_PREFIX "thread_boot/%u", cpu);
}
@@ -1887,11 +1888,14 @@ thread_alloc_stack(void)
{
__unused struct vm_page *first_page, *last_page;
phys_addr_t first_pa, last_pa;
+ struct pmap *kernel_pmap;
size_t stack_size;
uintptr_t va;
void *mem;
__unused int error;
+ kernel_pmap = pmap_get_kernel_pmap();
+
stack_size = vm_page_round(TCB_STACK_SIZE);
mem = vm_kmem_alloc((PAGE_SIZE * 2) + stack_size);
diff --git a/test/test_vm_page_fill.c b/test/test_vm_page_fill.c
index 2b7f113..0b4278b 100644
--- a/test/test_vm_page_fill.c
+++ b/test/test_vm_page_fill.c
@@ -45,10 +45,15 @@ static unsigned char test_pattern = 1;
static void
test_write_pages(void)
{
+ struct vm_map *kernel_map;
+ struct pmap *kernel_pmap;
struct vm_page *page;
int error, flags;
uintptr_t va;
+ kernel_map = vm_map_get_kernel_map();
+ kernel_pmap = pmap_get_kernel_pmap();
+
for (;;) {
page = vm_page_alloc(0, VM_PAGE_SEL_HIGHMEM, VM_PAGE_KERNEL);
@@ -80,10 +85,15 @@ test_write_pages(void)
static void
test_reset_pages(void)
{
+ struct vm_map *kernel_map;
+ struct pmap *kernel_pmap;
struct vm_page *page;
int error, flags;
uintptr_t va;
+ kernel_map = vm_map_get_kernel_map();
+ kernel_pmap = pmap_get_kernel_pmap();
+
while (!list_empty(&test_pages)) {
page = list_first_entry(&test_pages, struct vm_page, node);
list_remove(&page->node);
diff --git a/vm/vm_kmem.c b/vm/vm_kmem.c
index 0583bb9..c0a91f1 100644
--- a/vm/vm_kmem.c
+++ b/vm/vm_kmem.c
@@ -33,14 +33,6 @@
#include <vm/vm_page.h>
#include <vm/vm_prot.h>
-/*
- * Kernel map and storage.
- */
-static struct vm_map kernel_map_store;
-struct vm_map *kernel_map __read_mostly = &kernel_map_store;
-
-static struct vm_object vm_kmem_kernel_object;
-
static uint64_t
vm_kmem_offset(uintptr_t va)
{
@@ -54,7 +46,7 @@ vm_kmem_setup(void)
uint64_t size;
size = vm_kmem_offset(PMAP_END_KMEM_ADDRESS);
- vm_object_init(&vm_kmem_kernel_object, size);
+ vm_object_init(vm_object_get_kernel_object(), size);
return 0;
}
@@ -96,7 +88,7 @@ vm_kmem_alloc_va(size_t size)
va = 0;
flags = VM_MAP_FLAGS(VM_PROT_ALL, VM_PROT_ALL, VM_INHERIT_NONE,
VM_ADV_DEFAULT, 0);
- error = vm_map_enter(kernel_map, &va, size, 0, flags, NULL, 0);
+ error = vm_map_enter(vm_map_get_kernel_map(), &va, size, 0, flags, NULL, 0);
if (error) {
return NULL;
@@ -112,12 +104,14 @@ vm_kmem_free_va(void *addr, size_t size)
va = (uintptr_t)addr;
assert(vm_kmem_free_check(va, size) == 0);
- vm_map_remove(kernel_map, va, va + vm_page_round(size));
+ vm_map_remove(vm_map_get_kernel_map(), va, va + vm_page_round(size));
}
void *
vm_kmem_alloc(size_t size)
{
+ struct vm_object *kernel_object;
+ struct pmap *kernel_pmap;
struct vm_page *page;
uintptr_t va, start, end;
int error;
@@ -129,6 +123,9 @@ vm_kmem_alloc(size_t size)
return NULL;
}
+ kernel_object = vm_object_get_kernel_object();
+ kernel_pmap = pmap_get_kernel_pmap();
+
for (start = va, end = va + size; start < end; start += PAGE_SIZE) {
page = vm_page_alloc(0, VM_PAGE_SEL_HIGHMEM, VM_PAGE_KERNEL);
@@ -140,8 +137,7 @@ vm_kmem_alloc(size_t size)
* The page becomes managed by the object and is freed in case
* of failure.
*/
- error = vm_object_insert(&vm_kmem_kernel_object, page,
- vm_kmem_offset(start));
+ error = vm_object_insert(kernel_object, page, vm_kmem_offset(start));
if (error) {
goto error;
@@ -172,12 +168,14 @@ void
vm_kmem_free(void *addr, size_t size)
{
const struct cpumap *cpumap;
+ struct pmap *kernel_pmap;
uintptr_t va, end;
va = (uintptr_t)addr;
size = vm_page_round(size);
end = va + size;
cpumap = cpumap_all();
+ kernel_pmap = pmap_get_kernel_pmap();
while (va < end) {
pmap_remove(kernel_pmap, va, cpumap);
@@ -185,7 +183,7 @@ vm_kmem_free(void *addr, size_t size)
}
pmap_update(kernel_pmap);
- vm_object_remove(&vm_kmem_kernel_object,
+ vm_object_remove(vm_object_get_kernel_object(),
vm_kmem_offset((uintptr_t)addr),
vm_kmem_offset(end));
vm_kmem_free_va(addr, size);
@@ -195,11 +193,14 @@ void *
vm_kmem_map_pa(phys_addr_t pa, size_t size,
uintptr_t *map_vap, size_t *map_sizep)
{
+ struct pmap *kernel_pmap;
uintptr_t offset, map_va;
size_t map_size;
phys_addr_t start;
int error;
+ kernel_pmap = pmap_get_kernel_pmap();
+
start = vm_page_trunc(pa);
map_size = vm_page_round(pa + size) - start;
map_va = (uintptr_t)vm_kmem_alloc_va(map_size);
@@ -242,9 +243,11 @@ void
vm_kmem_unmap_pa(uintptr_t map_va, size_t map_size)
{
const struct cpumap *cpumap;
+ struct pmap *kernel_pmap;
uintptr_t va, end;
cpumap = cpumap_all();
+ kernel_pmap = pmap_get_kernel_pmap();
end = map_va + map_size;
for (va = map_va; va < end; va += PAGE_SIZE) {
diff --git a/vm/vm_kmem.h b/vm/vm_kmem.h
index 9bb90dc..5ae4a16 100644
--- a/vm/vm_kmem.h
+++ b/vm/vm_kmem.h
@@ -41,11 +41,6 @@ extern char _data;
extern char _end;
/*
- * The kernel map.
- */
-extern struct vm_map *kernel_map;
-
-/*
* Allocate pure virtual kernel pages.
*
 * The caller is responsible for taking care of the underlying physical memory.
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 180419a..713d92f 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -76,6 +76,8 @@ static int vm_map_insert(struct vm_map *map, struct vm_map_entry *entry,
static struct kmem_cache vm_map_entry_cache;
static struct kmem_cache vm_map_cache;
+struct vm_map vm_map_kernel_map;
+
static struct vm_map_entry *
vm_map_entry_create(void)
{
@@ -747,7 +749,7 @@ INIT_OP_DEFINE(vm_map_setup_shell,
static int __init
vm_map_bootstrap(void)
{
- vm_map_init(kernel_map, kernel_pmap,
+ vm_map_init(vm_map_get_kernel_map(), pmap_get_kernel_pmap(),
PMAP_START_KMEM_ADDRESS, PMAP_END_KMEM_ADDRESS);
kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
sizeof(struct vm_map_entry), 0, NULL,
@@ -808,7 +810,7 @@ vm_map_info(struct vm_map *map)
struct vm_map_entry *entry;
const char *type, *name;
- if (map == kernel_map) {
+ if (map == vm_map_get_kernel_map()) {
name = "kernel map";
} else {
name = "map";
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 653eabd..009c746 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -92,6 +92,14 @@ struct vm_map {
struct pmap *pmap;
};
+static inline struct vm_map *
+vm_map_get_kernel_map(void)
+{
+ extern struct vm_map vm_map_kernel_map;
+
+ return &vm_map_kernel_map;
+}
+
/*
* Create a virtual mapping.
*/
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 679f353..707008e 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -31,6 +31,8 @@
#include <vm/vm_page.h>
#include <machine/page.h>
+struct vm_object vm_object_kernel_object;
+
static int __init
vm_object_setup(void)
{
diff --git a/vm/vm_object.h b/vm/vm_object.h
index 9ffe711..2f0e8e1 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -33,6 +33,14 @@
struct vm_object;
+static inline struct vm_object *
+vm_object_get_kernel_object(void)
+{
+ extern struct vm_object vm_object_kernel_object;
+
+ return &vm_object_kernel_object;
+}
+
/*
* Initialize a VM object.
*/