From a70741cdec17094276e9aa59bebe5cf4a0ef812b Mon Sep 17 00:00:00 2001
From: Richard Braun
Date: Fri, 20 Oct 2017 00:33:45 +0200
Subject: Start the machine-independent vm_ptable module

---
 arch/arm/machine/pmap.c | 285 +++++++---------------------------------
 arch/arm/machine/pmap.h |  63 +++--------
 vm/Makefile             |   3 +-
 vm/vm_ptable.c          | 294 ++++++++++++++++++++++++++++++++++++++++
 vm/vm_ptable.h          |  59 ++++++++++
 5 files changed, 412 insertions(+), 292 deletions(-)
 create mode 100644 vm/vm_ptable.c
 create mode 100644 vm/vm_ptable.h

diff --git a/arch/arm/machine/pmap.c b/arch/arm/machine/pmap.c
index dc0a5263..390faf57 100644
--- a/arch/arm/machine/pmap.c
+++ b/arch/arm/machine/pmap.c
@@ -47,20 +47,33 @@
 #include
 #include
 #include
+#include
 #include

-typedef pmap_pte_t (*pmap_make_pte_fn)(phys_addr_t pa, int prot);
+#define PMAP_PTE_B              0x00000004
+#define PMAP_PTE_C              0x00000008
+
+#define PMAP_PTE_L0_RW          0x00000030
+#define PMAP_PTE_L1_RW          0x00000c00

 /*
- * Properties of a page translation level.
+ * Page table level properties.
  */
-struct pmap_pt_level {
-    unsigned int skip;
-    unsigned int bits;
-    unsigned int ptes_per_pt;
-    pmap_make_pte_fn make_pte_fn;
-    pmap_make_pte_fn make_ll_pte_fn;
-};
+
+#define PMAP_NR_LEVELS          2
+#define PMAP_L0_BITS            8
+#define PMAP_L1_BITS            12
+
+#define PMAP_VA_MASK            0xffffffff
+
+#define PMAP_PA_L0_MASK         0xfffff000
+#define PMAP_PA_L1_MASK         0xfffffc00
+
+#define PMAP_L0_SKIP            12
+#define PMAP_L1_SKIP            (PMAP_L0_SKIP + PMAP_L0_BITS)
+
+#define PMAP_L0_PTES_PER_PT     (1 << PMAP_L0_BITS)
+#define PMAP_L1_PTES_PER_PT     (1 << PMAP_L1_BITS)

 static pmap_pte_t __boot
 pmap_make_coarse_pte(phys_addr_t pa, int prot)
@@ -90,9 +103,9 @@ pmap_make_section_pte(phys_addr_t pa, int prot)
 }

 /*
- * Table of page translation properties.
+ * Table of properties per page table level.
  */
-static struct pmap_pt_level pmap_pt_levels[] __read_mostly = {
+static const struct vm_ptable_level pmap_pt_levels[] = {
     {
         PMAP_L0_SKIP,
         PMAP_L0_BITS,
@@ -109,37 +122,12 @@ static struct pmap_pt_level pmap_pt_levels[] __read_mostly = {
     },
 };

-/*
- * Per-CPU page tables.
- */
-struct pmap_cpu_table {
-    struct list node;
-    phys_addr_t root_ptp_pa;
-};
-
 struct pmap {
-    struct pmap_cpu_table *cpu_tables[CONFIG_MAX_CPUS];
+    struct vm_ptable ptable;
 };

-/*
- * Type for page table walking functions.
- *
- * See pmap_walk_vas().
- */
-typedef void (*pmap_walk_fn_t)(phys_addr_t pa, unsigned int index,
-                               unsigned int level);
-
-/*
- * The kernel per-CPU page tables are used early enough during bootstrap
- * that using a percpu variable would actually become ugly. This array
- * is rather small anyway.
- */
-static struct pmap_cpu_table pmap_kernel_cpu_tables[CONFIG_MAX_CPUS] __read_mostly;
-
 struct pmap pmap_kernel_pmap;

-struct pmap *pmap_current_ptr __percpu;
-
 /*
  * Flags related to page protection.
  */
@@ -151,227 +139,41 @@ struct pmap *pmap_current_ptr __percpu;
  */
 static pmap_pte_t pmap_prot_table[VM_PROT_ALL + 1] __read_mostly;
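For illustration, the skip/bits pair carried by each level is all that is needed to compute a PTE index, exactly as the vm_ptable_pte_index() helper introduced later in this patch does. A minimal standalone sketch (not part of the patch) using the two ARM levels defined above:

#include <stdint.h>
#include <stdio.h>

#define L0_SKIP 12
#define L0_BITS 8
#define L1_SKIP (L0_SKIP + L0_BITS)
#define L1_BITS 12

static unsigned long
pte_index(uintptr_t va, unsigned int skip, unsigned int bits)
{
    return (va >> skip) & ((1UL << bits) - 1);
}

int
main(void)
{
    uintptr_t va = 0xc0104123;

    /* 4096-entry first-level table, indexed by VA bits [31:20] */
    printf("L1 index: %#lx\n", pte_index(va, L1_SKIP, L1_BITS)); /* 0xc01 */
    /* 256-entry coarse table, indexed by VA bits [19:12] */
    printf("L0 index: %#lx\n", pte_index(va, L0_SKIP, L0_BITS)); /* 0x4 */
    return 0;
}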
-/*
- * Structures related to inter-processor page table updates.
- */
-
-#define PMAP_UPDATE_OP_ENTER    1
-#define PMAP_UPDATE_OP_REMOVE   2
-#define PMAP_UPDATE_OP_PROTECT  3
-
-struct pmap_update_enter_args {
-    uintptr_t va;
-    phys_addr_t pa;
-    int prot;
-    int flags;
-};
-
-struct pmap_update_remove_args {
-    uintptr_t start;
-    uintptr_t end;
-};
-
-struct pmap_update_protect_args {
-    uintptr_t start;
-    uintptr_t end;
-    int prot;
-};
-
-struct pmap_update_op {
-    struct cpumap cpumap;
-    unsigned int operation;
-
-    union {
-        struct pmap_update_enter_args enter_args;
-        struct pmap_update_remove_args remove_args;
-        struct pmap_update_protect_args protect_args;
-    };
-};
-
-/*
- * Maximum number of operations that can be batched before an implicit
- * update.
- */
-#define PMAP_UPDATE_MAX_OPS 32
-
-/*
- * List of update operations.
- *
- * A list of update operations is a container of operations that are pending
- * for a pmap. Updating can be implicit, e.g. when a list has reached its
- * maximum size, or explicit, when pmap_update() is called. Operation lists
- * are thread-local objects.
- *
- * The cpumap is the union of all processors affected by at least one
- * operation.
- */
-struct pmap_update_oplist {
-    alignas(CPU_L1_SIZE) struct cpumap cpumap;
-    struct pmap *pmap;
-    unsigned int nr_ops;
-    struct pmap_update_op ops[PMAP_UPDATE_MAX_OPS];
-};
-
-/*
- * Statically allocated data for the main booter thread.
- */
-static struct cpumap pmap_booter_cpumap __initdata;
-static struct pmap_update_oplist pmap_booter_oplist __initdata;
-
-/*
- * Each regular thread gets an operation list from this cache.
- */
-static struct kmem_cache pmap_update_oplist_cache;
-
-/*
- * Queue holding update requests from remote processors.
- */
-struct pmap_update_queue {
-    struct spinlock lock;
-    struct list requests;
-};
-
-/*
- * Syncer thread.
- *
- * There is one such thread per processor. They are the recipients of
- * update requests, providing thread context for the mapping operations
- * they perform.
- */
-struct pmap_syncer {
-    alignas(CPU_L1_SIZE) struct thread *thread;
-    struct pmap_update_queue queue;
-    struct syscnt sc_updates;
-    struct syscnt sc_update_enters;
-    struct syscnt sc_update_removes;
-    struct syscnt sc_update_protects;
-};
-
-#if 0
-static void pmap_sync(void *arg);
-#endif
-
-static struct pmap_syncer pmap_syncer __percpu;
-
-/*
- * Maximum number of mappings for which individual TLB invalidations can be
- * performed. Global TLB flushes are done beyond this value.
- */
-#define PMAP_UPDATE_MAX_MAPPINGS 64
-
-/*
- * Per processor request, queued on a remote processor.
- *
- * The number of mappings is used to determine whether it's best to flush
- * individual TLB entries or globally flush the TLB.
- */
-struct pmap_update_request {
-    alignas(CPU_L1_SIZE) struct list node;
-    struct spinlock lock;
-    struct thread *sender;
-    const struct pmap_update_oplist *oplist;
-    unsigned int nr_mappings;
-    int done;
-    int error;
-};
-
-/*
- * Per processor array of requests.
- *
- * When an operation list is to be applied, the thread triggering the update
- * acquires the processor-local array of requests and uses it to queue requests
- * on remote processors.
- */
-struct pmap_update_request_array {
-    struct pmap_update_request requests[CONFIG_MAX_CPUS];
-    struct mutex lock;
-};
-
-static struct pmap_update_request_array pmap_update_request_array __percpu;
-
-static int pmap_do_remote_updates __read_mostly;
-
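The PMAP_UPDATE_MAX_MAPPINGS threshold above drives the choice between per-page and global TLB invalidation. A hypothetical standalone sketch of that decision (the TLB primitives here are stand-ins, not functions from this codebase):

#include <stdint.h>
#include <stdio.h>

#define UPDATE_MAX_MAPPINGS 64
#define PAGE_SIZE           4096

/* Stubs standing in for the architecture's TLB primitives. */
static void cpu_tlb_flush(void) { puts("global TLB flush"); }
static void cpu_tlb_flush_va(uintptr_t va)
{
    printf("flush va %#lx\n", (unsigned long)va);
}

static void
tlb_flush_range(uintptr_t start, uintptr_t end, unsigned int nr_mappings)
{
    if (nr_mappings > UPDATE_MAX_MAPPINGS) {
        cpu_tlb_flush();            /* one global flush beats many shots */
    } else {
        for (uintptr_t va = start; va < end; va += PAGE_SIZE) {
            cpu_tlb_flush_va(va);   /* per-page invalidation */
        }
    }
}

int
main(void)
{
    tlb_flush_range(0x80000000, 0x80003000, 3);     /* 3 pages: individual */
    tlb_flush_range(0x80000000, 0x80080000, 128);   /* 128 pages: global */
    return 0;
}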
 static struct kmem_cache pmap_cache;

-static char pmap_panic_inval_msg[] __bootdata
-    = "pmap: invalid physical address";
 static char pmap_panic_directmap_msg[] __bootdata
-    = "pmap: invalid direct physical mapping";
-
-static __always_inline unsigned long
-pmap_pte_index(uintptr_t va, const struct pmap_pt_level *pt_level)
-{
-    return ((va >> pt_level->skip) & ((1UL << pt_level->bits) - 1));
-}
-
-static void __boot
-pmap_boot_enter(pmap_pte_t *root_ptp, uintptr_t va, phys_addr_t pa,
-                unsigned long pgsize)
-{
-    const struct pmap_pt_level *pt_level, *pt_levels;
-    unsigned int level, last_level;
-    pmap_pte_t *pt, *ptp, *pte;
-
-    if (pa != (pa & PMAP_PA_L0_MASK)) {
-        boot_panic(pmap_panic_inval_msg);
-    }
-
-    (void)pgsize;
-
-    switch (pgsize) {
-    case (1 << PMAP_L1_SKIP):
-        last_level = 1;
-        break;
-    default:
-        last_level = 0;
-    }
-
-    pt_levels = (void *)BOOT_VTOP((uintptr_t)pmap_pt_levels);
-    pt = root_ptp;
-
-    for (level = PMAP_NR_LEVELS - 1; level != last_level; level--) {
-        pt_level = &pt_levels[level];
-        pte = &pt[pmap_pte_index(va, pt_level)];
-
-        if (*pte != 0) {
-            ptp = (void *)(uintptr_t)(*pte & PMAP_PA_L0_MASK); /* XXX */
-        } else {
-            ptp = bootmem_alloc(sizeof(pmap_pte_t) * pt_level->ptes_per_pt);
-            *pte = pt_level->make_pte_fn((uintptr_t)ptp, VM_PROT_ALL);
-        }
-
-        pt = ptp;
-    }
-
-    pt_level = &pt_levels[last_level];
-    pte = &pt[pmap_pte_index(va, pt_level)];
-    *pte = pt_level->make_ll_pte_fn(pa, VM_PROT_ALL);
-}
+    = "vm_ptable: invalid direct physical mapping";

 static unsigned long __boot
 pmap_boot_get_large_pgsize(void)
 {
-#if 1
+#if 0
     return (1 << PMAP_L1_SKIP);
 #else
     return PAGE_SIZE;
 #endif
 }
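As a worked example (not part of the patch): with the #if flipped to 0 above, the boot code temporarily falls back to PAGE_SIZE; when re-enabled, the large page size is 1 << PMAP_L1_SKIP bytes, i.e. a 1 MiB ARM section:

#include <stdio.h>

#define PMAP_L0_SKIP    12
#define PMAP_L0_BITS    8
#define PMAP_L1_SKIP    (PMAP_L0_SKIP + PMAP_L0_BITS)

int
main(void)
{
    /* 1 << 20 = 0x100000 bytes: one entry in the 4096-entry L1 table */
    printf("large page size: %#lx\n", 1UL << PMAP_L1_SKIP);
    return 0;
}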

-#define pmap_boot_enable_pgext(pgsize) ((void)(pgsize))
-
 pmap_pte_t * __boot
 pmap_setup_paging(void)
 {
-    struct pmap_cpu_table *cpu_table;
+    const struct vm_ptable_level *pt_levels;
     unsigned long i, size, pgsize;
     phys_addr_t pa, directmap_end;
-    pmap_pte_t *root_ptp;
+    struct vm_ptable *ptable;
+    struct pmap *kernel_pmap;
     uintptr_t va;

+    pt_levels = (void *)BOOT_VTOP((uintptr_t)&pmap_pt_levels);
+    kernel_pmap = (void *)BOOT_VTOP((uintptr_t)&pmap_kernel_pmap);
+    ptable = &kernel_pmap->ptable;
+
     /* Use large pages for the direct physical mapping when possible */
     pgsize = pmap_boot_get_large_pgsize();
-    pmap_boot_enable_pgext(pgsize);
+
+    /* TODO LPAE */
+
+    vm_ptable_init(ptable, pt_levels, ARRAY_SIZE(pmap_pt_levels));

     /*
      * Create the initial mappings. The first is for the .boot section
@@ -379,14 +181,12 @@ pmap_setup_paging(void)
      * direct physical mapping of physical memory.
      */

-    root_ptp = bootmem_alloc(PMAP_L1_PTES_PER_PT * sizeof(pmap_pte_t));
-
     va = vm_page_trunc((uintptr_t)&_boot);
     pa = va;
     size = vm_page_round((uintptr_t)&_boot_end) - va;

     for (i = 0; i < size; i += PAGE_SIZE) {
-        pmap_boot_enter(root_ptp, va, pa, PAGE_SIZE);
+        vm_ptable_boot_enter(ptable, va, pa, PAGE_SIZE);
         va += PAGE_SIZE;
         pa += PAGE_SIZE;
     }
@@ -402,15 +202,12 @@ pmap_setup_paging(void)
     pa = PMEM_RAM_START;

     for (i = PMEM_RAM_START; i < directmap_end; i += pgsize) {
-        pmap_boot_enter(root_ptp, va, pa, pgsize);
+        vm_ptable_boot_enter(ptable, va, pa, pgsize);
         va += pgsize;
         pa += pgsize;
     }

-    cpu_table = (void *)BOOT_VTOP((uintptr_t)&pmap_kernel_cpu_tables[0]);
-    cpu_table->root_ptp_pa = (uintptr_t)root_ptp;
-
-    return root_ptp;
+    return vm_ptable_boot_root(ptable);
 }

 #if 0
diff --git a/arch/arm/machine/pmap.h b/arch/arm/machine/pmap.h
index b62f5bec..d61eb00c 100644
--- a/arch/arm/machine/pmap.h
+++ b/arch/arm/machine/pmap.h
@@ -59,55 +59,9 @@
 #define PMAP_START_KMEM_ADDRESS         PMAP_END_DIRECTMAP_ADDRESS
 #define PMAP_END_KMEM_ADDRESS           PMAP_END_KERNEL_ADDRESS

-/*
- * Page table entry flags.
- */
-#define PMAP_PTE_TYPE_COARSE    0x00000001
-#define PMAP_PTE_TYPE_SMALL     0x00000002
-#define PMAP_PTE_TYPE_SECTION   0x00000002
-
-#define PMAP_PTE_B              0x00000004
-#define PMAP_PTE_C              0x00000008
-
-#define PMAP_PTE_L0_RW          0x00000030
-#define PMAP_PTE_L1_RW          0x00000c00
-
-/*
- * Page translation hierarchy properties.
- */
-
-#if 0
-/*
- * Masks define valid bits at each page translation level.
- *
- * Additional bits such as the global bit can be added at runtime for optional
- * features.
- */
-#define PMAP_L0_MASK    (PMAP_PA_MASK | PMAP_PTE_D | PMAP_PTE_A \
-                         | PMAP_PTE_PCD | PMAP_PTE_PWT | PMAP_PTE_US \
-                         | PMAP_PTE_RW | PMAP_PTE_P)
-#define PMAP_L1_MASK    (PMAP_PA_MASK | PMAP_PTE_A | PMAP_PTE_PCD \
-                         | PMAP_PTE_PWT | PMAP_PTE_US | PMAP_PTE_RW \
-                         | PMAP_PTE_P)
-#endif
-
-#define PMAP_NR_LEVELS          2
-#define PMAP_L0_BITS            8
-#define PMAP_L1_BITS            12
-
-#define PMAP_VA_MASK            DECL_CONST(0xffffffff, UL)
-
-#define PMAP_PA_L0_MASK         DECL_CONST(0xfffff000, UL)
-#define PMAP_PA_L1_MASK         DECL_CONST(0xfffffc00, UL)
-
-#define PMAP_L0_SKIP            12
-#define PMAP_L1_SKIP            (PMAP_L0_SKIP + PMAP_L0_BITS)
-
-#define PMAP_L0_PTES_PER_PT     (1 << PMAP_L0_BITS)
-#define PMAP_L1_PTES_PER_PT     (1 << PMAP_L1_BITS)
-
 #ifndef __ASSEMBLER__

+#include
 #include
 #include

@@ -118,6 +72,15 @@
 #include
 #include

+/*
+ * Page table entry types.
+ */
+#define PMAP_PTE_TYPE_FAULT     0x00000000
+#define PMAP_PTE_TYPE_COARSE    0x00000001
+#define PMAP_PTE_TYPE_SMALL     0x00000002
+#define PMAP_PTE_TYPE_SECTION   0x00000002
+#define PMAP_PTE_TYPE_MASK      0x00000003
+
 /*
  * Mapping creation flags.
  */
@@ -130,6 +93,12 @@ typedef phys_addr_t pmap_pte_t;
  */
 struct pmap;

+static __always_inline bool
+pmap_pte_valid(pmap_pte_t pte)
+{
+    return (pte & PMAP_PTE_TYPE_MASK) != PMAP_PTE_TYPE_FAULT;
+}
+
 static inline struct pmap *
 pmap_get_kernel_pmap(void)
 {
diff --git a/vm/Makefile b/vm/Makefile
index a42fe244..9cbf9d41 100644
--- a/vm/Makefile
+++ b/vm/Makefile
@@ -2,4 +2,5 @@ x15_SOURCES-y += \
 	vm/vm_kmem.c \
 	vm/vm_map.c \
 	vm/vm_object.c \
-	vm/vm_page.c
+	vm/vm_page.c \
+	vm/vm_ptable.c
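The new pmap_pte_valid() helper added to pmap.h above treats any descriptor whose low two type bits are non-zero as mapping something; on ARM, an all-zero type is a fault entry. A standalone sketch exercising the same logic (assumptions: 32-bit PTEs; not part of the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t pmap_pte_t;

#define PMAP_PTE_TYPE_FAULT     0x00000000
#define PMAP_PTE_TYPE_COARSE    0x00000001
#define PMAP_PTE_TYPE_SECTION   0x00000002
#define PMAP_PTE_TYPE_MASK      0x00000003

static bool
pmap_pte_valid(pmap_pte_t pte)
{
    return (pte & PMAP_PTE_TYPE_MASK) != PMAP_PTE_TYPE_FAULT;
}

int
main(void)
{
    printf("%d\n", pmap_pte_valid(0));                                  /* 0 */
    printf("%d\n", pmap_pte_valid(0x12345000 | PMAP_PTE_TYPE_COARSE));  /* 1 */
    printf("%d\n", pmap_pte_valid(0x10000000 | PMAP_PTE_TYPE_SECTION)); /* 1 */
    return 0;
}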
diff --git a/vm/vm_ptable.c b/vm/vm_ptable.c
new file mode 100644
index 00000000..7b6d230a
--- /dev/null
+++ b/vm/vm_ptable.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2010-2017 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * TODO Review locking.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+static struct vm_ptable_cpu_pt vm_ptable_boot_cpu_pt __bootdata;
+
+/*
+ * Structures related to inter-processor page table updates.
+ */
+
+#define VM_PTABLE_UPDATE_OP_ENTER      1
+#define VM_PTABLE_UPDATE_OP_REMOVE     2
+#define VM_PTABLE_UPDATE_OP_PROTECT    3
+
+struct vm_ptable_update_enter_args {
+    uintptr_t va;
+    phys_addr_t pa;
+    int prot;
+    int flags;
+};
+
+struct vm_ptable_update_remove_args {
+    uintptr_t start;
+    uintptr_t end;
+};
+
+struct vm_ptable_update_protect_args {
+    uintptr_t start;
+    uintptr_t end;
+    int prot;
+};
+
+struct vm_ptable_update_op {
+    struct cpumap cpumap;
+    unsigned int operation;
+
+    union {
+        struct vm_ptable_update_enter_args enter_args;
+        struct vm_ptable_update_remove_args remove_args;
+        struct vm_ptable_update_protect_args protect_args;
+    };
+};
+
+/*
+ * Maximum number of operations that can be batched before an implicit
+ * update.
+ */
+#define VM_PTABLE_UPDATE_MAX_OPS 32
+
+/*
+ * List of update operations.
+ *
+ * A list of update operations is a container of operations that are pending
+ * for a pmap. Updating can be implicit, e.g. when a list has reached its
+ * maximum size, or explicit, when vm_ptable_update() is called. Operation
+ * lists are thread-local objects.
+ *
+ * The cpumap is the union of all processors affected by at least one
+ * operation.
+ */
+struct vm_ptable_update_oplist {
+    alignas(CPU_L1_SIZE) struct cpumap cpumap;
+    struct pmap *pmap;
+    unsigned int nr_ops;
+    struct vm_ptable_update_op ops[VM_PTABLE_UPDATE_MAX_OPS];
+};
+
+/*
+ * Statically allocated data for the main booter thread.
+ */
+static struct vm_ptable_update_oplist vm_ptable_booter_oplist __initdata;
+
+/*
+ * Each regular thread gets an operation list from this cache.
+ */
+static struct kmem_cache vm_ptable_update_oplist_cache;
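For illustration, here is a hypothetical, self-contained sketch of how such an operation list batches work and flushes implicitly when it fills up (none of these helpers exist in the patch; vm_ptable_update() is not shown there either):

#include <stdio.h>

#define UPDATE_MAX_OPS 32

struct update_op {
    unsigned int operation;     /* ENTER, REMOVE or PROTECT */
};

struct update_oplist {
    unsigned int nr_ops;
    struct update_op ops[UPDATE_MAX_OPS];
};

/* Apply every batched operation, then reset the list. */
static void
oplist_flush(struct update_oplist *oplist)
{
    printf("flushing %u ops\n", oplist->nr_ops);
    oplist->nr_ops = 0;
}

/* Queue one operation, flushing implicitly when the list is full. */
static void
oplist_add(struct update_oplist *oplist, struct update_op op)
{
    oplist->ops[oplist->nr_ops++] = op;

    if (oplist->nr_ops == UPDATE_MAX_OPS) {
        oplist_flush(oplist);
    }
}

int
main(void)
{
    struct update_oplist oplist = { .nr_ops = 0 };

    for (unsigned int i = 0; i < 100; i++) {
        oplist_add(&oplist, (struct update_op){ .operation = 1 });
    }

    oplist_flush(&oplist);      /* explicit update of the remainder */
    return 0;
}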
+ */ +struct vm_ptable_syncer { + alignas(CPU_L1_SIZE) struct thread *thread; + struct vm_ptable_update_queue queue; + struct syscnt sc_updates; + struct syscnt sc_update_enters; + struct syscnt sc_update_removes; + struct syscnt sc_update_protects; +}; + +#if 0 +static void vm_ptable_sync(void *arg); +#endif + +static struct vm_ptable_syncer vm_ptable_syncer __percpu; + +/* + * Maximum number of mappings for which individual TLB invalidations can be + * performed. Global TLB flushes are done beyond this value. + */ +#define VM_PTABLE_UPDATE_MAX_MAPPINGS 64 + +/* + * Per processor request, queued on a remote processor. + * + * The number of mappings is used to determine whether it's best to flush + * individual TLB entries or globally flush the TLB. + */ +struct vm_ptable_update_request { + alignas(CPU_L1_SIZE) struct list node; + struct spinlock lock; + struct thread *sender; + const struct vm_ptable_update_oplist *oplist; + unsigned int nr_mappings; + int done; + int error; +}; + +/* + * Per processor array of requests. + * + * When an operation list is to be applied, the thread triggering the update + * acquires the processor-local array of requests and uses it to queue requests + * on remote processors. + */ +struct vm_ptable_update_request_array { + struct vm_ptable_update_request requests[CONFIG_MAX_CPUS]; + struct mutex lock; +}; + +static struct vm_ptable_update_request_array vm_ptable_update_request_array + __percpu; + +static int vm_ptable_do_remote_updates __read_mostly; + +static char vm_ptable_panic_inval_msg[] __bootdata + = "vm_ptable: invalid physical address"; + +static __always_inline unsigned long +vm_ptable_pte_index(uintptr_t va, const struct vm_ptable_level *pt_level) +{ + return ((va >> pt_level->skip) & ((1UL << pt_level->bits) - 1)); +} + +void __boot +vm_ptable_init(struct vm_ptable *ptable, + const struct vm_ptable_level *pt_levels, + unsigned int nr_levels) +{ + const struct vm_ptable_level *pt_level; + struct vm_ptable_cpu_pt *pt; + + assert(nr_levels != 0); + + pt_level = &pt_levels[nr_levels - 1]; + pt = &vm_ptable_boot_cpu_pt; + pt->root = bootmem_alloc(pt_level->ptes_per_pt * sizeof(pmap_pte_t)); + ptable->cpu_pts[0] = pt; + + for (size_t i = 1; i < ARRAY_SIZE(ptable->cpu_pts); i++) { + ptable->cpu_pts[i] = NULL; + } + + ptable->pt_levels = pt_levels; + ptable->nr_levels = nr_levels; +} + +static __always_inline phys_addr_t +vm_ptable_pa_mask(const struct vm_ptable *ptable, unsigned int level) +{ + phys_addr_t size; + + if (level == 0) { + return ~PAGE_MASK; + } else { + size = ((phys_addr_t)1 << ptable->pt_levels[level - 1].bits) + * sizeof(pmap_pte_t); + return ~(size - 1); + } +} + +static __always_inline bool +vm_ptable_pa_aligned(const struct vm_ptable *ptable, phys_addr_t pa) +{ + return pa == (pa & vm_ptable_pa_mask(ptable, 0)); +} + +void __boot +vm_ptable_boot_enter(struct vm_ptable *ptable, uintptr_t va, + phys_addr_t pa, size_t pgsize) +{ + const struct vm_ptable_level *pt_level; + unsigned int level, last_level; + pmap_pte_t *pt, *next_pt, *pte; + phys_addr_t mask; + + if (!vm_ptable_pa_aligned(ptable, pa)) { + boot_panic(vm_ptable_panic_inval_msg); + } + +#if 0 + switch (pgsize) { + case (1 << PMAP_L1_SKIP): + last_level = 1; + break; + default: +#endif + last_level = 0; + pt = ptable->cpu_pts[0]->root; + + for (level = ptable->nr_levels - 1; level != last_level; level--) { + pt_level = &ptable->pt_levels[level]; + pte = &pt[vm_ptable_pte_index(va, pt_level)]; + + if (pmap_pte_valid(*pte)) { + mask = vm_ptable_pa_mask(ptable, level); + next_pt 
+
+void __boot
+vm_ptable_boot_enter(struct vm_ptable *ptable, uintptr_t va,
+                     phys_addr_t pa, size_t pgsize)
+{
+    const struct vm_ptable_level *pt_level;
+    unsigned int level, last_level;
+    pmap_pte_t *pt, *next_pt, *pte;
+    phys_addr_t mask;
+
+    if (!vm_ptable_pa_aligned(ptable, pa)) {
+        boot_panic(vm_ptable_panic_inval_msg);
+    }
+
+#if 0
+    switch (pgsize) {
+    case (1 << PMAP_L1_SKIP):
+        last_level = 1;
+        break;
+    default:
+#endif
+    last_level = 0;
+    pt = ptable->cpu_pts[0]->root;
+
+    for (level = ptable->nr_levels - 1; level != last_level; level--) {
+        pt_level = &ptable->pt_levels[level];
+        pte = &pt[vm_ptable_pte_index(va, pt_level)];
+
+        if (pmap_pte_valid(*pte)) {
+            mask = vm_ptable_pa_mask(ptable, level);
+            next_pt = (void *)(uintptr_t)(*pte & mask);
+        } else {
+            next_pt = bootmem_alloc(pt_level->ptes_per_pt * sizeof(pmap_pte_t));
+            *pte = pt_level->make_pte_fn((uintptr_t)next_pt, VM_PROT_ALL);
+        }
+
+        pt = next_pt;
+    }
+
+    pt_level = &ptable->pt_levels[last_level];
+    pte = &pt[vm_ptable_pte_index(va, pt_level)];
+    *pte = pt_level->make_ll_pte_fn(pa, VM_PROT_ALL);
+}
+
+pmap_pte_t * __boot
+vm_ptable_boot_root(const struct vm_ptable *ptable)
+{
+    return ptable->cpu_pts[0]->root;
+}
diff --git a/vm/vm_ptable.h b/vm/vm_ptable.h
new file mode 100644
index 00000000..0818864c
--- /dev/null
+++ b/vm/vm_ptable.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * TODO Comment.
+ */
+
+#ifndef _VM_VM_PTABLE_H
+#define _VM_VM_PTABLE_H
+
+#include
+#include
+
+#include
+#include
+
+struct vm_ptable_cpu_pt {
+    pmap_pte_t *root;
+};
+
+typedef pmap_pte_t (*vm_ptable_make_pte_fn)(phys_addr_t pa, int prot);
+
+struct vm_ptable_level {
+    unsigned int skip;
+    unsigned int bits;
+    unsigned int ptes_per_pt;
+    vm_ptable_make_pte_fn make_pte_fn;
+    vm_ptable_make_pte_fn make_ll_pte_fn;
+};
+
+struct vm_ptable {
+    struct vm_ptable_cpu_pt *cpu_pts[CONFIG_MAX_CPUS];
+    const struct vm_ptable_level *pt_levels;
+    unsigned int nr_levels;
+};
+
+void vm_ptable_init(struct vm_ptable *ptable,
+                    const struct vm_ptable_level *pt_levels,
+                    unsigned int nr_levels);
+
+void vm_ptable_boot_enter(struct vm_ptable *ptable, uintptr_t va,
+                          phys_addr_t pa, size_t pgsize);
+
+pmap_pte_t * vm_ptable_boot_root(const struct vm_ptable *ptable);
+
+#endif /* _VM_VM_PTABLE_H */
--
cgit v1.2.3
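To summarize the new interface, here is a hedged usage sketch mirroring what pmap_setup_paging() does in this patch; the example_* identifiers and the level table contents are hypothetical, and the kernel boot environment (bootmem allocator, ARRAY_SIZE, PAGE_SIZE) is assumed:

#include <vm/vm_ptable.h>

/* Stand-in for an architecture's level table, such as pmap_pt_levels. */
static const struct vm_ptable_level example_pt_levels[2] = { /* ... */ };

static struct vm_ptable example_ptable;

pmap_pte_t *
example_setup_paging(void)
{
    vm_ptable_init(&example_ptable, example_pt_levels,
                   ARRAY_SIZE(example_pt_levels));

    /* Identity-map one page; intermediate tables come from bootmem_alloc()
       inside vm_ptable_boot_enter(). */
    vm_ptable_boot_enter(&example_ptable, 0x8000, 0x8000, PAGE_SIZE);

    /* The root table is what the boot code hands to the MMU. */
    return vm_ptable_boot_root(&example_ptable);
}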