summaryrefslogtreecommitdiff
path: root/arch/x86/machine/pmap.h
blob: ea5692d74514f2f4cac245563da0cd28d6c71825 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
/*
 * Copyright (c) 2010, 2011, 2012, 2013 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * TODO Comment.
 */

#ifndef _X86_PMAP_H
#define _X86_PMAP_H

#include <kern/macros.h>

/*
 * Page table entry flags.
 *
 * These values match the hardware-defined x86 page table entry bits.
 */
#define PMAP_PTE_P      0x00000001  /* Present */
#define PMAP_PTE_RW     0x00000002  /* Read/write (writable if set) */
#define PMAP_PTE_US     0x00000004  /* User/supervisor (user-accessible if set) */
#define PMAP_PTE_PWT    0x00000008  /* Page-level write-through */
#define PMAP_PTE_PCD    0x00000010  /* Page-level cache disable */
#define PMAP_PTE_A      0x00000020  /* Accessed */
#define PMAP_PTE_D      0x00000040  /* Dirty */
#define PMAP_PTE_PS     0x00000080  /* Page size (large page, non-leaf entries) */
#define PMAP_PTE_G      0x00000100  /* Global (translation survives CR3 reloads) */
/*
 * Page translation hierarchy properties.
 */

/*
 * Masks define valid bits at each page translation level.
 *
 * Additional bits such as the global bit can be added at runtime for optional
 * features.
 */

/* Leaf (level 0) entries: the Dirty bit is meaningful here. */
#define PMAP_L0_MASK    (PMAP_PA_MASK | PMAP_PTE_D | PMAP_PTE_A \
                         | PMAP_PTE_PCD | PMAP_PTE_PWT | PMAP_PTE_US \
                         | PMAP_PTE_RW | PMAP_PTE_P)

/* Non-leaf entries: same as L0 without the Dirty bit. */
#define PMAP_L1_MASK    (PMAP_PA_MASK | PMAP_PTE_A | PMAP_PTE_PCD \
                         | PMAP_PTE_PWT | PMAP_PTE_US | PMAP_PTE_RW \
                         | PMAP_PTE_P)

#ifdef __LP64__
/*
 * x86_64: four levels of 9 bits each over 4 KiB pages, i.e. 48-bit virtual
 * addresses (the VA mask below) and up to 52-bit physical addresses
 * (the PA mask covers bits 12-51).
 */
#define PMAP_RPTP_ORDER 0   /* Single-page root table */
#define PMAP_NR_LEVELS  4
#define PMAP_L0_BITS    9
#define PMAP_L1_BITS    9
#define PMAP_L2_BITS    9
#define PMAP_L3_BITS    9
#define PMAP_VA_MASK    DECL_CONST(0x0000ffffffffffff, UL)
#define PMAP_PA_MASK    DECL_CONST(0x000ffffffffff000, UL)
#define PMAP_L2_MASK    PMAP_L1_MASK
#define PMAP_L3_MASK    PMAP_L1_MASK
#else /* __LP64__ */
#ifdef X86_PAE
/*
 * PAE: the 4-entry page-directory-pointer table is folded into the root
 * table, giving two levels; L1 therefore indexes 11 bits (2 PDPT bits
 * + 9 page directory bits).  Entries are 64 bits wide, hence the ULL
 * physical address mask.
 */
#define PMAP_RPTP_ORDER 2   /* Assume two levels with a 4-page root table */
#define PMAP_NR_LEVELS  2
#define PMAP_L0_BITS    9
#define PMAP_L1_BITS    11
#define PMAP_VA_MASK    DECL_CONST(0xffffffff, UL)
#define PMAP_PA_MASK    DECL_CONST(0x000ffffffffff000, ULL)
#else /* X86_PAE */
/* Classic 32-bit paging: two levels of 10 bits each over 4 KiB pages. */
#define PMAP_RPTP_ORDER 0
#define PMAP_NR_LEVELS  2
#define PMAP_L0_BITS    10
#define PMAP_L1_BITS    10
#define PMAP_VA_MASK    DECL_CONST(0xffffffff, UL)
#define PMAP_PA_MASK    DECL_CONST(0xfffff000, UL)
#endif /* X86_PAE */
#endif /* __LP64__ */

/* Shift of the virtual address bits indexed at each translation level. */
#define PMAP_L0_SHIFT   12
#define PMAP_L1_SHIFT   (PMAP_L0_SHIFT + PMAP_L0_BITS)
#define PMAP_L2_SHIFT   (PMAP_L1_SHIFT + PMAP_L1_BITS)
#define PMAP_L3_SHIFT   (PMAP_L2_SHIFT + PMAP_L2_BITS)

/* Number of entries per page table page at each level. */
#define PMAP_L0_PTES_PER_PTP    (1 << PMAP_L0_BITS)
#define PMAP_L1_PTES_PER_PTP    (1 << PMAP_L1_BITS)
#define PMAP_L2_PTES_PER_PTP    (1 << PMAP_L2_BITS)
#define PMAP_L3_PTES_PER_PTP    (1 << PMAP_L3_BITS)

/* Number of contiguous pages backing the root page table. */
#define PMAP_NR_RPTPS   (1 << PMAP_RPTP_ORDER)

#ifndef __ASSEMBLER__

#include <kern/list.h>
#include <kern/mutex.h>
#include <kern/stdint.h>
#include <kern/types.h>
#include <machine/cpu.h>
#include <machine/trap.h>

/* Page table entry type: PAE entries are 64 bits even on 32-bit kernels. */
#ifdef X86_PAE
typedef uint64_t pmap_pte_t;
#else /* X86_PAE */
typedef unsigned long pmap_pte_t;   /* 32 bits non-PAE, 64 bits on LP64 */
#endif /* X86_PAE */

/*
 * Physical address map.
 *
 * Describes the page translation tables of one address space.
 *
 * TODO Define locking protocol.
 */
struct pmap {
    struct mutex lock;          /* Serializes updates (exact protocol TBD, see TODO) */
    struct list node;           /* NOTE(review): presumably links this pmap into a
                                   global list of pmaps - confirm in pmap.c */
    phys_addr_t root_ptp_pa;    /* Physical address of the root page table page(s) */
#ifdef X86_PAE
    pmap_pte_t *pdpt;           /* Virtual address of the page-directory-pointer table */

    /* The page-directory-pointer base is always 32-bits wide */
    unsigned long pdpt_pa;
#endif /* X86_PAE */

    /* Processors on which this pmap is loaded */
    struct cpumap cpumap;
};

/*
 * The kernel pmap.
 */
extern struct pmap *kernel_pmap;

/*
 * Per physical page data specific to the pmap module.
 *
 * On this architecture, the number of page table entries is stored in page
 * table page descriptors.
 */
struct pmap_page {
    unsigned short nr_ptes;     /* Number of valid entries in this page table page */
};

/* NOTE(review): presumably tells the machine-independent VM layer that this
   architecture defines struct pmap_page - confirm against the VM module. */
#define PMAP_DEFINE_PAGE

/*
 * Early initialization of the MMU.
 *
 * This function is called before paging is enabled by the boot module. It
 * maps the kernel at physical and virtual addresses, after which all kernel
 * functions and data can be accessed.
 */
pmap_pte_t * pmap_setup_paging(void);

/*
 * This function is called by the AP bootstrap code before paging is enabled.
 */
pmap_pte_t * pmap_ap_setup_paging(void);

/*
 * Early initialization of the pmap module.
 */
void pmap_bootstrap(void);

/*
 * Early initialization of the MMU on APs.
 */
void pmap_ap_bootstrap(void);

/*
 * Allocate pure virtual memory.
 *
 * This memory is obtained from a very small pool of reserved pages located
 * immediately after the kernel. Its purpose is to allow early mappings to
 * be created before the VM system is available.
 */
unsigned long pmap_bootalloc(unsigned int nr_pages);

/*
 * Set the protection of mappings in a physical map.
 */
void pmap_protect(struct pmap *pmap, unsigned long start, unsigned long end,
                  int prot);

/*
 * Extract a mapping from a physical map.
 *
 * This function walks the page tables to retrieve the physical address
 * mapped at the given virtual address. If there is no mapping for the
 * virtual address, 0 is returned (implying that page 0 is always reserved).
 */
phys_addr_t pmap_extract(struct pmap *pmap, unsigned long va);

/*
 * Perform the required TLB invalidations so that a physical map is up to
 * date on all processors using it.
 *
 * Functions that require updating are:
 *  - pmap_enter
 *  - pmap_remove
 *  - pmap_protect
 *
 * If the kernel has reached a state where IPIs may be used to update remote
 * processor TLBs, interrupts must be enabled when calling this function.
 */
void pmap_update(struct pmap *pmap, unsigned long start, unsigned long end);

/*
 * Interrupt handler for inter-processor update requests.
 */
void pmap_update_intr(struct trap_frame *frame);

/*
 * Set up the pmap module.
 *
 * This function should only be called by the VM system, once kernel
 * allocations can be performed safely.
 */
void pmap_setup(void);

/*
 * Create a pmap for a user task.
 */
int pmap_create(struct pmap **pmapp);

/*
 * Create a mapping on a physical map.
 *
 * If protection is VM_PROT_NONE, this function behaves as if it were
 * VM_PROT_READ. There must not be an existing valid mapping for the given
 * virtual address.
 */
int pmap_enter(struct pmap *pmap, unsigned long va, phys_addr_t pa, int prot);

/*
 * Remove mappings from a physical map.
 *
 * Non existent mappings are allowed in the given range.
 */
void pmap_remove(struct pmap *pmap, unsigned long start, unsigned long end);

/*
 * Load the given pmap on the current processor.
 *
 * This function must be called with interrupts and preemption disabled.
 */
void pmap_load(struct pmap *pmap);

/*
 * Return the pmap loaded on the calling processor.
 */
static inline struct pmap *
pmap_current(void)
{
    struct pmap *pmap;

    pmap = cpu_percpu_get_pmap();
    return pmap;
}

#endif /* __ASSEMBLER__ */

#endif /* _X86_PMAP_H */