/*
 * Copyright (c) 2010-2017 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * TODO Review locking.
 */

#include <assert.h>
#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <kern/bootmem.h>
#include <kern/cpumap.h>
#include <kern/error.h>
#include <kern/init.h>
#include <kern/kmem.h>
#include <kern/list.h>
#include <kern/log.h>
#include <kern/macros.h>
#include <kern/mutex.h>
#include <kern/panic.h>
#include <kern/percpu.h>
#include <kern/spinlock.h>
#include <kern/syscnt.h>
#include <kern/thread.h>
#include <machine/boot.h>
#include <machine/cpu.h>
#include <machine/page.h>
#include <machine/pmap.h>
#include <machine/tcb.h>
#include <machine/trap.h>
#include <machine/types.h>
#include <vm/vm_kmem.h>
#include <vm/vm_page.h>
#include <vm/vm_prot.h>
#include <vm/vm_ptable.h>

static struct vm_ptable_cpu_pt vm_ptable_boot_cpu_pt __bootdata;

/*
 * Structures related to inter-processor page table updates.
 */

#define VM_PTABLE_UPDATE_OP_ENTER       1
#define VM_PTABLE_UPDATE_OP_REMOVE      2
#define VM_PTABLE_UPDATE_OP_PROTECT     3

struct vm_ptable_update_enter_args {
    uintptr_t va;
    phys_addr_t pa;
    int prot;
    int flags;
};

struct vm_ptable_update_remove_args {
    uintptr_t start;
    uintptr_t end;
};

struct vm_ptable_update_protect_args {
    uintptr_t start;
    uintptr_t end;
    int prot;
};

struct vm_ptable_update_op {
    struct cpumap cpumap;
    unsigned int operation;

    union {
        struct vm_ptable_update_enter_args enter_args;
        struct vm_ptable_update_remove_args remove_args;
        struct vm_ptable_update_protect_args protect_args;
    };
};

/*
 * Maximum number of operations that can be batched before an implicit
 * update.
 */
#define VM_PTABLE_UPDATE_MAX_OPS 32

/*
 * List of update operations.
 *
 * A list of update operations is a container of operations that are pending
 * for a pmap. Updating can be implicit, e.g. when a list has reached its
 * maximum size, or explicit, when vm_ptable_update() is called. Operation lists
 * are thread-local objects.
 *
 * The cpumap is the union of all processors affected by at least one
 * operation.
 */
struct vm_ptable_update_oplist {
    alignas(CPU_L1_SIZE) struct cpumap cpumap;
    struct pmap *pmap;
    unsigned int nr_ops;
    struct vm_ptable_update_op ops[VM_PTABLE_UPDATE_MAX_OPS];
};
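
/*
 * Illustrative flow (a sketch; the functions appending operations are
 * hypothetical, as the corresponding entry points are not part of this
 * file):
 *
 *  vm_ptable_enter(...)     appends an OP_ENTER op to the local oplist
 *  vm_ptable_protect(...)   appends an OP_PROTECT op
 *  ...                      once VM_PTABLE_UPDATE_MAX_OPS ops accumulate,
 *                           an implicit update is triggered; otherwise
 *                           vm_ptable_update() applies the list explicitly
 */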

/*
 * Statically allocated data for the main booter thread.
 */
static struct vm_ptable_update_oplist vm_ptable_booter_oplist __initdata;

/*
 * Each regular thread gets an operation list from this cache.
 */
static struct kmem_cache vm_ptable_update_oplist_cache;

/*
 * Queue holding update requests from remote processors.
 */
struct vm_ptable_update_queue {
    struct spinlock lock;
    struct list requests;
};

/*
 * Syncer thread.
 *
 * There is one such thread per processor. They are the recipients of
 * update requests, providing thread context for the mapping operations
 * they perform.
 */
struct vm_ptable_syncer {
    alignas(CPU_L1_SIZE) struct thread *thread;
    struct vm_ptable_update_queue queue;
    struct syscnt sc_updates;
    struct syscnt sc_update_enters;
    struct syscnt sc_update_removes;
    struct syscnt sc_update_protects;
};

/*
 * XXX Disabled until remote updates are wired up; see
 * vm_ptable_do_remote_updates below.
 */
#if 0
static void vm_ptable_sync(void *arg);
#endif

static struct vm_ptable_syncer vm_ptable_syncer __percpu;

/*
 * Maximum number of mappings for which individual TLB invalidations can be
 * performed. Global TLB flushes are done beyond this value.
 */
#define VM_PTABLE_UPDATE_MAX_MAPPINGS 64
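
/*
 * For example, assuming 4 KiB pages, removing a 1 MiB range spans 256
 * mappings, well above this threshold, so a single global TLB flush is
 * preferred over 256 individual invalidations.
 */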

/*
 * Per-processor request, queued on a remote processor.
 *
 * The number of mappings is used to determine whether it's best to flush
 * individual TLB entries or globally flush the TLB.
 */
struct vm_ptable_update_request {
    alignas(CPU_L1_SIZE) struct list node;
    struct spinlock lock;
    struct thread *sender;
    const struct vm_ptable_update_oplist *oplist;
    unsigned int nr_mappings;
    int done;
    int error;
};

/*
 * Per-processor array of requests.
 *
 * When an operation list is to be applied, the thread triggering the update
 * acquires the processor-local array of requests and uses it to queue
 * requests on remote processors.
 */
struct vm_ptable_update_request_array {
    struct vm_ptable_update_request requests[CONFIG_MAX_CPUS];
    struct mutex lock;
};
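
/*
 * Illustrative update sequence (a sketch inferred from the structures
 * above; the driving code isn't implemented in this file yet):
 *
 *  1. Lock the local request array.
 *  2. For each processor in oplist->cpumap, set up requests[cpu] and
 *     queue it on that processor's update queue, waking its syncer.
 *  3. Wait on each request until its done flag is set, collecting errors.
 *  4. Unlock the array.
 */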

static struct vm_ptable_update_request_array vm_ptable_update_request_array
    __percpu;

static int vm_ptable_do_remote_updates __read_mostly;

static char vm_ptable_panic_inval_msg[] __bootdata
    = "vm_ptable: invalid physical address";

static __always_inline unsigned long
vm_ptable_pte_index(uintptr_t va, const struct vm_ptable_level *pt_level)
{
    return ((va >> pt_level->skip) & ((1UL << pt_level->bits) - 1));
}
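
/*
 * For example, with a level where skip is 12 and bits is 9 (a typical
 * last level with 4 KiB pages and 512 PTEs per table), va 0x403000
 * gives (0x403000 >> 12) & 0x1ff = 0x403 & 0x1ff = 3.
 */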

void __boot
vm_ptable_init(struct vm_ptable *ptable,
               const struct vm_ptable_level *pt_levels,
               unsigned int nr_levels)
{
    const struct vm_ptable_level *pt_level;
    struct vm_ptable_cpu_pt *pt;

    assert(nr_levels != 0);

    pt_level = &pt_levels[nr_levels - 1];
    pt = &vm_ptable_boot_cpu_pt;
    pt->root = bootmem_alloc(pt_level->ptes_per_pt * sizeof(pmap_pte_t));
    ptable->cpu_pts[0] = pt;

    for (size_t i = 1; i < ARRAY_SIZE(ptable->cpu_pts); i++) {
        ptable->cpu_pts[i] = NULL;
    }

    ptable->pt_levels = pt_levels;
    ptable->nr_levels = nr_levels;
}
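
/*
 * Illustrative use (a sketch: the level parameters and the make_pte /
 * make_ll_pte helpers are hypothetical, describing a layout with 4 KiB
 * pages and 512 8-byte PTEs per table; pt_levels[0] is the last level):
 *
 *  static const struct vm_ptable_level pt_levels[] = {
 *      { .skip = 12, .bits = 9, .ptes_per_pt = 512,
 *        .make_pte_fn = make_pte, .make_ll_pte_fn = make_ll_pte },
 *      { .skip = 21, .bits = 9, .ptes_per_pt = 512,
 *        .make_pte_fn = make_pte, .make_ll_pte_fn = make_ll_pte },
 *  };
 *
 *  vm_ptable_init(&ptable, pt_levels, ARRAY_SIZE(pt_levels));
 */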

static __always_inline phys_addr_t
vm_ptable_pa_mask(const struct vm_ptable *ptable, unsigned int level)
{
    phys_addr_t size;

    if (level == 0) {
        return ~PAGE_MASK;
    } else {
        size = ((phys_addr_t)1 << ptable->pt_levels[level - 1].bits)
               * sizeof(pmap_pte_t);
        return ~(size - 1);
    }
}
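
/*
 * For example, level 0 simply yields ~PAGE_MASK, i.e. page alignment.
 * Assuming pt_levels[0].bits is 9 and 8-byte PTEs, the mask for level 1
 * is ~((512 * 8) - 1) = ~0xfff : a PTE referring to a 4 KiB page table
 * keeps only the bits above the table size.
 */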

static __always_inline bool
vm_ptable_pa_aligned(const struct vm_ptable *ptable, phys_addr_t pa)
{
    return pa == (pa & vm_ptable_pa_mask(ptable, 0));
}

void __boot
vm_ptable_boot_enter(struct vm_ptable *ptable, uintptr_t va,
                     phys_addr_t pa, size_t pgsize)
{
    const struct vm_ptable_level *pt_level;
    unsigned int level, last_level;
    pmap_pte_t *pt, *next_pt, *pte;
    phys_addr_t mask;

    if (!vm_ptable_pa_aligned(ptable, pa)) {
        boot_panic(vm_ptable_panic_inval_msg);
    }

#if 0
    /* Disabled draft of large page support. */
    switch (pgsize) {
    case ((size_t)1 << PMAP_L1_SKIP):
        last_level = 1;
        break;
    default:
        last_level = 0;
        break;
    }
#else
    (void)pgsize; /* Only last level mappings are supported for now. */
    last_level = 0;
#endif

    pt = ptable->cpu_pts[0]->root;

    for (level = ptable->nr_levels - 1; level != last_level; level--) {
        pt_level = &ptable->pt_levels[level];
        pte = &pt[vm_ptable_pte_index(va, pt_level)];

        if (pmap_pte_valid(*pte)) {
            mask = vm_ptable_pa_mask(ptable, level);
            next_pt = (void *)(uintptr_t)(*pte & mask);
        } else {
            next_pt = bootmem_alloc(pt_level->ptes_per_pt * sizeof(pmap_pte_t));
            *pte = pt_level->make_pte_fn((uintptr_t)next_pt, VM_PROT_ALL);
        }

        pt = next_pt;
    }

    pt_level = &ptable->pt_levels[last_level];
    pte = &pt[vm_ptable_pte_index(va, pt_level)];
    *pte = pt_level->make_ll_pte_fn(pa, VM_PROT_ALL);
}
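
/*
 * Illustrative use (a sketch; `ptable`, `start` and `end` are
 * hypothetical): identity-map a physical range with PAGE_SIZE pages
 * during boot, after vm_ptable_init() has set up the table.
 *
 *  for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE) {
 *      vm_ptable_boot_enter(&ptable, (uintptr_t)pa, pa, PAGE_SIZE);
 *  }
 */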

pmap_pte_t * __boot
vm_ptable_boot_root(const struct vm_ptable *ptable)
{
    return ptable->cpu_pts[0]->root;
}