Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/init.c              1
-rw-r--r--  arch/x86/mm/init_64.c           2
-rw-r--r--  arch/x86/mm/kasan_init_64.c     2
-rw-r--r--  arch/x86/mm/mem_encrypt_amd.c   6
-rw-r--r--  arch/x86/mm/mem_encrypt_boot.S  6
-rw-r--r--  arch/x86/mm/mmap.c             10
-rw-r--r--  arch/x86/mm/pat/memtype.c       6
-rw-r--r--  arch/x86/mm/pat/set_memory.c   20
-rw-r--r--  arch/x86/mm/tlb.c              24
9 files changed, 43 insertions(+), 34 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index bb57e93b4caf..8bf6ad4b9400 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -34,6 +34,7 @@
* We need to define the tracepoints somewhere, and tlb.c
* is only compiled when SMP=y.
*/
+#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>
#include "mm_internal.h"
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b9426fce5f3e..0e4270e20fad 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1031,7 +1031,7 @@ static void __meminit free_pagetable(struct page *page, int order)
free_reserved_pages(page, nr_pages);
#endif
} else {
- free_pages((unsigned long)page_address(page), order);
+ __free_pages(page, order);
}
}
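[Editor's note] The switch above avoids translating the struct page back to a virtual address just to free it. A hedged sketch contrasting the two interfaces, with names taken from the hunk:

/* Illustrative contrast of the two freeing interfaces (sketch, not the patch): */
static void __meminit free_pagetable_sketch(struct page *page, int order)
{
	/* old path: look the virtual address back up, then free by address */
	/* free_pages((unsigned long)page_address(page), order); */

	/* new path: hand the struct page to the buddy allocator directly */
	__free_pages(page, order);
}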
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0539efd0d216..998b6010d6d3 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -451,5 +451,5 @@ void __init kasan_init(void)
__flush_tlb_all();
init_task.kasan_depth = 0;
- pr_info("KernelAddressSanitizer initialized\n");
+ kasan_init_generic();
}
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index faf3a13fb6ba..2f8c32173972 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -536,12 +536,6 @@ void __init sme_early_init(void)
x86_init.resources.dmi_setup = snp_dmi_setup;
}
- /*
- * Switch the SVSM CA mapping (if active) from identity mapped to
- * kernel mapped.
- */
- snp_update_svsm_ca();
-
if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
}
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index f8a33b25ae86..edbf9c998848 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -16,7 +16,7 @@
.text
.code64
-SYM_FUNC_START(sme_encrypt_execute)
+SYM_FUNC_START(__pi_sme_encrypt_execute)
/*
* Entry parameters:
@@ -69,9 +69,9 @@ SYM_FUNC_START(sme_encrypt_execute)
ANNOTATE_UNRET_SAFE
ret
int3
-SYM_FUNC_END(sme_encrypt_execute)
+SYM_FUNC_END(__pi_sme_encrypt_execute)
-SYM_FUNC_START(__enc_copy)
+SYM_FUNC_START_LOCAL(__enc_copy)
ANNOTATE_NOENDBR
/*
* Routine used to encrypt memory in place.
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 5ed2109211da..82f3a987f7cf 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -80,7 +80,7 @@ unsigned long arch_mmap_rnd(void)
}
static unsigned long mmap_base(unsigned long rnd, unsigned long task_size,
- struct rlimit *rlim_stack)
+ const struct rlimit *rlim_stack)
{
unsigned long gap = rlim_stack->rlim_cur;
unsigned long pad = stack_maxrandom_size(task_size) + stack_guard_gap;
@@ -110,7 +110,7 @@ static unsigned long mmap_legacy_base(unsigned long rnd,
*/
static void arch_pick_mmap_base(unsigned long *base, unsigned long *legacy_base,
unsigned long random_factor, unsigned long task_size,
- struct rlimit *rlim_stack)
+ const struct rlimit *rlim_stack)
{
*legacy_base = mmap_legacy_base(random_factor, task_size);
if (mmap_is_legacy())
@@ -119,12 +119,12 @@ static void arch_pick_mmap_base(unsigned long *base, unsigned long *legacy_base,
*base = mmap_base(random_factor, task_size, rlim_stack);
}
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
if (mmap_is_legacy())
- clear_bit(MMF_TOPDOWN, &mm->flags);
+ mm_flags_clear(MMF_TOPDOWN, mm);
else
- set_bit(MMF_TOPDOWN, &mm->flags);
+ mm_flags_set(MMF_TOPDOWN, mm);
arch_pick_mmap_base(&mm->mmap_base, &mm->mmap_legacy_base,
arch_rnd(mmap64_rnd_bits), task_size_64bit(0),
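[Editor's note] A hedged sketch of what the new accessors are taken to do here: mm_flags_set()/mm_flags_clear() are assumed to be thin wrappers that replace open-coded set_bit()/clear_bit() on the mm's flag storage, so arch code no longer reaches into mm->flags directly. The mm_flags_sketch type and field name below are hypothetical, for illustration only.

/* Hypothetical shape, assuming the helpers are plain bitop wrappers: */
struct mm_flags_sketch {
	unsigned long __mm_flags;		/* backing storage, name assumed */
};

static inline void mm_flags_set_sketch(int flag, struct mm_flags_sketch *mm)
{
	set_bit(flag, &mm->__mm_flags);		/* same atomic bitop as before */
}

static inline void mm_flags_clear_sketch(int flag, struct mm_flags_sketch *mm)
{
	clear_bit(flag, &mm->__mm_flags);
}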
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index c09284302dd3..b68200a0e0c6 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -126,7 +126,7 @@ __setup("debugpat", pat_debug_setup);
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
- unsigned long pg_flags = pg->flags & _PGMT_MASK;
+ unsigned long pg_flags = pg->flags.f & _PGMT_MASK;
if (pg_flags == _PGMT_WB)
return _PAGE_CACHE_MODE_WB;
@@ -161,10 +161,10 @@ static inline void set_page_memtype(struct page *pg,
break;
}
- old_flags = READ_ONCE(pg->flags);
+ old_flags = READ_ONCE(pg->flags.f);
do {
new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
- } while (!try_cmpxchg(&pg->flags, &old_flags, new_flags));
+ } while (!try_cmpxchg(&pg->flags.f, &old_flags, new_flags));
}
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)
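[Editor's note] The ->flags.f accesses above assume page->flags has become a one-member wrapper type rather than a bare unsigned long, so the cmpxchg loop now names the raw word explicitly. An illustrative (assumed, not quoted) shape of such a wrapper:

/* Assumed wrapper layout, shown only to explain the ->flags.f accesses: */
typedef struct {
	unsigned long f;	/* the raw flags word the try_cmpxchg() loop targets */
} page_flags_sketch_t;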
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 8834c76f91c9..970981893c9b 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -399,15 +399,6 @@ static void cpa_flush_all(unsigned long cache)
on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}
-static void __cpa_flush_tlb(void *data)
-{
- struct cpa_data *cpa = data;
- unsigned int i;
-
- for (i = 0; i < cpa->numpages; i++)
- flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
-}
-
static int collapse_large_pages(unsigned long addr, struct list_head *pgtables);
static void cpa_collapse_large_pages(struct cpa_data *cpa)
@@ -444,6 +435,7 @@ static void cpa_collapse_large_pages(struct cpa_data *cpa)
static void cpa_flush(struct cpa_data *cpa, int cache)
{
+ unsigned long start, end;
unsigned int i;
BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
@@ -453,10 +445,12 @@ static void cpa_flush(struct cpa_data *cpa, int cache)
goto collapse_large_pages;
}
- if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling)
- flush_tlb_all();
- else
- on_each_cpu(__cpa_flush_tlb, cpa, 1);
+ start = fix_addr(__cpa_addr(cpa, 0));
+ end = start + cpa->numpages * PAGE_SIZE;
+ if (cpa->force_flush_all)
+ end = TLB_FLUSH_ALL;
+
+ flush_tlb_kernel_range(start, end);
if (!cache)
goto collapse_large_pages;
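[Editor's note] The hunk above replaces the per-page IPI loop with one ranged call. A minimal sketch of the resulting flush decision, assuming flush_tlb_kernel_range() already falls back to a full flush for TLB_FLUSH_ALL or oversized ranges, which is why the old tlb_single_page_flush_ceiling check can disappear here:

/* Sketch of the new flow, mirroring the hunk rather than quoting the file: */
static void cpa_flush_tlb_sketch(struct cpa_data *cpa)
{
	unsigned long start = fix_addr(__cpa_addr(cpa, 0));
	unsigned long end   = start + cpa->numpages * PAGE_SIZE;

	if (cpa->force_flush_all)
		end = TLB_FLUSH_ALL;	/* sentinel: request a full flush */

	/* ceiling/full-flush policy is now flush_tlb_kernel_range()'s problem */
	flush_tlb_kernel_range(start, end);
}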
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 39f80111e6f1..5d221709353e 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -911,11 +911,31 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
* CR3 and cpu_tlbstate.loaded_mm are not all in sync.
*/
this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
- barrier();
- /* Start receiving IPIs and then read tlb_gen (and LAM below) */
+ /*
+ * Make sure this CPU is set in mm_cpumask() such that we'll
+ * receive invalidation IPIs.
+ *
+ * Rely on the smp_mb() implied by cpumask_set_cpu()'s atomic
+ * operation, or explicitly provide one. Such that:
+ *
+ *   switch_mm_irqs_off()                          flush_tlb_mm_range()
+ *     smp_store_release(loaded_mm, SWITCHING);      atomic64_inc_return(tlb_gen)
+ *     smp_mb();                  // here             // smp_mb() implied
+ *     atomic64_read(tlb_gen);                        this_cpu_read(loaded_mm);
+ *
+ * we properly order against flush_tlb_mm_range(), where the
+ * loaded_mm load can happen in native_flush_tlb_multi() ->
+ * should_flush_tlb().
+ *
+ * This way switch_mm() must see the new tlb_gen or
+ * flush_tlb_mm_range() must see the new loaded_mm, or both.
+ */
if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next)))
cpumask_set_cpu(cpu, mm_cpumask(next));
+ else
+ smp_mb();
+
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
ns = choose_new_asid(next, next_tlb_gen);
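[Editor's note] To make the comment's ordering argument concrete, here is a small userspace C11 model of the two sides (illustrative only; the names and atomic_long globals are stand-ins, not kernel code). Each side does its store or increment, then a full barrier, then the cross-read, so at least one side is guaranteed to observe the other's update and a required flush cannot be lost.

/* Userspace litmus-style sketch of the switch_mm vs flush_tlb_mm_range ordering. */
#include <stdatomic.h>

static atomic_long loaded_mm;	/* models cpu_tlbstate.loaded_mm */
static atomic_long tlb_gen;	/* models mm->context.tlb_gen */

static long switcher_side(long next_mm)
{
	atomic_store_explicit(&loaded_mm, next_mm, memory_order_release);
	atomic_thread_fence(memory_order_seq_cst);	/* the explicit smp_mb() */
	return atomic_load_explicit(&tlb_gen, memory_order_relaxed);
}

static long flusher_side(void)
{
	atomic_fetch_add_explicit(&tlb_gen, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* kernel: implied by the atomic RMW */
	return atomic_load_explicit(&loaded_mm, memory_order_relaxed);
}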