Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig                 |  7
-rw-r--r--  arch/arm64/include/asm/hugetlb.h   |  6
-rw-r--r--  arch/arm64/include/asm/memory.h    |  1
-rw-r--r--  arch/arm64/include/asm/mte-kasan.h |  6
-rw-r--r--  arch/arm64/include/asm/mte.h       | 16
-rw-r--r--  arch/arm64/kernel/cpufeature.c     |  2
-rw-r--r--  arch/arm64/kernel/mte.c            | 18
-rw-r--r--  arch/arm64/mm/flush.c              |  8
-rw-r--r--  arch/arm64/mm/kasan_init.c         |  4
-rw-r--r--  arch/arm64/mm/mmu.c                |  2
10 files changed, 45 insertions(+), 25 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b3e13f67d598..6663ffd23f25 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1495,8 +1495,7 @@ choice
config CPU_BIG_ENDIAN
bool "Build big-endian kernel"
- # https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
- depends on (AS_IS_GNU || AS_VERSION >= 150000) && BROKEN
+ depends on BROKEN
help
Say Y if you plan on running a kernel with a big-endian userspace.
@@ -1550,7 +1549,6 @@ source "kernel/Kconfig.hz"
config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_VMEMMAP_ENABLE
- select SPARSEMEM_VMEMMAP
config HW_PERF_EVENTS
def_bool y
@@ -2327,8 +2325,7 @@ config STACKPROTECTOR_PER_TASK
config UNWIND_PATCH_PAC_INTO_SCS
bool "Enable shadow call stack dynamically using code patching"
- # needs Clang with https://github.com/llvm/llvm-project/commit/de07cde67b5d205d58690be012106022aea6d2b3 incorporated
- depends on CC_IS_CLANG && CLANG_VERSION >= 150000
+ depends on CC_IS_CLANG
depends on ARM64_PTR_AUTH_KERNEL && CC_HAS_BRANCH_PROT_PAC_RET
depends on SHADOW_CALL_STACK
select UNWIND_TABLES
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 2a8155c4a882..44c1f757bfcf 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -21,12 +21,12 @@ extern bool arch_hugetlb_migration_supported(struct hstate *h);
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
#ifdef CONFIG_ARM64_MTE
if (system_supports_mte()) {
- clear_bit(PG_mte_tagged, &folio->flags);
- clear_bit(PG_mte_lock, &folio->flags);
+ clear_bit(PG_mte_tagged, &folio->flags.f);
+ clear_bit(PG_mte_lock, &folio->flags.f);
}
#endif
}
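
[Note: the s/flags/flags.f/ churn here, and in the mte.h and flush.c hunks
below, follows the mm-wide move of page and folio flags into a wrapper
struct. A minimal sketch of the assumed shape, with illustrative names
where the real definitions live in include/linux/mm_types.h:

	/* Sketch only: the wrapper type these hunks assume; the exact
	 * upstream definition may differ in naming and placement. */
	typedef struct {
		unsigned long f;
	} memdesc_flags_t;

	struct folio_example {		/* illustrative stand-in */
		memdesc_flags_t flags;	/* was: unsigned long flags; */
	};

	/* Bit helpers still want an unsigned long *, hence the extra .f: */
	clear_bit(PG_dcache_clean, &folio->flags.f);
]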
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 5213248e081b..f1505c4acb38 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -308,6 +308,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
#define arch_enable_tag_checks_sync() mte_enable_kernel_sync()
#define arch_enable_tag_checks_async() mte_enable_kernel_async()
#define arch_enable_tag_checks_asymm() mte_enable_kernel_asymm()
+#define arch_enable_tag_checks_write_only() mte_enable_kernel_store_only()
#define arch_suppress_tag_checks_start() mte_enable_tco()
#define arch_suppress_tag_checks_stop() mte_disable_tco()
#define arch_force_async_tag_fault() mte_check_tfsr_exit()
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index 2e98028c1965..0f9b08e8fb8d 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -200,6 +200,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
void mte_enable_kernel_asymm(void);
+int mte_enable_kernel_store_only(void);
#else /* CONFIG_ARM64_MTE */
@@ -251,6 +252,11 @@ static inline void mte_enable_kernel_asymm(void)
{
}
+static inline int mte_enable_kernel_store_only(void)
+{
+ return -EINVAL;
+}
+
#endif /* CONFIG_ARM64_MTE */
#endif /* __ASSEMBLY__ */
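
[Note: with the declaration and the !CONFIG_ARM64_MTE stub in place, a
caller can probe for store-only checking and fall back to full checks.
A hedged sketch of such a caller; the real consumer is expected to be
the HW-tags KASAN core, which is not part of this diff:

	/* Illustrative caller: try write-only tag checks, fall back to
	 * the default all-accesses mode when support is missing. */
	static int example_enable_write_only_checks(void)
	{
		int err = mte_enable_kernel_store_only();

		if (err)	/* -EINVAL: no FEAT_MTE_STORE_ONLY or no MTE */
			pr_info("kasan: store-only tag checks unavailable, checking all accesses\n");
		return err;
	}
]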
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 6567df8ec8ca..3b5069f4683d 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -48,12 +48,12 @@ static inline void set_page_mte_tagged(struct page *page)
* before the page flags update.
*/
smp_wmb();
- set_bit(PG_mte_tagged, &page->flags);
+ set_bit(PG_mte_tagged, &page->flags.f);
}
static inline bool page_mte_tagged(struct page *page)
{
- bool ret = test_bit(PG_mte_tagged, &page->flags);
+ bool ret = test_bit(PG_mte_tagged, &page->flags.f);
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
@@ -82,7 +82,7 @@ static inline bool try_page_mte_tagging(struct page *page)
{
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
- if (!test_and_set_bit(PG_mte_lock, &page->flags))
+ if (!test_and_set_bit(PG_mte_lock, &page->flags.f))
return true;
/*
@@ -90,7 +90,7 @@ static inline bool try_page_mte_tagging(struct page *page)
* already. Check if the PG_mte_tagged flag has been set or wait
* otherwise.
*/
- smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));
+ smp_cond_load_acquire(&page->flags.f, VAL & (1UL << PG_mte_tagged));
return false;
}
@@ -173,13 +173,13 @@ static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
* before the folio flags update.
*/
smp_wmb();
- set_bit(PG_mte_tagged, &folio->flags);
+ set_bit(PG_mte_tagged, &folio->flags.f);
}
static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
{
- bool ret = test_bit(PG_mte_tagged, &folio->flags);
+ bool ret = test_bit(PG_mte_tagged, &folio->flags.f);
VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
@@ -196,7 +196,7 @@ static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
{
VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
- if (!test_and_set_bit(PG_mte_lock, &folio->flags))
+ if (!test_and_set_bit(PG_mte_lock, &folio->flags.f))
return true;
/*
@@ -204,7 +204,7 @@ static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
* already. Check if the PG_mte_tagged flag has been set or wait
* otherwise.
*/
- smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));
+ smp_cond_load_acquire(&folio->flags.f, VAL & (1UL << PG_mte_tagged));
return false;
}
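
[Note: the smp_cond_load_acquire() lines above implement a small
publish/wait protocol around tag initialisation; only the field name
changes in this patch. For reference, the protocol the hunks preserve,
condensed into one sketch (simplified from the code above):

	/* The first caller to set PG_mte_lock initialises the tags and
	 * then publishes PG_mte_tagged; everyone else waits for that. */
	if (!test_and_set_bit(PG_mte_lock, &page->flags.f)) {
		/* winner: write the tags ... */
		smp_wmb();	/* order tag stores before the flag update */
		set_bit(PG_mte_tagged, &page->flags.f);
	} else {
		/* loser: acquire-wait until the winner publishes */
		smp_cond_load_acquire(&page->flags.f,
				      VAL & (1UL << PG_mte_tagged));
	}
]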
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 63cd05e6973d..af6fd64a8a19 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2956,7 +2956,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "Store Only MTE Tag Check",
.capability = ARM64_MTE_STORE_ONLY,
- .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64PFR2_EL1, MTESTOREONLY, IMP)
},
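
[Note: reclassifying the capability from a system feature to a boot-CPU
feature presumably makes it usable as soon as the boot CPU has been
probed, rather than only after all secondaries are up, with late-onlined
CPUs that lack the feature rejected. That matters because the new
mte_enable_kernel_store_only() below consults the cap directly:

	/* With a boot-CPU capability this check is already valid early,
	 * before secondary CPUs are onlined (illustrative usage): */
	if (cpus_have_cap(ARM64_MTE_STORE_ONLY))
		pr_debug("FEAT_MTE_STORE_ONLY available\n");
]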
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index e5e773844889..54a52dc5c1ae 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -157,6 +157,24 @@ void mte_enable_kernel_asymm(void)
mte_enable_kernel_sync();
}
}
+
+int mte_enable_kernel_store_only(void)
+{
+ /*
+ * If the CPU does not support MTE store only,
+ * the kernel checks all operations.
+ */
+ if (!cpus_have_cap(ARM64_MTE_STORE_ONLY))
+ return -EINVAL;
+
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCSO_MASK,
+ SYS_FIELD_PREP(SCTLR_EL1, TCSO, 1));
+ isb();
+
+ pr_info_once("MTE: enabled store only mode at EL1\n");
+
+ return 0;
+}
#endif
#ifdef CONFIG_KASAN_HW_TAGS
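
[Note: sysreg_clear_set() read-modify-writes SCTLR_EL1, and the isb()
makes the new tag-check behaviour visible to subsequent instructions.
A disable counterpart is not part of this patch; a hypothetical sketch,
assuming the same register field:

	/* Hypothetical helper (not in this patch): drop back to
	 * checking all accesses by clearing SCTLR_EL1.TCSO again. */
	static void example_mte_disable_kernel_store_only(void)
	{
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCSO_MASK, 0);
		isb();
	}
]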
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 013eead9b695..fbf08b543c3f 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -53,11 +53,11 @@ void __sync_icache_dcache(pte_t pte)
{
struct folio *folio = page_folio(pte_page(pte));
- if (!test_bit(PG_dcache_clean, &folio->flags)) {
+ if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
sync_icache_aliases((unsigned long)folio_address(folio),
(unsigned long)folio_address(folio) +
folio_size(folio));
- set_bit(PG_dcache_clean, &folio->flags);
+ set_bit(PG_dcache_clean, &folio->flags.f);
}
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);
@@ -69,8 +69,8 @@ EXPORT_SYMBOL_GPL(__sync_icache_dcache);
*/
void flush_dcache_folio(struct folio *folio)
{
- if (test_bit(PG_dcache_clean, &folio->flags))
- clear_bit(PG_dcache_clean, &folio->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
EXPORT_SYMBOL(flush_dcache_folio);
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index d541ce45daeb..abeb81bf6ebd 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -399,14 +399,12 @@ void __init kasan_init(void)
{
kasan_init_shadow();
kasan_init_depth();
-#if defined(CONFIG_KASAN_GENERIC)
+ kasan_init_generic();
/*
* Generic KASAN is now fully initialized.
* Software and Hardware Tag-Based modes still require
* kasan_init_sw_tags() and kasan_init_hw_tags() correspondingly.
*/
- pr_info("KernelAddressSanitizer initialized (generic)\n");
-#endif
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
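
[Note: the open-coded #ifdef/pr_info pair is replaced by a call into a
common helper whose definition is outside this diff. A minimal sketch of
what it is expected to do, judging by the removed lines (the real helper
lives in mm/kasan/ and may do more):

	void __init kasan_init_generic(void)
	{
		if (!IS_ENABLED(CONFIG_KASAN_GENERIC))
			return;

		pr_info("KernelAddressSanitizer initialized (generic)\n");
	}
]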
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 3a444a5fe469..b3d8c3de4149 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1239,7 +1239,7 @@ static void free_hotplug_page_range(struct page *page, size_t size,
vmem_altmap_free(altmap, size >> PAGE_SHIFT);
} else {
WARN_ON(PageReserved(page));
- free_pages((unsigned long)page_address(page), get_order(size));
+ __free_pages(page, get_order(size));
}
}
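
[Note: free_pages() takes a linear-map virtual address, so the old code
had to round-trip through page_address(); __free_pages() frees by
struct page directly and skips that conversion. Side by side:

	/* before: struct page -> virtual address -> back to struct page */
	free_pages((unsigned long)page_address(page), get_order(size));
	/* after: free by struct page, no address conversion needed */
	__free_pages(page, get_order(size));
]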