Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/hugetlb.h       |  6
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h       |  2
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h   | 34
-rw-r--r--  arch/arm64/include/asm/kvm_host.h      |  5
-rw-r--r--  arch/arm64/include/asm/kvm_nested.h    | 27
-rw-r--r--  arch/arm64/include/asm/kvm_pkvm.h      |  1
-rw-r--r--  arch/arm64/include/asm/memory.h        |  1
-rw-r--r--  arch/arm64/include/asm/mte-kasan.h     |  6
-rw-r--r--  arch/arm64/include/asm/mte.h           | 16
-rw-r--r--  arch/arm64/include/asm/traps.h         |  1
-rw-r--r--  arch/arm64/include/asm/vncr_mapping.h  |  2
11 files changed, 80 insertions(+), 21 deletions(-)
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 2a8155c4a882..44c1f757bfcf 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -21,12 +21,12 @@ extern bool arch_hugetlb_migration_supported(struct hstate *h);
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
#ifdef CONFIG_ARM64_MTE
if (system_supports_mte()) {
- clear_bit(PG_mte_tagged, &folio->flags);
- clear_bit(PG_mte_lock, &folio->flags);
+ clear_bit(PG_mte_tagged, &folio->flags.f);
+ clear_bit(PG_mte_lock, &folio->flags.f);
}
#endif
}
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index bec227f9500a..9da54d4ee49e 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -81,6 +81,8 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
+ __KVM_HOST_SMCCC_FUNC___pkvm_reserve_vm,
+ __KVM_HOST_SMCCC_FUNC___pkvm_unreserve_vm,
__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fa8a08a1ccd5..c9eab316398e 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -220,6 +220,20 @@ static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
static inline bool vcpu_el2_amo_is_set(const struct kvm_vcpu *vcpu)
{
+ /*
+ * DDI0487L.b Known Issue D22105
+ *
+ * When executing at EL2 and HCR_EL2.{E2H,TGE} = {1, 0} it is
+ * IMPLEMENTATION DEFINED whether the effective value of HCR_EL2.AMO
+ * is the value programmed or 1.
+ *
+ * Make the implementation choice of treating the effective value as 1,
+ * since we cannot subsequently catch changes to TGE or AMO that would
+ * otherwise lead to the SError becoming deliverable.
+ */
+ if (vcpu_is_el2(vcpu) && vcpu_el2_e2h_is_set(vcpu) && !vcpu_el2_tge_is_set(vcpu))
+ return true;
+
return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_AMO;
}
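
For context, a minimal sketch of how an SError injection path might gate on this helper; kvm_pend_vserror() is a hypothetical name for illustration, not the in-tree API:

	/*
	 * With the choice above, a vCPU at EL2 with E2H=1, TGE=0 always
	 * observes an effective AMO of 1, so a pending virtual SError
	 * cannot be stranded by a later write flipping TGE or AMO.
	 */
	static void try_deliver_vserror(struct kvm_vcpu *vcpu)
	{
		if (!vcpu_el2_amo_is_set(vcpu))
			return;			/* masked: leave it pending */

		kvm_pend_vserror(vcpu);		/* hypothetical injection helper */
	}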
@@ -511,21 +525,29 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
if (vcpu_mode_is_32bit(vcpu)) {
*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
} else {
- u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
+ enum vcpu_sysreg r;
+ u64 sctlr;
+
+ r = vcpu_has_nv(vcpu) ? SCTLR_EL2 : SCTLR_EL1;
+
+ sctlr = vcpu_read_sys_reg(vcpu, r);
sctlr |= SCTLR_ELx_EE;
- vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
+ vcpu_write_sys_reg(vcpu, sctlr, r);
}
}
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
+ enum vcpu_sysreg r;
+ u64 bit;
+
if (vcpu_mode_is_32bit(vcpu))
return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);
- if (vcpu_mode_priv(vcpu))
- return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
- else
- return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
+ r = is_hyp_ctxt(vcpu) ? SCTLR_EL2 : SCTLR_EL1;
+ bit = vcpu_mode_priv(vcpu) ? SCTLR_ELx_EE : SCTLR_EL1_E0E;
+
+ return vcpu_read_sys_reg(vcpu, r) & bit;
}
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
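
As a usage sketch, loosely modeled on the adjacent vcpu_data_guest_to_host() pattern (the helper name and masking details here are illustrative): data handed to a guest that kvm_vcpu_is_be() reports as big-endian must be byte-swapped on a little-endian host.

	static unsigned long to_guest_endianness(struct kvm_vcpu *vcpu,
						 unsigned long data,
						 unsigned int len)
	{
		if (!kvm_vcpu_is_be(vcpu))
			return data;

		switch (len) {
		case 2: return be16_to_cpu(data & 0xffff);
		case 4: return be32_to_cpu(data & 0xffffffff);
		case 8: return be64_to_cpu(data);
		default: return data;	/* single bytes need no swap */
		}
	}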
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0ee4f6fa3a17..b763293281c8 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -252,7 +252,8 @@ struct kvm_protected_vm {
pkvm_handle_t handle;
struct kvm_hyp_memcache teardown_mc;
struct kvm_hyp_memcache stage2_teardown_mc;
- bool enabled;
+ bool is_protected;
+ bool is_created;
};
struct kvm_mpidr_data {
@@ -1442,7 +1443,7 @@ struct kvm *kvm_arch_alloc_vm(void);
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
-#define kvm_vm_is_protected(kvm) (is_protected_kvm_enabled() && (kvm)->arch.pkvm.enabled)
+#define kvm_vm_is_protected(kvm) (is_protected_kvm_enabled() && (kvm)->arch.pkvm.is_protected)
#define vcpu_is_protected(vcpu) kvm_vm_is_protected((vcpu)->kvm)
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 7fd76f41c296..f7c06a840963 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -83,6 +83,8 @@ extern void check_nested_vcpu_requests(struct kvm_vcpu *vcpu);
extern void kvm_nested_flush_hwstate(struct kvm_vcpu *vcpu);
extern void kvm_nested_sync_hwstate(struct kvm_vcpu *vcpu);
+extern void kvm_nested_setup_mdcr_el2(struct kvm_vcpu *vcpu);
+
struct kvm_s2_trans {
phys_addr_t output;
unsigned long block_size;
@@ -265,7 +267,7 @@ static inline u64 decode_range_tlbi(u64 val, u64 *range, u16 *asid)
return base;
}
-static inline unsigned int ps_to_output_size(unsigned int ps)
+static inline unsigned int ps_to_output_size(unsigned int ps, bool pa52bit)
{
switch (ps) {
case 0: return 32;
@@ -273,7 +275,10 @@ static inline unsigned int ps_to_output_size(unsigned int ps)
case 2: return 40;
case 3: return 42;
case 4: return 44;
- case 5:
+ case 5: return 48;
+ case 6: if (pa52bit)
+ return 52;
+ fallthrough;
default:
return 48;
}
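
A sketch of a caller, under the assumption that pa52bit reflects the CPU's 52-bit PA support (FEAT_LPA/LPA2) and that the PS field sits at bits [18:16] as in TCR_EL2 (TCR_EL1 carries the equivalent IPS field at bits [34:32]):

	static unsigned int tcr_output_size(u64 tcr, bool pa52bit)
	{
		unsigned int ps = (tcr >> 16) & 0x7;	/* TCR_EL2.PS, illustrative */

		return ps_to_output_size(ps, pa52bit);
	}

Note the defensive default above: a reserved PS encoding, or PS=6 without 52-bit PA support, degrades to 48 bits rather than over-reporting the output range.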
@@ -285,13 +290,28 @@ enum trans_regime {
TR_EL2,
};
+struct s1_walk_info;
+
+struct s1_walk_context {
+ struct s1_walk_info *wi;
+ u64 table_ipa;
+ int level;
+};
+
+struct s1_walk_filter {
+ int (*fn)(struct s1_walk_context *, void *);
+ void *priv;
+};
+
struct s1_walk_info {
+ struct s1_walk_filter *filter;
u64 baddr;
enum trans_regime regime;
unsigned int max_oa_bits;
unsigned int pgshift;
unsigned int txsz;
int sl;
+ u8 sh;
bool as_el0;
bool hpd;
bool e0poe;
@@ -299,6 +319,7 @@ struct s1_walk_info {
bool pan;
bool be;
bool s2;
+ bool pa52bit;
};
struct s1_walk_result {
@@ -334,6 +355,8 @@ struct s1_walk_result {
int __kvm_translate_va(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
struct s1_walk_result *wr, u64 va);
+int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa,
+ int *level);
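
The filter hook presumably fires once per table descriptor the walker consumes, passing the table IPA and level through struct s1_walk_context. A sketch of the kind of filter __kvm_find_s1_desc_level() could build on; the invocation contract is inferred from the declarations, not quoted from the walker:

	struct desc_match {
		u64 ipa;	/* table IPA being searched for */
		int level;	/* filled in if the walker visits it */
	};

	static int match_table_ipa(struct s1_walk_context *ctxt, void *priv)
	{
		struct desc_match *dm = priv;

		if (ctxt->table_ipa == dm->ipa)
			dm->level = ctxt->level;

		return 0;	/* non-zero presumably aborts the walk */
	}

A caller would hang this off the walk with struct s1_walk_filter filter = { .fn = match_table_ipa, .priv = &dm }; and wi.filter = &filter; before invoking __kvm_translate_va().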
/* VNCR management */
int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index ea58282f59bb..08be89c95466 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -18,6 +18,7 @@
int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
+bool pkvm_hyp_vm_is_created(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);
int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);
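
Together with the new __pkvm_reserve_vm/__pkvm_unreserve_vm hypercalls and the is_created flag in struct kvm_protected_vm, this suggests a two-step lifecycle: a handle is reserved at EL2 before the hyp VM is actually created there. A sketch of the teardown-side distinction, inferred from the headers rather than quoted from the implementation:

	static void pkvm_release_hyp_vm(struct kvm *kvm)
	{
		if (pkvm_hyp_vm_is_created(kvm))
			pkvm_destroy_hyp_vm(kvm);	/* fully created at EL2 */
		else if (kvm->arch.pkvm.handle)
			kvm_call_hyp_nvhe(__pkvm_unreserve_vm,
					  kvm->arch.pkvm.handle);	/* reserved only */
	}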
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 5213248e081b..f1505c4acb38 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -308,6 +308,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
#define arch_enable_tag_checks_sync() mte_enable_kernel_sync()
#define arch_enable_tag_checks_async() mte_enable_kernel_async()
#define arch_enable_tag_checks_asymm() mte_enable_kernel_asymm()
+#define arch_enable_tag_checks_write_only() mte_enable_kernel_store_only()
#define arch_suppress_tag_checks_start() mte_enable_tco()
#define arch_suppress_tag_checks_stop() mte_disable_tco()
#define arch_force_async_tag_fault() mte_check_tfsr_exit()
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index 2e98028c1965..0f9b08e8fb8d 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -200,6 +200,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
void mte_enable_kernel_asymm(void);
+int mte_enable_kernel_store_only(void);
#else /* CONFIG_ARM64_MTE */
@@ -251,6 +252,11 @@ static inline void mte_enable_kernel_asymm(void)
{
}
+static inline int mte_enable_kernel_store_only(void)
+{
+ return -EINVAL;
+}
+
#endif /* CONFIG_ARM64_MTE */
#endif /* __ASSEMBLY__ */
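
Unlike the rest of the mte_enable_kernel_*() family, the store-only variant returns an int, so callers can detect missing hardware support (and the !CONFIG_ARM64_MTE stub above fails unconditionally with -EINVAL). An illustrative caller with a fallback, not the KASAN core's actual code:

	static void enable_kernel_tag_checks(bool store_only)
	{
		/* Prefer store-only checking when the CPU supports it. */
		if (store_only && !mte_enable_kernel_store_only())
			return;

		/* Fall back to checking loads and stores synchronously. */
		mte_enable_kernel_sync();
	}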
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 6567df8ec8ca..3b5069f4683d 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -48,12 +48,12 @@ static inline void set_page_mte_tagged(struct page *page)
* before the page flags update.
*/
smp_wmb();
- set_bit(PG_mte_tagged, &page->flags);
+ set_bit(PG_mte_tagged, &page->flags.f);
}
static inline bool page_mte_tagged(struct page *page)
{
- bool ret = test_bit(PG_mte_tagged, &page->flags);
+ bool ret = test_bit(PG_mte_tagged, &page->flags.f);
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
@@ -82,7 +82,7 @@ static inline bool try_page_mte_tagging(struct page *page)
{
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
- if (!test_and_set_bit(PG_mte_lock, &page->flags))
+ if (!test_and_set_bit(PG_mte_lock, &page->flags.f))
return true;
/*
@@ -90,7 +90,7 @@ static inline bool try_page_mte_tagging(struct page *page)
* already. Check if the PG_mte_tagged flag has been set or wait
* otherwise.
*/
- smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));
+ smp_cond_load_acquire(&page->flags.f, VAL & (1UL << PG_mte_tagged));
return false;
}
@@ -173,13 +173,13 @@ static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
* before the folio flags update.
*/
smp_wmb();
- set_bit(PG_mte_tagged, &folio->flags);
+ set_bit(PG_mte_tagged, &folio->flags.f);
}
static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
{
- bool ret = test_bit(PG_mte_tagged, &folio->flags);
+ bool ret = test_bit(PG_mte_tagged, &folio->flags.f);
VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
@@ -196,7 +196,7 @@ static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
{
VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
- if (!test_and_set_bit(PG_mte_lock, &folio->flags))
+ if (!test_and_set_bit(PG_mte_lock, &folio->flags.f))
return true;
/*
@@ -204,7 +204,7 @@ static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
* already. Check if the PG_mte_tagged flag has been set or wait
* otherwise.
*/
- smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));
+ smp_cond_load_acquire(&folio->flags.f, VAL & (1UL << PG_mte_tagged));
return false;
}
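
The PG_mte_lock/PG_mte_tagged pair implements a one-shot publish protocol: exactly one caller wins the lock bit and initialises the tags, while everyone else returns from try_page_mte_tagging() only once the winner's smp_wmb()/set_bit() publication is visible. The typical consumer idiom, condensed from existing arm64 callers:

	static void ensure_page_tags_initialised(struct page *page)
	{
		if (try_page_mte_tagging(page)) {
			mte_clear_page_tags(page_address(page));
			set_page_mte_tagged(page);
		}
		/* On return, tags are initialised and visible to all CPUs. */
	}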
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index e3e8944a71c3..e92e4a0e48fc 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -36,6 +36,7 @@ int kasan_brk_handler(struct pt_regs *regs, unsigned long esr);
int ubsan_brk_handler(struct pt_regs *regs, unsigned long esr);
int early_brk64(unsigned long addr, unsigned long esr, struct pt_regs *regs);
+void dump_kernel_instr(unsigned long kaddr);
/*
* Move regs->pc to next instruction and do necessary setup before it
diff --git a/arch/arm64/include/asm/vncr_mapping.h b/arch/arm64/include/asm/vncr_mapping.h
index f6ec500ad3fa..c2485a862e69 100644
--- a/arch/arm64/include/asm/vncr_mapping.h
+++ b/arch/arm64/include/asm/vncr_mapping.h
@@ -94,6 +94,8 @@
#define VNCR_PMSICR_EL1 0x838
#define VNCR_PMSIRR_EL1 0x840
#define VNCR_PMSLATFR_EL1 0x848
+#define VNCR_PMSNEVFR_EL1 0x850
+#define VNCR_PMSDSFR_EL1 0x858
#define VNCR_TRFCR_EL1 0x880
#define VNCR_MPAM1_EL1 0x900
#define VNCR_MPAMHCR_EL2 0x930