Diffstat (limited to 'arch/arm64/kvm')
56 files changed, 5499 insertions, 1853 deletions
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 096e45acadb2d..713248f240e03 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -19,7 +19,6 @@ if VIRTUALIZATION
 menuconfig KVM
 	bool "Kernel-based Virtual Machine (KVM) support"
-	depends on AS_HAS_ARMV8_4
 	select KVM_COMMON
 	select KVM_GENERIC_HARDWARE_ENABLING
 	select KVM_GENERIC_MMU_NOTIFIER
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 209bc76263f10..3ebc0570345cc 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -14,7 +14,7 @@ CFLAGS_sys_regs.o += -Wno-override-init
 CFLAGS_handle_exit.o += -Wno-override-init
 kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
-	 inject_fault.o va_layout.o handle_exit.o \
+	 inject_fault.o va_layout.o handle_exit.o config.o \
	 guest.o debug.o reset.o sys_regs.o stacktrace.o \
	 vgic-sys-reg-v3.o fpsimd.o pkvm.o \
	 arch_timer.o trng.o vmid.o emulate-nested.o nested.o at.o \
@@ -23,7 +23,8 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
	 vgic/vgic-v3.o vgic/vgic-v4.o \
	 vgic/vgic-mmio.o vgic/vgic-mmio-v2.o \
	 vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
-	 vgic/vgic-its.o vgic/vgic-debug.o vgic/vgic-v3-nested.o
+	 vgic/vgic-its.o vgic/vgic-debug.o vgic/vgic-v3-nested.o \
+	 vgic/vgic-v5.o
 kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
 kvm-$(CONFIG_ARM64_PTR_AUTH) += pauth.o
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 5133dcbfe9f76..dbd74e4885e24 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -108,16 +108,16 @@ static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
 	switch(arch_timer_ctx_index(ctxt)) {
 	case TIMER_VTIMER:
-		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTV_CTL_EL0, ctl);
 		break;
 	case TIMER_PTIMER:
-		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTP_CTL_EL0, ctl);
 		break;
 	case TIMER_HVTIMER:
-		__vcpu_sys_reg(vcpu, CNTHV_CTL_EL2) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTHV_CTL_EL2, ctl);
 		break;
 	case TIMER_HPTIMER:
-		__vcpu_sys_reg(vcpu, CNTHP_CTL_EL2) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTHP_CTL_EL2, ctl);
 		break;
 	default:
 		WARN_ON(1);
@@ -130,16 +130,16 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
 	switch(arch_timer_ctx_index(ctxt)) {
 	case TIMER_VTIMER:
-		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTV_CVAL_EL0, cval);
 		break;
 	case TIMER_PTIMER:
-		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, cval);
 		break;
 	case TIMER_HVTIMER:
-		__vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTHV_CVAL_EL2, cval);
 		break;
 	case TIMER_HPTIMER:
-		__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, cval);
 		break;
 	default:
 		WARN_ON(1);
@@ -830,7 +830,7 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
	 * by the guest (either FEAT_VHE or FEAT_E2H0 is implemented, but
	 * not both). This simplifies the handling of the EL1NV* bits.
	 */
-	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+	if (is_nested_ctxt(vcpu)) {
 		u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
 		/* Use the VHE format for mental sanity */
@@ -1036,7 +1036,7 @@ void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 	if (vcpu_has_nv(vcpu)) {
 		struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;
-		offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+		offs->vcpu_offset = __ctxt_sys_reg(&vcpu->arch.ctxt, CNTVOFF_EL2);
 		offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
 	}
@@ -1766,7 +1766,7 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
 	mutex_lock(&kvm->lock);
-	if (lock_all_vcpus(kvm)) {
+	if (!kvm_trylock_all_vcpus(kvm)) {
 		set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);
 		/*
@@ -1778,7 +1778,7 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
 		kvm->arch.timer_data.voffset = offset->counter_offset;
 		kvm->arch.timer_data.poffset = offset->counter_offset;
-		unlock_all_vcpus(kvm);
+		kvm_unlock_all_vcpus(kvm);
 	} else {
 		ret = -EBUSY;
 	}
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 19ca57def6292..888f7c7abf547 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -368,6 +368,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_ARM_EL1_32BIT:
 		r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
 		break;
+	case KVM_CAP_ARM_EL2:
+		r = cpus_have_final_cap(ARM64_HAS_NESTED_VIRT);
+		break;
+	case KVM_CAP_ARM_EL2_E2H0:
+		r = cpus_have_final_cap(ARM64_HAS_HCR_NV1);
+		break;
 	case KVM_CAP_GUEST_DEBUG_HW_BPS:
 		r = get_num_brps();
 		break;
@@ -402,6 +408,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES:
 		r = BIT(0);
 		break;
+	case KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED:
+		if (!kvm)
+			r = -EINVAL;
+		else
+			r = kvm_supports_cacheable_pfnmap();
+		break;
+
 	default:
 		r = 0;
 	}
@@ -515,7 +528,7 @@ static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
	 * Either we're running an L2 guest, and the API/APK bits come
	 * from L1's HCR_EL2, or API/APK are both set.
	 */
-	if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) {
+	if (unlikely(is_nested_ctxt(vcpu))) {
 		u64 val;
 		val = __vcpu_sys_reg(vcpu, HCR_EL2);
@@ -734,7 +747,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
  */
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
+	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF | HCR_VSE);
+
 	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
 		&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
 }
@@ -819,10 +833,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	if (!kvm_arm_vcpu_is_finalized(vcpu))
 		return -EPERM;
-	ret = kvm_arch_vcpu_run_map_fp(vcpu);
-	if (ret)
-		return ret;
-
 	if (likely(vcpu_has_run_once(vcpu)))
 		return 0;
@@ -843,6 +853,10 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 		return ret;
 	if (vcpu_has_nv(vcpu)) {
+		ret = kvm_vcpu_allocate_vncr_tlb(vcpu);
+		if (ret)
+			return ret;
+
 		ret = kvm_vgic_vcpu_nv_init(vcpu);
 		if (ret)
 			return ret;
@@ -1177,6 +1191,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
		 */
		preempt_disable();
+		kvm_nested_flush_hwstate(vcpu);
+
		if (kvm_vcpu_has_pmu(vcpu))
			kvm_pmu_flush_hwstate(vcpu);
@@ -1276,6 +1292,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
		/* Exit types that need handling before we can be preempted */
		handle_exit_early(vcpu, ret);
+		kvm_nested_sync_hwstate(vcpu);
+
		preempt_enable();
		/*
@@ -1914,49 +1932,6 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 	}
 }
-/* unlocks vcpus from @vcpu_lock_idx and smaller */
-static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
-{
-	struct kvm_vcpu *tmp_vcpu;
-
-	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
-		mutex_unlock(&tmp_vcpu->mutex);
-	}
-}
-
-void unlock_all_vcpus(struct kvm *kvm)
-{
-	lockdep_assert_held(&kvm->lock);
-
-	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
-}
-
-/* Returns true if all vcpus were locked, false otherwise */
-bool lock_all_vcpus(struct kvm *kvm)
-{
-	struct kvm_vcpu *tmp_vcpu;
-	unsigned long c;
-
-	lockdep_assert_held(&kvm->lock);
-
-	/*
-	 * Any time a vcpu is in an ioctl (including running), the
-	 * core KVM code tries to grab the vcpu->mutex.
-	 *
-	 * By grabbing the vcpu->mutex of all VCPUs we ensure that no
-	 * other VCPUs can fiddle with the state while we access it.
- */ - kvm_for_each_vcpu(c, tmp_vcpu, kvm) { - if (!mutex_trylock(&tmp_vcpu->mutex)) { - unlock_vcpus(kvm, c - 1); - return false; - } - } - - return true; -} - static unsigned long nvhe_percpu_size(void) { return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) - @@ -2162,7 +2137,7 @@ static void cpu_hyp_init(void *discard) static void cpu_hyp_uninit(void *discard) { - if (__this_cpu_read(kvm_hyp_initialized)) { + if (!is_protected_kvm_enabled() && __this_cpu_read(kvm_hyp_initialized)) { cpu_hyp_reset(); __this_cpu_write(kvm_hyp_initialized, 0); } @@ -2378,8 +2353,13 @@ static void __init teardown_hyp_mode(void) free_hyp_pgds(); for_each_possible_cpu(cpu) { + if (per_cpu(kvm_hyp_initialized, cpu)) + continue; + free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT); - free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order()); + + if (!kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu]) + continue; if (free_sve) { struct cpu_sve_state *sve_state; @@ -2387,6 +2367,9 @@ static void __init teardown_hyp_mode(void) sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; free_pages((unsigned long) sve_state, pkvm_host_sve_state_order()); } + + free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order()); + } } @@ -2450,6 +2433,19 @@ static void kvm_hyp_init_symbols(void) kvm_nvhe_sym(__icache_flags) = __icache_flags; kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits; + /* Propagate the FGT state to the the nVHE side */ + kvm_nvhe_sym(hfgrtr_masks) = hfgrtr_masks; + kvm_nvhe_sym(hfgwtr_masks) = hfgwtr_masks; + kvm_nvhe_sym(hfgitr_masks) = hfgitr_masks; + kvm_nvhe_sym(hdfgrtr_masks) = hdfgrtr_masks; + kvm_nvhe_sym(hdfgwtr_masks) = hdfgwtr_masks; + kvm_nvhe_sym(hafgrtr_masks) = hafgrtr_masks; + kvm_nvhe_sym(hfgrtr2_masks) = hfgrtr2_masks; + kvm_nvhe_sym(hfgwtr2_masks) = hfgwtr2_masks; + kvm_nvhe_sym(hfgitr2_masks) = hfgitr2_masks; + kvm_nvhe_sym(hdfgrtr2_masks)= hdfgrtr2_masks; + kvm_nvhe_sym(hdfgwtr2_masks)= hdfgwtr2_masks; + /* * Flush entire BSS since part of its data containing init symbols is read * while the MMU is off. @@ -2604,6 +2600,13 @@ static int __init init_hyp_mode(void) goto out_err; } + err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_start), + kvm_ksym_ref(__hyp_data_end), PAGE_HYP); + if (err) { + kvm_err("Cannot map .hyp.data section\n"); + goto out_err; + } + err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start), kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO); if (err) { @@ -2760,6 +2763,7 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq, &irqfd->irq_entry); } + void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, struct irq_bypass_producer *prod) { @@ -2770,8 +2774,26 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, if (irq_entry->type != KVM_IRQ_ROUTING_MSI) return; - kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq, - &irqfd->irq_entry); + kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq); +} + +void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, + struct kvm_kernel_irq_routing_entry *old, + struct kvm_kernel_irq_routing_entry *new) +{ + if (old->type == KVM_IRQ_ROUTING_MSI && + new->type == KVM_IRQ_ROUTING_MSI && + !memcmp(&old->msi, &new->msi, sizeof(new->msi))) + return; + + /* + * Remapping the vLPI requires taking the its_lock mutex to resolve + * the new translation. We're in spinlock land at this point, so no + * chance of resolving the translation. 
+ * + * Unmap the vLPI and fall back to software LPI injection. + */ + return kvm_vgic_v4_unset_forwarding(irqfd->kvm, irqfd->producer->irq); } void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons) diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c index f74a66ce3064b..0e56105339493 100644 --- a/arch/arm64/kvm/at.c +++ b/arch/arm64/kvm/at.c @@ -10,61 +10,11 @@ #include <asm/kvm_hyp.h> #include <asm/kvm_mmu.h> -enum trans_regime { - TR_EL10, - TR_EL20, - TR_EL2, -}; - -struct s1_walk_info { - u64 baddr; - enum trans_regime regime; - unsigned int max_oa_bits; - unsigned int pgshift; - unsigned int txsz; - int sl; - bool hpd; - bool e0poe; - bool poe; - bool pan; - bool be; - bool s2; -}; - -struct s1_walk_result { - union { - struct { - u64 desc; - u64 pa; - s8 level; - u8 APTable; - bool UXNTable; - bool PXNTable; - bool uwxn; - bool uov; - bool ur; - bool uw; - bool ux; - bool pwxn; - bool pov; - bool pr; - bool pw; - bool px; - }; - struct { - u8 fst; - bool ptw; - bool s2; - }; - }; - bool failed; -}; - -static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool ptw, bool s2) +static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw) { wr->fst = fst; - wr->ptw = ptw; - wr->s2 = s2; + wr->ptw = s1ptw; + wr->s2 = s1ptw; wr->failed = true; } @@ -145,20 +95,15 @@ static void compute_s1poe(struct kvm_vcpu *vcpu, struct s1_walk_info *wi) } } -static int setup_s1_walk(struct kvm_vcpu *vcpu, u32 op, struct s1_walk_info *wi, +static int setup_s1_walk(struct kvm_vcpu *vcpu, struct s1_walk_info *wi, struct s1_walk_result *wr, u64 va) { u64 hcr, sctlr, tcr, tg, ps, ia_bits, ttbr; unsigned int stride, x; - bool va55, tbi, lva, as_el0; + bool va55, tbi, lva; hcr = __vcpu_sys_reg(vcpu, HCR_EL2); - wi->regime = compute_translation_regime(vcpu, op); - as_el0 = (op == OP_AT_S1E0R || op == OP_AT_S1E0W); - wi->pan = (op == OP_AT_S1E1RP || op == OP_AT_S1E1WP) && - (*vcpu_cpsr(vcpu) & PSR_PAN_BIT); - va55 = va & BIT(55); if (wi->regime == TR_EL2 && va55) @@ -319,7 +264,7 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, u32 op, struct s1_walk_info *wi, /* R_BNDVG and following statements */ if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR2_EL1, E0PD, IMP) && - as_el0 && (tcr & (va55 ? TCR_E0PD1 : TCR_E0PD0))) + wi->as_el0 && (tcr & (va55 ? 
TCR_E0PD1 : TCR_E0PD0))) goto transfault_l0; /* AArch64.S1StartLevel() */ @@ -345,11 +290,11 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, u32 op, struct s1_walk_info *wi, return 0; addrsz: /* Address Size Fault level 0 */ - fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(0), false, false); + fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(0), false); return -EFAULT; transfault_l0: /* Translation Fault level 0 */ - fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(0), false, false); + fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(0), false); return -EFAULT; } @@ -380,13 +325,13 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi, if (ret) { fail_s1_walk(wr, (s2_trans.esr & ~ESR_ELx_FSC_LEVEL) | level, - true, true); + true); return ret; } if (!kvm_s2_trans_readable(&s2_trans)) { fail_s1_walk(wr, ESR_ELx_FSC_PERM_L(level), - true, true); + true); return -EPERM; } @@ -396,8 +341,7 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi, ret = kvm_read_guest(vcpu->kvm, ipa, &desc, sizeof(desc)); if (ret) { - fail_s1_walk(wr, ESR_ELx_FSC_SEA_TTW(level), - true, false); + fail_s1_walk(wr, ESR_ELx_FSC_SEA_TTW(level), false); return ret; } @@ -457,6 +401,11 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi, if (check_output_size(desc & GENMASK(47, va_bottom), wi)) goto addrsz; + if (!(desc & PTE_AF)) { + fail_s1_walk(wr, ESR_ELx_FSC_ACCESS_L(level), false); + return -EACCES; + } + va_bottom += contiguous_bit_shift(desc, wi, level); wr->failed = false; @@ -465,13 +414,40 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi, wr->pa = desc & GENMASK(47, va_bottom); wr->pa |= va & GENMASK_ULL(va_bottom - 1, 0); + wr->nG = (wi->regime != TR_EL2) && (desc & PTE_NG); + if (wr->nG) { + u64 asid_ttbr, tcr; + + switch (wi->regime) { + case TR_EL10: + tcr = vcpu_read_sys_reg(vcpu, TCR_EL1); + asid_ttbr = ((tcr & TCR_A1) ? + vcpu_read_sys_reg(vcpu, TTBR1_EL1) : + vcpu_read_sys_reg(vcpu, TTBR0_EL1)); + break; + case TR_EL20: + tcr = vcpu_read_sys_reg(vcpu, TCR_EL2); + asid_ttbr = ((tcr & TCR_A1) ? + vcpu_read_sys_reg(vcpu, TTBR1_EL2) : + vcpu_read_sys_reg(vcpu, TTBR0_EL2)); + break; + default: + BUG(); + } + + wr->asid = FIELD_GET(TTBR_ASID_MASK, asid_ttbr); + if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) || + !(tcr & TCR_ASID16)) + wr->asid &= GENMASK(7, 0); + } + return 0; addrsz: - fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(level), true, false); + fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(level), false); return -EINVAL; transfault: - fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(level), true, false); + fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(level), false); return -ENOENT; } @@ -488,7 +464,6 @@ struct mmu_config { u64 sctlr; u64 vttbr; u64 vtcr; - u64 hcr; }; static void __mmu_config_save(struct mmu_config *config) @@ -511,13 +486,10 @@ static void __mmu_config_save(struct mmu_config *config) config->sctlr = read_sysreg_el1(SYS_SCTLR); config->vttbr = read_sysreg(vttbr_el2); config->vtcr = read_sysreg(vtcr_el2); - config->hcr = read_sysreg(hcr_el2); } static void __mmu_config_restore(struct mmu_config *config) { - write_sysreg(config->hcr, hcr_el2); - /* * ARM errata 1165522 and 1530923 require TGE to be 1 before * we update the guest state. 
@@ -1075,34 +1047,51 @@ static void compute_s1_overlay_permissions(struct kvm_vcpu *vcpu, idx = FIELD_GET(PTE_PO_IDX_MASK, wr->desc); - switch (wi->regime) { - case TR_EL10: - pov_perms = perm_idx(vcpu, POR_EL1, idx); - uov_perms = perm_idx(vcpu, POR_EL0, idx); - break; - case TR_EL20: - pov_perms = perm_idx(vcpu, POR_EL2, idx); - uov_perms = perm_idx(vcpu, POR_EL0, idx); - break; - case TR_EL2: - pov_perms = perm_idx(vcpu, POR_EL2, idx); - uov_perms = 0; - break; - } + if (wr->pov) { + switch (wi->regime) { + case TR_EL10: + pov_perms = perm_idx(vcpu, POR_EL1, idx); + break; + case TR_EL20: + pov_perms = perm_idx(vcpu, POR_EL2, idx); + break; + case TR_EL2: + pov_perms = perm_idx(vcpu, POR_EL2, idx); + break; + } + + if (pov_perms & ~POE_RWX) + pov_perms = POE_NONE; - if (pov_perms & ~POE_RWX) - pov_perms = POE_NONE; + /* R_QXXPC, S1PrivOverflow enabled */ + if (wr->pwxn && (pov_perms & POE_X)) + pov_perms &= ~POE_W; - if (wi->poe && wr->pov) { wr->pr &= pov_perms & POE_R; wr->pw &= pov_perms & POE_W; wr->px &= pov_perms & POE_X; } - if (uov_perms & ~POE_RWX) - uov_perms = POE_NONE; + if (wr->uov) { + switch (wi->regime) { + case TR_EL10: + uov_perms = perm_idx(vcpu, POR_EL0, idx); + break; + case TR_EL20: + uov_perms = perm_idx(vcpu, POR_EL0, idx); + break; + case TR_EL2: + uov_perms = 0; + break; + } + + if (uov_perms & ~POE_RWX) + uov_perms = POE_NONE; + + /* R_NPBXC, S1UnprivOverlay enabled */ + if (wr->uwxn && (uov_perms & POE_X)) + uov_perms &= ~POE_W; - if (wi->e0poe && wr->uov) { wr->ur &= uov_perms & POE_R; wr->uw &= uov_perms & POE_W; wr->ux &= uov_perms & POE_X; @@ -1123,24 +1112,15 @@ static void compute_s1_permissions(struct kvm_vcpu *vcpu, if (!wi->hpd) compute_s1_hierarchical_permissions(vcpu, wi, wr); - if (wi->poe || wi->e0poe) - compute_s1_overlay_permissions(vcpu, wi, wr); + compute_s1_overlay_permissions(vcpu, wi, wr); - /* R_QXXPC */ - if (wr->pwxn) { - if (!wr->pov && wr->pw) - wr->px = false; - if (wr->pov && wr->px) - wr->pw = false; - } + /* R_QXXPC, S1PrivOverlay disabled */ + if (!wr->pov) + wr->px &= !(wr->pwxn && wr->pw); - /* R_NPBXC */ - if (wr->uwxn) { - if (!wr->uov && wr->uw) - wr->ux = false; - if (wr->uov && wr->ux) - wr->uw = false; - } + /* R_NPBXC, S1UnprivOverlay disabled */ + if (!wr->uov) + wr->ux &= !(wr->uwxn && wr->uw); pan = wi->pan && (wr->ur || wr->uw || (pan3_enabled(vcpu, wi->regime) && wr->ux)); @@ -1155,7 +1135,12 @@ static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr) bool perm_fail = false; int ret, idx; - ret = setup_s1_walk(vcpu, op, &wi, &wr, vaddr); + wi.regime = compute_translation_regime(vcpu, op); + wi.as_el0 = (op == OP_AT_S1E0R || op == OP_AT_S1E0W); + wi.pan = (op == OP_AT_S1E1RP || op == OP_AT_S1E1WP) && + (*vcpu_cpsr(vcpu) & PSR_PAN_BIT); + + ret = setup_s1_walk(vcpu, &wi, &wr, vaddr); if (ret) goto compute_par; @@ -1198,7 +1183,7 @@ static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr) } if (perm_fail) - fail_s1_walk(&wr, ESR_ELx_FSC_PERM_L(wr.level), false, false); + fail_s1_walk(&wr, ESR_ELx_FSC_PERM_L(wr.level), false); compute_par: return compute_par_s1(vcpu, &wr, wi.regime); @@ -1210,7 +1195,8 @@ compute_par: * If the translation is unsuccessful, the value may only contain * PAR_EL1.F, and cannot be taken at face value. It isn't an * indication of the translation having failed, only that the fast - * path did not succeed, *unless* it indicates a S1 permission fault. + * path did not succeed, *unless* it indicates a S1 permission or + * access fault. 
*/ static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr) { @@ -1266,8 +1252,8 @@ static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr) __load_stage2(mmu, mmu->arch); skip_mmu_switch: - /* Clear TGE, enable S2 translation, we're rolling */ - write_sysreg((config.hcr & ~HCR_TGE) | HCR_VM, hcr_el2); + /* Temporarily switch back to guest context */ + write_sysreg_hcr(vcpu->arch.hcr_el2); isb(); switch (op) { @@ -1299,6 +1285,8 @@ skip_mmu_switch: if (!fail) par = read_sysreg_par(); + write_sysreg_hcr(HCR_HOST_VHE_FLAGS); + if (!(vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu))) __mmu_config_restore(&config); @@ -1313,19 +1301,29 @@ static bool par_check_s1_perm_fault(u64 par) !(par & SYS_PAR_EL1_S)); } +static bool par_check_s1_access_fault(u64 par) +{ + u8 fst = FIELD_GET(SYS_PAR_EL1_FST, par); + + return ((fst & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS && + !(par & SYS_PAR_EL1_S)); +} + void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr) { u64 par = __kvm_at_s1e01_fast(vcpu, op, vaddr); /* - * If PAR_EL1 reports that AT failed on a S1 permission fault, we - * know for sure that the PTW was able to walk the S1 tables and - * there's nothing else to do. + * If PAR_EL1 reports that AT failed on a S1 permission or access + * fault, we know for sure that the PTW was able to walk the S1 + * tables and there's nothing else to do. * * If AT failed for any other reason, then we must walk the guest S1 * to emulate the instruction. */ - if ((par & SYS_PAR_EL1_F) && !par_check_s1_perm_fault(par)) + if ((par & SYS_PAR_EL1_F) && + !par_check_s1_perm_fault(par) && + !par_check_s1_access_fault(par)) par = handle_at_slow(vcpu, op, vaddr); vcpu_write_sys_reg(vcpu, par, PAR_EL1); @@ -1350,7 +1348,7 @@ void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr) if (!vcpu_el2_e2h_is_set(vcpu)) val |= HCR_NV | HCR_NV1; - write_sysreg(val, hcr_el2); + write_sysreg_hcr(val); isb(); par = SYS_PAR_EL1_F; @@ -1375,7 +1373,7 @@ void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr) if (!fail) par = read_sysreg_par(); - write_sysreg(hcr, hcr_el2); + write_sysreg_hcr(hcr); isb(); } @@ -1444,3 +1442,31 @@ void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr) par = compute_par_s12(vcpu, par, &out); vcpu_write_sys_reg(vcpu, par, PAR_EL1); } + +/* + * Translate a VA for a given EL in a given translation regime, with + * or without PAN. This requires wi->{regime, as_el0, pan} to be + * set. The rest of the wi and wr should be 0-initialised. 
+ */ +int __kvm_translate_va(struct kvm_vcpu *vcpu, struct s1_walk_info *wi, + struct s1_walk_result *wr, u64 va) +{ + int ret; + + ret = setup_s1_walk(vcpu, wi, wr, va); + if (ret) + return ret; + + if (wr->level == S1_MMU_DISABLED) { + wr->ur = wr->uw = wr->ux = true; + wr->pr = wr->pw = wr->px = true; + } else { + ret = walk_s1(vcpu, wi, wr, va); + if (ret) + return ret; + + compute_s1_permissions(vcpu, wi, wr); + } + + return 0; +} diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c new file mode 100644 index 0000000000000..da66c4a147752 --- /dev/null +++ b/arch/arm64/kvm/config.c @@ -0,0 +1,1334 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Google LLC + * Author: Marc Zyngier <maz@kernel.org> + */ + +#include <linux/kvm_host.h> +#include <asm/sysreg.h> + +struct reg_bits_to_feat_map { + u64 bits; + +#define NEVER_FGU BIT(0) /* Can trap, but never UNDEF */ +#define CALL_FUNC BIT(1) /* Needs to evaluate tons of crap */ +#define FIXED_VALUE BIT(2) /* RAZ/WI or RAO/WI in KVM */ + unsigned long flags; + + union { + struct { + u8 regidx; + u8 shift; + u8 width; + bool sign; + s8 lo_lim; + }; + bool (*match)(struct kvm *); + bool (*fval)(struct kvm *, u64 *); + }; +}; + +#define __NEEDS_FEAT_3(m, f, id, fld, lim) \ + { \ + .bits = (m), \ + .flags = (f), \ + .regidx = IDREG_IDX(SYS_ ## id), \ + .shift = id ##_## fld ## _SHIFT, \ + .width = id ##_## fld ## _WIDTH, \ + .sign = id ##_## fld ## _SIGNED, \ + .lo_lim = id ##_## fld ##_## lim \ + } + +#define __NEEDS_FEAT_2(m, f, fun, dummy) \ + { \ + .bits = (m), \ + .flags = (f) | CALL_FUNC, \ + .fval = (fun), \ + } + +#define __NEEDS_FEAT_1(m, f, fun) \ + { \ + .bits = (m), \ + .flags = (f) | CALL_FUNC, \ + .match = (fun), \ + } + +#define NEEDS_FEAT_FLAG(m, f, ...) \ + CONCATENATE(__NEEDS_FEAT_, COUNT_ARGS(__VA_ARGS__))(m, f, __VA_ARGS__) + +#define NEEDS_FEAT_FIXED(m, ...) \ + NEEDS_FEAT_FLAG(m, FIXED_VALUE, __VA_ARGS__, 0) + +#define NEEDS_FEAT(m, ...) 
NEEDS_FEAT_FLAG(m, 0, __VA_ARGS__) + +#define FEAT_SPE ID_AA64DFR0_EL1, PMSVer, IMP +#define FEAT_SPE_FnE ID_AA64DFR0_EL1, PMSVer, V1P2 +#define FEAT_BRBE ID_AA64DFR0_EL1, BRBE, IMP +#define FEAT_TRC_SR ID_AA64DFR0_EL1, TraceVer, IMP +#define FEAT_PMUv3 ID_AA64DFR0_EL1, PMUVer, IMP +#define FEAT_TRBE ID_AA64DFR0_EL1, TraceBuffer, IMP +#define FEAT_TRBEv1p1 ID_AA64DFR0_EL1, TraceBuffer, TRBE_V1P1 +#define FEAT_DoubleLock ID_AA64DFR0_EL1, DoubleLock, IMP +#define FEAT_TRF ID_AA64DFR0_EL1, TraceFilt, IMP +#define FEAT_AA32EL0 ID_AA64PFR0_EL1, EL0, AARCH32 +#define FEAT_AA32EL1 ID_AA64PFR0_EL1, EL1, AARCH32 +#define FEAT_AA64EL1 ID_AA64PFR0_EL1, EL1, IMP +#define FEAT_AA64EL3 ID_AA64PFR0_EL1, EL3, IMP +#define FEAT_AIE ID_AA64MMFR3_EL1, AIE, IMP +#define FEAT_S2POE ID_AA64MMFR3_EL1, S2POE, IMP +#define FEAT_S1POE ID_AA64MMFR3_EL1, S1POE, IMP +#define FEAT_S1PIE ID_AA64MMFR3_EL1, S1PIE, IMP +#define FEAT_THE ID_AA64PFR1_EL1, THE, IMP +#define FEAT_SME ID_AA64PFR1_EL1, SME, IMP +#define FEAT_GCS ID_AA64PFR1_EL1, GCS, IMP +#define FEAT_LS64 ID_AA64ISAR1_EL1, LS64, LS64 +#define FEAT_LS64_V ID_AA64ISAR1_EL1, LS64, LS64_V +#define FEAT_LS64_ACCDATA ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA +#define FEAT_RAS ID_AA64PFR0_EL1, RAS, IMP +#define FEAT_RASv2 ID_AA64PFR0_EL1, RAS, V2 +#define FEAT_GICv3 ID_AA64PFR0_EL1, GIC, IMP +#define FEAT_LOR ID_AA64MMFR1_EL1, LO, IMP +#define FEAT_SPEv1p2 ID_AA64DFR0_EL1, PMSVer, V1P2 +#define FEAT_SPEv1p4 ID_AA64DFR0_EL1, PMSVer, V1P4 +#define FEAT_SPEv1p5 ID_AA64DFR0_EL1, PMSVer, V1P5 +#define FEAT_ATS1A ID_AA64ISAR2_EL1, ATS1A, IMP +#define FEAT_SPECRES2 ID_AA64ISAR1_EL1, SPECRES, COSP_RCTX +#define FEAT_SPECRES ID_AA64ISAR1_EL1, SPECRES, IMP +#define FEAT_TLBIRANGE ID_AA64ISAR0_EL1, TLB, RANGE +#define FEAT_TLBIOS ID_AA64ISAR0_EL1, TLB, OS +#define FEAT_PAN2 ID_AA64MMFR1_EL1, PAN, PAN2 +#define FEAT_DPB2 ID_AA64ISAR1_EL1, DPB, DPB2 +#define FEAT_AMUv1 ID_AA64PFR0_EL1, AMU, IMP +#define FEAT_AMUv1p1 ID_AA64PFR0_EL1, AMU, V1P1 +#define FEAT_CMOW ID_AA64MMFR1_EL1, CMOW, IMP +#define FEAT_D128 ID_AA64MMFR3_EL1, D128, IMP +#define FEAT_DoubleFault2 ID_AA64PFR1_EL1, DF2, IMP +#define FEAT_FPMR ID_AA64PFR2_EL1, FPMR, IMP +#define FEAT_MOPS ID_AA64ISAR2_EL1, MOPS, IMP +#define FEAT_NMI ID_AA64PFR1_EL1, NMI, IMP +#define FEAT_SCTLR2 ID_AA64MMFR3_EL1, SCTLRX, IMP +#define FEAT_SYSREG128 ID_AA64ISAR2_EL1, SYSREG_128, IMP +#define FEAT_TCR2 ID_AA64MMFR3_EL1, TCRX, IMP +#define FEAT_XS ID_AA64ISAR1_EL1, XS, IMP +#define FEAT_EVT ID_AA64MMFR2_EL1, EVT, IMP +#define FEAT_EVT_TTLBxS ID_AA64MMFR2_EL1, EVT, TTLBxS +#define FEAT_MTE2 ID_AA64PFR1_EL1, MTE, MTE2 +#define FEAT_RME ID_AA64PFR0_EL1, RME, IMP +#define FEAT_MPAM ID_AA64PFR0_EL1, MPAM, 1 +#define FEAT_S2FWB ID_AA64MMFR2_EL1, FWB, IMP +#define FEAT_TME ID_AA64ISAR0_EL1, TME, IMP +#define FEAT_TWED ID_AA64MMFR1_EL1, TWED, IMP +#define FEAT_E2H0 ID_AA64MMFR4_EL1, E2H0, IMP +#define FEAT_SRMASK ID_AA64MMFR4_EL1, SRMASK, IMP +#define FEAT_PoPS ID_AA64MMFR4_EL1, PoPS, IMP +#define FEAT_PFAR ID_AA64PFR1_EL1, PFAR, IMP +#define FEAT_Debugv8p9 ID_AA64DFR0_EL1, PMUVer, V3P9 +#define FEAT_PMUv3_SS ID_AA64DFR0_EL1, PMSS, IMP +#define FEAT_SEBEP ID_AA64DFR0_EL1, SEBEP, IMP +#define FEAT_EBEP ID_AA64DFR1_EL1, EBEP, IMP +#define FEAT_ITE ID_AA64DFR1_EL1, ITE, IMP +#define FEAT_PMUv3_ICNTR ID_AA64DFR1_EL1, PMICNTR, IMP +#define FEAT_SPMU ID_AA64DFR1_EL1, SPMU, IMP +#define FEAT_SPE_nVM ID_AA64DFR2_EL1, SPE_nVM, IMP +#define FEAT_STEP2 ID_AA64DFR2_EL1, STEP, IMP +#define FEAT_SYSREG128 ID_AA64ISAR2_EL1, SYSREG_128, IMP +#define FEAT_CPA2 
ID_AA64ISAR3_EL1, CPA, CPA2 +#define FEAT_ASID2 ID_AA64MMFR4_EL1, ASID2, IMP +#define FEAT_MEC ID_AA64MMFR3_EL1, MEC, IMP +#define FEAT_HAFT ID_AA64MMFR1_EL1, HAFDBS, HAFT +#define FEAT_BTI ID_AA64PFR1_EL1, BT, IMP +#define FEAT_ExS ID_AA64MMFR0_EL1, EXS, IMP +#define FEAT_IESB ID_AA64MMFR2_EL1, IESB, IMP +#define FEAT_LSE2 ID_AA64MMFR2_EL1, AT, IMP +#define FEAT_LSMAOC ID_AA64MMFR2_EL1, LSM, IMP +#define FEAT_MixedEnd ID_AA64MMFR0_EL1, BIGEND, IMP +#define FEAT_MixedEndEL0 ID_AA64MMFR0_EL1, BIGENDEL0, IMP +#define FEAT_MTE2 ID_AA64PFR1_EL1, MTE, MTE2 +#define FEAT_MTE_ASYNC ID_AA64PFR1_EL1, MTE_frac, ASYNC +#define FEAT_MTE_STORE_ONLY ID_AA64PFR2_EL1, MTESTOREONLY, IMP +#define FEAT_PAN ID_AA64MMFR1_EL1, PAN, IMP +#define FEAT_PAN3 ID_AA64MMFR1_EL1, PAN, PAN3 +#define FEAT_SSBS ID_AA64PFR1_EL1, SSBS, IMP +#define FEAT_TIDCP1 ID_AA64MMFR1_EL1, TIDCP1, IMP +#define FEAT_FGT ID_AA64MMFR0_EL1, FGT, IMP +#define FEAT_MTPMU ID_AA64DFR0_EL1, MTPMU, IMP + +static bool not_feat_aa64el3(struct kvm *kvm) +{ + return !kvm_has_feat(kvm, FEAT_AA64EL3); +} + +static bool feat_nv2(struct kvm *kvm) +{ + return ((kvm_has_feat(kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY) && + kvm_has_feat_enum(kvm, ID_AA64MMFR2_EL1, NV, NI)) || + kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, NV2)); +} + +static bool feat_nv2_e2h0_ni(struct kvm *kvm) +{ + return feat_nv2(kvm) && !kvm_has_feat(kvm, FEAT_E2H0); +} + +static bool feat_rasv1p1(struct kvm *kvm) +{ + return (kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) || + (kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, RAS, IMP) && + kvm_has_feat(kvm, ID_AA64PFR1_EL1, RAS_frac, RASv1p1))); +} + +static bool feat_csv2_2_csv2_1p2(struct kvm *kvm) +{ + return (kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) || + (kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2) && + kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, CSV2, IMP))); +} + +static bool feat_pauth(struct kvm *kvm) +{ + return kvm_has_pauth(kvm, PAuth); +} + +static bool feat_pauth_lr(struct kvm *kvm) +{ + return kvm_has_pauth(kvm, PAuth_LR); +} + +static bool feat_aderr(struct kvm *kvm) +{ + return (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, FEAT_ADERR) && + kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SDERR, FEAT_ADERR)); +} + +static bool feat_anerr(struct kvm *kvm) +{ + return (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ANERR, FEAT_ANERR) && + kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SNERR, FEAT_ANERR)); +} + +static bool feat_sme_smps(struct kvm *kvm) +{ + /* + * Revists this if KVM ever supports SME -- this really should + * look at the guest's view of SMIDR_EL1. Funnily enough, this + * is not captured in the JSON file, but only as a note in the + * ARM ARM. + */ + return (kvm_has_feat(kvm, FEAT_SME) && + (read_sysreg_s(SYS_SMIDR_EL1) & SMIDR_EL1_SMPS)); +} + +static bool feat_spe_fds(struct kvm *kvm) +{ + /* + * Revists this if KVM ever supports SPE -- this really should + * look at the guest's view of PMSIDR_EL1. + */ + return (kvm_has_feat(kvm, FEAT_SPEv1p4) && + (read_sysreg_s(SYS_PMSIDR_EL1) & PMSIDR_EL1_FDS)); +} + +static bool feat_trbe_mpam(struct kvm *kvm) +{ + /* + * Revists this if KVM ever supports both MPAM and TRBE -- + * this really should look at the guest's view of TRBIDR_EL1. 
+ */ + return (kvm_has_feat(kvm, FEAT_TRBE) && + kvm_has_feat(kvm, FEAT_MPAM) && + (read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_MPAM)); +} + +static bool feat_asid2_e2h1(struct kvm *kvm) +{ + return kvm_has_feat(kvm, FEAT_ASID2) && !kvm_has_feat(kvm, FEAT_E2H0); +} + +static bool feat_d128_e2h1(struct kvm *kvm) +{ + return kvm_has_feat(kvm, FEAT_D128) && !kvm_has_feat(kvm, FEAT_E2H0); +} + +static bool feat_mec_e2h1(struct kvm *kvm) +{ + return kvm_has_feat(kvm, FEAT_MEC) && !kvm_has_feat(kvm, FEAT_E2H0); +} + +static bool feat_ebep_pmuv3_ss(struct kvm *kvm) +{ + return kvm_has_feat(kvm, FEAT_EBEP) || kvm_has_feat(kvm, FEAT_PMUv3_SS); +} + +static bool feat_mixedendel0(struct kvm *kvm) +{ + return kvm_has_feat(kvm, FEAT_MixedEnd) || kvm_has_feat(kvm, FEAT_MixedEndEL0); +} + +static bool feat_mte_async(struct kvm *kvm) +{ + return kvm_has_feat(kvm, FEAT_MTE2) && kvm_has_feat_enum(kvm, FEAT_MTE_ASYNC); +} + +#define check_pmu_revision(k, r) \ + ({ \ + (kvm_has_feat((k), ID_AA64DFR0_EL1, PMUVer, r) && \ + !kvm_has_feat((k), ID_AA64DFR0_EL1, PMUVer, IMP_DEF)); \ + }) + +static bool feat_pmuv3p1(struct kvm *kvm) +{ + return check_pmu_revision(kvm, V3P1); +} + +static bool feat_pmuv3p5(struct kvm *kvm) +{ + return check_pmu_revision(kvm, V3P5); +} + +static bool feat_pmuv3p7(struct kvm *kvm) +{ + return check_pmu_revision(kvm, V3P7); +} + +static bool feat_pmuv3p9(struct kvm *kvm) +{ + return check_pmu_revision(kvm, V3P9); +} + +static bool compute_hcr_rw(struct kvm *kvm, u64 *bits) +{ + /* This is purely academic: AArch32 and NV are mutually exclusive */ + if (bits) { + if (kvm_has_feat(kvm, FEAT_AA32EL1)) + *bits &= ~HCR_EL2_RW; + else + *bits |= HCR_EL2_RW; + } + + return true; +} + +static bool compute_hcr_e2h(struct kvm *kvm, u64 *bits) +{ + if (bits) { + if (kvm_has_feat(kvm, FEAT_E2H0)) + *bits &= ~HCR_EL2_E2H; + else + *bits |= HCR_EL2_E2H; + } + + return true; +} + +static const struct reg_bits_to_feat_map hfgrtr_feat_map[] = { + NEEDS_FEAT(HFGRTR_EL2_nAMAIR2_EL1 | + HFGRTR_EL2_nMAIR2_EL1, + FEAT_AIE), + NEEDS_FEAT(HFGRTR_EL2_nS2POR_EL1, FEAT_S2POE), + NEEDS_FEAT(HFGRTR_EL2_nPOR_EL1 | + HFGRTR_EL2_nPOR_EL0, + FEAT_S1POE), + NEEDS_FEAT(HFGRTR_EL2_nPIR_EL1 | + HFGRTR_EL2_nPIRE0_EL1, + FEAT_S1PIE), + NEEDS_FEAT(HFGRTR_EL2_nRCWMASK_EL1, FEAT_THE), + NEEDS_FEAT(HFGRTR_EL2_nTPIDR2_EL0 | + HFGRTR_EL2_nSMPRI_EL1, + FEAT_SME), + NEEDS_FEAT(HFGRTR_EL2_nGCS_EL1 | + HFGRTR_EL2_nGCS_EL0, + FEAT_GCS), + NEEDS_FEAT(HFGRTR_EL2_nACCDATA_EL1, FEAT_LS64_ACCDATA), + NEEDS_FEAT(HFGRTR_EL2_ERXADDR_EL1 | + HFGRTR_EL2_ERXMISCn_EL1 | + HFGRTR_EL2_ERXSTATUS_EL1 | + HFGRTR_EL2_ERXCTLR_EL1 | + HFGRTR_EL2_ERXFR_EL1 | + HFGRTR_EL2_ERRSELR_EL1 | + HFGRTR_EL2_ERRIDR_EL1, + FEAT_RAS), + NEEDS_FEAT(HFGRTR_EL2_ERXPFGCDN_EL1 | + HFGRTR_EL2_ERXPFGCTL_EL1 | + HFGRTR_EL2_ERXPFGF_EL1, + feat_rasv1p1), + NEEDS_FEAT(HFGRTR_EL2_ICC_IGRPENn_EL1, FEAT_GICv3), + NEEDS_FEAT(HFGRTR_EL2_SCXTNUM_EL0 | + HFGRTR_EL2_SCXTNUM_EL1, + feat_csv2_2_csv2_1p2), + NEEDS_FEAT(HFGRTR_EL2_LORSA_EL1 | + HFGRTR_EL2_LORN_EL1 | + HFGRTR_EL2_LORID_EL1 | + HFGRTR_EL2_LOREA_EL1 | + HFGRTR_EL2_LORC_EL1, + FEAT_LOR), + NEEDS_FEAT(HFGRTR_EL2_APIBKey | + HFGRTR_EL2_APIAKey | + HFGRTR_EL2_APGAKey | + HFGRTR_EL2_APDBKey | + HFGRTR_EL2_APDAKey, + feat_pauth), + NEEDS_FEAT_FLAG(HFGRTR_EL2_VBAR_EL1 | + HFGRTR_EL2_TTBR1_EL1 | + HFGRTR_EL2_TTBR0_EL1 | + HFGRTR_EL2_TPIDR_EL0 | + HFGRTR_EL2_TPIDRRO_EL0 | + HFGRTR_EL2_TPIDR_EL1 | + HFGRTR_EL2_TCR_EL1 | + HFGRTR_EL2_SCTLR_EL1 | + HFGRTR_EL2_REVIDR_EL1 | + HFGRTR_EL2_PAR_EL1 | + HFGRTR_EL2_MPIDR_EL1 | + HFGRTR_EL2_MIDR_EL1 
| + HFGRTR_EL2_MAIR_EL1 | + HFGRTR_EL2_ISR_EL1 | + HFGRTR_EL2_FAR_EL1 | + HFGRTR_EL2_ESR_EL1 | + HFGRTR_EL2_DCZID_EL0 | + HFGRTR_EL2_CTR_EL0 | + HFGRTR_EL2_CSSELR_EL1 | + HFGRTR_EL2_CPACR_EL1 | + HFGRTR_EL2_CONTEXTIDR_EL1| + HFGRTR_EL2_CLIDR_EL1 | + HFGRTR_EL2_CCSIDR_EL1 | + HFGRTR_EL2_AMAIR_EL1 | + HFGRTR_EL2_AIDR_EL1 | + HFGRTR_EL2_AFSR1_EL1 | + HFGRTR_EL2_AFSR0_EL1, + NEVER_FGU, FEAT_AA64EL1), +}; + +static const struct reg_bits_to_feat_map hfgwtr_feat_map[] = { + NEEDS_FEAT(HFGWTR_EL2_nAMAIR2_EL1 | + HFGWTR_EL2_nMAIR2_EL1, + FEAT_AIE), + NEEDS_FEAT(HFGWTR_EL2_nS2POR_EL1, FEAT_S2POE), + NEEDS_FEAT(HFGWTR_EL2_nPOR_EL1 | + HFGWTR_EL2_nPOR_EL0, + FEAT_S1POE), + NEEDS_FEAT(HFGWTR_EL2_nPIR_EL1 | + HFGWTR_EL2_nPIRE0_EL1, + FEAT_S1PIE), + NEEDS_FEAT(HFGWTR_EL2_nRCWMASK_EL1, FEAT_THE), + NEEDS_FEAT(HFGWTR_EL2_nTPIDR2_EL0 | + HFGWTR_EL2_nSMPRI_EL1, + FEAT_SME), + NEEDS_FEAT(HFGWTR_EL2_nGCS_EL1 | + HFGWTR_EL2_nGCS_EL0, + FEAT_GCS), + NEEDS_FEAT(HFGWTR_EL2_nACCDATA_EL1, FEAT_LS64_ACCDATA), + NEEDS_FEAT(HFGWTR_EL2_ERXADDR_EL1 | + HFGWTR_EL2_ERXMISCn_EL1 | + HFGWTR_EL2_ERXSTATUS_EL1 | + HFGWTR_EL2_ERXCTLR_EL1 | + HFGWTR_EL2_ERRSELR_EL1, + FEAT_RAS), + NEEDS_FEAT(HFGWTR_EL2_ERXPFGCDN_EL1 | + HFGWTR_EL2_ERXPFGCTL_EL1, + feat_rasv1p1), + NEEDS_FEAT(HFGWTR_EL2_ICC_IGRPENn_EL1, FEAT_GICv3), + NEEDS_FEAT(HFGWTR_EL2_SCXTNUM_EL0 | + HFGWTR_EL2_SCXTNUM_EL1, + feat_csv2_2_csv2_1p2), + NEEDS_FEAT(HFGWTR_EL2_LORSA_EL1 | + HFGWTR_EL2_LORN_EL1 | + HFGWTR_EL2_LOREA_EL1 | + HFGWTR_EL2_LORC_EL1, + FEAT_LOR), + NEEDS_FEAT(HFGWTR_EL2_APIBKey | + HFGWTR_EL2_APIAKey | + HFGWTR_EL2_APGAKey | + HFGWTR_EL2_APDBKey | + HFGWTR_EL2_APDAKey, + feat_pauth), + NEEDS_FEAT_FLAG(HFGWTR_EL2_VBAR_EL1 | + HFGWTR_EL2_TTBR1_EL1 | + HFGWTR_EL2_TTBR0_EL1 | + HFGWTR_EL2_TPIDR_EL0 | + HFGWTR_EL2_TPIDRRO_EL0 | + HFGWTR_EL2_TPIDR_EL1 | + HFGWTR_EL2_TCR_EL1 | + HFGWTR_EL2_SCTLR_EL1 | + HFGWTR_EL2_PAR_EL1 | + HFGWTR_EL2_MAIR_EL1 | + HFGWTR_EL2_FAR_EL1 | + HFGWTR_EL2_ESR_EL1 | + HFGWTR_EL2_CSSELR_EL1 | + HFGWTR_EL2_CPACR_EL1 | + HFGWTR_EL2_CONTEXTIDR_EL1| + HFGWTR_EL2_AMAIR_EL1 | + HFGWTR_EL2_AFSR1_EL1 | + HFGWTR_EL2_AFSR0_EL1, + NEVER_FGU, FEAT_AA64EL1), +}; + +static const struct reg_bits_to_feat_map hdfgrtr_feat_map[] = { + NEEDS_FEAT(HDFGRTR_EL2_PMBIDR_EL1 | + HDFGRTR_EL2_PMSLATFR_EL1 | + HDFGRTR_EL2_PMSIRR_EL1 | + HDFGRTR_EL2_PMSIDR_EL1 | + HDFGRTR_EL2_PMSICR_EL1 | + HDFGRTR_EL2_PMSFCR_EL1 | + HDFGRTR_EL2_PMSEVFR_EL1 | + HDFGRTR_EL2_PMSCR_EL1 | + HDFGRTR_EL2_PMBSR_EL1 | + HDFGRTR_EL2_PMBPTR_EL1 | + HDFGRTR_EL2_PMBLIMITR_EL1, + FEAT_SPE), + NEEDS_FEAT(HDFGRTR_EL2_nPMSNEVFR_EL1, FEAT_SPE_FnE), + NEEDS_FEAT(HDFGRTR_EL2_nBRBDATA | + HDFGRTR_EL2_nBRBCTL | + HDFGRTR_EL2_nBRBIDR, + FEAT_BRBE), + NEEDS_FEAT(HDFGRTR_EL2_TRCVICTLR | + HDFGRTR_EL2_TRCSTATR | + HDFGRTR_EL2_TRCSSCSRn | + HDFGRTR_EL2_TRCSEQSTR | + HDFGRTR_EL2_TRCPRGCTLR | + HDFGRTR_EL2_TRCOSLSR | + HDFGRTR_EL2_TRCIMSPECn | + HDFGRTR_EL2_TRCID | + HDFGRTR_EL2_TRCCNTVRn | + HDFGRTR_EL2_TRCCLAIM | + HDFGRTR_EL2_TRCAUXCTLR | + HDFGRTR_EL2_TRCAUTHSTATUS | + HDFGRTR_EL2_TRC, + FEAT_TRC_SR), + NEEDS_FEAT(HDFGRTR_EL2_PMCEIDn_EL0 | + HDFGRTR_EL2_PMUSERENR_EL0 | + HDFGRTR_EL2_PMMIR_EL1 | + HDFGRTR_EL2_PMSELR_EL0 | + HDFGRTR_EL2_PMOVS | + HDFGRTR_EL2_PMINTEN | + HDFGRTR_EL2_PMCNTEN | + HDFGRTR_EL2_PMCCNTR_EL0 | + HDFGRTR_EL2_PMCCFILTR_EL0 | + HDFGRTR_EL2_PMEVTYPERn_EL0 | + HDFGRTR_EL2_PMEVCNTRn_EL0, + FEAT_PMUv3), + NEEDS_FEAT(HDFGRTR_EL2_TRBTRG_EL1 | + HDFGRTR_EL2_TRBSR_EL1 | + HDFGRTR_EL2_TRBPTR_EL1 | + HDFGRTR_EL2_TRBMAR_EL1 | + HDFGRTR_EL2_TRBLIMITR_EL1 | + HDFGRTR_EL2_TRBIDR_EL1 | + 
HDFGRTR_EL2_TRBBASER_EL1, + FEAT_TRBE), + NEEDS_FEAT_FLAG(HDFGRTR_EL2_OSDLR_EL1, NEVER_FGU, + FEAT_DoubleLock), + NEEDS_FEAT_FLAG(HDFGRTR_EL2_OSECCR_EL1 | + HDFGRTR_EL2_OSLSR_EL1 | + HDFGRTR_EL2_DBGPRCR_EL1 | + HDFGRTR_EL2_DBGAUTHSTATUS_EL1| + HDFGRTR_EL2_DBGCLAIM | + HDFGRTR_EL2_MDSCR_EL1 | + HDFGRTR_EL2_DBGWVRn_EL1 | + HDFGRTR_EL2_DBGWCRn_EL1 | + HDFGRTR_EL2_DBGBVRn_EL1 | + HDFGRTR_EL2_DBGBCRn_EL1, + NEVER_FGU, FEAT_AA64EL1) +}; + +static const struct reg_bits_to_feat_map hdfgwtr_feat_map[] = { + NEEDS_FEAT(HDFGWTR_EL2_PMSLATFR_EL1 | + HDFGWTR_EL2_PMSIRR_EL1 | + HDFGWTR_EL2_PMSICR_EL1 | + HDFGWTR_EL2_PMSFCR_EL1 | + HDFGWTR_EL2_PMSEVFR_EL1 | + HDFGWTR_EL2_PMSCR_EL1 | + HDFGWTR_EL2_PMBSR_EL1 | + HDFGWTR_EL2_PMBPTR_EL1 | + HDFGWTR_EL2_PMBLIMITR_EL1, + FEAT_SPE), + NEEDS_FEAT(HDFGWTR_EL2_nPMSNEVFR_EL1, FEAT_SPE_FnE), + NEEDS_FEAT(HDFGWTR_EL2_nBRBDATA | + HDFGWTR_EL2_nBRBCTL, + FEAT_BRBE), + NEEDS_FEAT(HDFGWTR_EL2_TRCVICTLR | + HDFGWTR_EL2_TRCSSCSRn | + HDFGWTR_EL2_TRCSEQSTR | + HDFGWTR_EL2_TRCPRGCTLR | + HDFGWTR_EL2_TRCOSLAR | + HDFGWTR_EL2_TRCIMSPECn | + HDFGWTR_EL2_TRCCNTVRn | + HDFGWTR_EL2_TRCCLAIM | + HDFGWTR_EL2_TRCAUXCTLR | + HDFGWTR_EL2_TRC, + FEAT_TRC_SR), + NEEDS_FEAT(HDFGWTR_EL2_PMUSERENR_EL0 | + HDFGWTR_EL2_PMCR_EL0 | + HDFGWTR_EL2_PMSWINC_EL0 | + HDFGWTR_EL2_PMSELR_EL0 | + HDFGWTR_EL2_PMOVS | + HDFGWTR_EL2_PMINTEN | + HDFGWTR_EL2_PMCNTEN | + HDFGWTR_EL2_PMCCNTR_EL0 | + HDFGWTR_EL2_PMCCFILTR_EL0 | + HDFGWTR_EL2_PMEVTYPERn_EL0 | + HDFGWTR_EL2_PMEVCNTRn_EL0, + FEAT_PMUv3), + NEEDS_FEAT(HDFGWTR_EL2_TRBTRG_EL1 | + HDFGWTR_EL2_TRBSR_EL1 | + HDFGWTR_EL2_TRBPTR_EL1 | + HDFGWTR_EL2_TRBMAR_EL1 | + HDFGWTR_EL2_TRBLIMITR_EL1 | + HDFGWTR_EL2_TRBBASER_EL1, + FEAT_TRBE), + NEEDS_FEAT_FLAG(HDFGWTR_EL2_OSDLR_EL1, + NEVER_FGU, FEAT_DoubleLock), + NEEDS_FEAT_FLAG(HDFGWTR_EL2_OSECCR_EL1 | + HDFGWTR_EL2_OSLAR_EL1 | + HDFGWTR_EL2_DBGPRCR_EL1 | + HDFGWTR_EL2_DBGCLAIM | + HDFGWTR_EL2_MDSCR_EL1 | + HDFGWTR_EL2_DBGWVRn_EL1 | + HDFGWTR_EL2_DBGWCRn_EL1 | + HDFGWTR_EL2_DBGBVRn_EL1 | + HDFGWTR_EL2_DBGBCRn_EL1, + NEVER_FGU, FEAT_AA64EL1), + NEEDS_FEAT(HDFGWTR_EL2_TRFCR_EL1, FEAT_TRF), +}; + + +static const struct reg_bits_to_feat_map hfgitr_feat_map[] = { + NEEDS_FEAT(HFGITR_EL2_PSBCSYNC, FEAT_SPEv1p5), + NEEDS_FEAT(HFGITR_EL2_ATS1E1A, FEAT_ATS1A), + NEEDS_FEAT(HFGITR_EL2_COSPRCTX, FEAT_SPECRES2), + NEEDS_FEAT(HFGITR_EL2_nGCSEPP | + HFGITR_EL2_nGCSSTR_EL1 | + HFGITR_EL2_nGCSPUSHM_EL1, + FEAT_GCS), + NEEDS_FEAT(HFGITR_EL2_nBRBIALL | + HFGITR_EL2_nBRBINJ, + FEAT_BRBE), + NEEDS_FEAT(HFGITR_EL2_CPPRCTX | + HFGITR_EL2_DVPRCTX | + HFGITR_EL2_CFPRCTX, + FEAT_SPECRES), + NEEDS_FEAT(HFGITR_EL2_TLBIRVAALE1 | + HFGITR_EL2_TLBIRVALE1 | + HFGITR_EL2_TLBIRVAAE1 | + HFGITR_EL2_TLBIRVAE1 | + HFGITR_EL2_TLBIRVAALE1IS | + HFGITR_EL2_TLBIRVALE1IS | + HFGITR_EL2_TLBIRVAAE1IS | + HFGITR_EL2_TLBIRVAE1IS | + HFGITR_EL2_TLBIRVAALE1OS | + HFGITR_EL2_TLBIRVALE1OS | + HFGITR_EL2_TLBIRVAAE1OS | + HFGITR_EL2_TLBIRVAE1OS, + FEAT_TLBIRANGE), + NEEDS_FEAT(HFGITR_EL2_TLBIVAALE1OS | + HFGITR_EL2_TLBIVALE1OS | + HFGITR_EL2_TLBIVAAE1OS | + HFGITR_EL2_TLBIASIDE1OS | + HFGITR_EL2_TLBIVAE1OS | + HFGITR_EL2_TLBIVMALLE1OS, + FEAT_TLBIOS), + NEEDS_FEAT(HFGITR_EL2_ATS1E1WP | + HFGITR_EL2_ATS1E1RP, + FEAT_PAN2), + NEEDS_FEAT(HFGITR_EL2_DCCVADP, FEAT_DPB2), + NEEDS_FEAT_FLAG(HFGITR_EL2_DCCVAC | + HFGITR_EL2_SVC_EL1 | + HFGITR_EL2_SVC_EL0 | + HFGITR_EL2_ERET | + HFGITR_EL2_TLBIVAALE1 | + HFGITR_EL2_TLBIVALE1 | + HFGITR_EL2_TLBIVAAE1 | + HFGITR_EL2_TLBIASIDE1 | + HFGITR_EL2_TLBIVAE1 | + HFGITR_EL2_TLBIVMALLE1 | + HFGITR_EL2_TLBIVAALE1IS | + 
HFGITR_EL2_TLBIVALE1IS | + HFGITR_EL2_TLBIVAAE1IS | + HFGITR_EL2_TLBIASIDE1IS | + HFGITR_EL2_TLBIVAE1IS | + HFGITR_EL2_TLBIVMALLE1IS| + HFGITR_EL2_ATS1E0W | + HFGITR_EL2_ATS1E0R | + HFGITR_EL2_ATS1E1W | + HFGITR_EL2_ATS1E1R | + HFGITR_EL2_DCZVA | + HFGITR_EL2_DCCIVAC | + HFGITR_EL2_DCCVAP | + HFGITR_EL2_DCCVAU | + HFGITR_EL2_DCCISW | + HFGITR_EL2_DCCSW | + HFGITR_EL2_DCISW | + HFGITR_EL2_DCIVAC | + HFGITR_EL2_ICIVAU | + HFGITR_EL2_ICIALLU | + HFGITR_EL2_ICIALLUIS, + NEVER_FGU, FEAT_AA64EL1), +}; + +static const struct reg_bits_to_feat_map hafgrtr_feat_map[] = { + NEEDS_FEAT(HAFGRTR_EL2_AMEVTYPER115_EL0 | + HAFGRTR_EL2_AMEVTYPER114_EL0 | + HAFGRTR_EL2_AMEVTYPER113_EL0 | + HAFGRTR_EL2_AMEVTYPER112_EL0 | + HAFGRTR_EL2_AMEVTYPER111_EL0 | + HAFGRTR_EL2_AMEVTYPER110_EL0 | + HAFGRTR_EL2_AMEVTYPER19_EL0 | + HAFGRTR_EL2_AMEVTYPER18_EL0 | + HAFGRTR_EL2_AMEVTYPER17_EL0 | + HAFGRTR_EL2_AMEVTYPER16_EL0 | + HAFGRTR_EL2_AMEVTYPER15_EL0 | + HAFGRTR_EL2_AMEVTYPER14_EL0 | + HAFGRTR_EL2_AMEVTYPER13_EL0 | + HAFGRTR_EL2_AMEVTYPER12_EL0 | + HAFGRTR_EL2_AMEVTYPER11_EL0 | + HAFGRTR_EL2_AMEVTYPER10_EL0 | + HAFGRTR_EL2_AMEVCNTR115_EL0 | + HAFGRTR_EL2_AMEVCNTR114_EL0 | + HAFGRTR_EL2_AMEVCNTR113_EL0 | + HAFGRTR_EL2_AMEVCNTR112_EL0 | + HAFGRTR_EL2_AMEVCNTR111_EL0 | + HAFGRTR_EL2_AMEVCNTR110_EL0 | + HAFGRTR_EL2_AMEVCNTR19_EL0 | + HAFGRTR_EL2_AMEVCNTR18_EL0 | + HAFGRTR_EL2_AMEVCNTR17_EL0 | + HAFGRTR_EL2_AMEVCNTR16_EL0 | + HAFGRTR_EL2_AMEVCNTR15_EL0 | + HAFGRTR_EL2_AMEVCNTR14_EL0 | + HAFGRTR_EL2_AMEVCNTR13_EL0 | + HAFGRTR_EL2_AMEVCNTR12_EL0 | + HAFGRTR_EL2_AMEVCNTR11_EL0 | + HAFGRTR_EL2_AMEVCNTR10_EL0 | + HAFGRTR_EL2_AMCNTEN1 | + HAFGRTR_EL2_AMCNTEN0 | + HAFGRTR_EL2_AMEVCNTR03_EL0 | + HAFGRTR_EL2_AMEVCNTR02_EL0 | + HAFGRTR_EL2_AMEVCNTR01_EL0 | + HAFGRTR_EL2_AMEVCNTR00_EL0, + FEAT_AMUv1), +}; + +static const struct reg_bits_to_feat_map hfgitr2_feat_map[] = { + NEEDS_FEAT(HFGITR2_EL2_nDCCIVAPS, FEAT_PoPS), + NEEDS_FEAT(HFGITR2_EL2_TSBCSYNC, FEAT_TRBEv1p1) +}; + +static const struct reg_bits_to_feat_map hfgrtr2_feat_map[] = { + NEEDS_FEAT(HFGRTR2_EL2_nPFAR_EL1, FEAT_PFAR), + NEEDS_FEAT(HFGRTR2_EL2_nERXGSR_EL1, FEAT_RASv2), + NEEDS_FEAT(HFGRTR2_EL2_nACTLRALIAS_EL1 | + HFGRTR2_EL2_nACTLRMASK_EL1 | + HFGRTR2_EL2_nCPACRALIAS_EL1 | + HFGRTR2_EL2_nCPACRMASK_EL1 | + HFGRTR2_EL2_nSCTLR2MASK_EL1 | + HFGRTR2_EL2_nSCTLRALIAS2_EL1 | + HFGRTR2_EL2_nSCTLRALIAS_EL1 | + HFGRTR2_EL2_nSCTLRMASK_EL1 | + HFGRTR2_EL2_nTCR2ALIAS_EL1 | + HFGRTR2_EL2_nTCR2MASK_EL1 | + HFGRTR2_EL2_nTCRALIAS_EL1 | + HFGRTR2_EL2_nTCRMASK_EL1, + FEAT_SRMASK), + NEEDS_FEAT(HFGRTR2_EL2_nRCWSMASK_EL1, FEAT_THE), +}; + +static const struct reg_bits_to_feat_map hfgwtr2_feat_map[] = { + NEEDS_FEAT(HFGWTR2_EL2_nPFAR_EL1, FEAT_PFAR), + NEEDS_FEAT(HFGWTR2_EL2_nACTLRALIAS_EL1 | + HFGWTR2_EL2_nACTLRMASK_EL1 | + HFGWTR2_EL2_nCPACRALIAS_EL1 | + HFGWTR2_EL2_nCPACRMASK_EL1 | + HFGWTR2_EL2_nSCTLR2MASK_EL1 | + HFGWTR2_EL2_nSCTLRALIAS2_EL1 | + HFGWTR2_EL2_nSCTLRALIAS_EL1 | + HFGWTR2_EL2_nSCTLRMASK_EL1 | + HFGWTR2_EL2_nTCR2ALIAS_EL1 | + HFGWTR2_EL2_nTCR2MASK_EL1 | + HFGWTR2_EL2_nTCRALIAS_EL1 | + HFGWTR2_EL2_nTCRMASK_EL1, + FEAT_SRMASK), + NEEDS_FEAT(HFGWTR2_EL2_nRCWSMASK_EL1, FEAT_THE), +}; + +static const struct reg_bits_to_feat_map hdfgrtr2_feat_map[] = { + NEEDS_FEAT(HDFGRTR2_EL2_nMDSELR_EL1, FEAT_Debugv8p9), + NEEDS_FEAT(HDFGRTR2_EL2_nPMECR_EL1, feat_ebep_pmuv3_ss), + NEEDS_FEAT(HDFGRTR2_EL2_nTRCITECR_EL1, FEAT_ITE), + NEEDS_FEAT(HDFGRTR2_EL2_nPMICFILTR_EL0 | + HDFGRTR2_EL2_nPMICNTR_EL0, + FEAT_PMUv3_ICNTR), + NEEDS_FEAT(HDFGRTR2_EL2_nPMUACR_EL1, feat_pmuv3p9), + 
NEEDS_FEAT(HDFGRTR2_EL2_nPMSSCR_EL1 | + HDFGRTR2_EL2_nPMSSDATA, + FEAT_PMUv3_SS), + NEEDS_FEAT(HDFGRTR2_EL2_nPMIAR_EL1, FEAT_SEBEP), + NEEDS_FEAT(HDFGRTR2_EL2_nPMSDSFR_EL1, feat_spe_fds), + NEEDS_FEAT(HDFGRTR2_EL2_nPMBMAR_EL1, FEAT_SPE_nVM), + NEEDS_FEAT(HDFGRTR2_EL2_nSPMACCESSR_EL1 | + HDFGRTR2_EL2_nSPMCNTEN | + HDFGRTR2_EL2_nSPMCR_EL0 | + HDFGRTR2_EL2_nSPMDEVAFF_EL1 | + HDFGRTR2_EL2_nSPMEVCNTRn_EL0 | + HDFGRTR2_EL2_nSPMEVTYPERn_EL0| + HDFGRTR2_EL2_nSPMID | + HDFGRTR2_EL2_nSPMINTEN | + HDFGRTR2_EL2_nSPMOVS | + HDFGRTR2_EL2_nSPMSCR_EL1 | + HDFGRTR2_EL2_nSPMSELR_EL0, + FEAT_SPMU), + NEEDS_FEAT(HDFGRTR2_EL2_nMDSTEPOP_EL1, FEAT_STEP2), + NEEDS_FEAT(HDFGRTR2_EL2_nTRBMPAM_EL1, feat_trbe_mpam), +}; + +static const struct reg_bits_to_feat_map hdfgwtr2_feat_map[] = { + NEEDS_FEAT(HDFGWTR2_EL2_nMDSELR_EL1, FEAT_Debugv8p9), + NEEDS_FEAT(HDFGWTR2_EL2_nPMECR_EL1, feat_ebep_pmuv3_ss), + NEEDS_FEAT(HDFGWTR2_EL2_nTRCITECR_EL1, FEAT_ITE), + NEEDS_FEAT(HDFGWTR2_EL2_nPMICFILTR_EL0 | + HDFGWTR2_EL2_nPMICNTR_EL0, + FEAT_PMUv3_ICNTR), + NEEDS_FEAT(HDFGWTR2_EL2_nPMUACR_EL1 | + HDFGWTR2_EL2_nPMZR_EL0, + feat_pmuv3p9), + NEEDS_FEAT(HDFGWTR2_EL2_nPMSSCR_EL1, FEAT_PMUv3_SS), + NEEDS_FEAT(HDFGWTR2_EL2_nPMIAR_EL1, FEAT_SEBEP), + NEEDS_FEAT(HDFGWTR2_EL2_nPMSDSFR_EL1, feat_spe_fds), + NEEDS_FEAT(HDFGWTR2_EL2_nPMBMAR_EL1, FEAT_SPE_nVM), + NEEDS_FEAT(HDFGWTR2_EL2_nSPMACCESSR_EL1 | + HDFGWTR2_EL2_nSPMCNTEN | + HDFGWTR2_EL2_nSPMCR_EL0 | + HDFGWTR2_EL2_nSPMEVCNTRn_EL0 | + HDFGWTR2_EL2_nSPMEVTYPERn_EL0| + HDFGWTR2_EL2_nSPMINTEN | + HDFGWTR2_EL2_nSPMOVS | + HDFGWTR2_EL2_nSPMSCR_EL1 | + HDFGWTR2_EL2_nSPMSELR_EL0, + FEAT_SPMU), + NEEDS_FEAT(HDFGWTR2_EL2_nMDSTEPOP_EL1, FEAT_STEP2), + NEEDS_FEAT(HDFGWTR2_EL2_nTRBMPAM_EL1, feat_trbe_mpam), +}; + +static const struct reg_bits_to_feat_map hcrx_feat_map[] = { + NEEDS_FEAT(HCRX_EL2_PACMEn, feat_pauth_lr), + NEEDS_FEAT(HCRX_EL2_EnFPM, FEAT_FPMR), + NEEDS_FEAT(HCRX_EL2_GCSEn, FEAT_GCS), + NEEDS_FEAT(HCRX_EL2_EnIDCP128, FEAT_SYSREG128), + NEEDS_FEAT(HCRX_EL2_EnSDERR, feat_aderr), + NEEDS_FEAT(HCRX_EL2_TMEA, FEAT_DoubleFault2), + NEEDS_FEAT(HCRX_EL2_EnSNERR, feat_anerr), + NEEDS_FEAT(HCRX_EL2_D128En, FEAT_D128), + NEEDS_FEAT(HCRX_EL2_PTTWI, FEAT_THE), + NEEDS_FEAT(HCRX_EL2_SCTLR2En, FEAT_SCTLR2), + NEEDS_FEAT(HCRX_EL2_TCR2En, FEAT_TCR2), + NEEDS_FEAT(HCRX_EL2_MSCEn | + HCRX_EL2_MCE2, + FEAT_MOPS), + NEEDS_FEAT(HCRX_EL2_CMOW, FEAT_CMOW), + NEEDS_FEAT(HCRX_EL2_VFNMI | + HCRX_EL2_VINMI | + HCRX_EL2_TALLINT, + FEAT_NMI), + NEEDS_FEAT(HCRX_EL2_SMPME, feat_sme_smps), + NEEDS_FEAT(HCRX_EL2_FGTnXS | + HCRX_EL2_FnXS, + FEAT_XS), + NEEDS_FEAT(HCRX_EL2_EnASR, FEAT_LS64_V), + NEEDS_FEAT(HCRX_EL2_EnALS, FEAT_LS64), + NEEDS_FEAT(HCRX_EL2_EnAS0, FEAT_LS64_ACCDATA), +}; + +static const struct reg_bits_to_feat_map hcr_feat_map[] = { + NEEDS_FEAT(HCR_EL2_TID0, FEAT_AA32EL0), + NEEDS_FEAT_FIXED(HCR_EL2_RW, compute_hcr_rw), + NEEDS_FEAT(HCR_EL2_HCD, not_feat_aa64el3), + NEEDS_FEAT(HCR_EL2_AMO | + HCR_EL2_BSU | + HCR_EL2_CD | + HCR_EL2_DC | + HCR_EL2_FB | + HCR_EL2_FMO | + HCR_EL2_ID | + HCR_EL2_IMO | + HCR_EL2_MIOCNCE | + HCR_EL2_PTW | + HCR_EL2_SWIO | + HCR_EL2_TACR | + HCR_EL2_TDZ | + HCR_EL2_TGE | + HCR_EL2_TID1 | + HCR_EL2_TID2 | + HCR_EL2_TID3 | + HCR_EL2_TIDCP | + HCR_EL2_TPCP | + HCR_EL2_TPU | + HCR_EL2_TRVM | + HCR_EL2_TSC | + HCR_EL2_TSW | + HCR_EL2_TTLB | + HCR_EL2_TVM | + HCR_EL2_TWE | + HCR_EL2_TWI | + HCR_EL2_VF | + HCR_EL2_VI | + HCR_EL2_VM | + HCR_EL2_VSE, + FEAT_AA64EL1), + NEEDS_FEAT(HCR_EL2_AMVOFFEN, FEAT_AMUv1p1), + NEEDS_FEAT(HCR_EL2_EnSCXT, feat_csv2_2_csv2_1p2), + 
NEEDS_FEAT(HCR_EL2_TICAB | + HCR_EL2_TID4 | + HCR_EL2_TOCU, + FEAT_EVT), + NEEDS_FEAT(HCR_EL2_TTLBIS | + HCR_EL2_TTLBOS, + FEAT_EVT_TTLBxS), + NEEDS_FEAT(HCR_EL2_TLOR, FEAT_LOR), + NEEDS_FEAT(HCR_EL2_ATA | + HCR_EL2_DCT | + HCR_EL2_TID5, + FEAT_MTE2), + NEEDS_FEAT(HCR_EL2_AT | /* Ignore the original FEAT_NV */ + HCR_EL2_NV2 | + HCR_EL2_NV, + feat_nv2), + NEEDS_FEAT(HCR_EL2_NV1, feat_nv2_e2h0_ni), /* Missing from JSON */ + NEEDS_FEAT(HCR_EL2_API | + HCR_EL2_APK, + feat_pauth), + NEEDS_FEAT(HCR_EL2_TEA | + HCR_EL2_TERR, + FEAT_RAS), + NEEDS_FEAT(HCR_EL2_FIEN, feat_rasv1p1), + NEEDS_FEAT(HCR_EL2_GPF, FEAT_RME), + NEEDS_FEAT(HCR_EL2_FWB, FEAT_S2FWB), + NEEDS_FEAT(HCR_EL2_TME, FEAT_TME), + NEEDS_FEAT(HCR_EL2_TWEDEL | + HCR_EL2_TWEDEn, + FEAT_TWED), + NEEDS_FEAT_FIXED(HCR_EL2_E2H, compute_hcr_e2h), +}; + +static const struct reg_bits_to_feat_map sctlr2_feat_map[] = { + NEEDS_FEAT(SCTLR2_EL1_NMEA | + SCTLR2_EL1_EASE, + FEAT_DoubleFault2), + NEEDS_FEAT(SCTLR2_EL1_EnADERR, feat_aderr), + NEEDS_FEAT(SCTLR2_EL1_EnANERR, feat_anerr), + NEEDS_FEAT(SCTLR2_EL1_EnIDCP128, FEAT_SYSREG128), + NEEDS_FEAT(SCTLR2_EL1_EnPACM | + SCTLR2_EL1_EnPACM0, + feat_pauth_lr), + NEEDS_FEAT(SCTLR2_EL1_CPTA | + SCTLR2_EL1_CPTA0 | + SCTLR2_EL1_CPTM | + SCTLR2_EL1_CPTM0, + FEAT_CPA2), +}; + +static const struct reg_bits_to_feat_map tcr2_el2_feat_map[] = { + NEEDS_FEAT(TCR2_EL2_FNG1 | + TCR2_EL2_FNG0 | + TCR2_EL2_A2, + feat_asid2_e2h1), + NEEDS_FEAT(TCR2_EL2_DisCH1 | + TCR2_EL2_DisCH0 | + TCR2_EL2_D128, + feat_d128_e2h1), + NEEDS_FEAT(TCR2_EL2_AMEC1, feat_mec_e2h1), + NEEDS_FEAT(TCR2_EL2_AMEC0, FEAT_MEC), + NEEDS_FEAT(TCR2_EL2_HAFT, FEAT_HAFT), + NEEDS_FEAT(TCR2_EL2_PTTWI | + TCR2_EL2_PnCH, + FEAT_THE), + NEEDS_FEAT(TCR2_EL2_AIE, FEAT_AIE), + NEEDS_FEAT(TCR2_EL2_POE | + TCR2_EL2_E0POE, + FEAT_S1POE), + NEEDS_FEAT(TCR2_EL2_PIE, FEAT_S1PIE), +}; + +static const struct reg_bits_to_feat_map sctlr_el1_feat_map[] = { + NEEDS_FEAT(SCTLR_EL1_CP15BEN | + SCTLR_EL1_ITD | + SCTLR_EL1_SED, + FEAT_AA32EL0), + NEEDS_FEAT(SCTLR_EL1_BT0 | + SCTLR_EL1_BT1, + FEAT_BTI), + NEEDS_FEAT(SCTLR_EL1_CMOW, FEAT_CMOW), + NEEDS_FEAT(SCTLR_EL1_TSCXT, feat_csv2_2_csv2_1p2), + NEEDS_FEAT(SCTLR_EL1_EIS | + SCTLR_EL1_EOS, + FEAT_ExS), + NEEDS_FEAT(SCTLR_EL1_EnFPM, FEAT_FPMR), + NEEDS_FEAT(SCTLR_EL1_IESB, FEAT_IESB), + NEEDS_FEAT(SCTLR_EL1_EnALS, FEAT_LS64), + NEEDS_FEAT(SCTLR_EL1_EnAS0, FEAT_LS64_ACCDATA), + NEEDS_FEAT(SCTLR_EL1_EnASR, FEAT_LS64_V), + NEEDS_FEAT(SCTLR_EL1_nAA, FEAT_LSE2), + NEEDS_FEAT(SCTLR_EL1_LSMAOE | + SCTLR_EL1_nTLSMD, + FEAT_LSMAOC), + NEEDS_FEAT(SCTLR_EL1_EE, FEAT_MixedEnd), + NEEDS_FEAT(SCTLR_EL1_E0E, feat_mixedendel0), + NEEDS_FEAT(SCTLR_EL1_MSCEn, FEAT_MOPS), + NEEDS_FEAT(SCTLR_EL1_ATA0 | + SCTLR_EL1_ATA | + SCTLR_EL1_TCF0 | + SCTLR_EL1_TCF, + FEAT_MTE2), + NEEDS_FEAT(SCTLR_EL1_ITFSB, feat_mte_async), + NEEDS_FEAT(SCTLR_EL1_TCSO0 | + SCTLR_EL1_TCSO, + FEAT_MTE_STORE_ONLY), + NEEDS_FEAT(SCTLR_EL1_NMI | + SCTLR_EL1_SPINTMASK, + FEAT_NMI), + NEEDS_FEAT(SCTLR_EL1_SPAN, FEAT_PAN), + NEEDS_FEAT(SCTLR_EL1_EPAN, FEAT_PAN3), + NEEDS_FEAT(SCTLR_EL1_EnDA | + SCTLR_EL1_EnDB | + SCTLR_EL1_EnIA | + SCTLR_EL1_EnIB, + feat_pauth), + NEEDS_FEAT(SCTLR_EL1_EnTP2, FEAT_SME), + NEEDS_FEAT(SCTLR_EL1_EnRCTX, FEAT_SPECRES), + NEEDS_FEAT(SCTLR_EL1_DSSBS, FEAT_SSBS), + NEEDS_FEAT(SCTLR_EL1_TIDCP, FEAT_TIDCP1), + NEEDS_FEAT(SCTLR_EL1_TME0 | + SCTLR_EL1_TME | + SCTLR_EL1_TMT0 | + SCTLR_EL1_TMT, + FEAT_TME), + NEEDS_FEAT(SCTLR_EL1_TWEDEL | + SCTLR_EL1_TWEDEn, + FEAT_TWED), + NEEDS_FEAT(SCTLR_EL1_UCI | + SCTLR_EL1_EE | + SCTLR_EL1_E0E | + SCTLR_EL1_WXN | + 
SCTLR_EL1_nTWE | + SCTLR_EL1_nTWI | + SCTLR_EL1_UCT | + SCTLR_EL1_DZE | + SCTLR_EL1_I | + SCTLR_EL1_UMA | + SCTLR_EL1_SA0 | + SCTLR_EL1_SA | + SCTLR_EL1_C | + SCTLR_EL1_A | + SCTLR_EL1_M, + FEAT_AA64EL1), +}; + +static const struct reg_bits_to_feat_map mdcr_el2_feat_map[] = { + NEEDS_FEAT(MDCR_EL2_EBWE, FEAT_Debugv8p9), + NEEDS_FEAT(MDCR_EL2_TDOSA, FEAT_DoubleLock), + NEEDS_FEAT(MDCR_EL2_PMEE, FEAT_EBEP), + NEEDS_FEAT(MDCR_EL2_TDCC, FEAT_FGT), + NEEDS_FEAT(MDCR_EL2_MTPME, FEAT_MTPMU), + NEEDS_FEAT(MDCR_EL2_HPME | + MDCR_EL2_HPMN | + MDCR_EL2_TPMCR | + MDCR_EL2_TPM, + FEAT_PMUv3), + NEEDS_FEAT(MDCR_EL2_HPMD, feat_pmuv3p1), + NEEDS_FEAT(MDCR_EL2_HCCD | + MDCR_EL2_HLP, + feat_pmuv3p5), + NEEDS_FEAT(MDCR_EL2_HPMFZO, feat_pmuv3p7), + NEEDS_FEAT(MDCR_EL2_PMSSE, FEAT_PMUv3_SS), + NEEDS_FEAT(MDCR_EL2_E2PB | + MDCR_EL2_TPMS, + FEAT_SPE), + NEEDS_FEAT(MDCR_EL2_HPMFZS, FEAT_SPEv1p2), + NEEDS_FEAT(MDCR_EL2_EnSPM, FEAT_SPMU), + NEEDS_FEAT(MDCR_EL2_EnSTEPOP, FEAT_STEP2), + NEEDS_FEAT(MDCR_EL2_E2TB, FEAT_TRBE), + NEEDS_FEAT(MDCR_EL2_TTRF, FEAT_TRF), + NEEDS_FEAT(MDCR_EL2_TDA | + MDCR_EL2_TDE | + MDCR_EL2_TDRA, + FEAT_AA64EL1), +}; + +static void __init check_feat_map(const struct reg_bits_to_feat_map *map, + int map_size, u64 res0, const char *str) +{ + u64 mask = 0; + + for (int i = 0; i < map_size; i++) + mask |= map[i].bits; + + if (mask != ~res0) + kvm_err("Undefined %s behaviour, bits %016llx\n", + str, mask ^ ~res0); +} + +void __init check_feature_map(void) +{ + check_feat_map(hfgrtr_feat_map, ARRAY_SIZE(hfgrtr_feat_map), + hfgrtr_masks.res0, hfgrtr_masks.str); + check_feat_map(hfgwtr_feat_map, ARRAY_SIZE(hfgwtr_feat_map), + hfgwtr_masks.res0, hfgwtr_masks.str); + check_feat_map(hfgitr_feat_map, ARRAY_SIZE(hfgitr_feat_map), + hfgitr_masks.res0, hfgitr_masks.str); + check_feat_map(hdfgrtr_feat_map, ARRAY_SIZE(hdfgrtr_feat_map), + hdfgrtr_masks.res0, hdfgrtr_masks.str); + check_feat_map(hdfgwtr_feat_map, ARRAY_SIZE(hdfgwtr_feat_map), + hdfgwtr_masks.res0, hdfgwtr_masks.str); + check_feat_map(hafgrtr_feat_map, ARRAY_SIZE(hafgrtr_feat_map), + hafgrtr_masks.res0, hafgrtr_masks.str); + check_feat_map(hcrx_feat_map, ARRAY_SIZE(hcrx_feat_map), + __HCRX_EL2_RES0, "HCRX_EL2"); + check_feat_map(hcr_feat_map, ARRAY_SIZE(hcr_feat_map), + HCR_EL2_RES0, "HCR_EL2"); + check_feat_map(sctlr2_feat_map, ARRAY_SIZE(sctlr2_feat_map), + SCTLR2_EL1_RES0, "SCTLR2_EL1"); + check_feat_map(tcr2_el2_feat_map, ARRAY_SIZE(tcr2_el2_feat_map), + TCR2_EL2_RES0, "TCR2_EL2"); + check_feat_map(sctlr_el1_feat_map, ARRAY_SIZE(sctlr_el1_feat_map), + SCTLR_EL1_RES0, "SCTLR_EL1"); + check_feat_map(mdcr_el2_feat_map, ARRAY_SIZE(mdcr_el2_feat_map), + MDCR_EL2_RES0, "MDCR_EL2"); +} + +static bool idreg_feat_match(struct kvm *kvm, const struct reg_bits_to_feat_map *map) +{ + u64 regval = kvm->arch.id_regs[map->regidx]; + u64 regfld = (regval >> map->shift) & GENMASK(map->width - 1, 0); + + if (map->sign) { + s64 sfld = sign_extend64(regfld, map->width - 1); + s64 slim = sign_extend64(map->lo_lim, map->width - 1); + return sfld >= slim; + } else { + return regfld >= map->lo_lim; + } +} + +static u64 __compute_fixed_bits(struct kvm *kvm, + const struct reg_bits_to_feat_map *map, + int map_size, + u64 *fixed_bits, + unsigned long require, + unsigned long exclude) +{ + u64 val = 0; + + for (int i = 0; i < map_size; i++) { + bool match; + + if ((map[i].flags & require) != require) + continue; + + if (map[i].flags & exclude) + continue; + + if (map[i].flags & CALL_FUNC) + match = (map[i].flags & FIXED_VALUE) ? 
+ map[i].fval(kvm, fixed_bits) : + map[i].match(kvm); + else + match = idreg_feat_match(kvm, &map[i]); + + if (!match || (map[i].flags & FIXED_VALUE)) + val |= map[i].bits; + } + + return val; +} + +static u64 compute_res0_bits(struct kvm *kvm, + const struct reg_bits_to_feat_map *map, + int map_size, + unsigned long require, + unsigned long exclude) +{ + return __compute_fixed_bits(kvm, map, map_size, NULL, + require, exclude | FIXED_VALUE); +} + +static u64 compute_fixed_bits(struct kvm *kvm, + const struct reg_bits_to_feat_map *map, + int map_size, + u64 *fixed_bits, + unsigned long require, + unsigned long exclude) +{ + return __compute_fixed_bits(kvm, map, map_size, fixed_bits, + require | FIXED_VALUE, exclude); +} + +void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt) +{ + u64 val = 0; + + switch (fgt) { + case HFGRTR_GROUP: + val |= compute_res0_bits(kvm, hfgrtr_feat_map, + ARRAY_SIZE(hfgrtr_feat_map), + 0, NEVER_FGU); + val |= compute_res0_bits(kvm, hfgwtr_feat_map, + ARRAY_SIZE(hfgwtr_feat_map), + 0, NEVER_FGU); + break; + case HFGITR_GROUP: + val |= compute_res0_bits(kvm, hfgitr_feat_map, + ARRAY_SIZE(hfgitr_feat_map), + 0, NEVER_FGU); + break; + case HDFGRTR_GROUP: + val |= compute_res0_bits(kvm, hdfgrtr_feat_map, + ARRAY_SIZE(hdfgrtr_feat_map), + 0, NEVER_FGU); + val |= compute_res0_bits(kvm, hdfgwtr_feat_map, + ARRAY_SIZE(hdfgwtr_feat_map), + 0, NEVER_FGU); + break; + case HAFGRTR_GROUP: + val |= compute_res0_bits(kvm, hafgrtr_feat_map, + ARRAY_SIZE(hafgrtr_feat_map), + 0, NEVER_FGU); + break; + case HFGRTR2_GROUP: + val |= compute_res0_bits(kvm, hfgrtr2_feat_map, + ARRAY_SIZE(hfgrtr2_feat_map), + 0, NEVER_FGU); + val |= compute_res0_bits(kvm, hfgwtr2_feat_map, + ARRAY_SIZE(hfgwtr2_feat_map), + 0, NEVER_FGU); + break; + case HFGITR2_GROUP: + val |= compute_res0_bits(kvm, hfgitr2_feat_map, + ARRAY_SIZE(hfgitr2_feat_map), + 0, NEVER_FGU); + break; + case HDFGRTR2_GROUP: + val |= compute_res0_bits(kvm, hdfgrtr2_feat_map, + ARRAY_SIZE(hdfgrtr2_feat_map), + 0, NEVER_FGU); + val |= compute_res0_bits(kvm, hdfgwtr2_feat_map, + ARRAY_SIZE(hdfgwtr2_feat_map), + 0, NEVER_FGU); + break; + default: + BUG(); + } + + kvm->arch.fgu[fgt] = val; +} + +void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1) +{ + u64 fixed = 0, mask; + + switch (reg) { + case HFGRTR_EL2: + *res0 = compute_res0_bits(kvm, hfgrtr_feat_map, + ARRAY_SIZE(hfgrtr_feat_map), 0, 0); + *res0 |= hfgrtr_masks.res0; + *res1 = HFGRTR_EL2_RES1; + break; + case HFGWTR_EL2: + *res0 = compute_res0_bits(kvm, hfgwtr_feat_map, + ARRAY_SIZE(hfgwtr_feat_map), 0, 0); + *res0 |= hfgwtr_masks.res0; + *res1 = HFGWTR_EL2_RES1; + break; + case HFGITR_EL2: + *res0 = compute_res0_bits(kvm, hfgitr_feat_map, + ARRAY_SIZE(hfgitr_feat_map), 0, 0); + *res0 |= hfgitr_masks.res0; + *res1 = HFGITR_EL2_RES1; + break; + case HDFGRTR_EL2: + *res0 = compute_res0_bits(kvm, hdfgrtr_feat_map, + ARRAY_SIZE(hdfgrtr_feat_map), 0, 0); + *res0 |= hdfgrtr_masks.res0; + *res1 = HDFGRTR_EL2_RES1; + break; + case HDFGWTR_EL2: + *res0 = compute_res0_bits(kvm, hdfgwtr_feat_map, + ARRAY_SIZE(hdfgwtr_feat_map), 0, 0); + *res0 |= hdfgwtr_masks.res0; + *res1 = HDFGWTR_EL2_RES1; + break; + case HAFGRTR_EL2: + *res0 = compute_res0_bits(kvm, hafgrtr_feat_map, + ARRAY_SIZE(hafgrtr_feat_map), 0, 0); + *res0 |= hafgrtr_masks.res0; + *res1 = HAFGRTR_EL2_RES1; + break; + case HFGRTR2_EL2: + *res0 = compute_res0_bits(kvm, hfgrtr2_feat_map, + ARRAY_SIZE(hfgrtr2_feat_map), 0, 0); + *res0 |= hfgrtr2_masks.res0; + *res1 = HFGRTR2_EL2_RES1; + break; + 
case HFGWTR2_EL2: + *res0 = compute_res0_bits(kvm, hfgwtr2_feat_map, + ARRAY_SIZE(hfgwtr2_feat_map), 0, 0); + *res0 |= hfgwtr2_masks.res0; + *res1 = HFGWTR2_EL2_RES1; + break; + case HFGITR2_EL2: + *res0 = compute_res0_bits(kvm, hfgitr2_feat_map, + ARRAY_SIZE(hfgitr2_feat_map), 0, 0); + *res0 |= hfgitr2_masks.res0; + *res1 = HFGITR2_EL2_RES1; + break; + case HDFGRTR2_EL2: + *res0 = compute_res0_bits(kvm, hdfgrtr2_feat_map, + ARRAY_SIZE(hdfgrtr2_feat_map), 0, 0); + *res0 |= hdfgrtr2_masks.res0; + *res1 = HDFGRTR2_EL2_RES1; + break; + case HDFGWTR2_EL2: + *res0 = compute_res0_bits(kvm, hdfgwtr2_feat_map, + ARRAY_SIZE(hdfgwtr2_feat_map), 0, 0); + *res0 |= hdfgwtr2_masks.res0; + *res1 = HDFGWTR2_EL2_RES1; + break; + case HCRX_EL2: + *res0 = compute_res0_bits(kvm, hcrx_feat_map, + ARRAY_SIZE(hcrx_feat_map), 0, 0); + *res0 |= __HCRX_EL2_RES0; + *res1 = __HCRX_EL2_RES1; + break; + case HCR_EL2: + mask = compute_fixed_bits(kvm, hcr_feat_map, + ARRAY_SIZE(hcr_feat_map), &fixed, + 0, 0); + *res0 = compute_res0_bits(kvm, hcr_feat_map, + ARRAY_SIZE(hcr_feat_map), 0, 0); + *res0 |= HCR_EL2_RES0 | (mask & ~fixed); + *res1 = HCR_EL2_RES1 | (mask & fixed); + break; + case SCTLR2_EL1: + case SCTLR2_EL2: + *res0 = compute_res0_bits(kvm, sctlr2_feat_map, + ARRAY_SIZE(sctlr2_feat_map), 0, 0); + *res0 |= SCTLR2_EL1_RES0; + *res1 = SCTLR2_EL1_RES1; + break; + case TCR2_EL2: + *res0 = compute_res0_bits(kvm, tcr2_el2_feat_map, + ARRAY_SIZE(tcr2_el2_feat_map), 0, 0); + *res0 |= TCR2_EL2_RES0; + *res1 = TCR2_EL2_RES1; + break; + case SCTLR_EL1: + *res0 = compute_res0_bits(kvm, sctlr_el1_feat_map, + ARRAY_SIZE(sctlr_el1_feat_map), 0, 0); + *res0 |= SCTLR_EL1_RES0; + *res1 = SCTLR_EL1_RES1; + break; + case MDCR_EL2: + *res0 = compute_res0_bits(kvm, mdcr_el2_feat_map, + ARRAY_SIZE(mdcr_el2_feat_map), 0, 0); + *res0 |= MDCR_EL2_RES0; + *res1 = MDCR_EL2_RES1; + break; + default: + WARN_ON_ONCE(1); + *res0 = *res1 = 0; + break; + } +} diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 0e4c805e7e891..381382c19fe47 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -81,6 +81,10 @@ void kvm_init_host_debug_data(void) !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P)) host_data_set_flag(HAS_SPE); + /* Check if we have BRBE implemented and available at the host */ + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_BRBE_SHIFT)) + host_data_set_flag(HAS_BRBE); + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT)) { /* Force disable trace in protected mode in case of no TRBE */ if (is_protected_kvm_enabled()) @@ -216,9 +220,9 @@ void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu) void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val) { if (val & OSLAR_EL1_OSLK) - __vcpu_sys_reg(vcpu, OSLSR_EL1) |= OSLSR_EL1_OSLK; + __vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK); else - __vcpu_sys_reg(vcpu, OSLSR_EL1) &= ~OSLSR_EL1_OSLK; + __vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, &=, ~OSLSR_EL1_OSLK); preempt_disable(); kvm_arch_vcpu_put(vcpu); diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index 0fcfcc0478f94..90cb4b7ae0ff7 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -88,6 +88,7 @@ enum cgt_group_id { CGT_HCRX_EnFPM, CGT_HCRX_TCR2En, + CGT_HCRX_SCTLR2En, CGT_CNTHCTL_EL1TVT, CGT_CNTHCTL_EL1TVCT, @@ -108,6 +109,7 @@ enum cgt_group_id { CGT_HCR_TTLB_TTLBOS, CGT_HCR_TVM_TRVM, CGT_HCR_TVM_TRVM_HCRX_TCR2En, + CGT_HCR_TVM_TRVM_HCRX_SCTLR2En, CGT_HCR_TPU_TICAB, CGT_HCR_TPU_TOCU, 
CGT_HCR_NV1_nNV2_ENSCXT, @@ -398,6 +400,12 @@ static const struct trap_bits coarse_trap_bits[] = { .mask = HCRX_EL2_TCR2En, .behaviour = BEHAVE_FORWARD_RW, }, + [CGT_HCRX_SCTLR2En] = { + .index = HCRX_EL2, + .value = 0, + .mask = HCRX_EL2_SCTLR2En, + .behaviour = BEHAVE_FORWARD_RW, + }, [CGT_CNTHCTL_EL1TVT] = { .index = CNTHCTL_EL2, .value = CNTHCTL_EL1TVT, @@ -449,6 +457,8 @@ static const enum cgt_group_id *coarse_control_combo[] = { MCB(CGT_HCR_TVM_TRVM, CGT_HCR_TVM, CGT_HCR_TRVM), MCB(CGT_HCR_TVM_TRVM_HCRX_TCR2En, CGT_HCR_TVM, CGT_HCR_TRVM, CGT_HCRX_TCR2En), + MCB(CGT_HCR_TVM_TRVM_HCRX_SCTLR2En, + CGT_HCR_TVM, CGT_HCR_TRVM, CGT_HCRX_SCTLR2En), MCB(CGT_HCR_TPU_TICAB, CGT_HCR_TPU, CGT_HCR_TICAB), MCB(CGT_HCR_TPU_TOCU, CGT_HCR_TPU, CGT_HCR_TOCU), MCB(CGT_HCR_NV1_nNV2_ENSCXT, CGT_HCR_NV1_nNV2, CGT_HCR_ENSCXT), @@ -622,6 +632,11 @@ struct encoding_to_trap_config { const unsigned int line; }; +/* + * WARNING: using ranges is a treacherous endeavour, as sysregs that + * are part of an architectural range are not necessarily contiguous + * in the [Op0,Op1,CRn,CRm,Ops] space. Tread carefully. + */ #define SR_RANGE_TRAP(sr_start, sr_end, trap_id) \ { \ .encoding = sr_start, \ @@ -777,6 +792,7 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { SR_TRAP(OP_TLBI_RVALE1OSNXS, CGT_HCR_TTLB_TTLBOS), SR_TRAP(OP_TLBI_RVAALE1OSNXS, CGT_HCR_TTLB_TTLBOS), SR_TRAP(SYS_SCTLR_EL1, CGT_HCR_TVM_TRVM), + SR_TRAP(SYS_SCTLR2_EL1, CGT_HCR_TVM_TRVM_HCRX_SCTLR2En), SR_TRAP(SYS_TTBR0_EL1, CGT_HCR_TVM_TRVM), SR_TRAP(SYS_TTBR1_EL1, CGT_HCR_TVM_TRVM), SR_TRAP(SYS_TCR_EL1, CGT_HCR_TVM_TRVM), @@ -1279,98 +1295,129 @@ enum fg_filter_id { __NR_FG_FILTER_IDS__ }; -#define SR_FGF(sr, g, b, p, f) \ - { \ - .encoding = sr, \ - .end = sr, \ - .tc = { \ +#define __FGT(g, b, p, f) \ + { \ .fgt = g ## _GROUP, \ .bit = g ## _EL2_ ## b ## _SHIFT, \ .pol = p, \ .fgf = f, \ - }, \ + } + +#define FGT(g, b, p) __FGT(g, b, p, __NO_FGF__) + +/* + * See the warning next to SR_RANGE_TRAP(), and apply the same + * level of caution. 
+ */ +#define SR_FGF_RANGE(sr, e, g, b, p, f) \ + { \ + .encoding = sr, \ + .end = e, \ + .tc = __FGT(g, b, p, f), \ .line = __LINE__, \ } -#define SR_FGT(sr, g, b, p) SR_FGF(sr, g, b, p, __NO_FGF__) +#define SR_FGF(sr, g, b, p, f) SR_FGF_RANGE(sr, sr, g, b, p, f) +#define SR_FGT(sr, g, b, p) SR_FGF_RANGE(sr, sr, g, b, p, __NO_FGF__) +#define SR_FGT_RANGE(sr, end, g, b, p) \ + SR_FGF_RANGE(sr, end, g, b, p, __NO_FGF__) static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = { /* HFGRTR_EL2, HFGWTR_EL2 */ - SR_FGT(SYS_AMAIR2_EL1, HFGxTR, nAMAIR2_EL1, 0), - SR_FGT(SYS_MAIR2_EL1, HFGxTR, nMAIR2_EL1, 0), - SR_FGT(SYS_S2POR_EL1, HFGxTR, nS2POR_EL1, 0), - SR_FGT(SYS_POR_EL1, HFGxTR, nPOR_EL1, 0), - SR_FGT(SYS_POR_EL0, HFGxTR, nPOR_EL0, 0), - SR_FGT(SYS_PIR_EL1, HFGxTR, nPIR_EL1, 0), - SR_FGT(SYS_PIRE0_EL1, HFGxTR, nPIRE0_EL1, 0), - SR_FGT(SYS_RCWMASK_EL1, HFGxTR, nRCWMASK_EL1, 0), - SR_FGT(SYS_TPIDR2_EL0, HFGxTR, nTPIDR2_EL0, 0), - SR_FGT(SYS_SMPRI_EL1, HFGxTR, nSMPRI_EL1, 0), - SR_FGT(SYS_GCSCR_EL1, HFGxTR, nGCS_EL1, 0), - SR_FGT(SYS_GCSPR_EL1, HFGxTR, nGCS_EL1, 0), - SR_FGT(SYS_GCSCRE0_EL1, HFGxTR, nGCS_EL0, 0), - SR_FGT(SYS_GCSPR_EL0, HFGxTR, nGCS_EL0, 0), - SR_FGT(SYS_ACCDATA_EL1, HFGxTR, nACCDATA_EL1, 0), - SR_FGT(SYS_ERXADDR_EL1, HFGxTR, ERXADDR_EL1, 1), - SR_FGT(SYS_ERXPFGCDN_EL1, HFGxTR, ERXPFGCDN_EL1, 1), - SR_FGT(SYS_ERXPFGCTL_EL1, HFGxTR, ERXPFGCTL_EL1, 1), - SR_FGT(SYS_ERXPFGF_EL1, HFGxTR, ERXPFGF_EL1, 1), - SR_FGT(SYS_ERXMISC0_EL1, HFGxTR, ERXMISCn_EL1, 1), - SR_FGT(SYS_ERXMISC1_EL1, HFGxTR, ERXMISCn_EL1, 1), - SR_FGT(SYS_ERXMISC2_EL1, HFGxTR, ERXMISCn_EL1, 1), - SR_FGT(SYS_ERXMISC3_EL1, HFGxTR, ERXMISCn_EL1, 1), - SR_FGT(SYS_ERXSTATUS_EL1, HFGxTR, ERXSTATUS_EL1, 1), - SR_FGT(SYS_ERXCTLR_EL1, HFGxTR, ERXCTLR_EL1, 1), - SR_FGT(SYS_ERXFR_EL1, HFGxTR, ERXFR_EL1, 1), - SR_FGT(SYS_ERRSELR_EL1, HFGxTR, ERRSELR_EL1, 1), - SR_FGT(SYS_ERRIDR_EL1, HFGxTR, ERRIDR_EL1, 1), - SR_FGT(SYS_ICC_IGRPEN0_EL1, HFGxTR, ICC_IGRPENn_EL1, 1), - SR_FGT(SYS_ICC_IGRPEN1_EL1, HFGxTR, ICC_IGRPENn_EL1, 1), - SR_FGT(SYS_VBAR_EL1, HFGxTR, VBAR_EL1, 1), - SR_FGT(SYS_TTBR1_EL1, HFGxTR, TTBR1_EL1, 1), - SR_FGT(SYS_TTBR0_EL1, HFGxTR, TTBR0_EL1, 1), - SR_FGT(SYS_TPIDR_EL0, HFGxTR, TPIDR_EL0, 1), - SR_FGT(SYS_TPIDRRO_EL0, HFGxTR, TPIDRRO_EL0, 1), - SR_FGT(SYS_TPIDR_EL1, HFGxTR, TPIDR_EL1, 1), - SR_FGT(SYS_TCR_EL1, HFGxTR, TCR_EL1, 1), - SR_FGT(SYS_TCR2_EL1, HFGxTR, TCR_EL1, 1), - SR_FGT(SYS_SCXTNUM_EL0, HFGxTR, SCXTNUM_EL0, 1), - SR_FGT(SYS_SCXTNUM_EL1, HFGxTR, SCXTNUM_EL1, 1), - SR_FGT(SYS_SCTLR_EL1, HFGxTR, SCTLR_EL1, 1), - SR_FGT(SYS_REVIDR_EL1, HFGxTR, REVIDR_EL1, 1), - SR_FGT(SYS_PAR_EL1, HFGxTR, PAR_EL1, 1), - SR_FGT(SYS_MPIDR_EL1, HFGxTR, MPIDR_EL1, 1), - SR_FGT(SYS_MIDR_EL1, HFGxTR, MIDR_EL1, 1), - SR_FGT(SYS_MAIR_EL1, HFGxTR, MAIR_EL1, 1), - SR_FGT(SYS_LORSA_EL1, HFGxTR, LORSA_EL1, 1), - SR_FGT(SYS_LORN_EL1, HFGxTR, LORN_EL1, 1), - SR_FGT(SYS_LORID_EL1, HFGxTR, LORID_EL1, 1), - SR_FGT(SYS_LOREA_EL1, HFGxTR, LOREA_EL1, 1), - SR_FGT(SYS_LORC_EL1, HFGxTR, LORC_EL1, 1), - SR_FGT(SYS_ISR_EL1, HFGxTR, ISR_EL1, 1), - SR_FGT(SYS_FAR_EL1, HFGxTR, FAR_EL1, 1), - SR_FGT(SYS_ESR_EL1, HFGxTR, ESR_EL1, 1), - SR_FGT(SYS_DCZID_EL0, HFGxTR, DCZID_EL0, 1), - SR_FGT(SYS_CTR_EL0, HFGxTR, CTR_EL0, 1), - SR_FGT(SYS_CSSELR_EL1, HFGxTR, CSSELR_EL1, 1), - SR_FGT(SYS_CPACR_EL1, HFGxTR, CPACR_EL1, 1), - SR_FGT(SYS_CONTEXTIDR_EL1, HFGxTR, CONTEXTIDR_EL1, 1), - SR_FGT(SYS_CLIDR_EL1, HFGxTR, CLIDR_EL1, 1), - SR_FGT(SYS_CCSIDR_EL1, HFGxTR, CCSIDR_EL1, 1), - SR_FGT(SYS_APIBKEYLO_EL1, HFGxTR, APIBKey, 1), - 
SR_FGT(SYS_APIBKEYHI_EL1, HFGxTR, APIBKey, 1), - SR_FGT(SYS_APIAKEYLO_EL1, HFGxTR, APIAKey, 1), - SR_FGT(SYS_APIAKEYHI_EL1, HFGxTR, APIAKey, 1), - SR_FGT(SYS_APGAKEYLO_EL1, HFGxTR, APGAKey, 1), - SR_FGT(SYS_APGAKEYHI_EL1, HFGxTR, APGAKey, 1), - SR_FGT(SYS_APDBKEYLO_EL1, HFGxTR, APDBKey, 1), - SR_FGT(SYS_APDBKEYHI_EL1, HFGxTR, APDBKey, 1), - SR_FGT(SYS_APDAKEYLO_EL1, HFGxTR, APDAKey, 1), - SR_FGT(SYS_APDAKEYHI_EL1, HFGxTR, APDAKey, 1), - SR_FGT(SYS_AMAIR_EL1, HFGxTR, AMAIR_EL1, 1), - SR_FGT(SYS_AIDR_EL1, HFGxTR, AIDR_EL1, 1), - SR_FGT(SYS_AFSR1_EL1, HFGxTR, AFSR1_EL1, 1), - SR_FGT(SYS_AFSR0_EL1, HFGxTR, AFSR0_EL1, 1), + SR_FGT(SYS_AMAIR2_EL1, HFGRTR, nAMAIR2_EL1, 0), + SR_FGT(SYS_MAIR2_EL1, HFGRTR, nMAIR2_EL1, 0), + SR_FGT(SYS_S2POR_EL1, HFGRTR, nS2POR_EL1, 0), + SR_FGT(SYS_POR_EL1, HFGRTR, nPOR_EL1, 0), + SR_FGT(SYS_POR_EL0, HFGRTR, nPOR_EL0, 0), + SR_FGT(SYS_PIR_EL1, HFGRTR, nPIR_EL1, 0), + SR_FGT(SYS_PIRE0_EL1, HFGRTR, nPIRE0_EL1, 0), + SR_FGT(SYS_RCWMASK_EL1, HFGRTR, nRCWMASK_EL1, 0), + SR_FGT(SYS_TPIDR2_EL0, HFGRTR, nTPIDR2_EL0, 0), + SR_FGT(SYS_SMPRI_EL1, HFGRTR, nSMPRI_EL1, 0), + SR_FGT(SYS_GCSCR_EL1, HFGRTR, nGCS_EL1, 0), + SR_FGT(SYS_GCSPR_EL1, HFGRTR, nGCS_EL1, 0), + SR_FGT(SYS_GCSCRE0_EL1, HFGRTR, nGCS_EL0, 0), + SR_FGT(SYS_GCSPR_EL0, HFGRTR, nGCS_EL0, 0), + SR_FGT(SYS_ACCDATA_EL1, HFGRTR, nACCDATA_EL1, 0), + SR_FGT(SYS_ERXADDR_EL1, HFGRTR, ERXADDR_EL1, 1), + SR_FGT(SYS_ERXPFGCDN_EL1, HFGRTR, ERXPFGCDN_EL1, 1), + SR_FGT(SYS_ERXPFGCTL_EL1, HFGRTR, ERXPFGCTL_EL1, 1), + SR_FGT(SYS_ERXPFGF_EL1, HFGRTR, ERXPFGF_EL1, 1), + SR_FGT(SYS_ERXMISC0_EL1, HFGRTR, ERXMISCn_EL1, 1), + SR_FGT(SYS_ERXMISC1_EL1, HFGRTR, ERXMISCn_EL1, 1), + SR_FGT(SYS_ERXMISC2_EL1, HFGRTR, ERXMISCn_EL1, 1), + SR_FGT(SYS_ERXMISC3_EL1, HFGRTR, ERXMISCn_EL1, 1), + SR_FGT(SYS_ERXSTATUS_EL1, HFGRTR, ERXSTATUS_EL1, 1), + SR_FGT(SYS_ERXCTLR_EL1, HFGRTR, ERXCTLR_EL1, 1), + SR_FGT(SYS_ERXFR_EL1, HFGRTR, ERXFR_EL1, 1), + SR_FGT(SYS_ERRSELR_EL1, HFGRTR, ERRSELR_EL1, 1), + SR_FGT(SYS_ERRIDR_EL1, HFGRTR, ERRIDR_EL1, 1), + SR_FGT(SYS_ICC_IGRPEN0_EL1, HFGRTR, ICC_IGRPENn_EL1, 1), + SR_FGT(SYS_ICC_IGRPEN1_EL1, HFGRTR, ICC_IGRPENn_EL1, 1), + SR_FGT(SYS_VBAR_EL1, HFGRTR, VBAR_EL1, 1), + SR_FGT(SYS_TTBR1_EL1, HFGRTR, TTBR1_EL1, 1), + SR_FGT(SYS_TTBR0_EL1, HFGRTR, TTBR0_EL1, 1), + SR_FGT(SYS_TPIDR_EL0, HFGRTR, TPIDR_EL0, 1), + SR_FGT(SYS_TPIDRRO_EL0, HFGRTR, TPIDRRO_EL0, 1), + SR_FGT(SYS_TPIDR_EL1, HFGRTR, TPIDR_EL1, 1), + SR_FGT(SYS_TCR_EL1, HFGRTR, TCR_EL1, 1), + SR_FGT(SYS_TCR2_EL1, HFGRTR, TCR_EL1, 1), + SR_FGT(SYS_SCXTNUM_EL0, HFGRTR, SCXTNUM_EL0, 1), + SR_FGT(SYS_SCXTNUM_EL1, HFGRTR, SCXTNUM_EL1, 1), + SR_FGT(SYS_SCTLR_EL1, HFGRTR, SCTLR_EL1, 1), + SR_FGT(SYS_SCTLR2_EL1, HFGRTR, SCTLR_EL1, 1), + SR_FGT(SYS_REVIDR_EL1, HFGRTR, REVIDR_EL1, 1), + SR_FGT(SYS_PAR_EL1, HFGRTR, PAR_EL1, 1), + SR_FGT(SYS_MPIDR_EL1, HFGRTR, MPIDR_EL1, 1), + SR_FGT(SYS_MIDR_EL1, HFGRTR, MIDR_EL1, 1), + SR_FGT(SYS_MAIR_EL1, HFGRTR, MAIR_EL1, 1), + SR_FGT(SYS_LORSA_EL1, HFGRTR, LORSA_EL1, 1), + SR_FGT(SYS_LORN_EL1, HFGRTR, LORN_EL1, 1), + SR_FGT(SYS_LORID_EL1, HFGRTR, LORID_EL1, 1), + SR_FGT(SYS_LOREA_EL1, HFGRTR, LOREA_EL1, 1), + SR_FGT(SYS_LORC_EL1, HFGRTR, LORC_EL1, 1), + SR_FGT(SYS_ISR_EL1, HFGRTR, ISR_EL1, 1), + SR_FGT(SYS_FAR_EL1, HFGRTR, FAR_EL1, 1), + SR_FGT(SYS_ESR_EL1, HFGRTR, ESR_EL1, 1), + SR_FGT(SYS_DCZID_EL0, HFGRTR, DCZID_EL0, 1), + SR_FGT(SYS_CTR_EL0, HFGRTR, CTR_EL0, 1), + SR_FGT(SYS_CSSELR_EL1, HFGRTR, CSSELR_EL1, 1), + SR_FGT(SYS_CPACR_EL1, HFGRTR, CPACR_EL1, 1), + SR_FGT(SYS_CONTEXTIDR_EL1, HFGRTR, CONTEXTIDR_EL1, 1), + 
SR_FGT(SYS_CLIDR_EL1, HFGRTR, CLIDR_EL1, 1), + SR_FGT(SYS_CCSIDR_EL1, HFGRTR, CCSIDR_EL1, 1), + SR_FGT(SYS_APIBKEYLO_EL1, HFGRTR, APIBKey, 1), + SR_FGT(SYS_APIBKEYHI_EL1, HFGRTR, APIBKey, 1), + SR_FGT(SYS_APIAKEYLO_EL1, HFGRTR, APIAKey, 1), + SR_FGT(SYS_APIAKEYHI_EL1, HFGRTR, APIAKey, 1), + SR_FGT(SYS_APGAKEYLO_EL1, HFGRTR, APGAKey, 1), + SR_FGT(SYS_APGAKEYHI_EL1, HFGRTR, APGAKey, 1), + SR_FGT(SYS_APDBKEYLO_EL1, HFGRTR, APDBKey, 1), + SR_FGT(SYS_APDBKEYHI_EL1, HFGRTR, APDBKey, 1), + SR_FGT(SYS_APDAKEYLO_EL1, HFGRTR, APDAKey, 1), + SR_FGT(SYS_APDAKEYHI_EL1, HFGRTR, APDAKey, 1), + SR_FGT(SYS_AMAIR_EL1, HFGRTR, AMAIR_EL1, 1), + SR_FGT(SYS_AIDR_EL1, HFGRTR, AIDR_EL1, 1), + SR_FGT(SYS_AFSR1_EL1, HFGRTR, AFSR1_EL1, 1), + SR_FGT(SYS_AFSR0_EL1, HFGRTR, AFSR0_EL1, 1), + + /* HFGRTR2_EL2, HFGWTR2_EL2 */ + SR_FGT(SYS_ACTLRALIAS_EL1, HFGRTR2, nACTLRALIAS_EL1, 0), + SR_FGT(SYS_ACTLRMASK_EL1, HFGRTR2, nACTLRMASK_EL1, 0), + SR_FGT(SYS_CPACRALIAS_EL1, HFGRTR2, nCPACRALIAS_EL1, 0), + SR_FGT(SYS_CPACRMASK_EL1, HFGRTR2, nCPACRMASK_EL1, 0), + SR_FGT(SYS_PFAR_EL1, HFGRTR2, nPFAR_EL1, 0), + SR_FGT(SYS_RCWSMASK_EL1, HFGRTR2, nRCWSMASK_EL1, 0), + SR_FGT(SYS_SCTLR2ALIAS_EL1, HFGRTR2, nSCTLRALIAS2_EL1, 0), + SR_FGT(SYS_SCTLR2MASK_EL1, HFGRTR2, nSCTLR2MASK_EL1, 0), + SR_FGT(SYS_SCTLRALIAS_EL1, HFGRTR2, nSCTLRALIAS_EL1, 0), + SR_FGT(SYS_SCTLRMASK_EL1, HFGRTR2, nSCTLRMASK_EL1, 0), + SR_FGT(SYS_TCR2ALIAS_EL1, HFGRTR2, nTCR2ALIAS_EL1, 0), + SR_FGT(SYS_TCR2MASK_EL1, HFGRTR2, nTCR2MASK_EL1, 0), + SR_FGT(SYS_TCRALIAS_EL1, HFGRTR2, nTCRALIAS_EL1, 0), + SR_FGT(SYS_TCRMASK_EL1, HFGRTR2, nTCRMASK_EL1, 0), + SR_FGT(SYS_ERXGSR_EL1, HFGRTR2, nERXGSR_EL1, 0), + /* HFGITR_EL2 */ SR_FGT(OP_AT_S1E1A, HFGITR, ATS1E1A, 1), SR_FGT(OP_COSP_RCTX, HFGITR, COSPRCTX, 1), @@ -1480,6 +1527,11 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = { SR_FGT(SYS_IC_IVAU, HFGITR, ICIVAU, 1), SR_FGT(SYS_IC_IALLU, HFGITR, ICIALLU, 1), SR_FGT(SYS_IC_IALLUIS, HFGITR, ICIALLUIS, 1), + + /* HFGITR2_EL2 */ + SR_FGT(SYS_DC_CIGDVAPS, HFGITR2, nDCCIVAPS, 0), + SR_FGT(SYS_DC_CIVAPS, HFGITR2, nDCCIVAPS, 0), + /* HDFGRTR_EL2 */ SR_FGT(SYS_PMBIDR_EL1, HDFGRTR, PMBIDR_EL1, 1), SR_FGT(SYS_PMSNEVFR_EL1, HDFGRTR, nPMSNEVFR_EL1, 0), @@ -1789,68 +1841,12 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = { SR_FGT(SYS_PMCNTENSET_EL0, HDFGRTR, PMCNTEN, 1), SR_FGT(SYS_PMCCNTR_EL0, HDFGRTR, PMCCNTR_EL0, 1), SR_FGT(SYS_PMCCFILTR_EL0, HDFGRTR, PMCCFILTR_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(0), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(1), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(2), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(3), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(4), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(5), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(6), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(7), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(8), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(9), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(10), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(11), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(12), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(13), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(14), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(15), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(16), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(17), 
HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(18), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(19), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(20), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(21), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(22), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(23), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(24), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(25), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(26), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(27), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(28), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(29), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVTYPERn_EL0(30), HDFGRTR, PMEVTYPERn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(0), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(1), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(2), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(3), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(4), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(5), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(6), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(7), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(8), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(9), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(10), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(11), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(12), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(13), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(14), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(15), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(16), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(17), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(18), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(19), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(20), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(21), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(22), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(23), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(24), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(25), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(26), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(27), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(28), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(29), HDFGRTR, PMEVCNTRn_EL0, 1), - SR_FGT(SYS_PMEVCNTRn_EL0(30), HDFGRTR, PMEVCNTRn_EL0, 1), + SR_FGT_RANGE(SYS_PMEVTYPERn_EL0(0), + SYS_PMEVTYPERn_EL0(30), + HDFGRTR, PMEVTYPERn_EL0, 1), + SR_FGT_RANGE(SYS_PMEVCNTRn_EL0(0), + SYS_PMEVCNTRn_EL0(30), + HDFGRTR, PMEVCNTRn_EL0, 1), SR_FGT(SYS_OSDLR_EL1, HDFGRTR, OSDLR_EL1, 1), SR_FGT(SYS_OSECCR_EL1, HDFGRTR, OSECCR_EL1, 1), SR_FGT(SYS_OSLSR_EL1, HDFGRTR, OSLSR_EL1, 1), @@ -1928,6 +1924,59 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = { SR_FGT(SYS_DBGBCRn_EL1(13), HDFGRTR, DBGBCRn_EL1, 1), SR_FGT(SYS_DBGBCRn_EL1(14), HDFGRTR, DBGBCRn_EL1, 1), SR_FGT(SYS_DBGBCRn_EL1(15), HDFGRTR, DBGBCRn_EL1, 1), + + /* HDFGRTR2_EL2 */ + SR_FGT(SYS_MDSELR_EL1, HDFGRTR2, nMDSELR_EL1, 0), + SR_FGT(SYS_MDSTEPOP_EL1, HDFGRTR2, nMDSTEPOP_EL1, 0), + SR_FGT(SYS_PMCCNTSVR_EL1, HDFGRTR2, nPMSSDATA, 0), + SR_FGT_RANGE(SYS_PMEVCNTSVRn_EL1(0), + SYS_PMEVCNTSVRn_EL1(30), + HDFGRTR2, nPMSSDATA, 0), + 
SR_FGT(SYS_PMICNTSVR_EL1, HDFGRTR2, nPMSSDATA, 0), + SR_FGT(SYS_PMECR_EL1, HDFGRTR2, nPMECR_EL1, 0), + SR_FGT(SYS_PMIAR_EL1, HDFGRTR2, nPMIAR_EL1, 0), + SR_FGT(SYS_PMICFILTR_EL0, HDFGRTR2, nPMICFILTR_EL0, 0), + SR_FGT(SYS_PMICNTR_EL0, HDFGRTR2, nPMICNTR_EL0, 0), + SR_FGT(SYS_PMSSCR_EL1, HDFGRTR2, nPMSSCR_EL1, 0), + SR_FGT(SYS_PMUACR_EL1, HDFGRTR2, nPMUACR_EL1, 0), + SR_FGT(SYS_SPMACCESSR_EL1, HDFGRTR2, nSPMACCESSR_EL1, 0), + SR_FGT(SYS_SPMCFGR_EL1, HDFGRTR2, nSPMID, 0), + SR_FGT(SYS_SPMDEVARCH_EL1, HDFGRTR2, nSPMID, 0), + SR_FGT(SYS_SPMCGCRn_EL1(0), HDFGRTR2, nSPMID, 0), + SR_FGT(SYS_SPMCGCRn_EL1(1), HDFGRTR2, nSPMID, 0), + SR_FGT(SYS_SPMIIDR_EL1, HDFGRTR2, nSPMID, 0), + SR_FGT(SYS_SPMCNTENCLR_EL0, HDFGRTR2, nSPMCNTEN, 0), + SR_FGT(SYS_SPMCNTENSET_EL0, HDFGRTR2, nSPMCNTEN, 0), + SR_FGT(SYS_SPMCR_EL0, HDFGRTR2, nSPMCR_EL0, 0), + SR_FGT(SYS_SPMDEVAFF_EL1, HDFGRTR2, nSPMDEVAFF_EL1, 0), + /* + * We have up to 64 of these registers in ranges of 16, banked via + * SPMSELR_EL0.BANK. We're only concerned with the accessors here, + * not the architectural registers. + */ + SR_FGT_RANGE(SYS_SPMEVCNTRn_EL0(0), + SYS_SPMEVCNTRn_EL0(15), + HDFGRTR2, nSPMEVCNTRn_EL0, 0), + SR_FGT_RANGE(SYS_SPMEVFILT2Rn_EL0(0), + SYS_SPMEVFILT2Rn_EL0(15), + HDFGRTR2, nSPMEVTYPERn_EL0, 0), + SR_FGT_RANGE(SYS_SPMEVFILTRn_EL0(0), + SYS_SPMEVFILTRn_EL0(15), + HDFGRTR2, nSPMEVTYPERn_EL0, 0), + SR_FGT_RANGE(SYS_SPMEVTYPERn_EL0(0), + SYS_SPMEVTYPERn_EL0(15), + HDFGRTR2, nSPMEVTYPERn_EL0, 0), + SR_FGT(SYS_SPMINTENCLR_EL1, HDFGRTR2, nSPMINTEN, 0), + SR_FGT(SYS_SPMINTENSET_EL1, HDFGRTR2, nSPMINTEN, 0), + SR_FGT(SYS_SPMOVSCLR_EL0, HDFGRTR2, nSPMOVS, 0), + SR_FGT(SYS_SPMOVSSET_EL0, HDFGRTR2, nSPMOVS, 0), + SR_FGT(SYS_SPMSCR_EL1, HDFGRTR2, nSPMSCR_EL1, 0), + SR_FGT(SYS_SPMSELR_EL0, HDFGRTR2, nSPMSELR_EL0, 0), + SR_FGT(SYS_TRCITECR_EL1, HDFGRTR2, nTRCITECR_EL1, 0), + SR_FGT(SYS_PMBMAR_EL1, HDFGRTR2, nPMBMAR_EL1, 0), + SR_FGT(SYS_PMSDSFR_EL1, HDFGRTR2, nPMSDSFR_EL1, 0), + SR_FGT(SYS_TRBMPAM_EL1, HDFGRTR2, nTRBMPAM_EL1, 0), + /* * HDFGWTR_EL2 * @@ -1938,12 +1987,19 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = { * read-side mappings, and only the write-side mappings that * differ from the read side, and the trap handler will pick * the correct shadow register based on the access type. + * + * Same model applies to the FEAT_FGT2 registers. */ SR_FGT(SYS_TRFCR_EL1, HDFGWTR, TRFCR_EL1, 1), SR_FGT(SYS_TRCOSLAR, HDFGWTR, TRCOSLAR, 1), SR_FGT(SYS_PMCR_EL0, HDFGWTR, PMCR_EL0, 1), SR_FGT(SYS_PMSWINC_EL0, HDFGWTR, PMSWINC_EL0, 1), SR_FGT(SYS_OSLAR_EL1, HDFGWTR, OSLAR_EL1, 1), + + /* HDFGWTR2_EL2 */ + SR_FGT(SYS_PMZR_EL0, HDFGWTR2, nPMZR_EL0, 0), + SR_FGT(SYS_SPMZR_EL0, HDFGWTR2, nSPMEVCNTRn_EL0, 0), + /* * HAFGRTR_EL2 */ @@ -1989,6 +2045,20 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = { SR_FGT(SYS_AMEVCNTR0_EL0(0), HAFGRTR, AMEVCNTR00_EL0, 1), }; +/* + * Additional FGTs that do not fire with ESR_EL2.EC==0x18. This table + * isn't used for exception routing, but only as a promise that the + * trap is handled somewhere else. 
+ */ +static const union trap_config non_0x18_fgt[] __initconst = { + FGT(HFGITR, PSBCSYNC, 1), + FGT(HFGITR, nGCSSTR_EL1, 0), + FGT(HFGITR, SVC_EL1, 1), + FGT(HFGITR, SVC_EL0, 1), + FGT(HFGITR, ERET, 1), + FGT(HFGITR2, TSBCSYNC, 1), +}; + static union trap_config get_trap_config(u32 sysreg) { return (union trap_config) { @@ -2033,6 +2103,130 @@ static u32 encoding_next(u32 encoding) return sys_reg(op0 + 1, 0, 0, 0, 0); } +#define FGT_MASKS(__n, __m) \ + struct fgt_masks __n = { .str = #__m, .res0 = __m, } + +FGT_MASKS(hfgrtr_masks, HFGRTR_EL2_RES0); +FGT_MASKS(hfgwtr_masks, HFGWTR_EL2_RES0); +FGT_MASKS(hfgitr_masks, HFGITR_EL2_RES0); +FGT_MASKS(hdfgrtr_masks, HDFGRTR_EL2_RES0); +FGT_MASKS(hdfgwtr_masks, HDFGWTR_EL2_RES0); +FGT_MASKS(hafgrtr_masks, HAFGRTR_EL2_RES0); +FGT_MASKS(hfgrtr2_masks, HFGRTR2_EL2_RES0); +FGT_MASKS(hfgwtr2_masks, HFGWTR2_EL2_RES0); +FGT_MASKS(hfgitr2_masks, HFGITR2_EL2_RES0); +FGT_MASKS(hdfgrtr2_masks, HDFGRTR2_EL2_RES0); +FGT_MASKS(hdfgwtr2_masks, HDFGWTR2_EL2_RES0); + +static __init bool aggregate_fgt(union trap_config tc) +{ + struct fgt_masks *rmasks, *wmasks; + + switch (tc.fgt) { + case HFGRTR_GROUP: + rmasks = &hfgrtr_masks; + wmasks = &hfgwtr_masks; + break; + case HDFGRTR_GROUP: + rmasks = &hdfgrtr_masks; + wmasks = &hdfgwtr_masks; + break; + case HAFGRTR_GROUP: + rmasks = &hafgrtr_masks; + wmasks = NULL; + break; + case HFGITR_GROUP: + rmasks = &hfgitr_masks; + wmasks = NULL; + break; + case HFGRTR2_GROUP: + rmasks = &hfgrtr2_masks; + wmasks = &hfgwtr2_masks; + break; + case HDFGRTR2_GROUP: + rmasks = &hdfgrtr2_masks; + wmasks = &hdfgwtr2_masks; + break; + case HFGITR2_GROUP: + rmasks = &hfgitr2_masks; + wmasks = NULL; + break; + } + + /* + * A bit can be reserved in either the R or W register, but + * not both. + */ + if ((BIT(tc.bit) & rmasks->res0) && + (!wmasks || (BIT(tc.bit) & wmasks->res0))) + return false; + + if (tc.pol) + rmasks->mask |= BIT(tc.bit) & ~rmasks->res0; + else + rmasks->nmask |= BIT(tc.bit) & ~rmasks->res0; + + if (wmasks) { + if (tc.pol) + wmasks->mask |= BIT(tc.bit) & ~wmasks->res0; + else + wmasks->nmask |= BIT(tc.bit) & ~wmasks->res0; + } + + return true; +} + +static __init int check_fgt_masks(struct fgt_masks *masks) +{ + unsigned long duplicate = masks->mask & masks->nmask; + u64 res0 = masks->res0; + int ret = 0; + + if (duplicate) { + int i; + + for_each_set_bit(i, &duplicate, 64) { + kvm_err("%s[%d] bit has both polarities\n", + masks->str, i); + } + + ret = -EINVAL; + } + + masks->res0 = ~(masks->mask | masks->nmask); + if (masks->res0 != res0) + kvm_info("Implicit %s = %016llx, expecting %016llx\n", + masks->str, masks->res0, res0); + + return ret; +} + +static __init int check_all_fgt_masks(int ret) +{ + static struct fgt_masks * const masks[] __initconst = { + &hfgrtr_masks, + &hfgwtr_masks, + &hfgitr_masks, + &hdfgrtr_masks, + &hdfgwtr_masks, + &hafgrtr_masks, + &hfgrtr2_masks, + &hfgwtr2_masks, + &hfgitr2_masks, + &hdfgrtr2_masks, + &hdfgwtr2_masks, + }; + int err = 0; + + for (int i = 0; i < ARRAY_SIZE(masks); i++) + err |= check_fgt_masks(masks[i]); + + return ret ?: err; +} + +#define for_each_encoding_in(__x, __s, __e) \ + for (u32 __x = __s; __x <= __e; __x = encoding_next(__x)) + int __init populate_nv_trap_config(void) { int ret = 0; @@ -2041,6 +2235,7 @@ int __init populate_nv_trap_config(void) BUILD_BUG_ON(__NR_CGT_GROUP_IDS__ > BIT(TC_CGT_BITS)); BUILD_BUG_ON(__NR_FGT_GROUP_IDS__ > BIT(TC_FGT_BITS)); BUILD_BUG_ON(__NR_FG_FILTER_IDS__ > BIT(TC_FGF_BITS)); + BUILD_BUG_ON(__HCRX_EL2_MASK & __HCRX_EL2_nMASK); 
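/*
 * Illustrative sketch (not part of this patch): how the per-register
 * FGT bookkeeping above derives an implicit RES0 value from the trap
 * tables. Names are simplified assumptions; only the polarity handling
 * mirrors aggregate_fgt()/check_fgt_masks(), where positive-polarity
 * bits land in 'mask', negative-polarity (nXXX) bits in 'nmask', and
 * anything never described by a table is treated as RES0.
 */
#include <stdint.h>
#include <stdio.h>

struct masks {
	uint64_t mask;   /* positive-polarity trap bits (1 == trap) */
	uint64_t nmask;  /* negative-polarity trap bits (0 == trap) */
};

static void add_bit(struct masks *m, unsigned bit, int pol)
{
	if (pol)
		m->mask |= 1ULL << bit;
	else
		m->nmask |= 1ULL << bit;
}

int main(void)
{
	struct masks m = { 0 };

	add_bit(&m, 0, 1);	/* e.g. a "trap when set" bit */
	add_bit(&m, 1, 0);	/* e.g. an nXXX "trap when clear" bit */

	/* A bit claiming both polarities would be a table bug. */
	if (m.mask & m.nmask)
		fprintf(stderr, "bit has both polarities\n");

	/* Everything the tables never mention is implicitly RES0. */
	printf("implicit res0 = %#llx\n",
	       (unsigned long long)~(m.mask | m.nmask));
	return 0;
}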
for (int i = 0; i < ARRAY_SIZE(encoding_to_cgt); i++) { const struct encoding_to_trap_config *cgt = &encoding_to_cgt[i]; @@ -2051,7 +2246,7 @@ int __init populate_nv_trap_config(void) ret = -EINVAL; } - for (u32 enc = cgt->encoding; enc <= cgt->end; enc = encoding_next(enc)) { + for_each_encoding_in(enc, cgt->encoding, cgt->end) { prev = xa_store(&sr_forward_xa, enc, xa_mk_value(cgt->tc.val), GFP_KERNEL); if (prev && !xa_is_err(prev)) { @@ -2066,6 +2261,10 @@ int __init populate_nv_trap_config(void) } } + if (__HCRX_EL2_RES0 != HCRX_EL2_RES0) + kvm_info("Sanitised HCR_EL2_RES0 = %016llx, expecting %016llx\n", + __HCRX_EL2_RES0, HCRX_EL2_RES0); + kvm_info("nv: %ld coarse grained trap handlers\n", ARRAY_SIZE(encoding_to_cgt)); @@ -2082,23 +2281,39 @@ int __init populate_nv_trap_config(void) print_nv_trap_error(fgt, "Invalid FGT", ret); } - tc = get_trap_config(fgt->encoding); + for_each_encoding_in(enc, fgt->encoding, fgt->end) { + tc = get_trap_config(enc); - if (tc.fgt) { - ret = -EINVAL; - print_nv_trap_error(fgt, "Duplicate FGT", ret); - } + if (tc.fgt) { + ret = -EINVAL; + print_nv_trap_error(fgt, "Duplicate FGT", ret); + } + + tc.val |= fgt->tc.val; + prev = xa_store(&sr_forward_xa, enc, + xa_mk_value(tc.val), GFP_KERNEL); + + if (xa_is_err(prev)) { + ret = xa_err(prev); + print_nv_trap_error(fgt, "Failed FGT insertion", ret); + } - tc.val |= fgt->tc.val; - prev = xa_store(&sr_forward_xa, fgt->encoding, - xa_mk_value(tc.val), GFP_KERNEL); + if (!aggregate_fgt(tc)) { + ret = -EINVAL; + print_nv_trap_error(fgt, "FGT bit is reserved", ret); + } + } + } - if (xa_is_err(prev)) { - ret = xa_err(prev); - print_nv_trap_error(fgt, "Failed FGT insertion", ret); + for (int i = 0; i < ARRAY_SIZE(non_0x18_fgt); i++) { + if (!aggregate_fgt(non_0x18_fgt[i])) { + ret = -EINVAL; + kvm_err("non_0x18_fgt[%d] is reserved\n", i); } } + ret = check_all_fgt_masks(ret); + kvm_info("nv: %ld fine grained trap handlers\n", ARRAY_SIZE(encoding_to_fgt)); @@ -2215,11 +2430,11 @@ static u64 kvm_get_sysreg_res0(struct kvm *kvm, enum vcpu_sysreg sr) return masks->mask[sr - __VNCR_START__].res0; } -static bool check_fgt_bit(struct kvm_vcpu *vcpu, bool is_read, - u64 val, const union trap_config tc) +static bool check_fgt_bit(struct kvm_vcpu *vcpu, enum vcpu_sysreg sr, + const union trap_config tc) { struct kvm *kvm = vcpu->kvm; - enum vcpu_sysreg sr; + u64 val; /* * KVM doesn't know about any FGTs that apply to the host, and hopefully @@ -2228,6 +2443,8 @@ static bool check_fgt_bit(struct kvm_vcpu *vcpu, bool is_read, if (is_hyp_ctxt(vcpu)) return false; + val = __vcpu_sys_reg(vcpu, sr); + if (tc.pol) return (val & BIT(tc.bit)); @@ -2242,38 +2459,17 @@ static bool check_fgt_bit(struct kvm_vcpu *vcpu, bool is_read, if (val & BIT(tc.bit)) return false; - switch ((enum fgt_group_id)tc.fgt) { - case HFGxTR_GROUP: - sr = is_read ? HFGRTR_EL2 : HFGWTR_EL2; - break; - - case HDFGRTR_GROUP: - sr = is_read ? 
HDFGRTR_EL2 : HDFGWTR_EL2; - break; - - case HAFGRTR_GROUP: - sr = HAFGRTR_EL2; - break; - - case HFGITR_GROUP: - sr = HFGITR_EL2; - break; - - default: - WARN_ONCE(1, "Unhandled FGT group"); - return false; - } - return !(kvm_get_sysreg_res0(kvm, sr) & BIT(tc.bit)); } bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index) { + enum vcpu_sysreg fgtreg; union trap_config tc; enum trap_behaviour b; bool is_read; u32 sysreg; - u64 esr, val; + u64 esr; esr = kvm_vcpu_get_esr(vcpu); sysreg = esr_sys64_to_sysreg(esr); @@ -2319,26 +2515,20 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index) case __NO_FGT_GROUP__: break; - case HFGxTR_GROUP: - if (is_read) - val = __vcpu_sys_reg(vcpu, HFGRTR_EL2); - else - val = __vcpu_sys_reg(vcpu, HFGWTR_EL2); + case HFGRTR_GROUP: + fgtreg = is_read ? HFGRTR_EL2 : HFGWTR_EL2; break; case HDFGRTR_GROUP: - if (is_read) - val = __vcpu_sys_reg(vcpu, HDFGRTR_EL2); - else - val = __vcpu_sys_reg(vcpu, HDFGWTR_EL2); + fgtreg = is_read ? HDFGRTR_EL2 : HDFGWTR_EL2; break; case HAFGRTR_GROUP: - val = __vcpu_sys_reg(vcpu, HAFGRTR_EL2); + fgtreg = HAFGRTR_EL2; break; case HFGITR_GROUP: - val = __vcpu_sys_reg(vcpu, HFGITR_EL2); + fgtreg = HFGITR_EL2; switch (tc.fgf) { u64 tmp; @@ -2352,13 +2542,26 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index) } break; - case __NR_FGT_GROUP_IDS__: + case HFGRTR2_GROUP: + fgtreg = is_read ? HFGRTR2_EL2 : HFGWTR2_EL2; + break; + + case HDFGRTR2_GROUP: + fgtreg = is_read ? HDFGRTR2_EL2 : HDFGWTR2_EL2; + break; + + case HFGITR2_GROUP: + fgtreg = HFGITR2_EL2; + break; + + default: /* Something is really wrong, bail out */ - WARN_ONCE(1, "__NR_FGT_GROUP_IDS__"); + WARN_ONCE(1, "Bad FGT group (encoding %08x, config %016llx)\n", + sysreg, tc.val); goto local; } - if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(vcpu, is_read, val, tc)) + if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(vcpu, fgtreg, tc)) goto inject; b = compute_trap_behaviour(vcpu, tc); @@ -2401,13 +2604,8 @@ inject: static bool __forward_traps(struct kvm_vcpu *vcpu, unsigned int reg, u64 control_bit) { - bool control_bit_set; - - if (!vcpu_has_nv(vcpu)) - return false; - - control_bit_set = __vcpu_sys_reg(vcpu, reg) & control_bit; - if (!is_hyp_ctxt(vcpu) && control_bit_set) { + if (is_nested_ctxt(vcpu) && + (__vcpu_sys_reg(vcpu, reg) & control_bit)) { kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu)); return true; } @@ -2471,13 +2669,6 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu) { u64 spsr, elr, esr; - /* - * Forward this trap to the virtual EL2 if the virtual - * HCR_EL2.NV bit is set and this is coming from !EL2. - */ - if (forward_hcr_traps(vcpu, HCR_NV)) - return; - spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2); spsr = kvm_check_illegal_exception_return(vcpu, spsr); @@ -2535,6 +2726,9 @@ static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2, case except_type_irq: kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_IRQ); break; + case except_type_serror: + kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SERR); + break; default: WARN_ONCE(1, "Unsupported EL2 exception injection %d\n", type); } @@ -2632,3 +2826,28 @@ int kvm_inject_nested_irq(struct kvm_vcpu *vcpu) /* esr_el2 value doesn't matter for exits due to irqs. */ return kvm_inject_nested(vcpu, 0, except_type_irq); } + +int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr) +{ + u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, + iabt ? 
ESR_ELx_EC_IABT_LOW : ESR_ELx_EC_DABT_LOW); + esr |= ESR_ELx_FSC_EXTABT | ESR_ELx_IL; + + vcpu_write_sys_reg(vcpu, FAR_EL2, addr); + + if (__vcpu_sys_reg(vcpu, SCTLR2_EL2) & SCTLR2_EL1_EASE) + return kvm_inject_nested(vcpu, esr, except_type_serror); + + return kvm_inject_nested_sync(vcpu, esr); +} + +int kvm_inject_nested_serror(struct kvm_vcpu *vcpu, u64 esr) +{ + /* + * Hardware sets up the EC field when propagating ESR as a result of + * vSError injection. Manually populate EC for an emulated SError + * exception. + */ + esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SERROR); + return kvm_inject_nested(vcpu, esr, except_type_serror); +} diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 7f6e43d256915..15e17aca1dec0 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -15,32 +15,6 @@ #include <asm/sysreg.h> /* - * Called on entry to KVM_RUN unless this vcpu previously ran at least - * once and the most recent prior KVM_RUN for this vcpu was called from - * the same task as current (highly likely). - * - * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu), - * such that on entering hyp the relevant parts of current are already - * mapped. - */ -int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu) -{ - struct user_fpsimd_state *fpsimd = ¤t->thread.uw.fpsimd_state; - int ret; - - /* pKVM has its own tracking of the host fpsimd state. */ - if (is_protected_kvm_enabled()) - return 0; - - /* Make sure the host task fpsimd state is visible to hyp: */ - ret = kvm_share_hyp(fpsimd, fpsimd + 1); - if (ret) - return ret; - - return 0; -} - -/* * Prepare vcpu for saving the host's FPSIMD state and loading the guest's. * The actual loading is done by the FPSIMD access trap taken to hyp. * @@ -103,8 +77,8 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) fp_state.sve_state = vcpu->arch.sve_state; fp_state.sve_vl = vcpu->arch.sve_max_vl; fp_state.sme_state = NULL; - fp_state.svcr = &__vcpu_sys_reg(vcpu, SVCR); - fp_state.fpmr = &__vcpu_sys_reg(vcpu, FPMR); + fp_state.svcr = __ctxt_sys_reg(&vcpu->arch.ctxt, SVCR); + fp_state.fpmr = __ctxt_sys_reg(&vcpu->arch.ctxt, FPMR); fp_state.fp_type = &vcpu->arch.fp_type; if (vcpu_has_sve(vcpu)) diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 2196979a24a32..16ba5e9ac86c3 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -818,8 +818,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { - events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE); events->exception.serror_has_esr = cpus_have_final_cap(ARM64_HAS_RAS_EXTN); + events->exception.serror_pending = (vcpu->arch.hcr_el2 & HCR_VSE) || + vcpu_get_flag(vcpu, NESTED_SERROR_PENDING); if (events->exception.serror_pending && events->exception.serror_has_esr) events->exception.serror_esr = vcpu_get_vsesr(vcpu); @@ -833,29 +834,62 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, return 0; } +static void commit_pending_events(struct kvm_vcpu *vcpu) +{ + if (!vcpu_get_flag(vcpu, PENDING_EXCEPTION)) + return; + + /* + * Reset the MMIO emulation state to avoid stepping PC after emulating + * the exception entry. 
+ */ + vcpu->mmio_needed = false; + kvm_call_hyp(__kvm_adjust_pc, vcpu); +} + int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { bool serror_pending = events->exception.serror_pending; bool has_esr = events->exception.serror_has_esr; bool ext_dabt_pending = events->exception.ext_dabt_pending; + u64 esr = events->exception.serror_esr; + int ret = 0; - if (serror_pending && has_esr) { - if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) - return -EINVAL; - - if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK)) - kvm_set_sei_esr(vcpu, events->exception.serror_esr); - else - return -EINVAL; - } else if (serror_pending) { - kvm_inject_vabt(vcpu); + /* + * Immediately commit the pending SEA to the vCPU's architectural + * state which is necessary since we do not return a pending SEA + * to userspace via KVM_GET_VCPU_EVENTS. + */ + if (ext_dabt_pending) { + ret = kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); + commit_pending_events(vcpu); } - if (ext_dabt_pending) - kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); + if (ret < 0) + return ret; - return 0; + if (!serror_pending) + return 0; + + if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && has_esr) + return -EINVAL; + + if (has_esr && (esr & ~ESR_ELx_ISS_MASK)) + return -EINVAL; + + if (has_esr) + ret = kvm_inject_serror_esr(vcpu, esr); + else + ret = kvm_inject_serror(vcpu); + + /* + * We could've decided that the SError is due for immediate software + * injection; commit the exception in case userspace decides it wants + * to inject more exceptions for some strange reason. + */ + commit_pending_events(vcpu); + return (ret < 0) ? ret : 0; } u32 __attribute_const__ kvm_target_cpu(void) diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index b73dc26bc44b4..a598072f36d2c 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -10,6 +10,7 @@ #include <linux/kvm.h> #include <linux/kvm_host.h> +#include <linux/ubsan.h> #include <asm/esr.h> #include <asm/exception.h> @@ -31,7 +32,7 @@ typedef int (*exit_handle_fn)(struct kvm_vcpu *); static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr) { if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr)) - kvm_inject_vabt(vcpu); + kvm_inject_serror(vcpu); } static int handle_hvc(struct kvm_vcpu *vcpu) @@ -251,7 +252,7 @@ static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu) return 1; } - if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) { + if (is_nested_ctxt(vcpu)) { kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu)); return 1; } @@ -298,6 +299,75 @@ static int handle_svc(struct kvm_vcpu *vcpu) return 1; } +static int kvm_handle_gcs(struct kvm_vcpu *vcpu) +{ + /* We don't expect GCS, so treat it with contempt */ + if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, GCS, IMP)) + WARN_ON_ONCE(1); + + kvm_inject_undefined(vcpu); + return 1; +} + +static int handle_other(struct kvm_vcpu *vcpu) +{ + bool allowed, fwd = is_nested_ctxt(vcpu); + u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2); + u64 esr = kvm_vcpu_get_esr(vcpu); + u64 iss = ESR_ELx_ISS(esr); + struct kvm *kvm = vcpu->kvm; + + /* + * We only trap for two reasons: + * + * - the feature is disabled, and the only outcome is to + * generate an UNDEF. + * + * - the feature is enabled, but a NV guest wants to trap the + * feature used by its L2 guest. We forward the exception in + * this case. + * + * What we don't expect is to end-up here if the guest is + * expected be be able to directly use the feature, hence the + * WARN_ON below. 
+ */ + switch (iss) { + case ESR_ELx_ISS_OTHER_ST64BV: + allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V); + fwd &= !(hcrx & HCRX_EL2_EnASR); + break; + case ESR_ELx_ISS_OTHER_ST64BV0: + allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA); + fwd &= !(hcrx & HCRX_EL2_EnAS0); + break; + case ESR_ELx_ISS_OTHER_LDST64B: + allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64); + fwd &= !(hcrx & HCRX_EL2_EnALS); + break; + case ESR_ELx_ISS_OTHER_TSBCSYNC: + allowed = kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, TRBE_V1P1); + fwd &= (__vcpu_sys_reg(vcpu, HFGITR2_EL2) & HFGITR2_EL2_TSBCSYNC); + break; + case ESR_ELx_ISS_OTHER_PSBCSYNC: + allowed = kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P5); + fwd &= (__vcpu_sys_reg(vcpu, HFGITR_EL2) & HFGITR_EL2_PSBCSYNC); + break; + default: + /* Clearly, we're missing something. */ + WARN_ON_ONCE(1); + allowed = false; + } + + WARN_ON_ONCE(allowed && !fwd); + + if (allowed && fwd) + kvm_inject_nested_sync(vcpu, esr); + else + kvm_inject_undefined(vcpu); + + return 1; +} + static exit_handle_fn arm_exit_handlers[] = { [0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec, [ESR_ELx_EC_WFx] = kvm_handle_wfx, @@ -307,6 +377,7 @@ static exit_handle_fn arm_exit_handlers[] = { [ESR_ELx_EC_CP14_LS] = kvm_handle_cp14_load_store, [ESR_ELx_EC_CP10_ID] = kvm_handle_cp10_id, [ESR_ELx_EC_CP14_64] = kvm_handle_cp14_64, + [ESR_ELx_EC_OTHER] = handle_other, [ESR_ELx_EC_HVC32] = handle_hvc, [ESR_ELx_EC_SMC32] = handle_smc, [ESR_ELx_EC_HVC64] = handle_hvc, @@ -317,6 +388,7 @@ static exit_handle_fn arm_exit_handlers[] = { [ESR_ELx_EC_ERET] = kvm_handle_eret, [ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort, [ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort, + [ESR_ELx_EC_DABT_CUR] = kvm_handle_vncr_abort, [ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug, [ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug, [ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug, @@ -324,6 +396,7 @@ static exit_handle_fn arm_exit_handlers[] = { [ESR_ELx_EC_BRK64] = kvm_handle_guest_debug, [ESR_ELx_EC_FP_ASIMD] = kvm_handle_fpasimd, [ESR_ELx_EC_PAC] = kvm_handle_ptrauth, + [ESR_ELx_EC_GCS] = kvm_handle_gcs, }; static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) @@ -417,7 +490,7 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index) kvm_handle_guest_serror(vcpu, disr_to_esr(disr)); } else { - kvm_inject_vabt(vcpu); + kvm_inject_serror(vcpu); } return; @@ -474,6 +547,11 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, print_nvhe_hyp_panic("BUG", panic_addr); } else if (IS_ENABLED(CONFIG_CFI_CLANG) && esr_is_cfi_brk(esr)) { kvm_nvhe_report_cfi_failure(panic_addr); + } else if (IS_ENABLED(CONFIG_UBSAN_KVM_EL2) && + ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 && + esr_is_ubsan_brk(esr)) { + print_nvhe_hyp_panic(report_ubsan_failure(esr & UBSAN_BRK_MASK), + panic_addr); } else { print_nvhe_hyp_panic("panic", panic_addr); } diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c index 424a5107cddb5..95d186e0bf54f 100644 --- a/arch/arm64/kvm/hyp/exception.c +++ b/arch/arm64/kvm/hyp/exception.c @@ -26,7 +26,8 @@ static inline u64 __vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg) if (unlikely(vcpu_has_nv(vcpu))) return vcpu_read_sys_reg(vcpu, reg); - else if (__vcpu_read_sys_reg_from_cpu(reg, &val)) + else if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) && + __vcpu_read_sys_reg_from_cpu(reg, &val)) return val; return __vcpu_sys_reg(vcpu, reg); @@ -36,8 +37,9 @@ static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, 
int reg) { if (unlikely(vcpu_has_nv(vcpu))) vcpu_write_sys_reg(vcpu, val, reg); - else if (!__vcpu_write_sys_reg_to_cpu(val, reg)) - __vcpu_sys_reg(vcpu, reg) = val; + else if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU) || + !__vcpu_write_sys_reg_to_cpu(val, reg)) + __vcpu_assign_sys_reg(vcpu, reg, val); } static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode, @@ -51,7 +53,7 @@ static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode, } else if (has_vhe()) { write_sysreg_el1(val, SYS_SPSR); } else { - __vcpu_sys_reg(vcpu, SPSR_EL1) = val; + __vcpu_assign_sys_reg(vcpu, SPSR_EL1, val); } } @@ -339,6 +341,10 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu) enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync); break; + case unpack_vcpu_flag(EXCEPT_AA64_EL1_SERR): + enter_exception64(vcpu, PSR_MODE_EL1h, except_type_serror); + break; + case unpack_vcpu_flag(EXCEPT_AA64_EL2_SYNC): enter_exception64(vcpu, PSR_MODE_EL2h, except_type_sync); break; @@ -347,9 +353,13 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu) enter_exception64(vcpu, PSR_MODE_EL2h, except_type_irq); break; + case unpack_vcpu_flag(EXCEPT_AA64_EL2_SERR): + enter_exception64(vcpu, PSR_MODE_EL2h, except_type_serror); + break; + default: /* - * Only EL1_SYNC and EL2_{SYNC,IRQ} makes + * Only EL1_{SYNC,SERR} and EL2_{SYNC,IRQ,SERR} makes * sense so far. Everything else gets silently * ignored. */ diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 96f625dc72566..84ec4e100fbb9 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -45,7 +45,7 @@ static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu) if (!vcpu_el1_is_32bit(vcpu)) return; - __vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2); + __vcpu_assign_sys_reg(vcpu, FPEXC32_EL2, read_sysreg(fpexc32_el2)); } static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu) @@ -65,12 +65,186 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu) } } +static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu) +{ + u64 val = CPTR_NVHE_EL2_RES1 | CPTR_EL2_TAM | CPTR_EL2_TTA; + + /* + * Always trap SME since it's not supported in KVM. + * TSM is RES1 if SME isn't implemented. + */ + val |= CPTR_EL2_TSM; + + if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs()) + val |= CPTR_EL2_TZ; + + if (!guest_owns_fp_regs()) + val |= CPTR_EL2_TFP; + + write_sysreg(val, cptr_el2); +} + +static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu) +{ + /* + * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to + * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2, + * except for some missing controls, such as TAM. + * In this case, CPTR_EL2.TAM has the same position with or without + * VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM + * shift value for trapping the AMU accesses. + */ + u64 val = CPTR_EL2_TAM | CPACR_EL1_TTA; + u64 cptr; + + if (guest_owns_fp_regs()) { + val |= CPACR_EL1_FPEN; + if (vcpu_has_sve(vcpu)) + val |= CPACR_EL1_ZEN; + } + + if (!vcpu_has_nv(vcpu)) + goto write; + + /* + * The architecture is a bit crap (what a surprise): an EL2 guest + * writing to CPTR_EL2 via CPACR_EL1 can't set any of TCPAC or TTA, + * as they are RES0 in the guest's view. To work around it, trap the + * sucker using the very same bit it can't set... 
+ */ + if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu)) + val |= CPTR_EL2_TCPAC; + + /* + * Layer the guest hypervisor's trap configuration on top of our own if + * we're in a nested context. + */ + if (is_hyp_ctxt(vcpu)) + goto write; + + cptr = vcpu_sanitised_cptr_el2(vcpu); + + /* + * Pay attention, there's some interesting detail here. + * + * The CPTR_EL2.xEN fields are 2 bits wide, although there are only two + * meaningful trap states when HCR_EL2.TGE = 0 (running a nested guest): + * + * - CPTR_EL2.xEN = x0, traps are enabled + * - CPTR_EL2.xEN = x1, traps are disabled + * + * In other words, bit[0] determines if guest accesses trap or not. In + * the interest of simplicity, clear the entire field if the guest + * hypervisor has traps enabled to dispel any illusion of something more + * complicated taking place. + */ + if (!(SYS_FIELD_GET(CPACR_EL1, FPEN, cptr) & BIT(0))) + val &= ~CPACR_EL1_FPEN; + if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0))) + val &= ~CPACR_EL1_ZEN; + + if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP)) + val |= cptr & CPACR_EL1_E0POE; + + val |= cptr & CPTR_EL2_TCPAC; + +write: + write_sysreg(val, cpacr_el1); +} + +static inline void __activate_cptr_traps(struct kvm_vcpu *vcpu) +{ + if (!guest_owns_fp_regs()) + __activate_traps_fpsimd32(vcpu); + + if (has_vhe() || has_hvhe()) + __activate_cptr_traps_vhe(vcpu); + else + __activate_cptr_traps_nvhe(vcpu); +} + +static inline void __deactivate_cptr_traps_nvhe(struct kvm_vcpu *vcpu) +{ + u64 val = CPTR_NVHE_EL2_RES1; + + if (!cpus_have_final_cap(ARM64_SVE)) + val |= CPTR_EL2_TZ; + if (!cpus_have_final_cap(ARM64_SME)) + val |= CPTR_EL2_TSM; + + write_sysreg(val, cptr_el2); +} + +static inline void __deactivate_cptr_traps_vhe(struct kvm_vcpu *vcpu) +{ + u64 val = CPACR_EL1_FPEN; + + if (cpus_have_final_cap(ARM64_SVE)) + val |= CPACR_EL1_ZEN; + if (cpus_have_final_cap(ARM64_SME)) + val |= CPACR_EL1_SMEN; + + write_sysreg(val, cpacr_el1); +} + +static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu) +{ + if (has_vhe() || has_hvhe()) + __deactivate_cptr_traps_vhe(vcpu); + else + __deactivate_cptr_traps_nvhe(vcpu); +} + +#define reg_to_fgt_masks(reg) \ + ({ \ + struct fgt_masks *m; \ + switch(reg) { \ + case HFGRTR_EL2: \ + m = &hfgrtr_masks; \ + break; \ + case HFGWTR_EL2: \ + m = &hfgwtr_masks; \ + break; \ + case HFGITR_EL2: \ + m = &hfgitr_masks; \ + break; \ + case HDFGRTR_EL2: \ + m = &hdfgrtr_masks; \ + break; \ + case HDFGWTR_EL2: \ + m = &hdfgwtr_masks; \ + break; \ + case HAFGRTR_EL2: \ + m = &hafgrtr_masks; \ + break; \ + case HFGRTR2_EL2: \ + m = &hfgrtr2_masks; \ + break; \ + case HFGWTR2_EL2: \ + m = &hfgwtr2_masks; \ + break; \ + case HFGITR2_EL2: \ + m = &hfgitr2_masks; \ + break; \ + case HDFGRTR2_EL2: \ + m = &hdfgrtr2_masks; \ + break; \ + case HDFGWTR2_EL2: \ + m = &hdfgwtr2_masks; \ + break; \ + default: \ + BUILD_BUG_ON(1); \ + } \ + \ + m; \ + }) + #define compute_clr_set(vcpu, reg, clr, set) \ do { \ - u64 hfg; \ - hfg = __vcpu_sys_reg(vcpu, reg) & ~__ ## reg ## _RES0; \ - set |= hfg & __ ## reg ## _MASK; \ - clr |= ~hfg & __ ## reg ## _nMASK; \ + u64 hfg = __vcpu_sys_reg(vcpu, reg); \ + struct fgt_masks *m = reg_to_fgt_masks(reg); \ + set |= hfg & m->mask; \ + clr |= ~hfg & m->nmask; \ } while(0) #define reg_to_fgt_group_id(reg) \ @@ -79,7 +253,7 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu) switch(reg) { \ case HFGRTR_EL2: \ case HFGWTR_EL2: \ - id = HFGxTR_GROUP; \ + id = HFGRTR_GROUP; \ break; \ case HFGITR_EL2: \ id = HFGITR_GROUP; \ @@ 
-91,6 +265,17 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu) case HAFGRTR_EL2: \ id = HAFGRTR_GROUP; \ break; \ + case HFGRTR2_EL2: \ + case HFGWTR2_EL2: \ + id = HFGRTR2_GROUP; \ + break; \ + case HFGITR2_EL2: \ + id = HFGITR2_GROUP; \ + break; \ + case HDFGRTR2_EL2: \ + case HDFGWTR2_EL2: \ + id = HDFGRTR2_GROUP; \ + break; \ default: \ BUILD_BUG_ON(1); \ } \ @@ -101,44 +286,32 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu) #define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \ do { \ u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)]; \ - set |= hfg & __ ## reg ## _MASK; \ - clr |= hfg & __ ## reg ## _nMASK; \ + struct fgt_masks *m = reg_to_fgt_masks(reg); \ + set |= hfg & m->mask; \ + clr |= hfg & m->nmask; \ } while(0) #define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \ do { \ - u64 c = 0, s = 0; \ + struct fgt_masks *m = reg_to_fgt_masks(reg); \ + u64 c = clr, s = set; \ + u64 val; \ \ ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \ - if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) \ + if (is_nested_ctxt(vcpu)) \ compute_clr_set(vcpu, reg, c, s); \ \ compute_undef_clr_set(vcpu, kvm, reg, c, s); \ \ - s |= set; \ - c |= clr; \ - if (c || s) { \ - u64 val = __ ## reg ## _nMASK; \ - val |= s; \ - val &= ~c; \ - write_sysreg_s(val, SYS_ ## reg); \ - } \ + val = m->nmask; \ + val |= s; \ + val &= ~c; \ + write_sysreg_s(val, SYS_ ## reg); \ } while(0) #define update_fgt_traps(hctxt, vcpu, kvm, reg) \ update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0) -/* - * Validate the fine grain trap masks. - * Check that the masks do not overlap and that all bits are accounted for. - */ -#define CHECK_FGT_MASKS(reg) \ - do { \ - BUILD_BUG_ON((__ ## reg ## _MASK) & (__ ## reg ## _nMASK)); \ - BUILD_BUG_ON(~((__ ## reg ## _RES0) ^ (__ ## reg ## _MASK) ^ \ - (__ ## reg ## _nMASK))); \ - } while(0) - static inline bool cpu_has_amu(void) { u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1); @@ -152,56 +325,60 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu) struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt); struct kvm *kvm = kern_hyp_va(vcpu->kvm); - CHECK_FGT_MASKS(HFGRTR_EL2); - CHECK_FGT_MASKS(HFGWTR_EL2); - CHECK_FGT_MASKS(HFGITR_EL2); - CHECK_FGT_MASKS(HDFGRTR_EL2); - CHECK_FGT_MASKS(HDFGWTR_EL2); - CHECK_FGT_MASKS(HAFGRTR_EL2); - CHECK_FGT_MASKS(HCRX_EL2); - if (!cpus_have_final_cap(ARM64_HAS_FGT)) return; update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2); update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0, cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ? 
- HFGxTR_EL2_TCR_EL1_MASK : 0); + HFGWTR_EL2_TCR_EL1_MASK : 0); update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2); update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2); update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2); if (cpu_has_amu()) update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2); + + if (!cpus_have_final_cap(ARM64_HAS_FGT2)) + return; + + update_fgt_traps(hctxt, vcpu, kvm, HFGRTR2_EL2); + update_fgt_traps(hctxt, vcpu, kvm, HFGWTR2_EL2); + update_fgt_traps(hctxt, vcpu, kvm, HFGITR2_EL2); + update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR2_EL2); + update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR2_EL2); } -#define __deactivate_fgt(htcxt, vcpu, kvm, reg) \ +#define __deactivate_fgt(htcxt, vcpu, reg) \ do { \ - if ((vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) || \ - kvm->arch.fgu[reg_to_fgt_group_id(reg)]) \ - write_sysreg_s(ctxt_sys_reg(hctxt, reg), \ - SYS_ ## reg); \ + write_sysreg_s(ctxt_sys_reg(hctxt, reg), \ + SYS_ ## reg); \ } while(0) static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt); - struct kvm *kvm = kern_hyp_va(vcpu->kvm); if (!cpus_have_final_cap(ARM64_HAS_FGT)) return; - __deactivate_fgt(hctxt, vcpu, kvm, HFGRTR_EL2); - if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) - write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2); - else - __deactivate_fgt(hctxt, vcpu, kvm, HFGWTR_EL2); - __deactivate_fgt(hctxt, vcpu, kvm, HFGITR_EL2); - __deactivate_fgt(hctxt, vcpu, kvm, HDFGRTR_EL2); - __deactivate_fgt(hctxt, vcpu, kvm, HDFGWTR_EL2); + __deactivate_fgt(hctxt, vcpu, HFGRTR_EL2); + __deactivate_fgt(hctxt, vcpu, HFGWTR_EL2); + __deactivate_fgt(hctxt, vcpu, HFGITR_EL2); + __deactivate_fgt(hctxt, vcpu, HDFGRTR_EL2); + __deactivate_fgt(hctxt, vcpu, HDFGWTR_EL2); if (cpu_has_amu()) - __deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2); + __deactivate_fgt(hctxt, vcpu, HAFGRTR_EL2); + + if (!cpus_have_final_cap(ARM64_HAS_FGT2)) + return; + + __deactivate_fgt(hctxt, vcpu, HFGRTR2_EL2); + __deactivate_fgt(hctxt, vcpu, HFGWTR2_EL2); + __deactivate_fgt(hctxt, vcpu, HFGITR2_EL2); + __deactivate_fgt(hctxt, vcpu, HDFGRTR2_EL2); + __deactivate_fgt(hctxt, vcpu, HDFGWTR2_EL2); } static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu) @@ -259,13 +436,10 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) if (cpus_have_final_cap(ARM64_HAS_HCX)) { u64 hcrx = vcpu->arch.hcrx_el2; - if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) { - u64 clr = 0, set = 0; - - compute_clr_set(vcpu, HCRX_EL2, clr, set); - - hcrx |= set; - hcrx &= ~clr; + if (is_nested_ctxt(vcpu)) { + u64 val = __vcpu_sys_reg(vcpu, HCRX_EL2); + hcrx |= val & __HCRX_EL2_MASK; + hcrx &= ~(~val & __HCRX_EL2_nMASK); } ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2); @@ -300,23 +474,58 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr) if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM)) hcr |= HCR_TVM; - write_sysreg(hcr, hcr_el2); + write_sysreg_hcr(hcr); + + if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) { + u64 vsesr; + + /* + * When HCR_EL2.AMO is set, physical SErrors are taken to EL2 + * and vSError injection is enabled for EL1. Conveniently, for + * NV this means that it is never the case where a 'physical' + * SError (injected by KVM or userspace) and vSError are + * deliverable to the same context. + * + * As such, we can trivially select between the host or guest's + * VSESR_EL2. 
Except for the case that FEAT_RAS hasn't been + * exposed to the guest, where ESR propagation in hardware + * occurs unconditionally. + * + * Paper over the architectural wart and use an IMPLEMENTATION + * DEFINED ESR value in case FEAT_RAS is hidden from the guest. + */ + if (!vserror_state_is_nested(vcpu)) + vsesr = vcpu->arch.vsesr_el2; + else if (kvm_has_ras(kern_hyp_va(vcpu->kvm))) + vsesr = __vcpu_sys_reg(vcpu, VSESR_EL2); + else + vsesr = ESR_ELx_ISV; - if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) - write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2); + write_sysreg_s(vsesr, SYS_VSESR_EL2); + } } static inline void ___deactivate_traps(struct kvm_vcpu *vcpu) { + u64 *hcr; + + if (vserror_state_is_nested(vcpu)) + hcr = __ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2); + else + hcr = &vcpu->arch.hcr_el2; + /* * If we pended a virtual abort, preserve it until it gets * cleared. See D1.14.3 (Virtual Interrupts) for details, but * the crucial bit is "On taking a vSError interrupt, * HCR_EL2.VSE is cleared to 0." + * + * Additionally, when in a nested context we need to propagate the + * updated state to the guest hypervisor's HCR_EL2. */ - if (vcpu->arch.hcr_el2 & HCR_VSE) { - vcpu->arch.hcr_el2 &= ~HCR_VSE; - vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE; + if (*hcr & HCR_VSE) { + *hcr &= ~HCR_VSE; + *hcr |= read_sysreg(hcr_el2) & HCR_VSE; } } @@ -357,7 +566,7 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu) * nested guest, as the guest hypervisor could select a smaller VL. Slap * that into hardware before wrapping up. */ - if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) + if (is_nested_ctxt(vcpu)) sve_cond_update_zcr_vq(__vcpu_sys_reg(vcpu, ZCR_EL2), SYS_ZCR_EL2); write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR); @@ -383,7 +592,7 @@ static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu) if (vcpu_has_sve(vcpu)) { /* A guest hypervisor may restrict the effective max VL. */ - if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) + if (is_nested_ctxt(vcpu)) zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2); else zcr_el2 = vcpu_sve_max_vq(vcpu) - 1; @@ -412,7 +621,7 @@ static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu) */ if (vcpu_has_sve(vcpu)) { zcr_el1 = read_sysreg_el1(SYS_ZCR); - __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1; + __vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1); /* * The guest's state is always saved using the guest's max VL. @@ -442,11 +651,6 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu) */ if (system_supports_sve()) { __hyp_sve_save_host(); - - /* Re-enable SVE traps if not supported for the guest vcpu. */ - if (!vcpu_has_sve(vcpu)) - cpacr_clear_set(CPACR_EL1_ZEN, 0); - } else { __fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs)); } @@ -497,10 +701,7 @@ static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) /* Valid trap. Switch the context: */ /* First disable enough traps to allow us to update the registers */ - if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve())) - cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN); - else - cpacr_clear_set(0, CPACR_EL1_FPEN); + __deactivate_cptr_traps(vcpu); isb(); /* Write out the host state if it's in the registers */ @@ -522,6 +723,13 @@ static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) *host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED; + /* + * Re-enable traps necessary for the current state of the guest, e.g. 
+ * those enabled by a guest hypervisor. The ERET to the guest will + * provide the necessary context synchronization. + */ + __activate_cptr_traps(vcpu); + return true; } diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h index b9cff893bbe0b..a17cbe7582de9 100644 --- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h @@ -109,6 +109,28 @@ static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt) return kvm_has_s1poe(kern_hyp_va(vcpu->kvm)); } +static inline bool ctxt_has_ras(struct kvm_cpu_context *ctxt) +{ + struct kvm_vcpu *vcpu; + + if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) + return false; + + vcpu = ctxt_to_vcpu(ctxt); + return kvm_has_ras(kern_hyp_va(vcpu->kvm)); +} + +static inline bool ctxt_has_sctlr2(struct kvm_cpu_context *ctxt) +{ + struct kvm_vcpu *vcpu; + + if (!cpus_have_final_cap(ARM64_HAS_SCTLR2)) + return false; + + vcpu = ctxt_to_vcpu(ctxt); + return kvm_has_sctlr2(kern_hyp_va(vcpu->kvm)); +} + static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) { ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR); @@ -147,6 +169,9 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) ctxt_sys_reg(ctxt, SP_EL1) = read_sysreg(sp_el1); ctxt_sys_reg(ctxt, ELR_EL1) = read_sysreg_el1(SYS_ELR); ctxt_sys_reg(ctxt, SPSR_EL1) = read_sysreg_el1(SYS_SPSR); + + if (ctxt_has_sctlr2(ctxt)) + ctxt_sys_reg(ctxt, SCTLR2_EL1) = read_sysreg_el1(SYS_SCTLR2); } static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt) @@ -159,8 +184,13 @@ static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt) if (!has_vhe() && ctxt->__hyp_running_vcpu) ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR); - if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) + if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) + return; + + if (!vserror_state_is_nested(ctxt_to_vcpu(ctxt))) ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2); + else if (ctxt_has_ras(ctxt)) + ctxt_sys_reg(ctxt, VDISR_EL2) = read_sysreg_s(SYS_VDISR_EL2); } static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) @@ -252,6 +282,9 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt, write_sysreg(ctxt_sys_reg(ctxt, SP_EL1), sp_el1); write_sysreg_el1(ctxt_sys_reg(ctxt, ELR_EL1), SYS_ELR); write_sysreg_el1(ctxt_sys_reg(ctxt, SPSR_EL1), SYS_SPSR); + + if (ctxt_has_sctlr2(ctxt)) + write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR2_EL1), SYS_SCTLR2); } /* Read the VCPU state's PSTATE, but translate (v)EL2 to EL1. 
*/ @@ -275,6 +308,7 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctx { u64 pstate = to_hw_pstate(ctxt); u64 mode = pstate & PSR_AA32_MODE_MASK; + u64 vdisr; /* * Safety check to ensure we're setting the CPU up to enter the guest @@ -293,8 +327,17 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctx write_sysreg_el2(ctxt->regs.pc, SYS_ELR); write_sysreg_el2(pstate, SYS_SPSR); - if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) - write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2); + if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) + return; + + if (!vserror_state_is_nested(ctxt_to_vcpu(ctxt))) + vdisr = ctxt_sys_reg(ctxt, DISR_EL1); + else if (ctxt_has_ras(ctxt)) + vdisr = ctxt_sys_reg(ctxt, VDISR_EL2); + else + vdisr = 0; + + write_sysreg_s(vdisr, SYS_VDISR_EL2); } static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu) @@ -307,11 +350,11 @@ static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu) vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq); vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq); - __vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2); - __vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2); + __vcpu_assign_sys_reg(vcpu, DACR32_EL2, read_sysreg(dacr32_el2)); + __vcpu_assign_sys_reg(vcpu, IFSR32_EL2, read_sysreg(ifsr32_el2)); if (has_vhe() || kvm_debug_regs_in_use(vcpu)) - __vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2); + __vcpu_assign_sys_reg(vcpu, DBGVCR32_EL2, read_sysreg(dbgvcr32_el2)); } static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h index ea0a704da9b89..5f9d56754e39f 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h +++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h @@ -39,12 +39,12 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages); int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages); int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages); int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages); -int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu, +int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot); -int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm); +int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *hyp_vm); int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot); -int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm); -int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *vm); +int __pkvm_host_wrprotect_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *hyp_vm); +int __pkvm_host_test_clear_young_guest(u64 gfn, u64 nr_pages, bool mkold, struct pkvm_hyp_vm *vm); int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu); bool addr_is_memory(phys_addr_t phys); @@ -67,4 +67,10 @@ static __always_inline void __load_host_stage2(void) else write_sysreg(0, vttbr_el2); } + +#ifdef CONFIG_NVHE_EL2_DEBUG +void pkvm_ownership_selftest(void *base); +#else +static inline void pkvm_ownership_selftest(void *base) { } +#endif #endif /* __KVM_NVHE_MEM_PROTECT__ */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h index 34233d5860607..dee1a406b0c28 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/memory.h +++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h @@ -8,23 +8,30 @@ #include <linux/types.h> /* - * 
Bits 0-1 are reserved to track the memory ownership state of each page: - * 00: The page is owned exclusively by the page-table owner. - * 01: The page is owned by the page-table owner, but is shared - * with another entity. - * 10: The page is shared with, but not owned by the page-table owner. - * 11: Reserved for future use (lending). + * Bits 0-1 are used to encode the memory ownership state of each page from the + * point of view of a pKVM "component" (host, hyp, guest, ... see enum + * pkvm_component_id): + * 00: The page is owned and exclusively accessible by the component; + * 01: The page is owned and accessible by the component, but is also + * accessible by another component; + * 10: The page is accessible but not owned by the component; + * The storage of this state depends on the component: either in the + * hyp_vmemmap for the host and hyp states or in PTE software bits for guests. */ enum pkvm_page_state { PKVM_PAGE_OWNED = 0ULL, PKVM_PAGE_SHARED_OWNED = BIT(0), PKVM_PAGE_SHARED_BORROWED = BIT(1), - __PKVM_PAGE_RESERVED = BIT(0) | BIT(1), - /* Meta-states which aren't encoded directly in the PTE's SW bits */ - PKVM_NOPAGE = BIT(2), + /* + * 'Meta-states' are not stored directly in PTE SW bits for guest + * states, but inferred from the context (e.g. invalid PTE entries). + * For the host and hyp, meta-states are stored directly in the + * struct hyp_page. + */ + PKVM_NOPAGE = BIT(0) | BIT(1), }; -#define PKVM_PAGE_META_STATES_MASK (~__PKVM_PAGE_RESERVED) +#define PKVM_PAGE_STATE_MASK (BIT(0) | BIT(1)) #define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1) static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot, @@ -44,8 +51,15 @@ struct hyp_page { u16 refcount; u8 order; - /* Host (non-meta) state. Guarded by the host stage-2 lock. */ - enum pkvm_page_state host_state : 8; + /* Host state. Guarded by the host stage-2 lock. */ + unsigned __host_state : 4; + + /* + * Complement of the hyp state. Guarded by the hyp stage-1 lock. We use + * the complement so that the initial 0 in __hyp_state_comp (due to the + * entire vmemmap starting off zeroed) encodes PKVM_NOPAGE. + */ + unsigned __hyp_state_comp : 4; u32 host_share_guest_count; }; @@ -82,6 +96,26 @@ static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys) #define hyp_page_to_virt(page) __hyp_va(hyp_page_to_phys(page)) #define hyp_page_to_pool(page) (((struct hyp_page *)page)->pool) +static inline enum pkvm_page_state get_host_state(struct hyp_page *p) +{ + return p->__host_state; +} + +static inline void set_host_state(struct hyp_page *p, enum pkvm_page_state state) +{ + p->__host_state = state; +} + +static inline enum pkvm_page_state get_hyp_state(struct hyp_page *p) +{ + return p->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK; +} + +static inline void set_hyp_state(struct hyp_page *p, enum pkvm_page_state state) +{ + p->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK; +} + /* * Refcounting for 'struct hyp_page'. * hyp_pool::lock must be held if atomic access to the refcount is required. 
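The memory.h hunk above stores the hyp-side ownership state as a complement (__hyp_state_comp) precisely so that the zero-filled vmemmap decodes as PKVM_NOPAGE without any initialisation pass. The following standalone C sketch only mirrors the enum values and accessors introduced by that hunk; the demo struct, names and program around them are illustrative, not part of the patch.

/*
 * Illustration of the complemented hyp-state encoding from nvhe/memory.h:
 * a zero-initialised page entry must read back as PKVM_NOPAGE.
 * Enum values and the XOR accessors mirror the patch; the rest is a
 * hypothetical test harness.
 */
#include <assert.h>
#include <string.h>

enum pkvm_page_state {
	PKVM_PAGE_OWNED			= 0,
	PKVM_PAGE_SHARED_OWNED		= 1 << 0,
	PKVM_PAGE_SHARED_BORROWED	= 1 << 1,
	PKVM_NOPAGE			= (1 << 0) | (1 << 1),
};

#define PKVM_PAGE_STATE_MASK		((1 << 0) | (1 << 1))

struct demo_hyp_page {
	unsigned __host_state : 4;
	unsigned __hyp_state_comp : 4;	/* complement of the hyp state */
};

static enum pkvm_page_state get_hyp_state(struct demo_hyp_page *p)
{
	return p->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
}

static void set_hyp_state(struct demo_hyp_page *p, enum pkvm_page_state state)
{
	p->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
}

int main(void)
{
	struct demo_hyp_page page;

	/* The vmemmap starts off zeroed; that must decode as PKVM_NOPAGE. */
	memset(&page, 0, sizeof(page));
	assert(get_hyp_state(&page) == PKVM_NOPAGE);

	/* Round-trip a real state through the accessors. */
	set_hyp_state(&page, PKVM_PAGE_SHARED_BORROWED);
	assert(get_hyp_state(&page) == PKVM_PAGE_SHARED_BORROWED);

	return 0;
}

This is the invariant the new comment in struct hyp_page relies on: because 0 in __hyp_state_comp already means PKVM_NOPAGE, the hypervisor never has to walk the freshly allocated vmemmap to mark pages as unmapped.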
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h index 230e4f2527def..6e83ce35c2f2e 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/mm.h +++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h @@ -13,9 +13,11 @@ extern struct kvm_pgtable pkvm_pgtable; extern hyp_spinlock_t pkvm_pgd_lock; -int hyp_create_pcpu_fixmap(void); +int hyp_create_fixmap(void); void *hyp_fixmap_map(phys_addr_t phys); void hyp_fixmap_unmap(void); +void *hyp_fixblock_map(phys_addr_t phys, size_t *size); +void hyp_fixblock_unmap(void); int hyp_create_idmap(u32 hyp_va_bits); int hyp_map_vectors(void); diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index b43426a493df5..0b0a68b663d4b 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -12,7 +12,7 @@ asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS -D__DISABLE_TRACE_MMIO__ ccflags-y += -fno-stack-protector \ -DDISABLE_BRANCH_PROFILING \ - $(DISABLE_STACKLEAK_PLUGIN) + $(DISABLE_KSTACK_ERASE) hostprogs := gen-hyprel HOST_EXTRACFLAGS += -I$(objtree)/include @@ -99,3 +99,9 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS), $(KBUILD_CFLAG # causes a build failure. Remove profile optimization flags. KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%, $(KBUILD_CFLAGS)) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables + +ifeq ($(CONFIG_UBSAN_KVM_EL2),y) +UBSAN_SANITIZE := y +# Always use brk and not hooks +ccflags-y += $(CFLAGS_UBSAN_TRAP) +endif diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c index 2f4a4f5036bb5..2a1c0f49792bf 100644 --- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c @@ -92,12 +92,42 @@ static void __trace_switch_to_host(void) *host_data_ptr(host_debug_state.trfcr_el1)); } +static void __debug_save_brbe(u64 *brbcr_el1) +{ + *brbcr_el1 = 0; + + /* Check if the BRBE is enabled */ + if (!(read_sysreg_el1(SYS_BRBCR) & (BRBCR_ELx_E0BRE | BRBCR_ELx_ExBRE))) + return; + + /* + * Prohibit branch record generation while we are in guest. + * Since access to BRBCR_EL1 is trapped, the guest can't + * modify the filtering set by the host. 
+ */ + *brbcr_el1 = read_sysreg_el1(SYS_BRBCR); + write_sysreg_el1(0, SYS_BRBCR); +} + +static void __debug_restore_brbe(u64 brbcr_el1) +{ + if (!brbcr_el1) + return; + + /* Restore BRBE controls */ + write_sysreg_el1(brbcr_el1, SYS_BRBCR); +} + void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu) { /* Disable and flush SPE data generation */ if (host_data_test_flag(HAS_SPE)) __debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1)); + /* Disable BRBE branch records */ + if (host_data_test_flag(HAS_BRBE)) + __debug_save_brbe(host_data_ptr(host_debug_state.brbcr_el1)); + if (__trace_needs_switch()) __trace_switch_to_guest(); } @@ -111,6 +141,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu) { if (host_data_test_flag(HAS_SPE)) __debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1)); + if (host_data_test_flag(HAS_BRBE)) + __debug_restore_brbe(*host_data_ptr(host_debug_state.brbcr_el1)); if (__trace_needs_switch()) __trace_switch_to_host(); } diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S index 58f0cb2298cc2..eef15b374abb0 100644 --- a/arch/arm64/kvm/hyp/nvhe/host.S +++ b/arch/arm64/kvm/hyp/nvhe/host.S @@ -124,7 +124,7 @@ SYM_FUNC_START(__hyp_do_panic) /* Ensure host stage-2 is disabled */ mrs x0, hcr_el2 bic x0, x0, #HCR_VM - msr hcr_el2, x0 + msr_hcr_el2 x0 isb tlbi vmalls12e1 dsb nsh diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index f8af11189572f..aada42522e7be 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -100,7 +100,7 @@ SYM_CODE_START_LOCAL(___kvm_hyp_init) msr mair_el2, x1 ldr x1, [x0, #NVHE_INIT_HCR_EL2] - msr hcr_el2, x1 + msr_hcr_el2 x1 mov x2, #HCR_E2H and x2, x1, x2 @@ -262,7 +262,7 @@ reset: alternative_if ARM64_KVM_PROTECTED_MODE mov_q x5, HCR_HOST_NVHE_FLAGS - msr hcr_el2, x5 + msr_hcr_el2 x5 alternative_else_nop_endif /* Install stub vectors */ diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 2c37680d954cf..3206b2c07f82a 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -26,7 +26,7 @@ void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt); static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu) { - __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR); + __vcpu_assign_sys_reg(vcpu, ZCR_EL1, read_sysreg_el1(SYS_ZCR)); /* * On saving/restoring guest sve state, always use the maximum VL for * the guest. The layout of the data when saving the sve state depends @@ -69,7 +69,10 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu) if (!guest_owns_fp_regs()) return; - cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN); + /* + * Traps have been disabled by __deactivate_cptr_traps(), but there + * hasn't necessarily been a context synchronization event yet. + */ isb(); if (vcpu_has_sve(vcpu)) @@ -79,7 +82,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu) has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm)); if (has_fpmr) - __vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR); + __vcpu_assign_sys_reg(vcpu, FPMR, read_sysreg_s(SYS_FPMR)); if (system_supports_sve()) __hyp_sve_restore_host(); @@ -123,10 +126,6 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu) hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt; - hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state); - /* Limit guest vector length to the maximum supported by the host. 
*/ - hyp_vcpu->vcpu.arch.sve_max_vl = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl); - hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2; hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWI | HCR_TWE); hyp_vcpu->vcpu.arch.hcr_el2 |= READ_ONCE(host_vcpu->arch.hcr_el2) & @@ -249,7 +248,8 @@ static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt) { DECLARE_REG(u64, pfn, host_ctxt, 1); DECLARE_REG(u64, gfn, host_ctxt, 2); - DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3); + DECLARE_REG(u64, nr_pages, host_ctxt, 3); + DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 4); struct pkvm_hyp_vcpu *hyp_vcpu; int ret = -EINVAL; @@ -264,7 +264,7 @@ static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt) if (ret) goto out; - ret = __pkvm_host_share_guest(pfn, gfn, hyp_vcpu, prot); + ret = __pkvm_host_share_guest(pfn, gfn, nr_pages, hyp_vcpu, prot); out: cpu_reg(host_ctxt, 1) = ret; } @@ -273,6 +273,7 @@ static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt) { DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1); DECLARE_REG(u64, gfn, host_ctxt, 2); + DECLARE_REG(u64, nr_pages, host_ctxt, 3); struct pkvm_hyp_vm *hyp_vm; int ret = -EINVAL; @@ -283,7 +284,7 @@ static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt) if (!hyp_vm) goto out; - ret = __pkvm_host_unshare_guest(gfn, hyp_vm); + ret = __pkvm_host_unshare_guest(gfn, nr_pages, hyp_vm); put_pkvm_hyp_vm(hyp_vm); out: cpu_reg(host_ctxt, 1) = ret; @@ -312,6 +313,7 @@ static void handle___pkvm_host_wrprotect_guest(struct kvm_cpu_context *host_ctxt { DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1); DECLARE_REG(u64, gfn, host_ctxt, 2); + DECLARE_REG(u64, nr_pages, host_ctxt, 3); struct pkvm_hyp_vm *hyp_vm; int ret = -EINVAL; @@ -322,7 +324,7 @@ static void handle___pkvm_host_wrprotect_guest(struct kvm_cpu_context *host_ctxt if (!hyp_vm) goto out; - ret = __pkvm_host_wrprotect_guest(gfn, hyp_vm); + ret = __pkvm_host_wrprotect_guest(gfn, nr_pages, hyp_vm); put_pkvm_hyp_vm(hyp_vm); out: cpu_reg(host_ctxt, 1) = ret; @@ -332,7 +334,8 @@ static void handle___pkvm_host_test_clear_young_guest(struct kvm_cpu_context *ho { DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1); DECLARE_REG(u64, gfn, host_ctxt, 2); - DECLARE_REG(bool, mkold, host_ctxt, 3); + DECLARE_REG(u64, nr_pages, host_ctxt, 3); + DECLARE_REG(bool, mkold, host_ctxt, 4); struct pkvm_hyp_vm *hyp_vm; int ret = -EINVAL; @@ -343,7 +346,7 @@ static void handle___pkvm_host_test_clear_young_guest(struct kvm_cpu_context *ho if (!hyp_vm) goto out; - ret = __pkvm_host_test_clear_young_guest(gfn, mkold, hyp_vm); + ret = __pkvm_host_test_clear_young_guest(gfn, nr_pages, mkold, hyp_vm); put_pkvm_hyp_vm(hyp_vm); out: cpu_reg(host_ctxt, 1) = ret; diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S index f4562f417d3fc..d724f6d69302a 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S @@ -25,5 +25,7 @@ SECTIONS { BEGIN_HYP_SECTION(.data..percpu) PERCPU_INPUT(L1_CACHE_BYTES) END_HYP_SECTION + HYP_SECTION(.bss) + HYP_SECTION(.data) } diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index e80f3ebd3e2a2..8957734d6183e 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -60,6 +60,11 @@ static void hyp_unlock_component(void) hyp_spin_unlock(&pkvm_pgd_lock); } +#define for_each_hyp_page(__p, __st, __sz) \ + for (struct hyp_page *__p = hyp_phys_to_page(__st), \ + *__e = __p + 
((__sz) >> PAGE_SHIFT); \ + __p < __e; __p++) + static void *host_s2_zalloc_pages_exact(size_t size) { void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size)); @@ -161,12 +166,6 @@ int kvm_host_prepare_stage2(void *pgt_pool_base) return 0; } -static bool guest_stage2_force_pte_cb(u64 addr, u64 end, - enum kvm_pgtable_prot prot) -{ - return true; -} - static void *guest_s2_zalloc_pages_exact(size_t size) { void *addr = hyp_alloc_pages(¤t_vm->pool, get_order(size)); @@ -217,16 +216,42 @@ static void guest_s2_put_page(void *addr) hyp_put_page(¤t_vm->pool, addr); } +static void __apply_guest_page(void *va, size_t size, + void (*func)(void *addr, size_t size)) +{ + size += va - PTR_ALIGN_DOWN(va, PAGE_SIZE); + va = PTR_ALIGN_DOWN(va, PAGE_SIZE); + size = PAGE_ALIGN(size); + + while (size) { + size_t map_size = PAGE_SIZE; + void *map; + + if (IS_ALIGNED((unsigned long)va, PMD_SIZE) && size >= PMD_SIZE) + map = hyp_fixblock_map(__hyp_pa(va), &map_size); + else + map = hyp_fixmap_map(__hyp_pa(va)); + + func(map, map_size); + + if (map_size == PMD_SIZE) + hyp_fixblock_unmap(); + else + hyp_fixmap_unmap(); + + size -= map_size; + va += map_size; + } +} + static void clean_dcache_guest_page(void *va, size_t size) { - __clean_dcache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size); - hyp_fixmap_unmap(); + __apply_guest_page(va, size, __clean_dcache_guest_page); } static void invalidate_icache_guest_page(void *va, size_t size) { - __invalidate_icache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size); - hyp_fixmap_unmap(); + __apply_guest_page(va, size, __invalidate_icache_guest_page); } int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd) @@ -255,8 +280,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd) }; guest_lock_component(vm); - ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, - guest_stage2_force_pte_cb); + ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, NULL); guest_unlock_component(vm); if (ret) return ret; @@ -309,7 +333,7 @@ int __pkvm_prot_finalize(void) */ kvm_flush_dcache_to_poc(params, sizeof(*params)); - write_sysreg(params->hcr_el2, hcr_el2); + write_sysreg_hcr(params->hcr_el2); __load_stage2(&host_mmu.arch.mmu, &host_mmu.arch); /* @@ -455,6 +479,7 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range) { struct kvm_mem_range cur; kvm_pte_t pte; + u64 granule; s8 level; int ret; @@ -467,22 +492,26 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range) return -EAGAIN; if (pte) { - WARN_ON(addr_is_memory(addr) && hyp_phys_to_page(addr)->host_state != PKVM_NOPAGE); + WARN_ON(addr_is_memory(addr) && + get_host_state(hyp_phys_to_page(addr)) != PKVM_NOPAGE); return -EPERM; } - do { - u64 granule = kvm_granule_size(level); + for (; level <= KVM_PGTABLE_LAST_LEVEL; level++) { + if (!kvm_level_supports_block_mapping(level)) + continue; + granule = kvm_granule_size(level); cur.start = ALIGN_DOWN(addr, granule); cur.end = cur.start + granule; - level++; - } while ((level <= KVM_PGTABLE_LAST_LEVEL) && - !(kvm_level_supports_block_mapping(level) && - range_included(&cur, range))); + if (!range_included(&cur, range)) + continue; + *range = cur; + return 0; + } - *range = cur; + WARN_ON(1); - return 0; + return -EINVAL; } int host_stage2_idmap_locked(phys_addr_t addr, u64 size, @@ -493,10 +522,8 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size, static void __host_update_page_state(phys_addr_t addr, u64 size, enum pkvm_page_state state) { - phys_addr_t end = addr + size; - - for (; addr < 
end; addr += PAGE_SIZE) - hyp_phys_to_page(addr)->host_state = state; + for_each_hyp_page(page, addr, size) + set_host_state(page, state); } int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id) @@ -618,16 +645,16 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size, static int __host_check_page_state_range(u64 addr, u64 size, enum pkvm_page_state state) { - u64 end = addr + size; int ret; - ret = check_range_allowed_memory(addr, end); + ret = check_range_allowed_memory(addr, addr + size); if (ret) return ret; hyp_assert_lock_held(&host_mmu.lock); - for (; addr < end; addr += PAGE_SIZE) { - if (hyp_phys_to_page(addr)->host_state != state) + + for_each_hyp_page(page, addr, size) { + if (get_host_state(page) != state) return -EPERM; } @@ -637,7 +664,7 @@ static int __host_check_page_state_range(u64 addr, u64 size, static int __host_set_page_state_range(u64 addr, u64 size, enum pkvm_page_state state) { - if (hyp_phys_to_page(addr)->host_state == PKVM_NOPAGE) { + if (get_host_state(hyp_phys_to_page(addr)) == PKVM_NOPAGE) { int ret = host_stage2_idmap_locked(addr, size, PKVM_HOST_MEM_PROT); if (ret) @@ -649,24 +676,20 @@ static int __host_set_page_state_range(u64 addr, u64 size, return 0; } -static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr) +static void __hyp_set_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state) { - if (!kvm_pte_valid(pte)) - return PKVM_NOPAGE; - - return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte)); + for_each_hyp_page(page, phys, size) + set_hyp_state(page, state); } -static int __hyp_check_page_state_range(u64 addr, u64 size, - enum pkvm_page_state state) +static int __hyp_check_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state) { - struct check_walk_data d = { - .desired = state, - .get_page_state = hyp_get_page_state, - }; + for_each_hyp_page(page, phys, size) { + if (get_hyp_state(page) != state) + return -EPERM; + } - hyp_assert_lock_held(&pkvm_pgd_lock); - return check_page_state_range(&pkvm_pgtable, addr, size, &d); + return 0; } static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr) @@ -677,10 +700,9 @@ static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr) return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte)); } -static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr, +static int __guest_check_page_state_range(struct pkvm_hyp_vm *vm, u64 addr, u64 size, enum pkvm_page_state state) { - struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu); struct check_walk_data d = { .desired = state, .get_page_state = guest_get_page_state, @@ -693,8 +715,6 @@ static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr, int __pkvm_host_share_hyp(u64 pfn) { u64 phys = hyp_pfn_to_phys(pfn); - void *virt = __hyp_va(phys); - enum kvm_pgtable_prot prot; u64 size = PAGE_SIZE; int ret; @@ -704,14 +724,11 @@ int __pkvm_host_share_hyp(u64 pfn) ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED); if (ret) goto unlock; - if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) { - ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE); - if (ret) - goto unlock; - } + ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE); + if (ret) + goto unlock; - prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED); - WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot)); + __hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED); WARN_ON(__host_set_page_state_range(phys, size, 
PKVM_PAGE_SHARED_OWNED)); unlock: @@ -734,7 +751,7 @@ int __pkvm_host_unshare_hyp(u64 pfn) ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED); if (ret) goto unlock; - ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_SHARED_BORROWED); + ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED); if (ret) goto unlock; if (hyp_page_count((void *)virt)) { @@ -742,7 +759,7 @@ int __pkvm_host_unshare_hyp(u64 pfn) goto unlock; } - WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size); + __hyp_set_page_state_range(phys, size, PKVM_NOPAGE); WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED)); unlock: @@ -757,7 +774,6 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages) u64 phys = hyp_pfn_to_phys(pfn); u64 size = PAGE_SIZE * nr_pages; void *virt = __hyp_va(phys); - enum kvm_pgtable_prot prot; int ret; host_lock_component(); @@ -766,14 +782,12 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages) ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED); if (ret) goto unlock; - if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) { - ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE); - if (ret) - goto unlock; - } + ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE); + if (ret) + goto unlock; - prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED); - WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot)); + __hyp_set_page_state_range(phys, size, PKVM_PAGE_OWNED); + WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP)); WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP)); unlock: @@ -793,15 +807,14 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages) host_lock_component(); hyp_lock_component(); - ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_OWNED); + ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_OWNED); + if (ret) + goto unlock; + ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE); if (ret) goto unlock; - if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) { - ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE); - if (ret) - goto unlock; - } + __hyp_set_page_state_range(phys, size, PKVM_NOPAGE); WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size); WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST)); @@ -816,24 +829,30 @@ int hyp_pin_shared_mem(void *from, void *to) { u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE); u64 end = PAGE_ALIGN((u64)to); + u64 phys = __hyp_pa(start); u64 size = end - start; + struct hyp_page *p; int ret; host_lock_component(); hyp_lock_component(); - ret = __host_check_page_state_range(__hyp_pa(start), size, - PKVM_PAGE_SHARED_OWNED); + ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED); if (ret) goto unlock; - ret = __hyp_check_page_state_range(start, size, - PKVM_PAGE_SHARED_BORROWED); + ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED); if (ret) goto unlock; - for (cur = start; cur < end; cur += PAGE_SIZE) - hyp_page_ref_inc(hyp_virt_to_page(cur)); + for (cur = start; cur < end; cur += PAGE_SIZE) { + p = hyp_virt_to_page(cur); + hyp_page_ref_inc(p); + if (p->refcount == 1) + WARN_ON(pkvm_create_mappings_locked((void *)cur, + (void *)cur + PAGE_SIZE, + PAGE_HYP)); + } unlock: hyp_unlock_component(); @@ -846,12 +865,17 @@ void hyp_unpin_shared_mem(void *from, void *to) { u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE); u64 end = PAGE_ALIGN((u64)to); + struct hyp_page *p; host_lock_component(); hyp_lock_component(); - for (cur = start; cur < end; cur += PAGE_SIZE) - 
hyp_page_ref_dec(hyp_virt_to_page(cur)); + for (cur = start; cur < end; cur += PAGE_SIZE) { + p = hyp_virt_to_page(cur); + if (p->refcount == 1) + WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, cur, PAGE_SIZE) != PAGE_SIZE); + hyp_page_ref_dec(p); + } hyp_unlock_component(); host_unlock_component(); @@ -887,49 +911,84 @@ int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages) return ret; } -int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu, +static int __guest_check_transition_size(u64 phys, u64 ipa, u64 nr_pages, u64 *size) +{ + size_t block_size; + + if (nr_pages == 1) { + *size = PAGE_SIZE; + return 0; + } + + /* We solely support second to last level huge mapping */ + block_size = kvm_granule_size(KVM_PGTABLE_LAST_LEVEL - 1); + + if (nr_pages != block_size >> PAGE_SHIFT) + return -EINVAL; + + if (!IS_ALIGNED(phys | ipa, block_size)) + return -EINVAL; + + *size = block_size; + return 0; +} + +int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot) { struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu); u64 phys = hyp_pfn_to_phys(pfn); u64 ipa = hyp_pfn_to_phys(gfn); - struct hyp_page *page; + u64 size; int ret; if (prot & ~KVM_PGTABLE_PROT_RWX) return -EINVAL; - ret = check_range_allowed_memory(phys, phys + PAGE_SIZE); + ret = __guest_check_transition_size(phys, ipa, nr_pages, &size); + if (ret) + return ret; + + ret = check_range_allowed_memory(phys, phys + size); if (ret) return ret; host_lock_component(); guest_lock_component(vm); - ret = __guest_check_page_state_range(vcpu, ipa, PAGE_SIZE, PKVM_NOPAGE); + ret = __guest_check_page_state_range(vm, ipa, size, PKVM_NOPAGE); if (ret) goto unlock; - page = hyp_phys_to_page(phys); - switch (page->host_state) { - case PKVM_PAGE_OWNED: - WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_OWNED)); - break; - case PKVM_PAGE_SHARED_OWNED: - if (page->host_share_guest_count) - break; - /* Only host to np-guest multi-sharing is tolerated */ - WARN_ON(1); - fallthrough; - default: - ret = -EPERM; - goto unlock; + for_each_hyp_page(page, phys, size) { + switch (get_host_state(page)) { + case PKVM_PAGE_OWNED: + continue; + case PKVM_PAGE_SHARED_OWNED: + if (page->host_share_guest_count == U32_MAX) { + ret = -EBUSY; + goto unlock; + } + + /* Only host to np-guest multi-sharing is tolerated */ + if (page->host_share_guest_count) + continue; + + fallthrough; + default: + ret = -EPERM; + goto unlock; + } } - WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys, + for_each_hyp_page(page, phys, size) { + set_host_state(page, PKVM_PAGE_SHARED_OWNED); + page->host_share_guest_count++; + } + + WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, size, phys, pkvm_mkstate(prot, PKVM_PAGE_SHARED_BORROWED), &vcpu->vcpu.arch.pkvm_memcache, 0)); - page->host_share_guest_count++; unlock: guest_unlock_component(vm); @@ -938,10 +997,9 @@ unlock: return ret; } -static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa) +static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa, u64 size) { enum pkvm_page_state state; - struct hyp_page *page; kvm_pte_t pte; u64 phys; s8 level; @@ -952,7 +1010,7 @@ static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ip return ret; if (!kvm_pte_valid(pte)) return -ENOENT; - if (level != KVM_PGTABLE_LAST_LEVEL) + if (kvm_granule_size(level) != size) return -E2BIG; state = guest_get_page_state(pte, ipa); @@ -960,43 +1018,49 @@ static int __check_host_shared_guest(struct 
pkvm_hyp_vm *vm, u64 *__phys, u64 ip return -EPERM; phys = kvm_pte_to_phys(pte); - ret = check_range_allowed_memory(phys, phys + PAGE_SIZE); + ret = check_range_allowed_memory(phys, phys + size); if (WARN_ON(ret)) return ret; - page = hyp_phys_to_page(phys); - if (page->host_state != PKVM_PAGE_SHARED_OWNED) - return -EPERM; - if (WARN_ON(!page->host_share_guest_count)) - return -EINVAL; + for_each_hyp_page(page, phys, size) { + if (get_host_state(page) != PKVM_PAGE_SHARED_OWNED) + return -EPERM; + if (WARN_ON(!page->host_share_guest_count)) + return -EINVAL; + } *__phys = phys; return 0; } -int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *vm) +int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm) { u64 ipa = hyp_pfn_to_phys(gfn); - struct hyp_page *page; - u64 phys; + u64 size, phys; int ret; + ret = __guest_check_transition_size(0, ipa, nr_pages, &size); + if (ret) + return ret; + host_lock_component(); guest_lock_component(vm); - ret = __check_host_shared_guest(vm, &phys, ipa); + ret = __check_host_shared_guest(vm, &phys, ipa, size); if (ret) goto unlock; - ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE); + ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, size); if (ret) goto unlock; - page = hyp_phys_to_page(phys); - page->host_share_guest_count--; - if (!page->host_share_guest_count) - WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_OWNED)); + for_each_hyp_page(page, phys, size) { + /* __check_host_shared_guest() protects against underflow */ + page->host_share_guest_count--; + if (!page->host_share_guest_count) + set_host_state(page, PKVM_PAGE_OWNED); + } unlock: guest_unlock_component(vm); @@ -1005,7 +1069,7 @@ unlock: return ret; } -static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa) +static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa, u64 size) { u64 phys; int ret; @@ -1016,7 +1080,7 @@ static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa) host_lock_component(); guest_lock_component(vm); - ret = __check_host_shared_guest(vm, &phys, ipa); + ret = __check_host_shared_guest(vm, &phys, ipa, size); guest_unlock_component(vm); host_unlock_component(); @@ -1036,7 +1100,7 @@ int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_ if (prot & ~KVM_PGTABLE_PROT_RWX) return -EINVAL; - assert_host_shared_guest(vm, ipa); + assert_host_shared_guest(vm, ipa, PAGE_SIZE); guest_lock_component(vm); ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0); guest_unlock_component(vm); @@ -1044,33 +1108,41 @@ int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_ return ret; } -int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *vm) +int __pkvm_host_wrprotect_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm) { - u64 ipa = hyp_pfn_to_phys(gfn); + u64 size, ipa = hyp_pfn_to_phys(gfn); int ret; if (pkvm_hyp_vm_is_protected(vm)) return -EPERM; - assert_host_shared_guest(vm, ipa); + ret = __guest_check_transition_size(0, ipa, nr_pages, &size); + if (ret) + return ret; + + assert_host_shared_guest(vm, ipa, size); guest_lock_component(vm); - ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, PAGE_SIZE); + ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, size); guest_unlock_component(vm); return ret; } -int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *vm) +int __pkvm_host_test_clear_young_guest(u64 gfn, u64 nr_pages, bool mkold, struct pkvm_hyp_vm *vm) { - u64 ipa = hyp_pfn_to_phys(gfn); + u64 size, ipa = 
hyp_pfn_to_phys(gfn); int ret; if (pkvm_hyp_vm_is_protected(vm)) return -EPERM; - assert_host_shared_guest(vm, ipa); + ret = __guest_check_transition_size(0, ipa, nr_pages, &size); + if (ret) + return ret; + + assert_host_shared_guest(vm, ipa, size); guest_lock_component(vm); - ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, PAGE_SIZE, mkold); + ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, size, mkold); guest_unlock_component(vm); return ret; @@ -1084,10 +1156,210 @@ int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu) if (pkvm_hyp_vm_is_protected(vm)) return -EPERM; - assert_host_shared_guest(vm, ipa); + assert_host_shared_guest(vm, ipa, PAGE_SIZE); guest_lock_component(vm); kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0); guest_unlock_component(vm); return 0; } + +#ifdef CONFIG_NVHE_EL2_DEBUG +struct pkvm_expected_state { + enum pkvm_page_state host; + enum pkvm_page_state hyp; + enum pkvm_page_state guest[2]; /* [ gfn, gfn + 1 ] */ +}; + +static struct pkvm_expected_state selftest_state; +static struct hyp_page *selftest_page; + +static struct pkvm_hyp_vm selftest_vm = { + .kvm = { + .arch = { + .mmu = { + .arch = &selftest_vm.kvm.arch, + .pgt = &selftest_vm.pgt, + }, + }, + }, +}; + +static struct pkvm_hyp_vcpu selftest_vcpu = { + .vcpu = { + .arch = { + .hw_mmu = &selftest_vm.kvm.arch.mmu, + }, + .kvm = &selftest_vm.kvm, + }, +}; + +static void init_selftest_vm(void *virt) +{ + struct hyp_page *p = hyp_virt_to_page(virt); + int i; + + selftest_vm.kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr; + WARN_ON(kvm_guest_prepare_stage2(&selftest_vm, virt)); + + for (i = 0; i < pkvm_selftest_pages(); i++) { + if (p[i].refcount) + continue; + p[i].refcount = 1; + hyp_put_page(&selftest_vm.pool, hyp_page_to_virt(&p[i])); + } +} + +static u64 selftest_ipa(void) +{ + return BIT(selftest_vm.pgt.ia_bits - 1); +} + +static void assert_page_state(void) +{ + void *virt = hyp_page_to_virt(selftest_page); + u64 size = PAGE_SIZE << selftest_page->order; + struct pkvm_hyp_vcpu *vcpu = &selftest_vcpu; + u64 phys = hyp_virt_to_phys(virt); + u64 ipa[2] = { selftest_ipa(), selftest_ipa() + PAGE_SIZE }; + struct pkvm_hyp_vm *vm; + + vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu); + + host_lock_component(); + WARN_ON(__host_check_page_state_range(phys, size, selftest_state.host)); + host_unlock_component(); + + hyp_lock_component(); + WARN_ON(__hyp_check_page_state_range(phys, size, selftest_state.hyp)); + hyp_unlock_component(); + + guest_lock_component(&selftest_vm); + WARN_ON(__guest_check_page_state_range(vm, ipa[0], size, selftest_state.guest[0])); + WARN_ON(__guest_check_page_state_range(vm, ipa[1], size, selftest_state.guest[1])); + guest_unlock_component(&selftest_vm); +} + +#define assert_transition_res(res, fn, ...) 
\ + do { \ + WARN_ON(fn(__VA_ARGS__) != res); \ + assert_page_state(); \ + } while (0) + +void pkvm_ownership_selftest(void *base) +{ + enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_RWX; + void *virt = hyp_alloc_pages(&host_s2_pool, 0); + struct pkvm_hyp_vcpu *vcpu = &selftest_vcpu; + struct pkvm_hyp_vm *vm = &selftest_vm; + u64 phys, size, pfn, gfn; + + WARN_ON(!virt); + selftest_page = hyp_virt_to_page(virt); + selftest_page->refcount = 0; + init_selftest_vm(base); + + size = PAGE_SIZE << selftest_page->order; + phys = hyp_virt_to_phys(virt); + pfn = hyp_phys_to_pfn(phys); + gfn = hyp_phys_to_pfn(selftest_ipa()); + + selftest_state.host = PKVM_NOPAGE; + selftest_state.hyp = PKVM_PAGE_OWNED; + selftest_state.guest[0] = selftest_state.guest[1] = PKVM_NOPAGE; + assert_page_state(); + assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1); + assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn); + assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn); + assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1); + assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1); + assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size); + assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot); + assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm); + + selftest_state.host = PKVM_PAGE_OWNED; + selftest_state.hyp = PKVM_NOPAGE; + assert_transition_res(0, __pkvm_hyp_donate_host, pfn, 1); + assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1); + assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn); + assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1); + assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm); + assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size); + + selftest_state.host = PKVM_PAGE_SHARED_OWNED; + selftest_state.hyp = PKVM_PAGE_SHARED_BORROWED; + assert_transition_res(0, __pkvm_host_share_hyp, pfn); + assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn); + assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1); + assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1); + assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1); + assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot); + assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm); + + assert_transition_res(0, hyp_pin_shared_mem, virt, virt + size); + assert_transition_res(0, hyp_pin_shared_mem, virt, virt + size); + hyp_unpin_shared_mem(virt, virt + size); + WARN_ON(hyp_page_count(virt) != 1); + assert_transition_res(-EBUSY, __pkvm_host_unshare_hyp, pfn); + assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn); + assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1); + assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1); + assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1); + assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot); + assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm); + + hyp_unpin_shared_mem(virt, virt + size); + assert_page_state(); + WARN_ON(hyp_page_count(virt)); + + selftest_state.host = PKVM_PAGE_OWNED; + selftest_state.hyp = PKVM_NOPAGE; + assert_transition_res(0, __pkvm_host_unshare_hyp, pfn); + + selftest_state.host = PKVM_PAGE_SHARED_OWNED; + selftest_state.hyp = PKVM_NOPAGE; + assert_transition_res(0, __pkvm_host_share_ffa, pfn, 1); + assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1); + assert_transition_res(-EPERM, 
__pkvm_host_donate_hyp, pfn, 1); + assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn); + assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn); + assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1); + assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot); + assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm); + assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size); + + selftest_state.host = PKVM_PAGE_OWNED; + selftest_state.hyp = PKVM_NOPAGE; + assert_transition_res(0, __pkvm_host_unshare_ffa, pfn, 1); + assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1); + + selftest_state.host = PKVM_PAGE_SHARED_OWNED; + selftest_state.guest[0] = PKVM_PAGE_SHARED_BORROWED; + assert_transition_res(0, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot); + assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot); + assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1); + assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1); + assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn); + assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn); + assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1); + assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size); + + selftest_state.guest[1] = PKVM_PAGE_SHARED_BORROWED; + assert_transition_res(0, __pkvm_host_share_guest, pfn, gfn + 1, 1, vcpu, prot); + WARN_ON(hyp_virt_to_page(virt)->host_share_guest_count != 2); + + selftest_state.guest[0] = PKVM_NOPAGE; + assert_transition_res(0, __pkvm_host_unshare_guest, gfn, 1, vm); + + selftest_state.guest[1] = PKVM_NOPAGE; + selftest_state.host = PKVM_PAGE_OWNED; + assert_transition_res(0, __pkvm_host_unshare_guest, gfn + 1, 1, vm); + + selftest_state.host = PKVM_NOPAGE; + selftest_state.hyp = PKVM_PAGE_OWNED; + assert_transition_res(0, __pkvm_host_donate_hyp, pfn, 1); + + selftest_page->refcount = 1; + hyp_put_page(&host_s2_pool, virt); +} +#endif diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c index f41c7440b34b4..ae8391baebc30 100644 --- a/arch/arm64/kvm/hyp/nvhe/mm.c +++ b/arch/arm64/kvm/hyp/nvhe/mm.c @@ -229,9 +229,8 @@ int hyp_map_vectors(void) return 0; } -void *hyp_fixmap_map(phys_addr_t phys) +static void *fixmap_map_slot(struct hyp_fixmap_slot *slot, phys_addr_t phys) { - struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots); kvm_pte_t pte, *ptep = slot->ptep; pte = *ptep; @@ -243,10 +242,21 @@ void *hyp_fixmap_map(phys_addr_t phys) return (void *)slot->addr; } +void *hyp_fixmap_map(phys_addr_t phys) +{ + return fixmap_map_slot(this_cpu_ptr(&fixmap_slots), phys); +} + static void fixmap_clear_slot(struct hyp_fixmap_slot *slot) { kvm_pte_t *ptep = slot->ptep; u64 addr = slot->addr; + u32 level; + + if (FIELD_GET(KVM_PTE_TYPE, *ptep) == KVM_PTE_TYPE_PAGE) + level = KVM_PGTABLE_LAST_LEVEL; + else + level = KVM_PGTABLE_LAST_LEVEL - 1; /* create_fixblock() guarantees PMD level */ WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID); @@ -260,7 +270,7 @@ static void fixmap_clear_slot(struct hyp_fixmap_slot *slot) * https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03 */ dsb(ishst); - __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), KVM_PGTABLE_LAST_LEVEL); + __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level); dsb(ish); isb(); } @@ -273,9 +283,9 @@ void hyp_fixmap_unmap(void) static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit) { - struct 
hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)ctx->arg); + struct hyp_fixmap_slot *slot = (struct hyp_fixmap_slot *)ctx->arg; - if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_LAST_LEVEL) + if (!kvm_pte_valid(ctx->old) || (ctx->end - ctx->start) != kvm_granule_size(ctx->level)) return -EINVAL; slot->addr = ctx->addr; @@ -296,13 +306,84 @@ static int create_fixmap_slot(u64 addr, u64 cpu) struct kvm_pgtable_walker walker = { .cb = __create_fixmap_slot_cb, .flags = KVM_PGTABLE_WALK_LEAF, - .arg = (void *)cpu, + .arg = per_cpu_ptr(&fixmap_slots, cpu), }; return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker); } -int hyp_create_pcpu_fixmap(void) +#if PAGE_SHIFT < 16 +#define HAS_FIXBLOCK +static struct hyp_fixmap_slot hyp_fixblock_slot; +static DEFINE_HYP_SPINLOCK(hyp_fixblock_lock); +#endif + +static int create_fixblock(void) +{ +#ifdef HAS_FIXBLOCK + struct kvm_pgtable_walker walker = { + .cb = __create_fixmap_slot_cb, + .flags = KVM_PGTABLE_WALK_LEAF, + .arg = &hyp_fixblock_slot, + }; + unsigned long addr; + phys_addr_t phys; + int ret, i; + + /* Find a RAM phys address, PMD aligned */ + for (i = 0; i < hyp_memblock_nr; i++) { + phys = ALIGN(hyp_memory[i].base, PMD_SIZE); + if (phys + PMD_SIZE < (hyp_memory[i].base + hyp_memory[i].size)) + break; + } + + if (i >= hyp_memblock_nr) + return -EINVAL; + + hyp_spin_lock(&pkvm_pgd_lock); + addr = ALIGN(__io_map_base, PMD_SIZE); + ret = __pkvm_alloc_private_va_range(addr, PMD_SIZE); + if (ret) + goto unlock; + + ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PMD_SIZE, phys, PAGE_HYP); + if (ret) + goto unlock; + + ret = kvm_pgtable_walk(&pkvm_pgtable, addr, PMD_SIZE, &walker); + +unlock: + hyp_spin_unlock(&pkvm_pgd_lock); + + return ret; +#else + return 0; +#endif +} + +void *hyp_fixblock_map(phys_addr_t phys, size_t *size) +{ +#ifdef HAS_FIXBLOCK + *size = PMD_SIZE; + hyp_spin_lock(&hyp_fixblock_lock); + return fixmap_map_slot(&hyp_fixblock_slot, phys); +#else + *size = PAGE_SIZE; + return hyp_fixmap_map(phys); +#endif +} + +void hyp_fixblock_unmap(void) +{ +#ifdef HAS_FIXBLOCK + fixmap_clear_slot(&hyp_fixblock_slot); + hyp_spin_unlock(&hyp_fixblock_lock); +#else + hyp_fixmap_unmap(); +#endif +} + +int hyp_create_fixmap(void) { unsigned long addr, i; int ret; @@ -322,7 +403,7 @@ int hyp_create_pcpu_fixmap(void) return ret; } - return 0; + return create_fixblock(); } int hyp_create_idmap(u32 hyp_va_bits) diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 5a335a51deca1..338505cb0171b 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -372,6 +372,18 @@ static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu) hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1); } +static void unpin_host_sve_state(struct pkvm_hyp_vcpu *hyp_vcpu) +{ + void *sve_state; + + if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_SVE)) + return; + + sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state); + hyp_unpin_shared_mem(sve_state, + sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu)); +} + static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[], unsigned int nr_vcpus) { @@ -384,6 +396,7 @@ static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[], continue; unpin_host_vcpu(hyp_vcpu->host_vcpu); + unpin_host_sve_state(hyp_vcpu); } } @@ -398,12 +411,40 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm, pkvm_init_features_from_host(hyp_vm, host_kvm); } -static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu) 
+static int pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu) { struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu; + unsigned int sve_max_vl; + size_t sve_state_size; + void *sve_state; + int ret = 0; - if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) + if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) { vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED); + return 0; + } + + /* Limit guest vector length to the maximum supported by the host. */ + sve_max_vl = min(READ_ONCE(host_vcpu->arch.sve_max_vl), kvm_host_sve_max_vl); + sve_state_size = sve_state_size_from_vl(sve_max_vl); + sve_state = kern_hyp_va(READ_ONCE(host_vcpu->arch.sve_state)); + + if (!sve_state || !sve_state_size) { + ret = -EINVAL; + goto err; + } + + ret = hyp_pin_shared_mem(sve_state, sve_state + sve_state_size); + if (ret) + goto err; + + vcpu->arch.sve_state = sve_state; + vcpu->arch.sve_max_vl = sve_max_vl; + + return 0; +err: + clear_bit(KVM_ARM_VCPU_SVE, vcpu->kvm->arch.vcpu_features); + return ret; } static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu, @@ -432,7 +473,7 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu, if (ret) goto done; - pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu); + ret = pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu); done: if (ret) unpin_host_vcpu(host_vcpu); diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index d62bcb5634a21..a48d3f5a5afba 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -28,6 +28,7 @@ static void *vmemmap_base; static void *vm_table_base; static void *hyp_pgt_base; static void *host_s2_pgt_base; +static void *selftest_base; static void *ffa_proxy_pages; static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops; static struct hyp_pool hpool; @@ -38,6 +39,11 @@ static int divide_memory_pool(void *virt, unsigned long size) hyp_early_alloc_init(virt, size); + nr_pages = pkvm_selftest_pages(); + selftest_base = hyp_early_alloc_contig(nr_pages); + if (nr_pages && !selftest_base) + return -ENOMEM; + nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page)); vmemmap_base = hyp_early_alloc_contig(nr_pages); if (!vmemmap_base) @@ -119,6 +125,10 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size, if (ret) return ret; + ret = pkvm_create_mappings(__hyp_data_start, __hyp_data_end, PAGE_HYP); + if (ret) + return ret; + ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO); if (ret) return ret; @@ -180,6 +190,7 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit) { enum pkvm_page_state state; + struct hyp_page *page; phys_addr_t phys; if (!kvm_pte_valid(ctx->old)) @@ -192,19 +203,25 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx, if (!addr_is_memory(phys)) return -EINVAL; + page = hyp_phys_to_page(phys); + /* * Adjust the host stage-2 mappings to match the ownership attributes - * configured in the hypervisor stage-1. + * configured in the hypervisor stage-1, and make sure to propagate them + * to the hyp_vmemmap state. 
*/ state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old)); switch (state) { case PKVM_PAGE_OWNED: + set_hyp_state(page, PKVM_PAGE_OWNED); return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP); case PKVM_PAGE_SHARED_OWNED: - hyp_phys_to_page(phys)->host_state = PKVM_PAGE_SHARED_BORROWED; + set_hyp_state(page, PKVM_PAGE_SHARED_OWNED); + set_host_state(page, PKVM_PAGE_SHARED_BORROWED); break; case PKVM_PAGE_SHARED_BORROWED: - hyp_phys_to_page(phys)->host_state = PKVM_PAGE_SHARED_OWNED; + set_hyp_state(page, PKVM_PAGE_SHARED_BORROWED); + set_host_state(page, PKVM_PAGE_SHARED_OWNED); break; default: return -EINVAL; @@ -295,7 +312,7 @@ void __noreturn __pkvm_init_finalise(void) if (ret) goto out; - ret = hyp_create_pcpu_fixmap(); + ret = hyp_create_fixmap(); if (ret) goto out; @@ -304,6 +321,8 @@ void __noreturn __pkvm_init_finalise(void) goto out; pkvm_hyp_vm_table_init(vm_table_base); + + pkvm_ownership_selftest(selftest_base); out: /* * We tail-called to here from handle___pkvm_init() and will not return, diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 7d2ba6ef02618..ccd575d5f6dec 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -33,66 +33,19 @@ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); DEFINE_PER_CPU(unsigned long, kvm_hyp_vector); -extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc); - -static void __activate_cptr_traps(struct kvm_vcpu *vcpu) -{ - u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */ - - if (!guest_owns_fp_regs()) - __activate_traps_fpsimd32(vcpu); - - if (has_hvhe()) { - val |= CPACR_EL1_TTA; - - if (guest_owns_fp_regs()) { - val |= CPACR_EL1_FPEN; - if (vcpu_has_sve(vcpu)) - val |= CPACR_EL1_ZEN; - } - - write_sysreg(val, cpacr_el1); - } else { - val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1; - - /* - * Always trap SME since it's not supported in KVM. - * TSM is RES1 if SME isn't implemented. 
- */ - val |= CPTR_EL2_TSM; - - if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs()) - val |= CPTR_EL2_TZ; - - if (!guest_owns_fp_regs()) - val |= CPTR_EL2_TFP; +struct fgt_masks hfgrtr_masks; +struct fgt_masks hfgwtr_masks; +struct fgt_masks hfgitr_masks; +struct fgt_masks hdfgrtr_masks; +struct fgt_masks hdfgwtr_masks; +struct fgt_masks hafgrtr_masks; +struct fgt_masks hfgrtr2_masks; +struct fgt_masks hfgwtr2_masks; +struct fgt_masks hfgitr2_masks; +struct fgt_masks hdfgrtr2_masks; +struct fgt_masks hdfgwtr2_masks; - write_sysreg(val, cptr_el2); - } -} - -static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu) -{ - if (has_hvhe()) { - u64 val = CPACR_EL1_FPEN; - - if (cpus_have_final_cap(ARM64_SVE)) - val |= CPACR_EL1_ZEN; - if (cpus_have_final_cap(ARM64_SME)) - val |= CPACR_EL1_SMEN; - - write_sysreg(val, cpacr_el1); - } else { - u64 val = CPTR_NVHE_EL2_RES1; - - if (!cpus_have_final_cap(ARM64_SVE)) - val |= CPTR_EL2_TZ; - if (!cpus_have_final_cap(ARM64_SME)) - val |= CPTR_EL2_TSM; - - write_sysreg(val, cptr_el2); - } -} +extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc); static void __activate_traps(struct kvm_vcpu *vcpu) { @@ -142,7 +95,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) __deactivate_traps_common(vcpu); - write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2); + write_sysreg_hcr(this_cpu_ptr(&kvm_init_params)->hcr_el2); __deactivate_cptr_traps(vcpu); write_sysreg(__kvm_hyp_host_vector, vbar_el2); @@ -319,7 +272,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) * We're about to restore some new MMU state. Make sure * ongoing page-table walks that have started before we * trapped to EL2 have completed. This also synchronises the - * above disabling of SPE and TRBE. + * above disabling of BRBE, SPE and TRBE. * * See DDI0487I.a D8.1.5 "Out-of-context translation regimes", * rule R_LFHQG and subsequent information statements. diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index df5cc74a7dd0d..c351b4abd5dbf 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -11,12 +11,6 @@ #include <asm/kvm_pgtable.h> #include <asm/stage2_pgtable.h> - -#define KVM_PTE_TYPE BIT(1) -#define KVM_PTE_TYPE_BLOCK 0 -#define KVM_PTE_TYPE_PAGE 1 -#define KVM_PTE_TYPE_TABLE 1 - struct kvm_pgtable_walk_data { struct kvm_pgtable_walker *walker; diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 50aa8dbcae75b..d81275790e69b 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -296,12 +296,19 @@ void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if) } /* - * Prevent the guest from touching the ICC_SRE_EL1 system - * register. Note that this may not have any effect, as - * ICC_SRE_EL2.Enable being RAO/WI is a valid implementation. + * GICv5 BET0 FEAT_GCIE_LEGACY doesn't include ICC_SRE_EL2. This is due + * to be relaxed in a future spec release, at which point this in + * condition can be dropped. */ - write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, - ICC_SRE_EL2); + if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF)) { + /* + * Prevent the guest from touching the ICC_SRE_EL1 system + * register. Note that this may not have any effect, as + * ICC_SRE_EL2.Enable being RAO/WI is a valid implementation. 
+ */ + write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, + ICC_SRE_EL2); + } /* * If we need to trap system registers, we must write @@ -322,8 +329,14 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if) cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); } - val = read_gicreg(ICC_SRE_EL2); - write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2); + /* + * Can be dropped in the future when GICv5 spec is relaxed. See comment + * above. + */ + if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF)) { + val = read_gicreg(ICC_SRE_EL2); + write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2); + } if (!cpu_if->vgic_sre) { /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */ @@ -423,10 +436,20 @@ void __vgic_v3_init_lrs(void) */ u64 __vgic_v3_get_gic_config(void) { - u64 val, sre = read_gicreg(ICC_SRE_EL1); + u64 val, sre; unsigned long flags = 0; /* + * In compat mode, we cannot access ICC_SRE_EL1 at any EL + * other than EL1 itself; just return the + * ICH_VTR_EL2. ICC_IDR0_EL1 is only implemented on a GICv5 + * system, so we first check if we have GICv5 support. + */ + if (cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF)) + return read_gicreg(ICH_VTR_EL2); + + sre = read_gicreg(ICC_SRE_EL1); + /* * To check whether we have a MMIO-based (GICv2 compatible) * CPU interface, we need to disable the system register * view. @@ -446,7 +469,7 @@ u64 __vgic_v3_get_gic_config(void) if (has_vhe()) { flags = local_daif_save(); } else { - sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO); + sysreg_clear_set_hcr(0, HCR_AMO | HCR_FMO | HCR_IMO); isb(); } @@ -461,7 +484,7 @@ u64 __vgic_v3_get_gic_config(void) if (has_vhe()) { local_daif_restore(flags); } else { - sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0); + sysreg_clear_set_hcr(HCR_AMO | HCR_FMO | HCR_IMO, 0); isb(); } @@ -471,6 +494,16 @@ u64 __vgic_v3_get_gic_config(void) return val; } +static void __vgic_v3_compat_mode_enable(void) +{ + if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF)) + return; + + sysreg_clear_set_s(SYS_ICH_VCTLR_EL2, 0, ICH_VCTLR_EL2_V3); + /* Wait for V3 to become enabled */ + isb(); +} + static u64 __vgic_v3_read_vmcr(void) { return read_gicreg(ICH_VMCR_EL2); @@ -490,6 +523,8 @@ void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if) void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if) { + __vgic_v3_compat_mode_enable(); + /* * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen * is dependent on ICC_SRE_EL1.SRE, and we have to perform the @@ -1050,7 +1085,7 @@ static bool __vgic_v3_check_trap_forwarding(struct kvm_vcpu *vcpu, { u64 ich_hcr; - if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu)) + if (!is_nested_ctxt(vcpu)) return false; ich_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2); @@ -1058,11 +1093,11 @@ static bool __vgic_v3_check_trap_forwarding(struct kvm_vcpu *vcpu, switch (sysreg) { case SYS_ICC_IGRPEN0_EL1: if (is_read && - (__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1)) + (__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGRTR_EL2_ICC_IGRPENn_EL1)) return true; if (!is_read && - (__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1)) + (__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGWTR_EL2_ICC_IGRPENn_EL1)) return true; fallthrough; @@ -1079,11 +1114,11 @@ static bool __vgic_v3_check_trap_forwarding(struct kvm_vcpu *vcpu, case SYS_ICC_IGRPEN1_EL1: if (is_read && - (__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1)) + (__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGRTR_EL2_ICC_IGRPENn_EL1)) return true; if (!is_read && - (__vcpu_sys_reg(vcpu, HFGWTR_EL2) & 
HFGxTR_EL2_ICC_IGRPENn_EL1)) + (__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGWTR_EL2_ICC_IGRPENn_EL1)) return true; fallthrough; diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 731a0378ed132..e482181c66322 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -48,102 +48,56 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector); static u64 __compute_hcr(struct kvm_vcpu *vcpu) { - u64 hcr = vcpu->arch.hcr_el2; + u64 guest_hcr, hcr = vcpu->arch.hcr_el2; if (!vcpu_has_nv(vcpu)) return hcr; + /* + * We rely on the invariant that a vcpu entered from HYP + * context must also exit in the same context, as only an ERET + * instruction can kick us out of it, and we obviously trap + * that sucker. PSTATE.M will get fixed-up on exit. + */ if (is_hyp_ctxt(vcpu)) { + host_data_set_flag(VCPU_IN_HYP_CONTEXT); + hcr |= HCR_NV | HCR_NV2 | HCR_AT | HCR_TTLB; if (!vcpu_el2_e2h_is_set(vcpu)) hcr |= HCR_NV1; - write_sysreg_s(vcpu->arch.ctxt.vncr_array, SYS_VNCR_EL2); - } - - return hcr | (__vcpu_sys_reg(vcpu, HCR_EL2) & ~NV_HCR_GUEST_EXCLUDE); -} - -static void __activate_cptr_traps(struct kvm_vcpu *vcpu) -{ - u64 cptr; - - /* - * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to - * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2, - * except for some missing controls, such as TAM. - * In this case, CPTR_EL2.TAM has the same position with or without - * VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM - * shift value for trapping the AMU accesses. - */ - u64 val = CPACR_EL1_TTA | CPTR_EL2_TAM; + /* + * Nothing in HCR_EL2 should impact running in hypervisor + * context, apart from bits we have defined as RESx (E2H, + * HCD and co), or that cannot be set directly (the EXCLUDE + * bits). Given that we OR the guest's view with the host's, + * we can use the 0 value as the starting point, and only + * use the config-driven RES1 bits. + */ + guest_hcr = kvm_vcpu_apply_reg_masks(vcpu, HCR_EL2, 0); - if (guest_owns_fp_regs()) { - val |= CPACR_EL1_FPEN; - if (vcpu_has_sve(vcpu)) - val |= CPACR_EL1_ZEN; + write_sysreg_s(vcpu->arch.ctxt.vncr_array, SYS_VNCR_EL2); } else { - __activate_traps_fpsimd32(vcpu); - } - - if (!vcpu_has_nv(vcpu)) - goto write; - - /* - * The architecture is a bit crap (what a surprise): an EL2 guest - * writing to CPTR_EL2 via CPACR_EL1 can't set any of TCPAC or TTA, - * as they are RES0 in the guest's view. To work around it, trap the - * sucker using the very same bit it can't set... - */ - if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu)) - val |= CPTR_EL2_TCPAC; - - /* - * Layer the guest hypervisor's trap configuration on top of our own if - * we're in a nested context. - */ - if (is_hyp_ctxt(vcpu)) - goto write; - - cptr = vcpu_sanitised_cptr_el2(vcpu); - - /* - * Pay attention, there's some interesting detail here. - * - * The CPTR_EL2.xEN fields are 2 bits wide, although there are only two - * meaningful trap states when HCR_EL2.TGE = 0 (running a nested guest): - * - * - CPTR_EL2.xEN = x0, traps are enabled - * - CPTR_EL2.xEN = x1, traps are disabled - * - * In other words, bit[0] determines if guest accesses trap or not. In - * the interest of simplicity, clear the entire field if the guest - * hypervisor has traps enabled to dispel any illusion of something more - * complicated taking place. 
- */ - if (!(SYS_FIELD_GET(CPACR_EL1, FPEN, cptr) & BIT(0))) - val &= ~CPACR_EL1_FPEN; - if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0))) - val &= ~CPACR_EL1_ZEN; + host_data_clear_flag(VCPU_IN_HYP_CONTEXT); - if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP)) - val |= cptr & CPACR_EL1_E0POE; + guest_hcr = __vcpu_sys_reg(vcpu, HCR_EL2); + if (guest_hcr & HCR_NV) { + u64 va = __fix_to_virt(vncr_fixmap(smp_processor_id())); - val |= cptr & CPTR_EL2_TCPAC; - -write: - write_sysreg(val, cpacr_el1); -} + /* Inherit the low bits from the actual register */ + va |= __vcpu_sys_reg(vcpu, VNCR_EL2) & GENMASK(PAGE_SHIFT - 1, 0); + write_sysreg_s(va, SYS_VNCR_EL2); -static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu) -{ - u64 val = CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN; + /* Force NV2 in case the guest is forgetful... */ + guest_hcr |= HCR_NV2; + } + } - if (cpus_have_final_cap(ARM64_SME)) - val |= CPACR_EL1_SMEN_EL1EN; + BUG_ON(host_data_test_flag(VCPU_IN_HYP_CONTEXT) && + host_data_test_flag(L1_VNCR_MAPPED)); - write_sysreg(val, cpacr_el1); + return hcr | (guest_hcr & ~NV_HCR_GUEST_EXCLUDE); } static void __activate_traps(struct kvm_vcpu *vcpu) @@ -184,7 +138,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) ___deactivate_traps(vcpu); - write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); + write_sysreg_hcr(HCR_HOST_VHE_FLAGS); if (has_cntpoff()) { struct timer_map map; @@ -198,9 +152,9 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) */ val = read_sysreg_el0(SYS_CNTP_CVAL); if (map.direct_ptimer == vcpu_ptimer(vcpu)) - __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val; + __vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, val); if (map.direct_ptimer == vcpu_hptimer(vcpu)) - __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val; + __vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, val); offset = read_sysreg_s(SYS_CNTPOFF_EL2); @@ -459,6 +413,14 @@ static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code) if (ret) return false; + /* + * If we have to check for any VNCR mapping being invalidated, + * go back to the slow path for further processing. + */ + if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu) && + atomic_read(&vcpu->kvm->arch.vncr_map_count)) + return false; + __kvm_skip_instr(vcpu); return true; @@ -568,9 +530,12 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) /* * If we were in HYP context on entry, adjust the PSTATE view - * so that the usual helpers work correctly. + * so that the usual helpers work correctly. This enforces our + * invariant that the guest's HYP context status is preserved + * across a run. */ - if (vcpu_has_nv(vcpu) && (read_sysreg(hcr_el2) & HCR_NV)) { + if (vcpu_has_nv(vcpu) && + unlikely(host_data_test_flag(VCPU_IN_HYP_CONTEXT))) { u64 mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT); switch (mode) { @@ -586,6 +551,10 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) *vcpu_cpsr(vcpu) |= mode; } + /* Apply extreme paranoia! 
*/ + BUG_ON(vcpu_has_nv(vcpu) && + !!host_data_test_flag(VCPU_IN_HYP_CONTEXT) != is_hyp_ctxt(vcpu)); + return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers); } @@ -599,10 +568,10 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) host_ctxt = host_data_ptr(host_ctxt); guest_ctxt = &vcpu->arch.ctxt; - sysreg_save_host_state_vhe(host_ctxt); - fpsimd_lazy_switch_to_guest(vcpu); + sysreg_save_host_state_vhe(host_ctxt); + /* * Note that ARM erratum 1165522 requires us to configure both stage 1 * and stage 2 translation for the guest context before we clear @@ -627,15 +596,23 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) __deactivate_traps(vcpu); - fpsimd_lazy_switch_to_host(vcpu); - sysreg_restore_host_state_vhe(host_ctxt); + __debug_switch_to_host(vcpu); + + /* + * Ensure that all system register writes above have taken effect + * before returning to the host. In VHE mode, CPTR traps for + * FPSIMD/SVE/SME also apply to EL2, so FPSIMD/SVE/SME state must be + * manipulated after the ISB. + */ + isb(); + + fpsimd_lazy_switch_to_host(vcpu); + if (guest_owns_fp_regs()) __fpsimd_save_fpexc32(vcpu); - __debug_switch_to_host(vcpu); - return exit_code; } NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe); @@ -665,12 +642,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) */ local_daif_restore(DAIF_PROCCTX_NOIRQ); - /* - * When we exit from the guest we change a number of CPU configuration - * parameters, such as traps. We rely on the isb() in kvm_call_hyp*() - * to make sure these changes take effect before running the host or - * additional guests. - */ return ret; } diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c index 3814b0b2c937f..f28c6cf4fe1be 100644 --- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c @@ -18,17 +18,17 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu) { /* These registers are common with EL1 */ - __vcpu_sys_reg(vcpu, PAR_EL1) = read_sysreg(par_el1); - __vcpu_sys_reg(vcpu, TPIDR_EL1) = read_sysreg(tpidr_el1); - - __vcpu_sys_reg(vcpu, ESR_EL2) = read_sysreg_el1(SYS_ESR); - __vcpu_sys_reg(vcpu, AFSR0_EL2) = read_sysreg_el1(SYS_AFSR0); - __vcpu_sys_reg(vcpu, AFSR1_EL2) = read_sysreg_el1(SYS_AFSR1); - __vcpu_sys_reg(vcpu, FAR_EL2) = read_sysreg_el1(SYS_FAR); - __vcpu_sys_reg(vcpu, MAIR_EL2) = read_sysreg_el1(SYS_MAIR); - __vcpu_sys_reg(vcpu, VBAR_EL2) = read_sysreg_el1(SYS_VBAR); - __vcpu_sys_reg(vcpu, CONTEXTIDR_EL2) = read_sysreg_el1(SYS_CONTEXTIDR); - __vcpu_sys_reg(vcpu, AMAIR_EL2) = read_sysreg_el1(SYS_AMAIR); + __vcpu_assign_sys_reg(vcpu, PAR_EL1, read_sysreg(par_el1)); + __vcpu_assign_sys_reg(vcpu, TPIDR_EL1, read_sysreg(tpidr_el1)); + + __vcpu_assign_sys_reg(vcpu, ESR_EL2, read_sysreg_el1(SYS_ESR)); + __vcpu_assign_sys_reg(vcpu, AFSR0_EL2, read_sysreg_el1(SYS_AFSR0)); + __vcpu_assign_sys_reg(vcpu, AFSR1_EL2, read_sysreg_el1(SYS_AFSR1)); + __vcpu_assign_sys_reg(vcpu, FAR_EL2, read_sysreg_el1(SYS_FAR)); + __vcpu_assign_sys_reg(vcpu, MAIR_EL2, read_sysreg_el1(SYS_MAIR)); + __vcpu_assign_sys_reg(vcpu, VBAR_EL2, read_sysreg_el1(SYS_VBAR)); + __vcpu_assign_sys_reg(vcpu, CONTEXTIDR_EL2, read_sysreg_el1(SYS_CONTEXTIDR)); + __vcpu_assign_sys_reg(vcpu, AMAIR_EL2, read_sysreg_el1(SYS_AMAIR)); /* * In VHE mode those registers are compatible between EL1 and EL2, @@ -46,21 +46,21 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu) * are always trapped, ensuring that the in-memory * copy is always up-to-date. A small blessing... 
*/ - __vcpu_sys_reg(vcpu, SCTLR_EL2) = read_sysreg_el1(SYS_SCTLR); - __vcpu_sys_reg(vcpu, TTBR0_EL2) = read_sysreg_el1(SYS_TTBR0); - __vcpu_sys_reg(vcpu, TTBR1_EL2) = read_sysreg_el1(SYS_TTBR1); - __vcpu_sys_reg(vcpu, TCR_EL2) = read_sysreg_el1(SYS_TCR); + __vcpu_assign_sys_reg(vcpu, SCTLR_EL2, read_sysreg_el1(SYS_SCTLR)); + __vcpu_assign_sys_reg(vcpu, TTBR0_EL2, read_sysreg_el1(SYS_TTBR0)); + __vcpu_assign_sys_reg(vcpu, TTBR1_EL2, read_sysreg_el1(SYS_TTBR1)); + __vcpu_assign_sys_reg(vcpu, TCR_EL2, read_sysreg_el1(SYS_TCR)); if (ctxt_has_tcrx(&vcpu->arch.ctxt)) { - __vcpu_sys_reg(vcpu, TCR2_EL2) = read_sysreg_el1(SYS_TCR2); + __vcpu_assign_sys_reg(vcpu, TCR2_EL2, read_sysreg_el1(SYS_TCR2)); if (ctxt_has_s1pie(&vcpu->arch.ctxt)) { - __vcpu_sys_reg(vcpu, PIRE0_EL2) = read_sysreg_el1(SYS_PIRE0); - __vcpu_sys_reg(vcpu, PIR_EL2) = read_sysreg_el1(SYS_PIR); + __vcpu_assign_sys_reg(vcpu, PIRE0_EL2, read_sysreg_el1(SYS_PIRE0)); + __vcpu_assign_sys_reg(vcpu, PIR_EL2, read_sysreg_el1(SYS_PIR)); } if (ctxt_has_s1poe(&vcpu->arch.ctxt)) - __vcpu_sys_reg(vcpu, POR_EL2) = read_sysreg_el1(SYS_POR); + __vcpu_assign_sys_reg(vcpu, POR_EL2, read_sysreg_el1(SYS_POR)); } /* @@ -70,13 +70,16 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu) */ val = read_sysreg_el1(SYS_CNTKCTL); val &= CNTKCTL_VALID_BITS; - __vcpu_sys_reg(vcpu, CNTHCTL_EL2) &= ~CNTKCTL_VALID_BITS; - __vcpu_sys_reg(vcpu, CNTHCTL_EL2) |= val; + __vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, &=, ~CNTKCTL_VALID_BITS); + __vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, |=, val); } - __vcpu_sys_reg(vcpu, SP_EL2) = read_sysreg(sp_el1); - __vcpu_sys_reg(vcpu, ELR_EL2) = read_sysreg_el1(SYS_ELR); - __vcpu_sys_reg(vcpu, SPSR_EL2) = read_sysreg_el1(SYS_SPSR); + __vcpu_assign_sys_reg(vcpu, SP_EL2, read_sysreg(sp_el1)); + __vcpu_assign_sys_reg(vcpu, ELR_EL2, read_sysreg_el1(SYS_ELR)); + __vcpu_assign_sys_reg(vcpu, SPSR_EL2, read_sysreg_el1(SYS_SPSR)); + + if (ctxt_has_sctlr2(&vcpu->arch.ctxt)) + __vcpu_assign_sys_reg(vcpu, SCTLR2_EL2, read_sysreg_el1(SYS_SCTLR2)); } static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu) @@ -139,6 +142,9 @@ static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu) write_sysreg(__vcpu_sys_reg(vcpu, SP_EL2), sp_el1); write_sysreg_el1(__vcpu_sys_reg(vcpu, ELR_EL2), SYS_ELR); write_sysreg_el1(__vcpu_sys_reg(vcpu, SPSR_EL2), SYS_SPSR); + + if (ctxt_has_sctlr2(&vcpu->arch.ctxt)) + write_sysreg_el1(__vcpu_sys_reg(vcpu, SCTLR2_EL2), SYS_SCTLR2); } /* diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c index 3d50a1bd2bdbc..ec25698186297 100644 --- a/arch/arm64/kvm/hyp/vhe/tlb.c +++ b/arch/arm64/kvm/hyp/vhe/tlb.c @@ -63,7 +63,7 @@ static void enter_vmid_context(struct kvm_s2_mmu *mmu, __load_stage2(mmu, mmu->arch); val = read_sysreg(hcr_el2); val &= ~HCR_TGE; - write_sysreg(val, hcr_el2); + write_sysreg_hcr(val); isb(); } @@ -73,7 +73,7 @@ static void exit_vmid_context(struct tlb_inv_context *cxt) * We're done with the TLB operation, let's restore the host's * view of HCR_EL2. */ - write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); + write_sysreg_hcr(HCR_HOST_VHE_FLAGS); isb(); /* ... 
and the stage-2 MMU context that we switched away from */ diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 569941eeb3fe9..58c5fe7d75727 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -270,6 +270,7 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu) u32 feature; u8 action; gpa_t gpa; + uuid_t uuid; action = kvm_smccc_get_action(vcpu, func_id); switch (action) { @@ -355,10 +356,11 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu) val[0] = gpa; break; case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID: - val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0; - val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1; - val[2] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2; - val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3; + uuid = ARM_SMCCC_VENDOR_HYP_UID_KVM; + val[0] = smccc_uuid_to_reg(&uuid, 0); + val[1] = smccc_uuid_to_reg(&uuid, 1); + val[2] = smccc_uuid_to_reg(&uuid, 2); + val[3] = smccc_uuid_to_reg(&uuid, 3); break; case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID: val[0] = smccc_feat->vendor_hyp_bmap; diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index a640e839848e6..6745f38b64f9c 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -15,13 +15,11 @@ #include <asm/kvm_nested.h> #include <asm/esr.h> -static void pend_sync_exception(struct kvm_vcpu *vcpu) +static unsigned int exception_target_el(struct kvm_vcpu *vcpu) { /* If not nesting, EL1 is the only possible exception target */ - if (likely(!vcpu_has_nv(vcpu))) { - kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC); - return; - } + if (likely(!vcpu_has_nv(vcpu))) + return PSR_MODE_EL1h; /* * With NV, we need to pick between EL1 and EL2. Note that we @@ -32,26 +30,76 @@ static void pend_sync_exception(struct kvm_vcpu *vcpu) switch(*vcpu_cpsr(vcpu) & PSR_MODE_MASK) { case PSR_MODE_EL2h: case PSR_MODE_EL2t: - kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC); - break; + return PSR_MODE_EL2h; case PSR_MODE_EL1h: case PSR_MODE_EL1t: - kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC); - break; + return PSR_MODE_EL1h; case PSR_MODE_EL0t: - if (vcpu_el2_tge_is_set(vcpu)) - kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC); - else - kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC); - break; + return vcpu_el2_tge_is_set(vcpu) ? 
PSR_MODE_EL2h : PSR_MODE_EL1h; default: BUG(); } } -static bool match_target_el(struct kvm_vcpu *vcpu, unsigned long target) +static enum vcpu_sysreg exception_esr_elx(struct kvm_vcpu *vcpu) +{ + if (exception_target_el(vcpu) == PSR_MODE_EL2h) + return ESR_EL2; + + return ESR_EL1; +} + +static enum vcpu_sysreg exception_far_elx(struct kvm_vcpu *vcpu) +{ + if (exception_target_el(vcpu) == PSR_MODE_EL2h) + return FAR_EL2; + + return FAR_EL1; +} + +static void pend_sync_exception(struct kvm_vcpu *vcpu) +{ + if (exception_target_el(vcpu) == PSR_MODE_EL1h) + kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC); + else + kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC); +} + +static void pend_serror_exception(struct kvm_vcpu *vcpu) { - return (vcpu_get_flag(vcpu, EXCEPT_MASK) == target); + if (exception_target_el(vcpu) == PSR_MODE_EL1h) + kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SERR); + else + kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SERR); +} + +static bool __effective_sctlr2_bit(struct kvm_vcpu *vcpu, unsigned int idx) +{ + u64 sctlr2; + + if (!kvm_has_sctlr2(vcpu->kvm)) + return false; + + if (is_nested_ctxt(vcpu) && + !(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_SCTLR2En)) + return false; + + if (exception_target_el(vcpu) == PSR_MODE_EL1h) + sctlr2 = vcpu_read_sys_reg(vcpu, SCTLR2_EL1); + else + sctlr2 = vcpu_read_sys_reg(vcpu, SCTLR2_EL2); + + return sctlr2 & BIT(idx); +} + +static bool effective_sctlr2_ease(struct kvm_vcpu *vcpu) +{ + return __effective_sctlr2_bit(vcpu, SCTLR2_EL1_EASE_SHIFT); +} + +static bool effective_sctlr2_nmea(struct kvm_vcpu *vcpu) +{ + return __effective_sctlr2_bit(vcpu, SCTLR2_EL1_NMEA_SHIFT); } static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr) @@ -60,7 +108,11 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr bool is_aarch32 = vcpu_mode_is_32bit(vcpu); u64 esr = 0; - pend_sync_exception(vcpu); + /* This delight is brought to you by FEAT_DoubleFault2. */ + if (effective_sctlr2_ease(vcpu)) + pend_serror_exception(vcpu); + else + pend_sync_exception(vcpu); /* * Build an {i,d}abort, depending on the level and the @@ -83,13 +135,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr esr |= ESR_ELx_FSC_EXTABT; - if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC))) { - vcpu_write_sys_reg(vcpu, addr, FAR_EL1); - vcpu_write_sys_reg(vcpu, esr, ESR_EL1); - } else { - vcpu_write_sys_reg(vcpu, addr, FAR_EL2); - vcpu_write_sys_reg(vcpu, esr, ESR_EL2); - } + vcpu_write_sys_reg(vcpu, addr, exception_far_elx(vcpu)); + vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu)); } static void inject_undef64(struct kvm_vcpu *vcpu) @@ -105,10 +152,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu) if (kvm_vcpu_trap_il_is32bit(vcpu)) esr |= ESR_ELx_IL; - if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC))) - vcpu_write_sys_reg(vcpu, esr, ESR_EL1); - else - vcpu_write_sys_reg(vcpu, esr, ESR_EL2); + vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu)); } #define DFSR_FSC_EXTABT_LPAE 0x10 @@ -155,36 +199,35 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr) vcpu_write_sys_reg(vcpu, far, FAR_EL1); } -/** - * kvm_inject_dabt - inject a data abort into the guest - * @vcpu: The VCPU to receive the data abort - * @addr: The address to report in the DFAR - * - * It is assumed that this code is called from the VCPU thread and that the - * VCPU therefore is not currently executing guest code. 
- */ -void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) +static void __kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr) { if (vcpu_el1_is_32bit(vcpu)) - inject_abt32(vcpu, false, addr); + inject_abt32(vcpu, iabt, addr); else - inject_abt64(vcpu, false, addr); + inject_abt64(vcpu, iabt, addr); } -/** - * kvm_inject_pabt - inject a prefetch abort into the guest - * @vcpu: The VCPU to receive the prefetch abort - * @addr: The address to report in the DFAR - * - * It is assumed that this code is called from the VCPU thread and that the - * VCPU therefore is not currently executing guest code. - */ -void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) +static bool kvm_sea_target_is_el2(struct kvm_vcpu *vcpu) { - if (vcpu_el1_is_32bit(vcpu)) - inject_abt32(vcpu, true, addr); - else - inject_abt64(vcpu, true, addr); + if (__vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA)) + return true; + + if (!vcpu_mode_priv(vcpu)) + return false; + + return (*vcpu_cpsr(vcpu) & PSR_A_BIT) && + (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA); +} + +int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr) +{ + lockdep_assert_held(&vcpu->mutex); + + if (is_nested_ctxt(vcpu) && kvm_sea_target_is_el2(vcpu)) + return kvm_inject_nested_sea(vcpu, iabt, addr); + + __kvm_inject_sea(vcpu, iabt, addr); + return 1; } void kvm_inject_size_fault(struct kvm_vcpu *vcpu) @@ -194,10 +237,7 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu) addr = kvm_vcpu_get_fault_ipa(vcpu); addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0); - if (kvm_vcpu_trap_is_iabt(vcpu)) - kvm_inject_pabt(vcpu, addr); - else - kvm_inject_dabt(vcpu, addr); + __kvm_inject_sea(vcpu, kvm_vcpu_trap_is_iabt(vcpu), addr); /* * If AArch64 or LPAE, set FSC to 0 to indicate an Address @@ -210,9 +250,9 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu) !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE)) return; - esr = vcpu_read_sys_reg(vcpu, ESR_EL1); + esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu)); esr &= ~GENMASK_ULL(5, 0); - vcpu_write_sys_reg(vcpu, esr, ESR_EL1); + vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu)); } /** @@ -230,25 +270,70 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu) inject_undef64(vcpu); } -void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr) +static bool serror_is_masked(struct kvm_vcpu *vcpu) { - vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK); - *vcpu_hcr(vcpu) |= HCR_VSE; + return (*vcpu_cpsr(vcpu) & PSR_A_BIT) && !effective_sctlr2_nmea(vcpu); } -/** - * kvm_inject_vabt - inject an async abort / SError into the guest - * @vcpu: The VCPU to receive the exception - * - * It is assumed that this code is called from the VCPU thread and that the - * VCPU therefore is not currently executing guest code. - * - * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with - * the remaining ISS all-zeros so that this error is not interpreted as an - * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR - * value, so the CPU generates an imp-def value. - */ -void kvm_inject_vabt(struct kvm_vcpu *vcpu) +static bool kvm_serror_target_is_el2(struct kvm_vcpu *vcpu) +{ + if (is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu)) + return true; + + if (!(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA)) + return false; + + /* + * In another example where FEAT_DoubleFault2 is entirely backwards, + * "masked" as it relates to the routing effects of HCRX_EL2.TMEA + * doesn't consider SCTLR2_EL1.NMEA. 
That is to say, even if EL1 asked + * for non-maskable SErrors, the EL2 bit takes priority if A is set. + */ + if (vcpu_mode_priv(vcpu)) + return *vcpu_cpsr(vcpu) & PSR_A_BIT; + + /* + * Otherwise SErrors are considered unmasked when taken from EL0 and + * NMEA is set. + */ + return serror_is_masked(vcpu); +} + +static bool kvm_serror_undeliverable_at_el2(struct kvm_vcpu *vcpu) +{ + return !(vcpu_el2_tge_is_set(vcpu) || vcpu_el2_amo_is_set(vcpu)); +} + +int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr) { - kvm_set_sei_esr(vcpu, esr & ESR_ELx_ISS_MASK); + lockdep_assert_held(&vcpu->mutex); + + if (is_nested_ctxt(vcpu) && kvm_serror_target_is_el2(vcpu)) + return kvm_inject_nested_serror(vcpu, esr); + + if (vcpu_is_el2(vcpu) && kvm_serror_undeliverable_at_el2(vcpu)) { + vcpu_set_vsesr(vcpu, esr); + vcpu_set_flag(vcpu, NESTED_SERROR_PENDING); + return 1; + } + + /* + * Emulate the exception entry if SErrors are unmasked. This is useful if + * the vCPU is in a nested context w/ vSErrors enabled, since then we've already + * delegated the hardware vSError context (i.e. HCR_EL2.VSE, VSESR_EL2, + * VDISR_EL2) to the guest hypervisor. + * + * As we're emulating the SError injection we need to explicitly populate + * ESR_ELx.EC because hardware will not do it on our behalf. + */ + if (!serror_is_masked(vcpu)) { + pend_serror_exception(vcpu); + esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SERROR); + vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu)); + return 1; + } + + vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK); + *vcpu_hcr(vcpu) |= HCR_VSE; + return 1; } diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c index ab365e839874e..54f9358c9e0e8 100644 --- a/arch/arm64/kvm/mmio.c +++ b/arch/arm64/kvm/mmio.c @@ -72,7 +72,7 @@ unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len) return data; } -static bool kvm_pending_sync_exception(struct kvm_vcpu *vcpu) +static bool kvm_pending_external_abort(struct kvm_vcpu *vcpu) { if (!vcpu_get_flag(vcpu, PENDING_EXCEPTION)) return false; @@ -90,6 +90,8 @@ static bool kvm_pending_sync_exception(struct kvm_vcpu *vcpu) switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) { case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC): case unpack_vcpu_flag(EXCEPT_AA64_EL2_SYNC): + case unpack_vcpu_flag(EXCEPT_AA64_EL1_SERR): + case unpack_vcpu_flag(EXCEPT_AA64_EL2_SERR): return true; default: return false; @@ -113,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu) * Detect if the MMIO return was already handled or if userspace aborted * the MMIO access.
*/ - if (unlikely(!vcpu->mmio_needed || kvm_pending_sync_exception(vcpu))) + if (unlikely(!vcpu->mmio_needed || kvm_pending_external_abort(vcpu))) return 1; vcpu->mmio_needed = 0; @@ -169,10 +171,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) trace_kvm_mmio_nisv(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu), kvm_vcpu_get_hfar(vcpu), fault_ipa); - if (vcpu_is_protected(vcpu)) { - kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); - return 1; - } + if (vcpu_is_protected(vcpu)) + return kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER, &vcpu->kvm->arch.flags)) { diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index eeda92330ade7..1c78864767c5c 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -193,11 +193,6 @@ int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, return 0; } -static bool kvm_is_device_pfn(unsigned long pfn) -{ - return !pfn_is_map_memory(pfn); -} - static void *stage2_memcache_zalloc_page(void *arg) { struct kvm_mmu_memory_cache *mc = arg; @@ -1304,6 +1299,10 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, if (map_size == PAGE_SIZE) return true; + /* pKVM only supports PMD_SIZE huge-mappings */ + if (is_protected_kvm_enabled() && map_size != PMD_SIZE) + return false; + size = memslot->npages * PAGE_SIZE; gpa_start = memslot->base_gfn << PAGE_SHIFT; @@ -1466,6 +1465,18 @@ static bool kvm_vma_mte_allowed(struct vm_area_struct *vma) return vma->vm_flags & VM_MTE_ALLOWED; } +static bool kvm_vma_is_cacheable(struct vm_area_struct *vma) +{ + switch (FIELD_GET(PTE_ATTRINDX_MASK, pgprot_val(vma->vm_page_prot))) { + case MT_NORMAL_NC: + case MT_DEVICE_nGnRnE: + case MT_DEVICE_nGnRE: + return false; + default: + return true; + } +} + static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_s2_trans *nested, struct kvm_memory_slot *memslot, unsigned long hva, @@ -1473,8 +1484,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, { int ret = 0; bool write_fault, writable, force_pte = false; - bool exec_fault, mte_allowed; - bool device = false, vfio_allow_any_uc = false; + bool exec_fault, mte_allowed, is_vma_cacheable; + bool s2_force_noncacheable = false, vfio_allow_any_uc = false; unsigned long mmu_seq; phys_addr_t ipa = fault_ipa; struct kvm *kvm = vcpu->kvm; @@ -1488,6 +1499,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R; struct kvm_pgtable *pgt; struct page *page; + vm_flags_t vm_flags; enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED; if (fault_is_perm) @@ -1540,7 +1552,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * logging_active is guaranteed to never be true for VM_PFNMAP * memslots. 
*/ - if (logging_active || is_protected_kvm_enabled()) { + if (logging_active) { force_pte = true; vma_shift = PAGE_SHIFT; } else { @@ -1615,6 +1627,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED; + vm_flags = vma->vm_flags; + + is_vma_cacheable = kvm_vma_is_cacheable(vma); + /* Don't use the VMA after the unlock -- it may have vanished */ vma = NULL; @@ -1638,18 +1654,39 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (is_error_noslot_pfn(pfn)) return -EFAULT; - if (kvm_is_device_pfn(pfn)) { - /* - * If the page was identified as device early by looking at - * the VMA flags, vma_pagesize is already representing the - * largest quantity we can map. If instead it was mapped - * via __kvm_faultin_pfn(), vma_pagesize is set to PAGE_SIZE - * and must not be upgraded. - * - * In both cases, we don't let transparent_hugepage_adjust() - * change things at the last minute. - */ - device = true; + /* + * Check if this is a non-struct page memory PFN that cannot support + * CMOs. It could potentially be unsafe to access as cacheable. + */ + if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(pfn)) { + if (is_vma_cacheable) { + /* + * Whilst the VMA owner expects cacheable mapping to this + * PFN, hardware also has to support the FWB and CACHE DIC + * features. + * + * ARM64 KVM relies on kernel VA mapping to the PFN to + * perform cache maintenance as the CMO instructions work on + * virtual addresses. VM_PFNMAP regions are not necessarily + * mapped to a KVA and hence the presence of hardware features + * S2FWB and CACHE DIC is mandatory to avoid the need for + * cache maintenance. + */ + if (!kvm_supports_cacheable_pfnmap()) + return -EFAULT; + } else { + /* + * If the page was identified as device early by looking at + * the VMA flags, vma_pagesize is already representing the + * largest quantity we can map. If instead it was mapped + * via __kvm_faultin_pfn(), vma_pagesize is set to PAGE_SIZE + * and must not be upgraded. + * + * In both cases, we don't let transparent_hugepage_adjust() + * change things at the last minute. + */ + s2_force_noncacheable = true; + } } else if (logging_active && !write_fault) { /* * Only actually map the page as writable if this was a write @@ -1658,7 +1695,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, writable = false; } - if (exec_fault && device) + if (exec_fault && s2_force_noncacheable) return -ENOEXEC; /* @@ -1691,7 +1728,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * If we are not forced to use page mapping, check if we are * backed by a THP and thus use block mapping if possible.
*/ - if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) { + if (vma_pagesize == PAGE_SIZE && !(force_pte || s2_force_noncacheable)) { if (fault_is_perm && fault_granule > PAGE_SIZE) vma_pagesize = fault_granule; else @@ -1705,7 +1742,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, } } - if (!fault_is_perm && !device && kvm_has_mte(kvm)) { + if (!fault_is_perm && !s2_force_noncacheable && kvm_has_mte(kvm)) { /* Check the VMM hasn't introduced a new disallowed VMA */ if (mte_allowed) { sanitise_mte_tags(kvm, pfn, vma_pagesize); @@ -1721,7 +1758,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (exec_fault) prot |= KVM_PGTABLE_PROT_X; - if (device) { + if (s2_force_noncacheable) { if (vfio_allow_any_uc) prot |= KVM_PGTABLE_PROT_NORMAL_NC; else @@ -1804,7 +1841,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) * There is no need to pass the error into the guest. */ if (kvm_handle_guest_sea()) - kvm_inject_vabt(vcpu); + return kvm_inject_serror(vcpu); return 1; } @@ -1832,11 +1869,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) if (fault_ipa >= BIT_ULL(VTCR_EL2_IPA(vcpu->arch.hw_mmu->vtcr))) { fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0); - if (is_iabt) - kvm_inject_pabt(vcpu, fault_ipa); - else - kvm_inject_dabt(vcpu, fault_ipa); - return 1; + return kvm_inject_sea(vcpu, is_iabt, fault_ipa); } } @@ -1908,8 +1941,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) } if (kvm_vcpu_abt_iss1tw(vcpu)) { - kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); - ret = 1; + ret = kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); goto out_unlock; } @@ -1954,10 +1986,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) if (ret == 0) ret = 1; out: - if (ret == -ENOEXEC) { - kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); - ret = 1; - } + if (ret == -ENOEXEC) + ret = kvm_inject_sea_iabt(vcpu, kvm_vcpu_get_hfar(vcpu)); out_unlock: srcu_read_unlock(&vcpu->kvm->srcu, idx); return ret; @@ -2217,6 +2247,15 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, ret = -EINVAL; break; } + + /* + * Cacheable PFNMAP is allowed only if the hardware + * supports it. + */ + if (kvm_vma_is_cacheable(vma) && !kvm_supports_cacheable_pfnmap()) { + ret = -EINVAL; + break; + } } hva = min(reg_end, vma->vm_end); } while (hva < reg_end); diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index 4a3fc11f7ecf3..153b3e11b115d 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -8,6 +8,7 @@ #include <linux/kvm.h> #include <linux/kvm_host.h> +#include <asm/fixmap.h> #include <asm/kvm_arm.h> #include <asm/kvm_emulate.h> #include <asm/kvm_mmu.h> @@ -16,6 +17,24 @@ #include "sys_regs.h" +struct vncr_tlb { + /* The guest's VNCR_EL2 */ + u64 gva; + struct s1_walk_info wi; + struct s1_walk_result wr; + + u64 hpa; + + /* -1 when not mapped on a CPU */ + int cpu; + + /* + * true if the TLB is valid. Can only be changed with the + * mmu_lock held. + */ + bool valid; +}; + /* * Ratio of live shadow S2 MMU per vcpu. 
This is a trade-off between * memory usage and potential number of different sets of S2 PTs in @@ -28,6 +47,7 @@ void kvm_init_nested(struct kvm *kvm) { kvm->arch.nested_mmus = NULL; kvm->arch.nested_mmus_size = 0; + atomic_set(&kvm->arch.vncr_map_count, 0); } static int init_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) @@ -55,6 +75,13 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu) !cpus_have_final_cap(ARM64_HAS_HCR_NV1)) return -EINVAL; + if (!vcpu->arch.ctxt.vncr_array) + vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL_ACCOUNT | + __GFP_ZERO); + + if (!vcpu->arch.ctxt.vncr_array) + return -ENOMEM; + /* * Let's treat memory allocation failures as benign: If we fail to * allocate anything, return an error and keep the allocated array @@ -85,6 +112,9 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu) for (int i = kvm->arch.nested_mmus_size; i < num_mmus; i++) kvm_free_stage2_pgd(&kvm->arch.nested_mmus[i]); + free_page((unsigned long)vcpu->arch.ctxt.vncr_array); + vcpu->arch.ctxt.vncr_array = NULL; + return ret; } @@ -405,6 +435,30 @@ static unsigned int ttl_to_size(u8 ttl) return max_size; } +static u8 pgshift_level_to_ttl(u16 shift, u8 level) +{ + u8 ttl; + + switch(shift) { + case 12: + ttl = TLBI_TTL_TG_4K; + break; + case 14: + ttl = TLBI_TTL_TG_16K; + break; + case 16: + ttl = TLBI_TTL_TG_64K; + break; + default: + BUG(); + } + + ttl <<= 2; + ttl |= level & 3; + + return ttl; +} + /* * Compute the equivalent of the TTL field by parsing the shadow PT. The * granule size is extracted from the cached VTCR_EL2.TG0 while the level is @@ -676,23 +730,36 @@ void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu) void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu) { /* - * The vCPU kept its reference on the MMU after the last put, keep - * rolling with it. + * If the vCPU kept its reference on the MMU after the last put, + * keep rolling with it. 
*/ - if (vcpu->arch.hw_mmu) - return; - if (is_hyp_ctxt(vcpu)) { - vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; + if (!vcpu->arch.hw_mmu) + vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; } else { - write_lock(&vcpu->kvm->mmu_lock); - vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu); - write_unlock(&vcpu->kvm->mmu_lock); + if (!vcpu->arch.hw_mmu) { + scoped_guard(write_lock, &vcpu->kvm->mmu_lock) + vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu); + } + + if (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_NV) + kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu); } } void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu) { + /* Unconditionally drop the VNCR mapping if we have one */ + if (host_data_test_flag(L1_VNCR_MAPPED)) { + BUG_ON(vcpu->arch.vncr_tlb->cpu != smp_processor_id()); + BUG_ON(is_hyp_ctxt(vcpu)); + + clear_fixmap(vncr_fixmap(vcpu->arch.vncr_tlb->cpu)); + vcpu->arch.vncr_tlb->cpu = -1; + host_data_clear_flag(L1_VNCR_MAPPED); + atomic_dec(&vcpu->kvm->arch.vncr_map_count); + } + /* * Keep a reference on the associated stage-2 MMU if the vCPU is * scheduling out and not in WFI emulation, suggesting it is likely to @@ -743,6 +810,247 @@ int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2) return kvm_inject_nested_sync(vcpu, esr_el2); } +static void invalidate_vncr(struct vncr_tlb *vt) +{ + vt->valid = false; + if (vt->cpu != -1) + clear_fixmap(vncr_fixmap(vt->cpu)); +} + +static void kvm_invalidate_vncr_ipa(struct kvm *kvm, u64 start, u64 end) +{ + struct kvm_vcpu *vcpu; + unsigned long i; + + lockdep_assert_held_write(&kvm->mmu_lock); + + if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY)) + return; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vncr_tlb *vt = vcpu->arch.vncr_tlb; + u64 ipa_start, ipa_end, ipa_size; + + /* + * Careful here: We end-up here from an MMU notifier, + * and this can race against a vcpu not being onlined + * yet, without the pseudo-TLB being allocated. + * + * Skip those, as they obviously don't participate in + * the invalidation at this stage. 
+ */ + if (!vt) + continue; + + if (!vt->valid) + continue; + + ipa_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift, + vt->wr.level)); + ipa_start = vt->wr.pa & (ipa_size - 1); + ipa_end = ipa_start + ipa_size; + + if (ipa_end <= start || ipa_start >= end) + continue; + + invalidate_vncr(vt); + } +} + +struct s1e2_tlbi_scope { + enum { + TLBI_ALL, + TLBI_VA, + TLBI_VAA, + TLBI_ASID, + } type; + + u16 asid; + u64 va; + u64 size; +}; + +static void invalidate_vncr_va(struct kvm *kvm, + struct s1e2_tlbi_scope *scope) +{ + struct kvm_vcpu *vcpu; + unsigned long i; + + lockdep_assert_held_write(&kvm->mmu_lock); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vncr_tlb *vt = vcpu->arch.vncr_tlb; + u64 va_start, va_end, va_size; + + if (!vt->valid) + continue; + + va_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift, + vt->wr.level)); + va_start = vt->gva & (va_size - 1); + va_end = va_start + va_size; + + switch (scope->type) { + case TLBI_ALL: + break; + + case TLBI_VA: + if (va_end <= scope->va || + va_start >= (scope->va + scope->size)) + continue; + if (vt->wr.nG && vt->wr.asid != scope->asid) + continue; + break; + + case TLBI_VAA: + if (va_end <= scope->va || + va_start >= (scope->va + scope->size)) + continue; + break; + + case TLBI_ASID: + if (!vt->wr.nG || vt->wr.asid != scope->asid) + continue; + break; + } + + invalidate_vncr(vt); + } +} + +#define tlbi_va_s1_to_va(v) (u64)sign_extend64((v) << 12, 48) + +static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val, + struct s1e2_tlbi_scope *scope) +{ + switch (inst) { + case OP_TLBI_ALLE2: + case OP_TLBI_ALLE2IS: + case OP_TLBI_ALLE2OS: + case OP_TLBI_VMALLE1: + case OP_TLBI_VMALLE1IS: + case OP_TLBI_VMALLE1OS: + case OP_TLBI_ALLE2NXS: + case OP_TLBI_ALLE2ISNXS: + case OP_TLBI_ALLE2OSNXS: + case OP_TLBI_VMALLE1NXS: + case OP_TLBI_VMALLE1ISNXS: + case OP_TLBI_VMALLE1OSNXS: + scope->type = TLBI_ALL; + break; + case OP_TLBI_VAE2: + case OP_TLBI_VAE2IS: + case OP_TLBI_VAE2OS: + case OP_TLBI_VAE1: + case OP_TLBI_VAE1IS: + case OP_TLBI_VAE1OS: + case OP_TLBI_VAE2NXS: + case OP_TLBI_VAE2ISNXS: + case OP_TLBI_VAE2OSNXS: + case OP_TLBI_VAE1NXS: + case OP_TLBI_VAE1ISNXS: + case OP_TLBI_VAE1OSNXS: + case OP_TLBI_VALE2: + case OP_TLBI_VALE2IS: + case OP_TLBI_VALE2OS: + case OP_TLBI_VALE1: + case OP_TLBI_VALE1IS: + case OP_TLBI_VALE1OS: + case OP_TLBI_VALE2NXS: + case OP_TLBI_VALE2ISNXS: + case OP_TLBI_VALE2OSNXS: + case OP_TLBI_VALE1NXS: + case OP_TLBI_VALE1ISNXS: + case OP_TLBI_VALE1OSNXS: + scope->type = TLBI_VA; + scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val)); + if (!scope->size) + scope->size = SZ_1G; + scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1); + scope->asid = FIELD_GET(TLBIR_ASID_MASK, val); + break; + case OP_TLBI_ASIDE1: + case OP_TLBI_ASIDE1IS: + case OP_TLBI_ASIDE1OS: + case OP_TLBI_ASIDE1NXS: + case OP_TLBI_ASIDE1ISNXS: + case OP_TLBI_ASIDE1OSNXS: + scope->type = TLBI_ASID; + scope->asid = FIELD_GET(TLBIR_ASID_MASK, val); + break; + case OP_TLBI_VAAE1: + case OP_TLBI_VAAE1IS: + case OP_TLBI_VAAE1OS: + case OP_TLBI_VAAE1NXS: + case OP_TLBI_VAAE1ISNXS: + case OP_TLBI_VAAE1OSNXS: + case OP_TLBI_VAALE1: + case OP_TLBI_VAALE1IS: + case OP_TLBI_VAALE1OS: + case OP_TLBI_VAALE1NXS: + case OP_TLBI_VAALE1ISNXS: + case OP_TLBI_VAALE1OSNXS: + scope->type = TLBI_VAA; + scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val)); + if (!scope->size) + scope->size = SZ_1G; + scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1); + break; + case OP_TLBI_RVAE2: + case OP_TLBI_RVAE2IS: + case 
OP_TLBI_RVAE2OS: + case OP_TLBI_RVAE1: + case OP_TLBI_RVAE1IS: + case OP_TLBI_RVAE1OS: + case OP_TLBI_RVAE2NXS: + case OP_TLBI_RVAE2ISNXS: + case OP_TLBI_RVAE2OSNXS: + case OP_TLBI_RVAE1NXS: + case OP_TLBI_RVAE1ISNXS: + case OP_TLBI_RVAE1OSNXS: + case OP_TLBI_RVALE2: + case OP_TLBI_RVALE2IS: + case OP_TLBI_RVALE2OS: + case OP_TLBI_RVALE1: + case OP_TLBI_RVALE1IS: + case OP_TLBI_RVALE1OS: + case OP_TLBI_RVALE2NXS: + case OP_TLBI_RVALE2ISNXS: + case OP_TLBI_RVALE2OSNXS: + case OP_TLBI_RVALE1NXS: + case OP_TLBI_RVALE1ISNXS: + case OP_TLBI_RVALE1OSNXS: + scope->type = TLBI_VA; + scope->va = decode_range_tlbi(val, &scope->size, &scope->asid); + break; + case OP_TLBI_RVAAE1: + case OP_TLBI_RVAAE1IS: + case OP_TLBI_RVAAE1OS: + case OP_TLBI_RVAAE1NXS: + case OP_TLBI_RVAAE1ISNXS: + case OP_TLBI_RVAAE1OSNXS: + case OP_TLBI_RVAALE1: + case OP_TLBI_RVAALE1IS: + case OP_TLBI_RVAALE1OS: + case OP_TLBI_RVAALE1NXS: + case OP_TLBI_RVAALE1ISNXS: + case OP_TLBI_RVAALE1OSNXS: + scope->type = TLBI_VAA; + scope->va = decode_range_tlbi(val, &scope->size, NULL); + break; + } +} + +void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val) +{ + struct s1e2_tlbi_scope scope = {}; + + compute_s1_tlbi_range(vcpu, inst, val, &scope); + + guard(write_lock)(&vcpu->kvm->mmu_lock); + invalidate_vncr_va(vcpu->kvm, &scope); +} + void kvm_nested_s2_wp(struct kvm *kvm) { int i; @@ -755,6 +1063,8 @@ void kvm_nested_s2_wp(struct kvm *kvm) if (kvm_s2_mmu_valid(mmu)) kvm_stage2_wp_range(mmu, 0, kvm_phys_size(mmu)); } + + kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits)); } void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block) @@ -769,6 +1079,8 @@ void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block) if (kvm_s2_mmu_valid(mmu)) kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), may_block); } + + kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits)); } void kvm_nested_s2_flush(struct kvm *kvm) @@ -802,6 +1114,310 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm) } /* + * Dealing with VNCR_EL2 exposed by the *guest* is a complicated matter: + * + * - We introduce an internal representation of a vcpu-private TLB, + * representing the mapping between the guest VA contained in VNCR_EL2, + * the IPA the guest's EL2 PTs point to, and the actual PA this lives at. + * + * - On translation fault from a nested VNCR access, we create such a TLB. + * If there is no mapping to describe, the guest inherits the fault. + * Crucially, no actual mapping is done at this stage. + * + * - On vcpu_load() in a non-HYP context with HCR_EL2.NV==1, if the above + * TLB exists, we map it in the fixmap for this CPU, and run with it. We + * have to respect the permissions dictated by the guest, but not the + * memory type (FWB is a must). + * + * - Note that we usually don't do a vcpu_load() on the back of a fault + * (unless we are preempted), so the resolution of a translation fault + * must go via a request that will map the VNCR page in the fixmap. + * vcpu_load() might as well use the same mechanism. + * + * - On vcpu_put() in a non-HYP context with HCR_EL2.NV==1, if the TLB was + * mapped, we unmap it. Yes it is that simple. The TLB still exists + * though, and may be reused at a later load. + * + * - On permission fault, we simply forward the fault to the guest's EL2. + * Get out of my way. + * + * - On any TLBI for the EL2&0 translation regime, we must find any TLB that + * intersects with the TLBI request, invalidate it, and unmap the page + * from the fixmap. 
Because we need to look at all the vcpu-private TLBs, + * this requires some wide-ranging locking to ensure that nothing races + * against it. This may require some refcounting to avoid the search when + * no such TLB is present. + * + * - On MMU notifiers, we must invalidate our TLB in a similar way, but + * looking at the IPA instead. The funny part is that there may not be a + * stage-2 mapping for this page if L1 hasn't accessed it using LD/ST + * instructions. + */ + +int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu) +{ + if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY)) + return 0; + + vcpu->arch.vncr_tlb = kzalloc(sizeof(*vcpu->arch.vncr_tlb), + GFP_KERNEL_ACCOUNT); + if (!vcpu->arch.vncr_tlb) + return -ENOMEM; + + return 0; +} + +static u64 read_vncr_el2(struct kvm_vcpu *vcpu) +{ + return (u64)sign_extend64(__vcpu_sys_reg(vcpu, VNCR_EL2), 48); +} + +static int kvm_translate_vncr(struct kvm_vcpu *vcpu) +{ + bool write_fault, writable; + unsigned long mmu_seq; + struct vncr_tlb *vt; + struct page *page; + u64 va, pfn, gfn; + int ret; + + vt = vcpu->arch.vncr_tlb; + + /* + * If we're about to walk the EL2 S1 PTs, we must invalidate the + * current TLB, as it could be sampled from another vcpu doing a + * TLBI *IS. A real CPU wouldn't do that, but we only keep a single + * translation, so not much of a choice. + * + * We also prepare the next walk wilst we're at it. + */ + scoped_guard(write_lock, &vcpu->kvm->mmu_lock) { + invalidate_vncr(vt); + + vt->wi = (struct s1_walk_info) { + .regime = TR_EL20, + .as_el0 = false, + .pan = false, + }; + vt->wr = (struct s1_walk_result){}; + } + + guard(srcu)(&vcpu->kvm->srcu); + + va = read_vncr_el2(vcpu); + + ret = __kvm_translate_va(vcpu, &vt->wi, &vt->wr, va); + if (ret) + return ret; + + write_fault = kvm_is_write_fault(vcpu); + + mmu_seq = vcpu->kvm->mmu_invalidate_seq; + smp_rmb(); + + gfn = vt->wr.pa >> PAGE_SHIFT; + pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writable, &page); + if (is_error_noslot_pfn(pfn) || (write_fault && !writable)) + return -EFAULT; + + scoped_guard(write_lock, &vcpu->kvm->mmu_lock) { + if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) + return -EAGAIN; + + vt->gva = va; + vt->hpa = pfn << PAGE_SHIFT; + vt->valid = true; + vt->cpu = -1; + + kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu); + kvm_release_faultin_page(vcpu->kvm, page, false, vt->wr.pw); + } + + if (vt->wr.pw) + mark_page_dirty(vcpu->kvm, gfn); + + return 0; +} + +static void inject_vncr_perm(struct kvm_vcpu *vcpu) +{ + struct vncr_tlb *vt = vcpu->arch.vncr_tlb; + u64 esr = kvm_vcpu_get_esr(vcpu); + + /* Adjust the fault level to reflect that of the guest's */ + esr &= ~ESR_ELx_FSC; + esr |= FIELD_PREP(ESR_ELx_FSC, + ESR_ELx_FSC_PERM_L(vt->wr.level)); + + kvm_inject_nested_sync(vcpu, esr); +} + +static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu) +{ + struct vncr_tlb *vt = vcpu->arch.vncr_tlb; + + lockdep_assert_held_read(&vcpu->kvm->mmu_lock); + + if (!vt->valid) + return false; + + if (read_vncr_el2(vcpu) != vt->gva) + return false; + + if (vt->wr.nG) { + u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2); + u64 ttbr = ((tcr & TCR_A1) ? 
+ vcpu_read_sys_reg(vcpu, TTBR1_EL2) : + vcpu_read_sys_reg(vcpu, TTBR0_EL2)); + u16 asid; + + asid = FIELD_GET(TTBR_ASID_MASK, ttbr); + if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) || + !(tcr & TCR_ASID16)) + asid &= GENMASK(7, 0); + + return asid != vt->wr.asid; + } + + return true; +} + +int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu) +{ + struct vncr_tlb *vt = vcpu->arch.vncr_tlb; + u64 esr = kvm_vcpu_get_esr(vcpu); + + BUG_ON(!(esr & ESR_ELx_VNCR_SHIFT)); + + if (esr_fsc_is_permission_fault(esr)) { + inject_vncr_perm(vcpu); + } else if (esr_fsc_is_translation_fault(esr)) { + bool valid; + int ret; + + scoped_guard(read_lock, &vcpu->kvm->mmu_lock) + valid = kvm_vncr_tlb_lookup(vcpu); + + if (!valid) + ret = kvm_translate_vncr(vcpu); + else + ret = -EPERM; + + switch (ret) { + case -EAGAIN: + case -ENOMEM: + /* Let's try again... */ + break; + case -EFAULT: + case -EINVAL: + case -ENOENT: + case -EACCES: + /* + * Translation failed, inject the corresponding + * exception back to EL2. + */ + BUG_ON(!vt->wr.failed); + + esr &= ~ESR_ELx_FSC; + esr |= FIELD_PREP(ESR_ELx_FSC, vt->wr.fst); + + kvm_inject_nested_sync(vcpu, esr); + break; + case -EPERM: + /* Hack to deal with POE until we get kernel support */ + inject_vncr_perm(vcpu); + break; + case 0: + break; + } + } else { + WARN_ONCE(1, "Unhandled VNCR abort, ESR=%llx\n", esr); + } + + return 1; +} + +static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu) +{ + struct vncr_tlb *vt = vcpu->arch.vncr_tlb; + pgprot_t prot; + + guard(preempt)(); + guard(read_lock)(&vcpu->kvm->mmu_lock); + + /* + * The request to map VNCR may have raced against some other + * event, such as an interrupt, and may not be valid anymore. + */ + if (is_hyp_ctxt(vcpu)) + return; + + /* + * Check that the pseudo-TLB is valid and that VNCR_EL2 still + * contains the expected value. If it doesn't, we simply bail out + * without a mapping -- a transformed MSR/MRS will generate the + * fault and allows us to populate the pseudo-TLB. + */ + if (!vt->valid) + return; + + if (read_vncr_el2(vcpu) != vt->gva) + return; + + if (vt->wr.nG) { + u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2); + u64 ttbr = ((tcr & TCR_A1) ? + vcpu_read_sys_reg(vcpu, TTBR1_EL2) : + vcpu_read_sys_reg(vcpu, TTBR0_EL2)); + u16 asid; + + asid = FIELD_GET(TTBR_ASID_MASK, ttbr); + if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) || + !(tcr & TCR_ASID16)) + asid &= GENMASK(7, 0); + + if (asid != vt->wr.asid) + return; + } + + vt->cpu = smp_processor_id(); + + if (vt->wr.pw && vt->wr.pr) + prot = PAGE_KERNEL; + else if (vt->wr.pr) + prot = PAGE_KERNEL_RO; + else + prot = PAGE_NONE; + + /* + * We can't map write-only (or no permission at all) in the kernel, + * but the guest can do it if using POE, so we'll have to turn a + * translation fault into a permission fault at runtime. + * FIXME: WO doesn't work at all, need POE support in the kernel. 
+ */ + if (pgprot_val(prot) != pgprot_val(PAGE_NONE)) { + __set_fixmap(vncr_fixmap(vt->cpu), vt->hpa, prot); + host_data_set_flag(L1_VNCR_MAPPED); + atomic_inc(&vcpu->kvm->arch.vncr_map_count); + } +} + +#define has_tgran_2(__r, __sz) \ + ({ \ + u64 _s1, _s2, _mmfr0 = __r; \ + \ + _s2 = SYS_FIELD_GET(ID_AA64MMFR0_EL1, \ + TGRAN##__sz##_2, _mmfr0); \ + \ + _s1 = SYS_FIELD_GET(ID_AA64MMFR0_EL1, \ + TGRAN##__sz, _mmfr0); \ + \ + ((_s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_NI && \ + _s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz) || \ + (_s2 == ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz && \ + _s1 != ID_AA64MMFR0_EL1_TGRAN##__sz##_NI)); \ + }) +/* * Our emulated CPU doesn't support all the possible features. For the * sake of simplicity (and probably mental sanity), wipe out a number * of feature bits we don't intend to support for the time being. @@ -810,6 +1426,8 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm) */ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val) { + u64 orig_val = val; + switch (reg) { case SYS_ID_AA64ISAR0_EL1: /* Support everything but TME */ @@ -823,12 +1441,11 @@ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val) break; case SYS_ID_AA64PFR0_EL1: - /* No RME, AMU, MPAM, S-EL2, or RAS */ + /* No RME, AMU, MPAM, or S-EL2 */ val &= ~(ID_AA64PFR0_EL1_RME | ID_AA64PFR0_EL1_AMU | ID_AA64PFR0_EL1_MPAM | ID_AA64PFR0_EL1_SEL2 | - ID_AA64PFR0_EL1_RAS | ID_AA64PFR0_EL1_EL3 | ID_AA64PFR0_EL1_EL2 | ID_AA64PFR0_EL1_EL1 | @@ -879,13 +1496,16 @@ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val) */ switch (PAGE_SIZE) { case SZ_4K: - val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP); + if (has_tgran_2(orig_val, 4)) + val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP); fallthrough; case SZ_16K: - val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP); + if (has_tgran_2(orig_val, 16)) + val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP); fallthrough; case SZ_64K: - val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP); + if (has_tgran_2(orig_val, 64)) + val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP); break; } @@ -1018,280 +1638,65 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu) set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1); /* HCR_EL2 */ - res0 = BIT(48); - res1 = HCR_RW; - if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, TWED, IMP)) - res0 |= GENMASK(63, 59); - if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, MTE2)) - res0 |= (HCR_TID5 | HCR_DCT | HCR_ATA); - if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, TTLBxS)) - res0 |= (HCR_TTLBIS | HCR_TTLBOS); - if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) && - !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2)) - res0 |= HCR_ENSCXT; - if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, IMP)) - res0 |= (HCR_TOCU | HCR_TICAB | HCR_TID4); - if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1)) - res0 |= HCR_AMVOFFEN; - if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1)) - res0 |= HCR_FIEN; - if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, FWB, IMP)) - res0 |= HCR_FWB; - /* Implementation choice: NV2 is the only supported config */ - if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY)) - res0 |= (HCR_NV2 | HCR_NV | HCR_AT); - if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, NI)) - res0 |= HCR_NV1; - if (!(kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_ADDRESS) && - kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC))) - res0 |= (HCR_API | HCR_APK); - if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP)) - res0 |= BIT(39); - if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) - 
res0 |= (HCR_TEA | HCR_TERR); - if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP)) - res0 |= HCR_TLOR; - if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP)) - res0 |= HCR_E2H; - if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, IMP)) - res1 |= HCR_E2H; + get_reg_fixed_bits(kvm, HCR_EL2, &res0, &res1); set_sysreg_masks(kvm, HCR_EL2, res0, res1); /* HCRX_EL2 */ - res0 = HCRX_EL2_RES0; - res1 = HCRX_EL2_RES1; - if (!kvm_has_feat(kvm, ID_AA64ISAR3_EL1, PACM, TRIVIAL_IMP)) - res0 |= HCRX_EL2_PACMEn; - if (!kvm_has_feat(kvm, ID_AA64PFR2_EL1, FPMR, IMP)) - res0 |= HCRX_EL2_EnFPM; - if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP)) - res0 |= HCRX_EL2_GCSEn; - if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, SYSREG_128, IMP)) - res0 |= HCRX_EL2_EnIDCP128; - if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, DEV_ASYNC)) - res0 |= (HCRX_EL2_EnSDERR | HCRX_EL2_EnSNERR); - if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, DF2, IMP)) - res0 |= HCRX_EL2_TMEA; - if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, D128, IMP)) - res0 |= HCRX_EL2_D128En; - if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP)) - res0 |= HCRX_EL2_PTTWI; - if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SCTLRX, IMP)) - res0 |= HCRX_EL2_SCTLR2En; - if (!kvm_has_tcr2(kvm)) - res0 |= HCRX_EL2_TCR2En; - if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP)) - res0 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2); - if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, CMOW, IMP)) - res0 |= HCRX_EL2_CMOW; - if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP)) - res0 |= (HCRX_EL2_VFNMI | HCRX_EL2_VINMI | HCRX_EL2_TALLINT); - if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP) || - !(read_sysreg_s(SYS_SMIDR_EL1) & SMIDR_EL1_SMPS)) - res0 |= HCRX_EL2_SMPME; - if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP)) - res0 |= (HCRX_EL2_FGTnXS | HCRX_EL2_FnXS); - if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V)) - res0 |= HCRX_EL2_EnASR; - if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64)) - res0 |= HCRX_EL2_EnALS; - if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA)) - res0 |= HCRX_EL2_EnAS0; + get_reg_fixed_bits(kvm, HCRX_EL2, &res0, &res1); set_sysreg_masks(kvm, HCRX_EL2, res0, res1); /* HFG[RW]TR_EL2 */ - res0 = res1 = 0; - if (!(kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_ADDRESS) && - kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC))) - res0 |= (HFGxTR_EL2_APDAKey | HFGxTR_EL2_APDBKey | - HFGxTR_EL2_APGAKey | HFGxTR_EL2_APIAKey | - HFGxTR_EL2_APIBKey); - if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP)) - res0 |= (HFGxTR_EL2_LORC_EL1 | HFGxTR_EL2_LOREA_EL1 | - HFGxTR_EL2_LORID_EL1 | HFGxTR_EL2_LORN_EL1 | - HFGxTR_EL2_LORSA_EL1); - if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) && - !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2)) - res0 |= (HFGxTR_EL2_SCXTNUM_EL1 | HFGxTR_EL2_SCXTNUM_EL0); - if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP)) - res0 |= HFGxTR_EL2_ICC_IGRPENn_EL1; - if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) - res0 |= (HFGxTR_EL2_ERRIDR_EL1 | HFGxTR_EL2_ERRSELR_EL1 | - HFGxTR_EL2_ERXFR_EL1 | HFGxTR_EL2_ERXCTLR_EL1 | - HFGxTR_EL2_ERXSTATUS_EL1 | HFGxTR_EL2_ERXMISCn_EL1 | - HFGxTR_EL2_ERXPFGF_EL1 | HFGxTR_EL2_ERXPFGCTL_EL1 | - HFGxTR_EL2_ERXPFGCDN_EL1 | HFGxTR_EL2_ERXADDR_EL1); - if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA)) - res0 |= HFGxTR_EL2_nACCDATA_EL1; - if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP)) - res0 |= (HFGxTR_EL2_nGCS_EL0 | HFGxTR_EL2_nGCS_EL1); - if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP)) - res0 |= (HFGxTR_EL2_nSMPRI_EL1 | HFGxTR_EL2_nTPIDR2_EL0); - if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP)) - 
res0 |= HFGxTR_EL2_nRCWMASK_EL1; - if (!kvm_has_s1pie(kvm)) - res0 |= (HFGxTR_EL2_nPIRE0_EL1 | HFGxTR_EL2_nPIR_EL1); - if (!kvm_has_s1poe(kvm)) - res0 |= (HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nPOR_EL1); - if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S2POE, IMP)) - res0 |= HFGxTR_EL2_nS2POR_EL1; - if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, AIE, IMP)) - res0 |= (HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nAMAIR2_EL1); - set_sysreg_masks(kvm, HFGRTR_EL2, res0 | __HFGRTR_EL2_RES0, res1); - set_sysreg_masks(kvm, HFGWTR_EL2, res0 | __HFGWTR_EL2_RES0, res1); + get_reg_fixed_bits(kvm, HFGRTR_EL2, &res0, &res1); + set_sysreg_masks(kvm, HFGRTR_EL2, res0, res1); + get_reg_fixed_bits(kvm, HFGWTR_EL2, &res0, &res1); + set_sysreg_masks(kvm, HFGWTR_EL2, res0, res1); /* HDFG[RW]TR_EL2 */ - res0 = res1 = 0; - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP)) - res0 |= HDFGRTR_EL2_OSDLR_EL1; - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) - res0 |= (HDFGRTR_EL2_PMEVCNTRn_EL0 | HDFGRTR_EL2_PMEVTYPERn_EL0 | - HDFGRTR_EL2_PMCCFILTR_EL0 | HDFGRTR_EL2_PMCCNTR_EL0 | - HDFGRTR_EL2_PMCNTEN | HDFGRTR_EL2_PMINTEN | - HDFGRTR_EL2_PMOVS | HDFGRTR_EL2_PMSELR_EL0 | - HDFGRTR_EL2_PMMIR_EL1 | HDFGRTR_EL2_PMUSERENR_EL0 | - HDFGRTR_EL2_PMCEIDn_EL0); - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP)) - res0 |= (HDFGRTR_EL2_PMBLIMITR_EL1 | HDFGRTR_EL2_PMBPTR_EL1 | - HDFGRTR_EL2_PMBSR_EL1 | HDFGRTR_EL2_PMSCR_EL1 | - HDFGRTR_EL2_PMSEVFR_EL1 | HDFGRTR_EL2_PMSFCR_EL1 | - HDFGRTR_EL2_PMSICR_EL1 | HDFGRTR_EL2_PMSIDR_EL1 | - HDFGRTR_EL2_PMSIRR_EL1 | HDFGRTR_EL2_PMSLATFR_EL1 | - HDFGRTR_EL2_PMBIDR_EL1); - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP)) - res0 |= (HDFGRTR_EL2_TRC | HDFGRTR_EL2_TRCAUTHSTATUS | - HDFGRTR_EL2_TRCAUXCTLR | HDFGRTR_EL2_TRCCLAIM | - HDFGRTR_EL2_TRCCNTVRn | HDFGRTR_EL2_TRCID | - HDFGRTR_EL2_TRCIMSPECn | HDFGRTR_EL2_TRCOSLSR | - HDFGRTR_EL2_TRCPRGCTLR | HDFGRTR_EL2_TRCSEQSTR | - HDFGRTR_EL2_TRCSSCSRn | HDFGRTR_EL2_TRCSTATR | - HDFGRTR_EL2_TRCVICTLR); - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, IMP)) - res0 |= (HDFGRTR_EL2_TRBBASER_EL1 | HDFGRTR_EL2_TRBIDR_EL1 | - HDFGRTR_EL2_TRBLIMITR_EL1 | HDFGRTR_EL2_TRBMAR_EL1 | - HDFGRTR_EL2_TRBPTR_EL1 | HDFGRTR_EL2_TRBSR_EL1 | - HDFGRTR_EL2_TRBTRG_EL1); - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP)) - res0 |= (HDFGRTR_EL2_nBRBIDR | HDFGRTR_EL2_nBRBCTL | - HDFGRTR_EL2_nBRBDATA); - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P2)) - res0 |= HDFGRTR_EL2_nPMSNEVFR_EL1; - set_sysreg_masks(kvm, HDFGRTR_EL2, res0 | HDFGRTR_EL2_RES0, res1); - - /* Reuse the bits from the read-side and add the write-specific stuff */ - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) - res0 |= (HDFGWTR_EL2_PMCR_EL0 | HDFGWTR_EL2_PMSWINC_EL0); - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP)) - res0 |= HDFGWTR_EL2_TRCOSLAR; - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP)) - res0 |= HDFGWTR_EL2_TRFCR_EL1; - set_sysreg_masks(kvm, HFGWTR_EL2, res0 | HDFGWTR_EL2_RES0, res1); + get_reg_fixed_bits(kvm, HDFGRTR_EL2, &res0, &res1); + set_sysreg_masks(kvm, HDFGRTR_EL2, res0, res1); + get_reg_fixed_bits(kvm, HDFGWTR_EL2, &res0, &res1); + set_sysreg_masks(kvm, HDFGWTR_EL2, res0, res1); /* HFGITR_EL2 */ - res0 = HFGITR_EL2_RES0; - res1 = HFGITR_EL2_RES1; - if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, DPB, DPB2)) - res0 |= HFGITR_EL2_DCCVADP; - if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN2)) - res0 |= (HFGITR_EL2_ATS1E1RP | HFGITR_EL2_ATS1E1WP); - if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) - res0 |= (HFGITR_EL2_TLBIRVAALE1OS | 
HFGITR_EL2_TLBIRVALE1OS | - HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS | - HFGITR_EL2_TLBIVAALE1OS | HFGITR_EL2_TLBIVALE1OS | - HFGITR_EL2_TLBIVAAE1OS | HFGITR_EL2_TLBIASIDE1OS | - HFGITR_EL2_TLBIVAE1OS | HFGITR_EL2_TLBIVMALLE1OS); - if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE)) - res0 |= (HFGITR_EL2_TLBIRVAALE1 | HFGITR_EL2_TLBIRVALE1 | - HFGITR_EL2_TLBIRVAAE1 | HFGITR_EL2_TLBIRVAE1 | - HFGITR_EL2_TLBIRVAALE1IS | HFGITR_EL2_TLBIRVALE1IS | - HFGITR_EL2_TLBIRVAAE1IS | HFGITR_EL2_TLBIRVAE1IS | - HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS | - HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS); - if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, IMP)) - res0 |= (HFGITR_EL2_CFPRCTX | HFGITR_EL2_DVPRCTX | - HFGITR_EL2_CPPRCTX); - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP)) - res0 |= (HFGITR_EL2_nBRBINJ | HFGITR_EL2_nBRBIALL); - if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP)) - res0 |= (HFGITR_EL2_nGCSPUSHM_EL1 | HFGITR_EL2_nGCSSTR_EL1 | - HFGITR_EL2_nGCSEPP); - if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, COSP_RCTX)) - res0 |= HFGITR_EL2_COSPRCTX; - if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) - res0 |= HFGITR_EL2_ATS1E1A; + get_reg_fixed_bits(kvm, HFGITR_EL2, &res0, &res1); set_sysreg_masks(kvm, HFGITR_EL2, res0, res1); /* HAFGRTR_EL2 - not a lot to see here */ - res0 = HAFGRTR_EL2_RES0; - res1 = HAFGRTR_EL2_RES1; - if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1)) - res0 |= ~(res0 | res1); + get_reg_fixed_bits(kvm, HAFGRTR_EL2, &res0, &res1); set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1); + /* HFG[RW]TR2_EL2 */ + get_reg_fixed_bits(kvm, HFGRTR2_EL2, &res0, &res1); + set_sysreg_masks(kvm, HFGRTR2_EL2, res0, res1); + get_reg_fixed_bits(kvm, HFGWTR2_EL2, &res0, &res1); + set_sysreg_masks(kvm, HFGWTR2_EL2, res0, res1); + + /* HDFG[RW]TR2_EL2 */ + get_reg_fixed_bits(kvm, HDFGRTR2_EL2, &res0, &res1); + set_sysreg_masks(kvm, HDFGRTR2_EL2, res0, res1); + get_reg_fixed_bits(kvm, HDFGWTR2_EL2, &res0, &res1); + set_sysreg_masks(kvm, HDFGWTR2_EL2, res0, res1); + + /* HFGITR2_EL2 */ + get_reg_fixed_bits(kvm, HFGITR2_EL2, &res0, &res1); + set_sysreg_masks(kvm, HFGITR2_EL2, res0, res1); + /* TCR2_EL2 */ - res0 = TCR2_EL2_RES0; - res1 = TCR2_EL2_RES1; - if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, D128, IMP)) - res0 |= (TCR2_EL2_DisCH0 | TCR2_EL2_DisCH1 | TCR2_EL2_D128); - if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, MEC, IMP)) - res0 |= TCR2_EL2_AMEC1 | TCR2_EL2_AMEC0; - if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, HAFDBS, HAFT)) - res0 |= TCR2_EL2_HAFT; - if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP)) - res0 |= TCR2_EL2_PTTWI | TCR2_EL2_PnCH; - if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, AIE, IMP)) - res0 |= TCR2_EL2_AIE; - if (!kvm_has_s1poe(kvm)) - res0 |= TCR2_EL2_POE | TCR2_EL2_E0POE; - if (!kvm_has_s1pie(kvm)) - res0 |= TCR2_EL2_PIE; - if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP)) - res0 |= (TCR2_EL2_E0POE | TCR2_EL2_D128 | - TCR2_EL2_AMEC1 | TCR2_EL2_DisCH0 | TCR2_EL2_DisCH1); + get_reg_fixed_bits(kvm, TCR2_EL2, &res0, &res1); set_sysreg_masks(kvm, TCR2_EL2, res0, res1); /* SCTLR_EL1 */ - res0 = SCTLR_EL1_RES0; - res1 = SCTLR_EL1_RES1; - if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN3)) - res0 |= SCTLR_EL1_EPAN; + get_reg_fixed_bits(kvm, SCTLR_EL1, &res0, &res1); set_sysreg_masks(kvm, SCTLR_EL1, res0, res1); + /* SCTLR2_ELx */ + get_reg_fixed_bits(kvm, SCTLR2_EL1, &res0, &res1); + set_sysreg_masks(kvm, SCTLR2_EL1, res0, res1); + get_reg_fixed_bits(kvm, SCTLR2_EL2, &res0, &res1); + set_sysreg_masks(kvm, SCTLR2_EL2, res0, res1); + /* MDCR_EL2 */ - res0 = 
MDCR_EL2_RES0; - res1 = MDCR_EL2_RES1; - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) - res0 |= (MDCR_EL2_HPMN | MDCR_EL2_TPMCR | - MDCR_EL2_TPM | MDCR_EL2_HPME); - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP)) - res0 |= MDCR_EL2_E2PB | MDCR_EL2_TPMS; - if (!kvm_has_feat(kvm, ID_AA64DFR1_EL1, SPMU, IMP)) - res0 |= MDCR_EL2_EnSPM; - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P1)) - res0 |= MDCR_EL2_HPMD; - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP)) - res0 |= MDCR_EL2_TTRF; - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P5)) - res0 |= MDCR_EL2_HCCD | MDCR_EL2_HLP; - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, IMP)) - res0 |= MDCR_EL2_E2TB; - if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP)) - res0 |= MDCR_EL2_TDCC; - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, MTPMU, IMP) || - kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP)) - res0 |= MDCR_EL2_MTPME; - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P7)) - res0 |= MDCR_EL2_HPMFZO; - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSS, IMP)) - res0 |= MDCR_EL2_PMSSE; - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P2)) - res0 |= MDCR_EL2_HPMFZS; - if (!kvm_has_feat(kvm, ID_AA64DFR1_EL1, EBEP, IMP)) - res0 |= MDCR_EL2_PMEE; - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, V8P9)) - res0 |= MDCR_EL2_EBWE; - if (!kvm_has_feat(kvm, ID_AA64DFR2_EL1, STEP, IMP)) - res0 |= MDCR_EL2_EnSTEPOP; + get_reg_fixed_bits(kvm, MDCR_EL2, &res0, &res1); set_sysreg_masks(kvm, MDCR_EL2, res0, res1); /* CNTHCTL_EL2 */ @@ -1318,9 +1723,12 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu) res0 |= ICH_HCR_EL2_DVIM | ICH_HCR_EL2_vSGIEOICount; set_sysreg_masks(kvm, ICH_HCR_EL2, res0, res1); + /* VNCR_EL2 */ + set_sysreg_masks(kvm, VNCR_EL2, VNCR_EL2_RES0, VNCR_EL2_RES1); + out: for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++) - (void)__vcpu_sys_reg(vcpu, sr); + __vcpu_rmw_sys_reg(vcpu, sr, |=, 0); return 0; } @@ -1338,7 +1746,50 @@ void check_nested_vcpu_requests(struct kvm_vcpu *vcpu) write_unlock(&vcpu->kvm->mmu_lock); } + if (kvm_check_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu)) + kvm_map_l1_vncr(vcpu); + /* Must be last, as may switch context! */ if (kvm_check_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu)) kvm_inject_nested_irq(vcpu); } + +/* + * One of the many architectural bugs in FEAT_NV2 is that the guest hypervisor + * can write to HCR_EL2 behind our back, potentially changing the exception + * routing / masking for even the host context. + * + * What follows is some slop to (1) react to exception routing / masking and (2) + * preserve the pending SError state across translation regimes. + */ +void kvm_nested_flush_hwstate(struct kvm_vcpu *vcpu) +{ + if (!vcpu_has_nv(vcpu)) + return; + + if (unlikely(vcpu_test_and_clear_flag(vcpu, NESTED_SERROR_PENDING))) + kvm_inject_serror_esr(vcpu, vcpu_get_vsesr(vcpu)); +} + +void kvm_nested_sync_hwstate(struct kvm_vcpu *vcpu) +{ + unsigned long *hcr = vcpu_hcr(vcpu); + + if (!vcpu_has_nv(vcpu)) + return; + + /* + * We previously decided that an SError was deliverable to the guest. + * Reap the pending state from HCR_EL2 and... + */ + if (unlikely(__test_and_clear_bit(__ffs(HCR_VSE), hcr))) + vcpu_set_flag(vcpu, NESTED_SERROR_PENDING); + + /* + * Re-attempt SError injection in case the deliverability has changed, + * which is necessary to faithfully emulate WFI the case of a pending + * SError being a wakeup condition. 
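A compact userspace model of the flush/sync hand-off described here, reduced to a single "VSE" bit plus a sticky pending flag; the structure and function names are illustrative and the deliverability check is abstracted into a parameter rather than real exception routing:

#include <stdbool.h>
#include <stdio.h>

struct vserror_state {
	bool hcr_vse;		/* virtual SError currently armed */
	bool nested_pending;	/* latched across a regime change */
};

/* Re-evaluate deliverability; "deliverable" stands in for the routing rules. */
static void inject_if_possible(struct vserror_state *s, bool deliverable)
{
	if (deliverable)
		s->hcr_vse = true;		/* pend it to the vcpu again */
	else
		s->nested_pending = true;	/* keep it latched */
}

/* sync: reap the pending state from the HCR image, then retry the injection. */
static void sync_hwstate(struct vserror_state *s, bool deliverable)
{
	if (s->hcr_vse) {
		s->hcr_vse = false;
		s->nested_pending = true;
	}
	if (s->nested_pending) {
		s->nested_pending = false;
		inject_if_possible(s, deliverable);
	}
}

/* flush: last chance to turn the latched state back into an injection. */
static void flush_hwstate(struct vserror_state *s, bool deliverable)
{
	if (s->nested_pending) {
		s->nested_pending = false;
		inject_if_possible(s, deliverable);
	}
}

int main(void)
{
	struct vserror_state s = { .hcr_vse = true };

	sync_hwstate(&s, false);		/* masked: stays latched */
	printf("latched=%d\n", s.nested_pending);
	flush_hwstate(&s, true);		/* deliverable again: re-armed */
	printf("armed=%d\n", s.hcr_vse);
	return 0;
}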
+ */ + if (unlikely(vcpu_test_and_clear_flag(vcpu, NESTED_SERROR_PENDING))) + kvm_inject_serror_esr(vcpu, vcpu_get_vsesr(vcpu)); +} diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c index 0f89157d31fd6..fcd70bfe44fb8 100644 --- a/arch/arm64/kvm/pkvm.c +++ b/arch/arm64/kvm/pkvm.c @@ -5,12 +5,12 @@ */ #include <linux/init.h> +#include <linux/interval_tree_generic.h> #include <linux/kmemleak.h> #include <linux/kvm_host.h> #include <asm/kvm_mmu.h> #include <linux/memblock.h> #include <linux/mutex.h> -#include <linux/sort.h> #include <asm/kvm_pkvm.h> @@ -24,23 +24,6 @@ static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr); phys_addr_t hyp_mem_base; phys_addr_t hyp_mem_size; -static int cmp_hyp_memblock(const void *p1, const void *p2) -{ - const struct memblock_region *r1 = p1; - const struct memblock_region *r2 = p2; - - return r1->base < r2->base ? -1 : (r1->base > r2->base); -} - -static void __init sort_memblock_regions(void) -{ - sort(hyp_memory, - *hyp_memblock_nr_ptr, - sizeof(struct memblock_region), - cmp_hyp_memblock, - NULL); -} - static int __init register_memblock_regions(void) { struct memblock_region *reg; @@ -52,7 +35,6 @@ static int __init register_memblock_regions(void) hyp_memory[*hyp_memblock_nr_ptr] = *reg; (*hyp_memblock_nr_ptr)++; } - sort_memblock_regions(); return 0; } @@ -79,6 +61,7 @@ void __init kvm_hyp_reserve(void) hyp_mem_pages += host_s2_pgtable_pages(); hyp_mem_pages += hyp_vm_table_pages(); hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE); + hyp_mem_pages += pkvm_selftest_pages(); hyp_mem_pages += hyp_ffa_proxy_pages(); /* @@ -262,6 +245,7 @@ static int __init finalize_pkvm(void) * at, which would end badly once inaccessible. */ kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start); + kmemleak_free_part(__hyp_data_start, __hyp_data_end - __hyp_data_start); kmemleak_free_part(__hyp_rodata_start, __hyp_rodata_end - __hyp_rodata_start); kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size); @@ -273,80 +257,68 @@ static int __init finalize_pkvm(void) } device_initcall_sync(finalize_pkvm); -static int cmp_mappings(struct rb_node *node, const struct rb_node *parent) +static u64 __pkvm_mapping_start(struct pkvm_mapping *m) { - struct pkvm_mapping *a = rb_entry(node, struct pkvm_mapping, node); - struct pkvm_mapping *b = rb_entry(parent, struct pkvm_mapping, node); - - if (a->gfn < b->gfn) - return -1; - if (a->gfn > b->gfn) - return 1; - return 0; + return m->gfn * PAGE_SIZE; } -static struct rb_node *find_first_mapping_node(struct rb_root *root, u64 gfn) +static u64 __pkvm_mapping_end(struct pkvm_mapping *m) { - struct rb_node *node = root->rb_node, *prev = NULL; - struct pkvm_mapping *mapping; - - while (node) { - mapping = rb_entry(node, struct pkvm_mapping, node); - if (mapping->gfn == gfn) - return node; - prev = node; - node = (gfn < mapping->gfn) ? node->rb_left : node->rb_right; - } - - return prev; + return (m->gfn + m->nr_pages) * PAGE_SIZE - 1; } +INTERVAL_TREE_DEFINE(struct pkvm_mapping, node, u64, __subtree_last, + __pkvm_mapping_start, __pkvm_mapping_end, static, + pkvm_mapping); + /* - * __tmp is updated to rb_next(__tmp) *before* entering the body of the loop to allow freeing - * of __map inline. + * __tmp is updated to iter_first(pkvm_mappings) *before* entering the body of the loop to allow + * freeing of __map inline. 
*/ #define for_each_mapping_in_range_safe(__pgt, __start, __end, __map) \ - for (struct rb_node *__tmp = find_first_mapping_node(&(__pgt)->pkvm_mappings, \ - ((__start) >> PAGE_SHIFT)); \ + for (struct pkvm_mapping *__tmp = pkvm_mapping_iter_first(&(__pgt)->pkvm_mappings, \ + __start, __end - 1); \ __tmp && ({ \ - __map = rb_entry(__tmp, struct pkvm_mapping, node); \ - __tmp = rb_next(__tmp); \ + __map = __tmp; \ + __tmp = pkvm_mapping_iter_next(__map, __start, __end - 1); \ true; \ }); \ - ) \ - if (__map->gfn < ((__start) >> PAGE_SHIFT)) \ - continue; \ - else if (__map->gfn >= ((__end) >> PAGE_SHIFT)) \ - break; \ - else + ) int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, struct kvm_pgtable_mm_ops *mm_ops) { - pgt->pkvm_mappings = RB_ROOT; + pgt->pkvm_mappings = RB_ROOT_CACHED; pgt->mmu = mmu; return 0; } -void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) +static int __pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 start, u64 end) { struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu); pkvm_handle_t handle = kvm->arch.pkvm.handle; struct pkvm_mapping *mapping; - struct rb_node *node; + int ret; if (!handle) - return; + return 0; - node = rb_first(&pgt->pkvm_mappings); - while (node) { - mapping = rb_entry(node, struct pkvm_mapping, node); - kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn); - node = rb_next(node); - rb_erase(&mapping->node, &pgt->pkvm_mappings); + for_each_mapping_in_range_safe(pgt, start, end, mapping) { + ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn, + mapping->nr_pages); + if (WARN_ON(ret)) + return ret; + pkvm_mapping_remove(mapping, &pgt->pkvm_mappings); kfree(mapping); } + + return 0; +} + +void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) +{ + __pkvm_pgtable_stage2_unmap(pgt, 0, ~(0ULL)); } int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, @@ -360,42 +332,46 @@ int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 pfn = phys >> PAGE_SHIFT; int ret; - if (size != PAGE_SIZE) + if (size != PAGE_SIZE && size != PMD_SIZE) return -EINVAL; lockdep_assert_held_write(&kvm->mmu_lock); - ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, prot); - if (ret) { - /* Is the gfn already mapped due to a racing vCPU? */ - if (ret == -EPERM) + + /* + * Calling stage2_map() on top of existing mappings is either happening because of a race + * with another vCPU, or because we're changing between page and block mappings. As per + * user_mem_abort(), same-size permission faults are handled in the relax_perms() path. + */ + mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, addr + size - 1); + if (mapping) { + if (size == (mapping->nr_pages * PAGE_SIZE)) return -EAGAIN; + + /* Remove _any_ pkvm_mapping overlapping with the range, bigger or smaller. 
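The interval-tree query used above works on byte addresses with an inclusive last byte. A minimal model of the start/last computation and the overlap test it relies on, with PAGE_SIZE fixed at 4K purely for the example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096ULL

struct mapping {
	uint64_t gfn;
	uint64_t nr_pages;
};

static uint64_t mapping_start(const struct mapping *m)
{
	return m->gfn * EXAMPLE_PAGE_SIZE;
}

static uint64_t mapping_last(const struct mapping *m)
{
	/* Inclusive last byte, as interval trees expect. */
	return (m->gfn + m->nr_pages) * EXAMPLE_PAGE_SIZE - 1;
}

/* True if [start, last] intersects the mapping at all, fully or partially. */
static bool mapping_overlaps(const struct mapping *m, uint64_t start, uint64_t last)
{
	return mapping_start(m) <= last && mapping_last(m) >= start;
}

int main(void)
{
	struct mapping block = { .gfn = 512, .nr_pages = 512 };	/* one 2M block */

	/* A single page inside the block overlaps it... */
	printf("%d\n", mapping_overlaps(&block, 600 * EXAMPLE_PAGE_SIZE,
					601 * EXAMPLE_PAGE_SIZE - 1));
	/* ...and the first page after it does not. */
	printf("%d\n", mapping_overlaps(&block, 1024 * EXAMPLE_PAGE_SIZE,
					1025 * EXAMPLE_PAGE_SIZE - 1));
	return 0;
}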
*/ + ret = __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size); + if (ret) + return ret; + mapping = NULL; } + ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, size / PAGE_SIZE, prot); + if (WARN_ON(ret)) + return ret; + swap(mapping, cache->mapping); mapping->gfn = gfn; mapping->pfn = pfn; - WARN_ON(rb_find_add(&mapping->node, &pgt->pkvm_mappings, cmp_mappings)); + mapping->nr_pages = size / PAGE_SIZE; + pkvm_mapping_insert(mapping, &pgt->pkvm_mappings); return ret; } int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) { - struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu); - pkvm_handle_t handle = kvm->arch.pkvm.handle; - struct pkvm_mapping *mapping; - int ret = 0; - - lockdep_assert_held_write(&kvm->mmu_lock); - for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) { - ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn); - if (WARN_ON(ret)) - break; - rb_erase(&mapping->node, &pgt->pkvm_mappings); - kfree(mapping); - } + lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(pgt->mmu)->mmu_lock); - return ret; + return __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size); } int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size) @@ -407,7 +383,8 @@ int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size) lockdep_assert_held(&kvm->mmu_lock); for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) { - ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn); + ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn, + mapping->nr_pages); if (WARN_ON(ret)) break; } @@ -422,7 +399,8 @@ int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) lockdep_assert_held(&kvm->mmu_lock); for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) - __clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn), PAGE_SIZE); + __clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn), + PAGE_SIZE * mapping->nr_pages); return 0; } @@ -437,7 +415,7 @@ bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 lockdep_assert_held(&kvm->mmu_lock); for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) young |= kvm_call_hyp_nvhe(__pkvm_host_test_clear_young_guest, handle, mapping->gfn, - mkold); + mapping->nr_pages, mkold); return young; } diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index a1bc10d7116a5..b03dbda7f1ab9 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -178,7 +178,7 @@ static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force) val |= lower_32_bits(val); } - __vcpu_sys_reg(vcpu, reg) = val; + __vcpu_assign_sys_reg(vcpu, reg, val); /* Recreate the perf event to reflect the updated sample_period */ kvm_pmu_create_perf_event(pmc); @@ -204,7 +204,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) { kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx)); - __vcpu_sys_reg(vcpu, counter_index_to_reg(select_idx)) = val; + __vcpu_assign_sys_reg(vcpu, counter_index_to_reg(select_idx), val); kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); } @@ -239,7 +239,7 @@ static void kvm_pmu_stop_counter(struct kvm_pmc *pmc) reg = counter_index_to_reg(pmc->idx); - __vcpu_sys_reg(vcpu, reg) = val; + __vcpu_assign_sys_reg(vcpu, reg, val); kvm_pmu_release_perf_event(pmc); } @@ -280,7 +280,7 @@ static u64 kvm_pmu_hyp_counter_mask(struct kvm_vcpu *vcpu) return 0; 
hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2)); - n = vcpu->kvm->arch.pmcr_n; + n = vcpu->kvm->arch.nr_pmu_counters; /* * Programming HPMN to a value greater than PMCR_EL0.N is @@ -503,14 +503,14 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu, reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1; if (!kvm_pmc_is_64bit(pmc)) reg = lower_32_bits(reg); - __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg; + __vcpu_assign_sys_reg(vcpu, counter_index_to_reg(i), reg); /* No overflow? move on */ if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg)) continue; /* Mark overflow */ - __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i); + __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(i)); if (kvm_pmu_counter_can_chain(pmc)) kvm_pmu_counter_increment(vcpu, BIT(i + 1), @@ -556,7 +556,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event, perf_event->attr.sample_period = period; perf_event->hw.sample_period = period; - __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx); + __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(idx)); if (kvm_pmu_counter_can_chain(pmc)) kvm_pmu_counter_increment(vcpu, BIT(idx + 1), @@ -602,20 +602,18 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); /* The reset bits don't indicate any state, and shouldn't be saved. */ - __vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P); + __vcpu_assign_sys_reg(vcpu, PMCR_EL0, (val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P))); if (val & ARMV8_PMU_PMCR_C) kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0); if (val & ARMV8_PMU_PMCR_P) { - /* - * Unlike other PMU sysregs, the controls in PMCR_EL0 always apply - * to the 'guest' range of counters and never the 'hyp' range. - */ unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu) & - ~kvm_pmu_hyp_counter_mask(vcpu) & ~BIT(ARMV8_PMU_CYCLE_IDX); + if (!vcpu_is_el2(vcpu)) + mask &= ~kvm_pmu_hyp_counter_mask(vcpu); + for_each_set_bit(i, &mask, 32) kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true); } @@ -781,7 +779,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 reg; reg = counter_index_to_evtreg(pmc->idx); - __vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm); + __vcpu_assign_sys_reg(vcpu, reg, (data & kvm_pmu_evtyper_mask(vcpu->kvm))); kvm_pmu_create_perf_event(pmc); } @@ -916,9 +914,9 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) { u64 mask = kvm_pmu_implemented_counter_mask(vcpu); - __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask; - __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask; - __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask; + __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, mask); + __vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, mask); + __vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, mask); kvm_pmu_reprogram_counter_mask(vcpu, mask); } @@ -1027,12 +1025,30 @@ u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm) return bitmap_weight(arm_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS); } +static void kvm_arm_set_nr_counters(struct kvm *kvm, unsigned int nr) +{ + kvm->arch.nr_pmu_counters = nr; + + /* Reset MDCR_EL2.HPMN behind the vcpus' back... 
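Resetting MDCR_EL2.HPMN for every vCPU amounts to a read-modify-write of one bitfield. A standalone sketch of that FIELD_PREP-style replacement, assuming the usual HPMN placement in MDCR_EL2[4:0] and an illustrative helper name:

#include <stdint.h>
#include <stdio.h>

#define MDCR_HPMN_MASK	0x1fULL		/* MDCR_EL2[4:0] */

/* Replace the HPMN field while leaving every other bit untouched. */
static uint64_t mdcr_set_hpmn(uint64_t mdcr, unsigned int hpmn)
{
	mdcr &= ~MDCR_HPMN_MASK;
	mdcr |= (uint64_t)hpmn & MDCR_HPMN_MASK;
	return mdcr;
}

int main(void)
{
	uint64_t mdcr = 0x706;	/* arbitrary other bits set, HPMN=6 */

	mdcr = mdcr_set_hpmn(mdcr, 4);
	printf("MDCR_EL2 = %#llx\n", (unsigned long long)mdcr);	/* 0x704 */
	return 0;
}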
*/ + if (test_bit(KVM_ARM_VCPU_HAS_EL2, kvm->arch.vcpu_features)) { + struct kvm_vcpu *vcpu; + unsigned long i; + + kvm_for_each_vcpu(i, vcpu, kvm) { + u64 val = __vcpu_sys_reg(vcpu, MDCR_EL2); + val &= ~MDCR_EL2_HPMN; + val |= FIELD_PREP(MDCR_EL2_HPMN, kvm->arch.nr_pmu_counters); + __vcpu_assign_sys_reg(vcpu, MDCR_EL2, val); + } + } +} + static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu) { lockdep_assert_held(&kvm->arch.config_lock); kvm->arch.arm_pmu = arm_pmu; - kvm->arch.pmcr_n = kvm_arm_pmu_get_max_counters(kvm); + kvm_arm_set_nr_counters(kvm, kvm_arm_pmu_get_max_counters(kvm)); } /** @@ -1088,6 +1104,20 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id) return ret; } +static int kvm_arm_pmu_v3_set_nr_counters(struct kvm_vcpu *vcpu, unsigned int n) +{ + struct kvm *kvm = vcpu->kvm; + + if (!kvm->arch.arm_pmu) + return -EINVAL; + + if (n > kvm_arm_pmu_get_max_counters(kvm)) + return -EINVAL; + + kvm_arm_set_nr_counters(kvm, n); + return 0; +} + int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { struct kvm *kvm = vcpu->kvm; @@ -1184,6 +1214,15 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id); } + case KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS: { + unsigned int __user *uaddr = (unsigned int __user *)(long)attr->addr; + unsigned int n; + + if (get_user(n, uaddr)) + return -EFAULT; + + return kvm_arm_pmu_v3_set_nr_counters(vcpu, n); + } case KVM_ARM_VCPU_PMU_V3_INIT: return kvm_arm_pmu_v3_init(vcpu); } @@ -1222,6 +1261,7 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) case KVM_ARM_VCPU_PMU_V3_INIT: case KVM_ARM_VCPU_PMU_V3_FILTER: case KVM_ARM_VCPU_PMU_V3_SET_PMU: + case KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS: if (kvm_vcpu_has_pmu(vcpu)) return 0; } @@ -1260,8 +1300,12 @@ u8 kvm_arm_pmu_get_pmuver_limit(void) u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) { u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0); + u64 n = vcpu->kvm->arch.nr_pmu_counters; + + if (vcpu_has_nv(vcpu) && !vcpu_is_el2(vcpu)) + n = FIELD_GET(MDCR_EL2_HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2)); - return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N); + return u64_replace_bits(pmcr, n, ARMV8_PMU_PMCR_N); } void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index f82fcc614e136..959532422d3a3 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -158,6 +158,8 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) if (sve_state) kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu)); kfree(sve_state); + free_page((unsigned long)vcpu->arch.ctxt.vncr_array); + kfree(vcpu->arch.vncr_tlb); kfree(vcpu->arch.ccsidr); } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 5dde9285afc80..82ffb3b3b3cf7 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -108,7 +108,6 @@ static bool get_el2_to_el1_mapping(unsigned int reg, PURE_EL2_SYSREG( HACR_EL2 ); PURE_EL2_SYSREG( VTTBR_EL2 ); PURE_EL2_SYSREG( VTCR_EL2 ); - PURE_EL2_SYSREG( RVBAR_EL2 ); PURE_EL2_SYSREG( TPIDR_EL2 ); PURE_EL2_SYSREG( HPFAR_EL2 ); PURE_EL2_SYSREG( HCRX_EL2 ); @@ -144,6 +143,7 @@ static bool get_el2_to_el1_mapping(unsigned int reg, MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL ); MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL ); MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL ); + MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL ); default: return false; } @@ 
-228,7 +228,7 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg) * to reverse-translate virtual EL2 system registers for a * non-VHE guest hypervisor. */ - __vcpu_sys_reg(vcpu, reg) = val; + __vcpu_assign_sys_reg(vcpu, reg, val); switch (reg) { case CNTHCTL_EL2: @@ -263,7 +263,7 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg) return; memory_write: - __vcpu_sys_reg(vcpu, reg) = val; + __vcpu_assign_sys_reg(vcpu, reg, val); } /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ @@ -533,8 +533,7 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu, return ignore_write(vcpu, p); if (p->Op1 == 4) { /* ICC_SRE_EL2 */ - p->regval = (ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE | - ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB); + p->regval = KVM_ICC_SRE_EL2; } else { /* ICC_SRE_EL1 */ p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre; } @@ -605,7 +604,7 @@ static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, if ((val ^ rd->val) & ~OSLSR_EL1_OSLK) return -EINVAL; - __vcpu_sys_reg(vcpu, rd->reg) = val; + __vcpu_assign_sys_reg(vcpu, rd->reg, val); return 0; } @@ -773,6 +772,12 @@ static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) return mpidr; } +static unsigned int hidden_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *r) +{ + return REG_HIDDEN; +} + static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { @@ -785,13 +790,13 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu, static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 mask = BIT(ARMV8_PMU_CYCLE_IDX); - u8 n = vcpu->kvm->arch.pmcr_n; + u8 n = vcpu->kvm->arch.nr_pmu_counters; if (n) mask |= GENMASK(n - 1, 0); reset_unknown(vcpu, r); - __vcpu_sys_reg(vcpu, r->reg) &= mask; + __vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask); return __vcpu_sys_reg(vcpu, r->reg); } @@ -799,7 +804,7 @@ static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { reset_unknown(vcpu, r); - __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0); + __vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0)); return __vcpu_sys_reg(vcpu, r->reg); } @@ -811,7 +816,7 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) return 0; reset_unknown(vcpu, r); - __vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm); + __vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm)); return __vcpu_sys_reg(vcpu, r->reg); } @@ -819,7 +824,7 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { reset_unknown(vcpu, r); - __vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK; + __vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK); return __vcpu_sys_reg(vcpu, r->reg); } @@ -835,7 +840,7 @@ static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) * The value of PMCR.N field is included when the * vCPU register is read via kvm_vcpu_read_pmcr(). 
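As the comment notes, PMCR_EL0.N is folded in at read time rather than stored. A userspace sketch of that composition, assuming N occupies PMCR_EL0[15:11] and using an illustrative "nested vEL1" flag in place of the real vCPU checks:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PMCR_N_SHIFT	11
#define PMCR_N_MASK	(0x1fULL << PMCR_N_SHIFT)

/*
 * The stored PMCR image never carries a meaningful N; it is recomputed on
 * every read, either from the VM-wide counter limit or, for a vEL1 guest
 * running under a nested hypervisor, from MDCR_EL2.HPMN.
 */
static uint64_t read_pmcr(uint64_t stored_pmcr, unsigned int vm_counters,
			  bool nested_vel1, unsigned int hpmn)
{
	unsigned int n = nested_vel1 ? hpmn : vm_counters;

	return (stored_pmcr & ~PMCR_N_MASK) |
	       (((uint64_t)n << PMCR_N_SHIFT) & PMCR_N_MASK);
}

int main(void)
{
	printf("host/vEL2 view: N=%llu\n", (unsigned long long)
	       ((read_pmcr(0, 6, false, 0) & PMCR_N_MASK) >> PMCR_N_SHIFT));
	printf("vEL1 view:      N=%llu\n", (unsigned long long)
	       ((read_pmcr(0, 6, true, 4) & PMCR_N_MASK) >> PMCR_N_SHIFT));
	return 0;
}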
*/ - __vcpu_sys_reg(vcpu, r->reg) = pmcr; + __vcpu_assign_sys_reg(vcpu, r->reg, pmcr); return __vcpu_sys_reg(vcpu, r->reg); } @@ -907,7 +912,7 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return false; if (p->is_write) - __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval; + __vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval); else /* return PMSELR.SEL field */ p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0) @@ -1076,7 +1081,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 va { u64 mask = kvm_pmu_accessible_counter_mask(vcpu); - __vcpu_sys_reg(vcpu, r->reg) = val & mask; + __vcpu_assign_sys_reg(vcpu, r->reg, val & mask); kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); return 0; @@ -1103,10 +1108,10 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, val = p->regval & mask; if (r->Op2 & 0x1) /* accessing PMCNTENSET_EL0 */ - __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val; + __vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val); else /* accessing PMCNTENCLR_EL0 */ - __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val; + __vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val); kvm_pmu_reprogram_counter_mask(vcpu, val); } else { @@ -1129,10 +1134,10 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (r->Op2 & 0x1) /* accessing PMINTENSET_EL1 */ - __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val; + __vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val); else /* accessing PMINTENCLR_EL1 */ - __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val; + __vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val); } else { p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1); } @@ -1151,10 +1156,10 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (p->is_write) { if (r->CRm & 0x2) /* accessing PMOVSSET_EL0 */ - __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask); + __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask)); else /* accessing PMOVSCLR_EL0 */ - __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask); + __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask)); } else { p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); } @@ -1185,8 +1190,8 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!vcpu_mode_priv(vcpu)) return undef_access(vcpu, p, r); - __vcpu_sys_reg(vcpu, PMUSERENR_EL0) = - p->regval & ARMV8_PMU_USERENR_MASK; + __vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0, + (p->regval & ARMV8_PMU_USERENR_MASK)); } else { p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0) & ARMV8_PMU_USERENR_MASK; @@ -1216,8 +1221,9 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, * with the existing KVM behavior. 
*/ if (!kvm_vm_has_ran_once(kvm) && + !vcpu_has_nv(vcpu) && new_n <= kvm_arm_pmu_get_max_counters(kvm)) - kvm->arch.pmcr_n = new_n; + kvm->arch.nr_pmu_counters = new_n; mutex_unlock(&kvm->arch.config_lock); @@ -1236,7 +1242,7 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, if (!kvm_supports_32bit_el0()) val |= ARMV8_PMU_PMCR_LC; - __vcpu_sys_reg(vcpu, r->reg) = val; + __vcpu_assign_sys_reg(vcpu, r->reg, val); kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); return 0; @@ -1600,23 +1606,25 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu, val = sanitise_id_aa64pfr0_el1(vcpu, val); break; case SYS_ID_AA64PFR1_EL1: - if (!kvm_has_mte(vcpu->kvm)) + if (!kvm_has_mte(vcpu->kvm)) { val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac); + } val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI); - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX); - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_DF2); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac); break; case SYS_ID_AA64PFR2_EL1: - /* We only expose FPMR */ - val &= ID_AA64PFR2_EL1_FPMR; + val &= ID_AA64PFR2_EL1_FPMR | + (kvm_has_mte(vcpu->kvm) ? + ID_AA64PFR2_EL1_MTEFAR | ID_AA64PFR2_EL1_MTESTOREONLY : + 0); break; case SYS_ID_AA64ISAR1_EL1: if (!vcpu_has_ptrauth(vcpu)) @@ -1641,8 +1649,10 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu, val &= ~ID_AA64MMFR2_EL1_NV; break; case SYS_ID_AA64MMFR3_EL1: - val &= ID_AA64MMFR3_EL1_TCRX | ID_AA64MMFR3_EL1_S1POE | - ID_AA64MMFR3_EL1_S1PIE; + val &= ID_AA64MMFR3_EL1_TCRX | + ID_AA64MMFR3_EL1_SCTLRX | + ID_AA64MMFR3_EL1_S1POE | + ID_AA64MMFR3_EL1_S1PIE; break; case SYS_ID_MMFR4_EL1: val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX); @@ -1809,7 +1819,7 @@ static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val) val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP); } - if (kvm_vgic_global_state.type == VGIC_V3) { + if (vgic_is_v3(vcpu->kvm)) { val &= ~ID_AA64PFR0_EL1_GIC_MASK; val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP); } @@ -1951,6 +1961,14 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val))) return -EINVAL; + /* + * If we are running on a GICv5 host and support FEAT_GCIE_LEGACY, then + * we support GICv3. Fail attempts to do anything but set that to IMP. + */ + if (vgic_is_v3_compat(vcpu->kvm) && + FIELD_GET(ID_AA64PFR0_EL1_GIC_MASK, user_val) != ID_AA64PFR0_EL1_GIC_IMP) + return -EINVAL; + return set_id_reg(vcpu, rd, user_val); } @@ -1959,11 +1977,34 @@ static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu, { u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1); u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK; + u8 mte = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val); + u8 user_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val); + u8 hw_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val); /* See set_id_aa64pfr0_el1 for comment about MPAM */ if ((hw_val & mpam_mask) == (user_val & mpam_mask)) user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK; + /* + * Previously MTE_frac was hidden from guest. 
However, if the + * hardware supports MTE2 but not MTE_ASYM_FAULT then a value + * of 0 for this field indicates that the hardware supports + * MTE_ASYNC. Whereas, 0xf indicates MTE_ASYNC is not supported. + * + * As KVM must accept values from KVM provided by user-space, + * when ID_AA64PFR1_EL1.MTE is 2 allow user-space to set + * ID_AA64PFR1_EL1.MTE_frac to 0. However, ignore it to avoid + * incorrectly claiming hardware support for MTE_ASYNC in the + * guest. + */ + + if (mte == ID_AA64PFR1_EL1_MTE_MTE2 && + hw_mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI && + user_mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC) { + user_val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK; + user_val |= hw_val & ID_AA64PFR1_EL1_MTE_frac_MASK; + } + return set_id_reg(vcpu, rd, user_val); } @@ -2188,7 +2229,7 @@ static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) if (kvm_has_mte(vcpu->kvm)) clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc); - __vcpu_sys_reg(vcpu, r->reg) = clidr; + __vcpu_assign_sys_reg(vcpu, r->reg, clidr); return __vcpu_sys_reg(vcpu, r->reg); } @@ -2202,7 +2243,7 @@ static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc)) return -EINVAL; - __vcpu_sys_reg(vcpu, rd->reg) = val; + __vcpu_assign_sys_reg(vcpu, rd->reg, val); return 0; } @@ -2287,15 +2328,6 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu, "trap of EL2 register redirected to EL1"); } -#define EL2_REG(name, acc, rst, v) { \ - SYS_DESC(SYS_##name), \ - .access = acc, \ - .reset = rst, \ - .reg = name, \ - .visibility = el2_visibility, \ - .val = v, \ -} - #define EL2_REG_FILTERED(name, acc, rst, v, filter) { \ SYS_DESC(SYS_##name), \ .access = acc, \ @@ -2305,7 +2337,14 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu, .val = v, \ } +#define EL2_REG(name, acc, rst, v) \ + EL2_REG_FILTERED(name, acc, rst, v, el2_visibility) + #define EL2_REG_VNCR(name, rst, v) EL2_REG(name, bad_vncr_trap, rst, v) +#define EL2_REG_VNCR_FILT(name, vis) \ + EL2_REG_FILTERED(name, bad_vncr_trap, reset_val, 0, vis) +#define EL2_REG_VNCR_GICv3(name) \ + EL2_REG_VNCR_FILT(name, hidden_visibility) #define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v) /* @@ -2385,7 +2424,7 @@ static bool access_sp_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { if (p->is_write) - __vcpu_sys_reg(vcpu, SP_EL1) = p->regval; + __vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval); else p->regval = __vcpu_sys_reg(vcpu, SP_EL1); @@ -2409,7 +2448,7 @@ static bool access_spsr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { if (p->is_write) - __vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval; + __vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval); else p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1); @@ -2421,7 +2460,7 @@ static bool access_cntkctl_el12(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { if (p->is_write) - __vcpu_sys_reg(vcpu, CNTKCTL_EL1) = p->regval; + __vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval); else p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1); @@ -2435,7 +2474,9 @@ static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1)) val |= HCR_E2H; - return __vcpu_sys_reg(vcpu, r->reg) = val; + __vcpu_assign_sys_reg(vcpu, r->reg, val); + + return __vcpu_sys_reg(vcpu, r->reg); } static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu, @@ -2452,6 +2493,31 @@ static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu, return __el2_visibility(vcpu, rd, sve_visibility); } +static unsigned 
int vncr_el2_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + if (el2_visibility(vcpu, rd) == 0 && + kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY)) + return 0; + + return REG_HIDDEN; +} + +static unsigned int sctlr2_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + if (kvm_has_sctlr2(vcpu->kvm)) + return 0; + + return REG_HIDDEN; +} + +static unsigned int sctlr2_el2_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + return __el2_visibility(vcpu, rd, sctlr2_visibility); +} + static bool access_zcr_el2(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) @@ -2482,11 +2548,7 @@ static bool access_gic_vtr(struct kvm_vcpu *vcpu, if (p->is_write) return write_to_read_only(vcpu, p, r); - p->regval = kvm_vgic_global_state.ich_vtr_el2; - p->regval &= ~(ICH_VTR_EL2_DVIM | - ICH_VTR_EL2_A3V | - ICH_VTR_EL2_IDbits); - p->regval |= ICH_VTR_EL2_nV4; + p->regval = kvm_get_guest_vtr_el2(); return true; } @@ -2557,6 +2619,26 @@ static unsigned int tcr2_el2_visibility(const struct kvm_vcpu *vcpu, return __el2_visibility(vcpu, rd, tcr2_visibility); } +static unsigned int fgt2_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + if (el2_visibility(vcpu, rd) == 0 && + kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, FGT2)) + return 0; + + return REG_HIDDEN; +} + +static unsigned int fgt_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + if (el2_visibility(vcpu, rd) == 0 && + kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, IMP)) + return 0; + + return REG_HIDDEN; +} + static unsigned int s1pie_visibility(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { @@ -2576,21 +2658,55 @@ static bool access_mdcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { - u64 old = __vcpu_sys_reg(vcpu, MDCR_EL2); + u64 hpmn, val, old = __vcpu_sys_reg(vcpu, MDCR_EL2); - if (!access_rw(vcpu, p, r)) - return false; + if (!p->is_write) { + p->regval = old; + return true; + } + + val = p->regval; + hpmn = FIELD_GET(MDCR_EL2_HPMN, val); + + /* + * If HPMN is out of bounds, limit it to what we actually + * support. This matches the UNKNOWN definition of the field + * in that case, and keeps the emulation simple. Sort of. + */ + if (hpmn > vcpu->kvm->arch.nr_pmu_counters) { + hpmn = vcpu->kvm->arch.nr_pmu_counters; + u64p_replace_bits(&val, hpmn, MDCR_EL2_HPMN); + } + + __vcpu_assign_sys_reg(vcpu, MDCR_EL2, val); /* - * Request a reload of the PMU to enable/disable the counters affected - * by HPME. + * Request a reload of the PMU to enable/disable the counters + * affected by HPME. */ - if ((old ^ __vcpu_sys_reg(vcpu, MDCR_EL2)) & MDCR_EL2_HPME) + if ((old ^ val) & MDCR_EL2_HPME) kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); return true; } +static bool access_ras(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + struct kvm *kvm = vcpu->kvm; + + switch(reg_to_encoding(r)) { + default: + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) { + kvm_inject_undefined(vcpu); + return false; + } + } + + return trap_raz_wi(vcpu, p, r); +} + /* * For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and * AIDR_EL1 as "invariant" registers, meaning userspace cannot change them. 
@@ -2704,6 +2820,12 @@ static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, .set_user = set_imp_id_reg, \ .reset = reset_imp_id_reg, \ .val = mask, \ + } + +static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) +{ + __vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters); + return vcpu->kvm->arch.nr_pmu_counters; } /* @@ -2812,7 +2934,6 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_AA64PFR0_EL1_FP)), ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1, ~(ID_AA64PFR1_EL1_PFAR | - ID_AA64PFR1_EL1_DF2 | ID_AA64PFR1_EL1_MTEX | ID_AA64PFR1_EL1_THE | ID_AA64PFR1_EL1_GCS | @@ -2824,7 +2945,10 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_AA64PFR1_EL1_MPAM_frac | ID_AA64PFR1_EL1_RAS_frac | ID_AA64PFR1_EL1_MTE)), - ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR), + ID_WRITABLE(ID_AA64PFR2_EL1, + ID_AA64PFR2_EL1_FPMR | + ID_AA64PFR2_EL1_MTEFAR | + ID_AA64PFR2_EL1_MTESTOREONLY), ID_UNALLOCATED(4,3), ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0), ID_HIDDEN(ID_AA64SMFR0_EL1), @@ -2891,6 +3015,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_AA64MMFR2_EL1_NV | ID_AA64MMFR2_EL1_CCIDX)), ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX | + ID_AA64MMFR3_EL1_SCTLRX | ID_AA64MMFR3_EL1_S1PIE | ID_AA64MMFR3_EL1_S1POE)), ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac), @@ -2901,6 +3026,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 }, { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 }, + { SYS_DESC(SYS_SCTLR2_EL1), access_vm_reg, reset_val, SCTLR2_EL1, 0, + .visibility = sctlr2_visibility }, MTE_REG(RGSR_EL1), MTE_REG(GCR_EL1), @@ -2930,14 +3057,14 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 }, { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 }, - { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi }, - { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi }, - { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi }, - { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi }, - { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi }, - { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi }, - { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi }, - { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi }, + { SYS_DESC(SYS_ERRIDR_EL1), access_ras }, + { SYS_DESC(SYS_ERRSELR_EL1), access_ras }, + { SYS_DESC(SYS_ERXFR_EL1), access_ras }, + { SYS_DESC(SYS_ERXCTLR_EL1), access_ras }, + { SYS_DESC(SYS_ERXSTATUS_EL1), access_ras }, + { SYS_DESC(SYS_ERXADDR_EL1), access_ras }, + { SYS_DESC(SYS_ERXMISC0_EL1), access_ras }, + { SYS_DESC(SYS_ERXMISC1_EL1), access_ras }, MTE_REG(TFSR_EL1), MTE_REG(TFSRE0_EL1), @@ -3248,12 +3375,14 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0), EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1), EL2_REG(ACTLR_EL2, access_rw, reset_val, 0), + EL2_REG_FILTERED(SCTLR2_EL2, access_vm_reg, reset_val, 0, + sctlr2_el2_visibility), EL2_REG_VNCR(HCR_EL2, reset_hcr, 0), - EL2_REG(MDCR_EL2, access_mdcr, reset_val, 0), + EL2_REG(MDCR_EL2, access_mdcr, reset_mdcr, 0), EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1), EL2_REG_VNCR(HSTR_EL2, reset_val, 0), - EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0), - EL2_REG_VNCR(HFGWTR_EL2, reset_val, 0), + EL2_REG_VNCR_FILT(HFGRTR_EL2, fgt_visibility), + EL2_REG_VNCR_FILT(HFGWTR_EL2, fgt_visibility), EL2_REG_VNCR(HFGITR_EL2, reset_val, 0), 
EL2_REG_VNCR(HACR_EL2, reset_val, 0), @@ -3269,11 +3398,18 @@ static const struct sys_reg_desc sys_reg_descs[] = { tcr2_el2_visibility), EL2_REG_VNCR(VTTBR_EL2, reset_val, 0), EL2_REG_VNCR(VTCR_EL2, reset_val, 0), + EL2_REG_FILTERED(VNCR_EL2, bad_vncr_trap, reset_val, 0, + vncr_el2_visibility), { SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 }, - EL2_REG_VNCR(HDFGRTR_EL2, reset_val, 0), - EL2_REG_VNCR(HDFGWTR_EL2, reset_val, 0), - EL2_REG_VNCR(HAFGRTR_EL2, reset_val, 0), + EL2_REG_VNCR_FILT(HDFGRTR2_EL2, fgt2_visibility), + EL2_REG_VNCR_FILT(HDFGWTR2_EL2, fgt2_visibility), + EL2_REG_VNCR_FILT(HFGRTR2_EL2, fgt2_visibility), + EL2_REG_VNCR_FILT(HFGWTR2_EL2, fgt2_visibility), + EL2_REG_VNCR_FILT(HDFGRTR_EL2, fgt_visibility), + EL2_REG_VNCR_FILT(HDFGWTR_EL2, fgt_visibility), + EL2_REG_VNCR_FILT(HAFGRTR_EL2, fgt_visibility), + EL2_REG_VNCR_FILT(HFGITR2_EL2, fgt2_visibility), EL2_REG_REDIR(SPSR_EL2, reset_val, 0), EL2_REG_REDIR(ELR_EL2, reset_val, 0), { SYS_DESC(SYS_SP_EL1), access_sp_el1}, @@ -3288,6 +3424,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG(AFSR0_EL2, access_rw, reset_val, 0), EL2_REG(AFSR1_EL2, access_rw, reset_val, 0), EL2_REG_REDIR(ESR_EL2, reset_val, 0), + EL2_REG_VNCR(VSESR_EL2, reset_unknown, 0), { SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 }, EL2_REG_REDIR(FAR_EL2, reset_val, 0), @@ -3314,43 +3451,44 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_MPAMVPM7_EL2), undef_access }, EL2_REG(VBAR_EL2, access_rw, reset_val, 0), - EL2_REG(RVBAR_EL2, access_rw, reset_val, 0), + { SYS_DESC(SYS_RVBAR_EL2), undef_access }, { SYS_DESC(SYS_RMR_EL2), undef_access }, + EL2_REG_VNCR(VDISR_EL2, reset_unknown, 0), - EL2_REG_VNCR(ICH_AP0R0_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_AP0R1_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_AP0R2_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_AP0R3_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_AP1R0_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_AP1R1_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_AP1R2_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_AP1R3_EL2, reset_val, 0), + EL2_REG_VNCR_GICv3(ICH_AP0R0_EL2), + EL2_REG_VNCR_GICv3(ICH_AP0R1_EL2), + EL2_REG_VNCR_GICv3(ICH_AP0R2_EL2), + EL2_REG_VNCR_GICv3(ICH_AP0R3_EL2), + EL2_REG_VNCR_GICv3(ICH_AP1R0_EL2), + EL2_REG_VNCR_GICv3(ICH_AP1R1_EL2), + EL2_REG_VNCR_GICv3(ICH_AP1R2_EL2), + EL2_REG_VNCR_GICv3(ICH_AP1R3_EL2), { SYS_DESC(SYS_ICC_SRE_EL2), access_gic_sre }, - EL2_REG_VNCR(ICH_HCR_EL2, reset_val, 0), + EL2_REG_VNCR_GICv3(ICH_HCR_EL2), { SYS_DESC(SYS_ICH_VTR_EL2), access_gic_vtr }, { SYS_DESC(SYS_ICH_MISR_EL2), access_gic_misr }, { SYS_DESC(SYS_ICH_EISR_EL2), access_gic_eisr }, { SYS_DESC(SYS_ICH_ELRSR_EL2), access_gic_elrsr }, - EL2_REG_VNCR(ICH_VMCR_EL2, reset_val, 0), - - EL2_REG_VNCR(ICH_LR0_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR1_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR2_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR3_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR4_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR5_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR6_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR7_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR8_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR9_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR10_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR11_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR12_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR13_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR14_EL2, reset_val, 0), - EL2_REG_VNCR(ICH_LR15_EL2, reset_val, 0), + EL2_REG_VNCR_GICv3(ICH_VMCR_EL2), + + EL2_REG_VNCR_GICv3(ICH_LR0_EL2), + EL2_REG_VNCR_GICv3(ICH_LR1_EL2), + 
EL2_REG_VNCR_GICv3(ICH_LR2_EL2), + EL2_REG_VNCR_GICv3(ICH_LR3_EL2), + EL2_REG_VNCR_GICv3(ICH_LR4_EL2), + EL2_REG_VNCR_GICv3(ICH_LR5_EL2), + EL2_REG_VNCR_GICv3(ICH_LR6_EL2), + EL2_REG_VNCR_GICv3(ICH_LR7_EL2), + EL2_REG_VNCR_GICv3(ICH_LR8_EL2), + EL2_REG_VNCR_GICv3(ICH_LR9_EL2), + EL2_REG_VNCR_GICv3(ICH_LR10_EL2), + EL2_REG_VNCR_GICv3(ICH_LR11_EL2), + EL2_REG_VNCR_GICv3(ICH_LR12_EL2), + EL2_REG_VNCR_GICv3(ICH_LR13_EL2), + EL2_REG_VNCR_GICv3(ICH_LR14_EL2), + EL2_REG_VNCR_GICv3(ICH_LR15_EL2), EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0), EL2_REG(TPIDR_EL2, access_rw, reset_val, 0), @@ -3552,8 +3690,7 @@ static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, { u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2); - u64 base, range, tg, num, scale; - int shift; + u64 base, range; if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) return undef_access(vcpu, p, r); @@ -3563,26 +3700,7 @@ static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, * of the guest's S2 (different base granule size, for example), we * decide to ignore TTL and only use the described range. */ - tg = FIELD_GET(GENMASK(47, 46), p->regval); - scale = FIELD_GET(GENMASK(45, 44), p->regval); - num = FIELD_GET(GENMASK(43, 39), p->regval); - base = p->regval & GENMASK(36, 0); - - switch(tg) { - case 1: - shift = 12; - break; - case 2: - shift = 14; - break; - case 3: - default: /* IMPDEF: handle tg==0 as 64k */ - shift = 16; - break; - } - - base <<= shift; - range = __TLBI_RANGE_PAGES(num, scale) << shift; + base = decode_range_tlbi(p->regval, &range, NULL); kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), &(union tlbi_info) { @@ -3648,11 +3766,22 @@ static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu, WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding)); } +static bool handle_tlbi_el2(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); + + if (!kvm_supported_tlbi_s1e2_op(vcpu, sys_encoding)) + return undef_access(vcpu, p, r); + + kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval); + return true; +} + static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); - u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2); /* * If we're here, this is because we've trapped on a EL1 TLBI @@ -3663,6 +3792,13 @@ static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, * - HCR_EL2.E2H == 0 : a non-VHE guest * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode * + * Another possibility is that we are invalidating the EL2 context + * using EL1 instructions, but that we landed here because we need + * additional invalidation for structures that are not held in the + * CPU TLBs (such as the VNCR pseudo-TLB and its EL2 mapping). In + * that case, we are guaranteed that HCR_EL2.{E2H,TGE} == { 1, 1 } + * as we don't allow an NV-capable L1 in a nVHE configuration. + * * We don't expect these helpers to ever be called when running * in a vEL1 context. 
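A small model of the routing choice this comment describes: with HCR_EL2.{E2H,TGE} == {1,1} the trapped "EL1" TLBI actually targets the EL2 translation regime, while in the other supported cases it is replayed against the shadow stage-2 contexts keyed by the guest's VMID. Names below are placeholders, not kernel symbols:

#include <stdbool.h>
#include <stdio.h>

enum tlbi_target { TLBI_TARGET_EL2_REGIME, TLBI_TARGET_SHADOW_S2 };

/*
 * An EL1 TLBI trapped from a virtual EL2 context only touches the EL2
 * regime when both E2H and TGE are set; otherwise it has to be replayed
 * on the shadow stage-2 MMUs matching the guest's VMID.
 */
static enum tlbi_target classify_tlbi(bool e2h, bool tge)
{
	if (e2h && tge)
		return TLBI_TARGET_EL2_REGIME;
	return TLBI_TARGET_SHADOW_S2;
}

int main(void)
{
	printf("E2H=1 TGE=1 -> %d\n", classify_tlbi(true, true));	/* EL2 regime */
	printf("E2H=1 TGE=0 -> %d\n", classify_tlbi(true, false));	/* shadow S2 */
	return 0;
}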
*/ @@ -3672,7 +3808,13 @@ static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding)) return undef_access(vcpu, p, r); - kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), + if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) { + kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval); + return true; + } + + kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, + get_vmid(__vcpu_sys_reg(vcpu, VTTBR_EL2)), &(union tlbi_info) { .va = { .addr = p->regval, @@ -3794,16 +3936,21 @@ static struct sys_reg_desc sys_insn_descs[] = { SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is), SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is), - SYS_INSN(TLBI_ALLE2OS, undef_access), - SYS_INSN(TLBI_VAE2OS, undef_access), + SYS_INSN(TLBI_ALLE2OS, handle_tlbi_el2), + SYS_INSN(TLBI_VAE2OS, handle_tlbi_el2), SYS_INSN(TLBI_ALLE1OS, handle_alle1is), - SYS_INSN(TLBI_VALE2OS, undef_access), + SYS_INSN(TLBI_VALE2OS, handle_tlbi_el2), SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is), - SYS_INSN(TLBI_RVAE2IS, undef_access), - SYS_INSN(TLBI_RVALE2IS, undef_access), + SYS_INSN(TLBI_RVAE2IS, handle_tlbi_el2), + SYS_INSN(TLBI_RVALE2IS, handle_tlbi_el2), + SYS_INSN(TLBI_ALLE2IS, handle_tlbi_el2), + SYS_INSN(TLBI_VAE2IS, handle_tlbi_el2), SYS_INSN(TLBI_ALLE1IS, handle_alle1is), + + SYS_INSN(TLBI_VALE2IS, handle_tlbi_el2), + SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is), SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is), SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is), @@ -3813,11 +3960,17 @@ static struct sys_reg_desc sys_insn_descs[] = { SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is), SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is), SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is), - SYS_INSN(TLBI_RVAE2OS, undef_access), - SYS_INSN(TLBI_RVALE2OS, undef_access), - SYS_INSN(TLBI_RVAE2, undef_access), - SYS_INSN(TLBI_RVALE2, undef_access), + SYS_INSN(TLBI_RVAE2OS, handle_tlbi_el2), + SYS_INSN(TLBI_RVALE2OS, handle_tlbi_el2), + SYS_INSN(TLBI_RVAE2, handle_tlbi_el2), + SYS_INSN(TLBI_RVALE2, handle_tlbi_el2), + SYS_INSN(TLBI_ALLE2, handle_tlbi_el2), + SYS_INSN(TLBI_VAE2, handle_tlbi_el2), + SYS_INSN(TLBI_ALLE1, handle_alle1is), + + SYS_INSN(TLBI_VALE2, handle_tlbi_el2), + SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is), SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is), @@ -3825,19 +3978,19 @@ static struct sys_reg_desc sys_insn_descs[] = { SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is), SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is), - SYS_INSN(TLBI_ALLE2OSNXS, undef_access), - SYS_INSN(TLBI_VAE2OSNXS, undef_access), + SYS_INSN(TLBI_ALLE2OSNXS, handle_tlbi_el2), + SYS_INSN(TLBI_VAE2OSNXS, handle_tlbi_el2), SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is), - SYS_INSN(TLBI_VALE2OSNXS, undef_access), + SYS_INSN(TLBI_VALE2OSNXS, handle_tlbi_el2), SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is), - SYS_INSN(TLBI_RVAE2ISNXS, undef_access), - SYS_INSN(TLBI_RVALE2ISNXS, undef_access), - SYS_INSN(TLBI_ALLE2ISNXS, undef_access), - SYS_INSN(TLBI_VAE2ISNXS, undef_access), + SYS_INSN(TLBI_RVAE2ISNXS, handle_tlbi_el2), + SYS_INSN(TLBI_RVALE2ISNXS, handle_tlbi_el2), + SYS_INSN(TLBI_ALLE2ISNXS, handle_tlbi_el2), + SYS_INSN(TLBI_VAE2ISNXS, handle_tlbi_el2), SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is), - SYS_INSN(TLBI_VALE2ISNXS, undef_access), + SYS_INSN(TLBI_VALE2ISNXS, handle_tlbi_el2), SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is), SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is), SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is), @@ -3847,14 +4000,14 @@ static struct sys_reg_desc sys_insn_descs[] = { SYS_INSN(TLBI_IPAS2LE1NXS, 
handle_ipas2e1is), SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is), SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is), - SYS_INSN(TLBI_RVAE2OSNXS, undef_access), - SYS_INSN(TLBI_RVALE2OSNXS, undef_access), - SYS_INSN(TLBI_RVAE2NXS, undef_access), - SYS_INSN(TLBI_RVALE2NXS, undef_access), - SYS_INSN(TLBI_ALLE2NXS, undef_access), - SYS_INSN(TLBI_VAE2NXS, undef_access), + SYS_INSN(TLBI_RVAE2OSNXS, handle_tlbi_el2), + SYS_INSN(TLBI_RVALE2OSNXS, handle_tlbi_el2), + SYS_INSN(TLBI_RVAE2NXS, handle_tlbi_el2), + SYS_INSN(TLBI_RVALE2NXS, handle_tlbi_el2), + SYS_INSN(TLBI_ALLE2NXS, handle_tlbi_el2), + SYS_INSN(TLBI_VAE2NXS, handle_tlbi_el2), SYS_INSN(TLBI_ALLE1NXS, handle_alle1is), - SYS_INSN(TLBI_VALE2NXS, undef_access), + SYS_INSN(TLBI_VALE2NXS, handle_tlbi_el2), SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is), }; @@ -4204,12 +4357,12 @@ static const struct sys_reg_desc cp15_64_regs[] = { }; static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n, - bool is_32) + bool reset_check) { unsigned int i; for (i = 0; i < n; i++) { - if (!is_32 && table[i].reg && !table[i].reset) { + if (reset_check && table[i].reg && !table[i].reset) { kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n", &table[i], i, table[i].name); return false; @@ -4404,7 +4557,7 @@ static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params) return true; kvm_pr_unimpl("Unhandled cp10 register %s: %u\n", - params->is_write ? "write" : "read", reg_id); + str_write_read(params->is_write), reg_id); return false; } @@ -4721,7 +4874,7 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) r->reset(vcpu, r); if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS) - (void)__vcpu_sys_reg(vcpu, r->reg); + __vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0); } set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags); @@ -4943,7 +5096,7 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, if (r->set_user) { ret = (r->set_user)(vcpu, r, val); } else { - __vcpu_sys_reg(vcpu, r->reg) = val; + __vcpu_assign_sys_reg(vcpu, r->reg, val); ret = 0; } @@ -5153,65 +5306,13 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu) if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags)) goto out; - kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 | - HFGxTR_EL2_nMAIR2_EL1 | - HFGxTR_EL2_nS2POR_EL1 | - HFGxTR_EL2_nACCDATA_EL1 | - HFGxTR_EL2_nSMPRI_EL1_MASK | - HFGxTR_EL2_nTPIDR2_EL0_MASK); - - if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) - kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1OS| - HFGITR_EL2_TLBIRVALE1OS | - HFGITR_EL2_TLBIRVAAE1OS | - HFGITR_EL2_TLBIRVAE1OS | - HFGITR_EL2_TLBIVAALE1OS | - HFGITR_EL2_TLBIVALE1OS | - HFGITR_EL2_TLBIVAAE1OS | - HFGITR_EL2_TLBIASIDE1OS | - HFGITR_EL2_TLBIVAE1OS | - HFGITR_EL2_TLBIVMALLE1OS); - - if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE)) - kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1 | - HFGITR_EL2_TLBIRVALE1 | - HFGITR_EL2_TLBIRVAAE1 | - HFGITR_EL2_TLBIRVAE1 | - HFGITR_EL2_TLBIRVAALE1IS| - HFGITR_EL2_TLBIRVALE1IS | - HFGITR_EL2_TLBIRVAAE1IS | - HFGITR_EL2_TLBIRVAE1IS | - HFGITR_EL2_TLBIRVAALE1OS| - HFGITR_EL2_TLBIRVALE1OS | - HFGITR_EL2_TLBIRVAAE1OS | - HFGITR_EL2_TLBIRVAE1OS); - - if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) - kvm->arch.fgu[HFGITR_GROUP] |= HFGITR_EL2_ATS1E1A; - - if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN2)) - kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_ATS1E1RP | - HFGITR_EL2_ATS1E1WP); - - if (!kvm_has_s1pie(kvm)) - kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 | - 
HFGxTR_EL2_nPIR_EL1); - - if (!kvm_has_s1poe(kvm)) - kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPOR_EL1 | - HFGxTR_EL2_nPOR_EL0); - - if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP)) - kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 | - HAFGRTR_EL2_RES1); - - if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP)) { - kvm->arch.fgu[HDFGRTR_GROUP] |= (HDFGRTR_EL2_nBRBDATA | - HDFGRTR_EL2_nBRBCTL | - HDFGRTR_EL2_nBRBIDR); - kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_nBRBINJ | - HFGITR_EL2_nBRBIALL); - } + compute_fgu(kvm, HFGRTR_GROUP); + compute_fgu(kvm, HFGITR_GROUP); + compute_fgu(kvm, HDFGRTR_GROUP); + compute_fgu(kvm, HAFGRTR_GROUP); + compute_fgu(kvm, HFGRTR2_GROUP); + compute_fgu(kvm, HFGITR2_GROUP); + compute_fgu(kvm, HDFGRTR2_GROUP); set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags); out: @@ -5250,18 +5351,22 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu) int __init kvm_sys_reg_table_init(void) { + const struct sys_reg_desc *gicv3_regs; bool valid = true; - unsigned int i; + unsigned int i, sz; int ret = 0; /* Make sure tables are unique and in order. */ - valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false); - valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true); - valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true); - valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true); - valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true); + valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), true); + valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), false); + valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), false); + valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), false); + valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), false); valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false); + gicv3_regs = vgic_v3_get_sysreg_table(&sz); + valid &= check_sysreg_table(gicv3_regs, sz, false); + if (!valid) return -EINVAL; @@ -5269,6 +5374,8 @@ int __init kvm_sys_reg_table_init(void) ret = populate_nv_trap_config(); + check_feature_map(); + for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++) ret = populate_sysreg_config(sys_reg_descs + i, i); diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index cc6338d387663..317abc490368d 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -108,7 +108,7 @@ inline void print_sys_reg_msg(const struct sys_reg_params *p, /* Look, we even formatted it for you to paste into the table! */ kvm_pr_unimpl("%pV { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n", &(struct va_format){ fmt, &va }, - p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? 
"write" : "read"); + p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, str_write_read(p->is_write)); va_end(va); } @@ -137,7 +137,7 @@ static inline u64 reset_unknown(struct kvm_vcpu *vcpu, { BUG_ON(!r->reg); BUG_ON(r->reg >= NR_SYS_REGS); - __vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL; + __vcpu_assign_sys_reg(vcpu, r->reg, 0x1de7ec7edbadc0deULL); return __vcpu_sys_reg(vcpu, r->reg); } @@ -145,7 +145,7 @@ static inline u64 reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { BUG_ON(!r->reg); BUG_ON(r->reg >= NR_SYS_REGS); - __vcpu_sys_reg(vcpu, r->reg) = r->val; + __vcpu_assign_sys_reg(vcpu, r->reg, r->val); return __vcpu_sys_reg(vcpu, r->reg); } diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h index c18c1a95831e1..9c60f6465c787 100644 --- a/arch/arm64/kvm/trace_arm.h +++ b/arch/arm64/kvm/trace_arm.h @@ -176,7 +176,7 @@ TRACE_EVENT(kvm_set_way_flush, ), TP_printk("S/W flush at 0x%016lx (cache %s)", - __entry->vcpu_pc, __entry->cache ? "on" : "off") + __entry->vcpu_pc, str_on_off(__entry->cache)) ); TRACE_EVENT(kvm_toggle_cache, @@ -196,8 +196,8 @@ TRACE_EVENT(kvm_toggle_cache, ), TP_printk("VM op at 0x%016lx (cache was %s, now %s)", - __entry->vcpu_pc, __entry->was ? "on" : "off", - __entry->now ? "on" : "off") + __entry->vcpu_pc, str_on_off(__entry->was), + str_on_off(__entry->now)) ); /* diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h index f85415db7713b..a7ab9a3bbed0b 100644 --- a/arch/arm64/kvm/trace_handle_exit.h +++ b/arch/arm64/kvm/trace_handle_exit.h @@ -113,7 +113,7 @@ TRACE_EVENT(kvm_sys_access, __entry->vcpu_pc, __entry->name ?: "UNKN", __entry->Op0, __entry->Op1, __entry->CRn, __entry->CRm, __entry->Op2, - __entry->is_write ? "write" : "read") + str_write_read(__entry->is_write)) ); TRACE_EVENT(kvm_set_guest_debug, diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c index 5eacb4b3250a1..bdc2d57370b27 100644 --- a/arch/arm64/kvm/vgic-sys-reg-v3.c +++ b/arch/arm64/kvm/vgic-sys-reg-v3.c @@ -297,6 +297,91 @@ static int get_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, return 0; } +static int set_gic_ich_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, + u64 val) +{ + __vcpu_assign_sys_reg(vcpu, r->reg, val); + return 0; +} + +static int get_gic_ich_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, + u64 *val) +{ + *val = __vcpu_sys_reg(vcpu, r->reg); + return 0; +} + +static int set_gic_ich_apr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, + u64 val) +{ + u8 idx = r->Op2 & 3; + + if (idx > vgic_v3_max_apr_idx(vcpu)) + return -EINVAL; + + return set_gic_ich_reg(vcpu, r, val); +} + +static int get_gic_ich_apr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, + u64 *val) +{ + u8 idx = r->Op2 & 3; + + if (idx > vgic_v3_max_apr_idx(vcpu)) + return -EINVAL; + + return get_gic_ich_reg(vcpu, r, val); +} + +static int set_gic_icc_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, + u64 val) +{ + if (val != KVM_ICC_SRE_EL2) + return -EINVAL; + return 0; +} + +static int get_gic_icc_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, + u64 *val) +{ + *val = KVM_ICC_SRE_EL2; + return 0; +} + +static int set_gic_ich_vtr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, + u64 val) +{ + if (val != kvm_get_guest_vtr_el2()) + return -EINVAL; + return 0; +} + +static int get_gic_ich_vtr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, + u64 *val) +{ + *val = kvm_get_guest_vtr_el2(); + return 0; +} + +static unsigned int el2_visibility(const struct 
kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + return vcpu_has_nv(vcpu) ? 0 : REG_HIDDEN; +} + +#define __EL2_REG(r, acc, i) \ + { \ + SYS_DESC(SYS_ ## r), \ + .get_user = get_gic_ ## acc, \ + .set_user = set_gic_ ## acc, \ + .reg = i, \ + .visibility = el2_visibility, \ + } + +#define EL2_REG(r, acc) __EL2_REG(r, acc, r) + +#define EL2_REG_RO(r, acc) __EL2_REG(r, acc, 0) + static const struct sys_reg_desc gic_v3_icc_reg_descs[] = { { SYS_DESC(SYS_ICC_PMR_EL1), .set_user = set_gic_pmr, .get_user = get_gic_pmr, }, @@ -328,8 +413,42 @@ static const struct sys_reg_desc gic_v3_icc_reg_descs[] = { .set_user = set_gic_grpen0, .get_user = get_gic_grpen0, }, { SYS_DESC(SYS_ICC_IGRPEN1_EL1), .set_user = set_gic_grpen1, .get_user = get_gic_grpen1, }, + EL2_REG(ICH_AP0R0_EL2, ich_apr), + EL2_REG(ICH_AP0R1_EL2, ich_apr), + EL2_REG(ICH_AP0R2_EL2, ich_apr), + EL2_REG(ICH_AP0R3_EL2, ich_apr), + EL2_REG(ICH_AP1R0_EL2, ich_apr), + EL2_REG(ICH_AP1R1_EL2, ich_apr), + EL2_REG(ICH_AP1R2_EL2, ich_apr), + EL2_REG(ICH_AP1R3_EL2, ich_apr), + EL2_REG_RO(ICC_SRE_EL2, icc_sre), + EL2_REG(ICH_HCR_EL2, ich_reg), + EL2_REG_RO(ICH_VTR_EL2, ich_vtr), + EL2_REG(ICH_VMCR_EL2, ich_reg), + EL2_REG(ICH_LR0_EL2, ich_reg), + EL2_REG(ICH_LR1_EL2, ich_reg), + EL2_REG(ICH_LR2_EL2, ich_reg), + EL2_REG(ICH_LR3_EL2, ich_reg), + EL2_REG(ICH_LR4_EL2, ich_reg), + EL2_REG(ICH_LR5_EL2, ich_reg), + EL2_REG(ICH_LR6_EL2, ich_reg), + EL2_REG(ICH_LR7_EL2, ich_reg), + EL2_REG(ICH_LR8_EL2, ich_reg), + EL2_REG(ICH_LR9_EL2, ich_reg), + EL2_REG(ICH_LR10_EL2, ich_reg), + EL2_REG(ICH_LR11_EL2, ich_reg), + EL2_REG(ICH_LR12_EL2, ich_reg), + EL2_REG(ICH_LR13_EL2, ich_reg), + EL2_REG(ICH_LR14_EL2, ich_reg), + EL2_REG(ICH_LR15_EL2, ich_reg), }; +const struct sys_reg_desc *vgic_v3_get_sysreg_table(unsigned int *sz) +{ + *sz = ARRAY_SIZE(gic_v3_icc_reg_descs); + return gic_v3_icc_reg_descs; +} + static u64 attr_to_id(u64 attr) { return ARM64_SYS_REG(FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP0_MASK, attr), @@ -341,8 +460,12 @@ static u64 attr_to_id(u64 attr) int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { - if (get_reg_by_id(attr_to_id(attr->attr), gic_v3_icc_reg_descs, - ARRAY_SIZE(gic_v3_icc_reg_descs))) + const struct sys_reg_desc *r; + + r = get_reg_by_id(attr_to_id(attr->attr), gic_v3_icc_reg_descs, + ARRAY_SIZE(gic_v3_icc_reg_descs)); + + if (r && !sysreg_hidden(vcpu, r)) return 0; return -ENXIO; diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c index afb018528bc3b..2684f273d9e17 100644 --- a/arch/arm64/kvm/vgic/vgic-debug.c +++ b/arch/arm64/kvm/vgic/vgic-debug.c @@ -320,3 +320,230 @@ void vgic_debug_init(struct kvm *kvm) void vgic_debug_destroy(struct kvm *kvm) { } + +/** + * struct vgic_its_iter - Iterator for traversing VGIC ITS device tables. + * @dev: Pointer to the current its_device being processed. + * @ite: Pointer to the current its_ite within the device being processed. + * + * This structure is used to maintain the current position during iteration + * over the ITS device tables. It holds pointers to both the current device + * and the current ITE within that device. + */ +struct vgic_its_iter { + struct its_device *dev; + struct its_ite *ite; +}; + +/** + * end_of_iter - Checks if the iterator has reached the end. + * @iter: The iterator to check. + * + * When the iterator completed processing the final ITE in the last device + * table, it was marked to indicate the end of iteration by setting its + * device and ITE pointers to NULL. 
+ * This function checks whether the iterator was marked as end. + * + * Return: True if the iterator is marked as end, false otherwise. + */ +static inline bool end_of_iter(struct vgic_its_iter *iter) +{ + return !iter->dev && !iter->ite; +} + +/** + * vgic_its_iter_next - Advances the iterator to the next entry in the ITS tables. + * @its: The VGIC ITS structure. + * @iter: The iterator to advance. + * + * This function moves the iterator to the next ITE within the current device, + * or to the first ITE of the next device if the current ITE is the last in + * the device. If the current device is the last device, the iterator is set + * to indicate the end of iteration. + */ +static void vgic_its_iter_next(struct vgic_its *its, struct vgic_its_iter *iter) +{ + struct its_device *dev = iter->dev; + struct its_ite *ite = iter->ite; + + if (!ite || list_is_last(&ite->ite_list, &dev->itt_head)) { + if (list_is_last(&dev->dev_list, &its->device_list)) { + dev = NULL; + ite = NULL; + } else { + dev = list_next_entry(dev, dev_list); + ite = list_first_entry_or_null(&dev->itt_head, + struct its_ite, + ite_list); + } + } else { + ite = list_next_entry(ite, ite_list); + } + + iter->dev = dev; + iter->ite = ite; +} + +/** + * vgic_its_debug_start - Start function for the seq_file interface. + * @s: The seq_file structure. + * @pos: The starting position (offset). + * + * This function initializes the iterator to the beginning of the ITS tables + * and advances it to the specified position. It acquires the its_lock mutex + * to protect shared data. + * + * Return: An iterator pointer on success, NULL if no devices are found or + * the end of the list is reached, or ERR_PTR(-ENOMEM) on memory + * allocation failure. + */ +static void *vgic_its_debug_start(struct seq_file *s, loff_t *pos) +{ + struct vgic_its *its = s->private; + struct vgic_its_iter *iter; + struct its_device *dev; + loff_t offset = *pos; + + mutex_lock(&its->its_lock); + + dev = list_first_entry_or_null(&its->device_list, + struct its_device, dev_list); + if (!dev) + return NULL; + + iter = kmalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return ERR_PTR(-ENOMEM); + + iter->dev = dev; + iter->ite = list_first_entry_or_null(&dev->itt_head, + struct its_ite, ite_list); + + while (!end_of_iter(iter) && offset--) + vgic_its_iter_next(its, iter); + + if (end_of_iter(iter)) { + kfree(iter); + return NULL; + } + + return iter; +} + +/** + * vgic_its_debug_next - Next function for the seq_file interface. + * @s: The seq_file structure. + * @v: The current iterator. + * @pos: The current position (offset). + * + * This function advances the iterator to the next entry and increments the + * position. + * + * Return: An iterator pointer on success, or NULL if the end of the list is + * reached. + */ +static void *vgic_its_debug_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct vgic_its *its = s->private; + struct vgic_its_iter *iter = v; + + ++*pos; + vgic_its_iter_next(its, iter); + + if (end_of_iter(iter)) { + kfree(iter); + return NULL; + } + return iter; +} + +/** + * vgic_its_debug_stop - Stop function for the seq_file interface. + * @s: The seq_file structure. + * @v: The current iterator. + * + * This function frees the iterator and releases the its_lock mutex. 
+ */ +static void vgic_its_debug_stop(struct seq_file *s, void *v) +{ + struct vgic_its *its = s->private; + struct vgic_its_iter *iter = v; + + if (!IS_ERR_OR_NULL(iter)) + kfree(iter); + mutex_unlock(&its->its_lock); +} + +/** + * vgic_its_debug_show - Show function for the seq_file interface. + * @s: The seq_file structure. + * @v: The current iterator. + * + * This function formats and prints the ITS table entry information to the + * seq_file output. + * + * Return: 0 on success. + */ +static int vgic_its_debug_show(struct seq_file *s, void *v) +{ + struct vgic_its_iter *iter = v; + struct its_device *dev = iter->dev; + struct its_ite *ite = iter->ite; + + if (!ite) + return 0; + + if (list_is_first(&ite->ite_list, &dev->itt_head)) { + seq_printf(s, "\n"); + seq_printf(s, "Device ID: 0x%x, Event ID Range: [0 - %llu]\n", + dev->device_id, BIT_ULL(dev->num_eventid_bits) - 1); + seq_printf(s, "EVENT_ID INTID HWINTID TARGET COL_ID HW\n"); + seq_printf(s, "-----------------------------------------------\n"); + } + + if (ite->irq && ite->collection) { + seq_printf(s, "%8u %8u %8u %8u %8u %2d\n", + ite->event_id, ite->irq->intid, ite->irq->hwintid, + ite->collection->target_addr, + ite->collection->collection_id, ite->irq->hw); + } + + return 0; +} + +static const struct seq_operations vgic_its_debug_sops = { + .start = vgic_its_debug_start, + .next = vgic_its_debug_next, + .stop = vgic_its_debug_stop, + .show = vgic_its_debug_show +}; + +DEFINE_SEQ_ATTRIBUTE(vgic_its_debug); + +/** + * vgic_its_debug_init - Initializes the debugfs interface for VGIC ITS. + * @dev: The KVM device structure. + * + * This function creates a debugfs file named "vgic-its-state@%its_base" + * to expose the ITS table information. + * + * Return: 0 on success. + */ +int vgic_its_debug_init(struct kvm_device *dev) +{ + struct vgic_its *its = dev->private; + char *name; + + name = kasprintf(GFP_KERNEL, "vgic-its-state@%llx", (u64)its->vgic_its_base); + if (!name) + return -ENOMEM; + + debugfs_create_file(name, 0444, dev->kvm->debugfs_dentry, its, &vgic_its_debug_fops); + + kfree(name); + return 0; +} + +void vgic_its_debug_destroy(struct kvm_device *dev) +{ +} diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 1f33e71c2a731..1e680ad6e8635 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -84,15 +84,40 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) !kvm_vgic_global_state.can_emulate_gicv2) return -ENODEV; - /* Must be held to avoid race with vCPU creation */ + /* + * Ensure mutual exclusion with vCPU creation and any vCPU ioctls by: + * + * - Holding kvm->lock to prevent KVM_CREATE_VCPU from reaching + * kvm_arch_vcpu_precreate() and ensuring created_vcpus is stable. + * This alone is insufficient, as kvm_vm_ioctl_create_vcpu() drops + * the kvm->lock before completing the vCPU creation. + */ lockdep_assert_held(&kvm->lock); + /* + * - Acquiring the vCPU mutex for every *online* vCPU to prevent + * concurrent vCPU ioctls for vCPUs already visible to userspace. + */ ret = -EBUSY; - if (!lock_all_vcpus(kvm)) + if (kvm_trylock_all_vcpus(kvm)) return ret; + /* + * - Taking the config_lock which protects VGIC data structures such + * as the per-vCPU arrays of private IRQs (SGIs, PPIs). + */ mutex_lock(&kvm->arch.config_lock); + /* + * - Bailing on the entire thing if a vCPU is in the middle of creation, + * dropped the kvm->lock, but hasn't reached kvm_arch_vcpu_create(). 
+ * + * The whole combination of this guarantees that no vCPU can get into + * KVM with a VGIC configuration inconsistent with the VM's VGIC. + */ + if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus)) + goto out_unlock; + if (irqchip_in_kernel(kvm)) { ret = -EEXIST; goto out_unlock; @@ -132,6 +157,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) kvm->arch.vgic.in_kernel = true; kvm->arch.vgic.vgic_model = type; + kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST; kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; @@ -140,9 +166,12 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) else INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions); + if (type == KVM_DEV_TYPE_ARM_VGIC_V3) + kvm->arch.vgic.nassgicap = system_supports_direct_sgis(); + out_unlock: mutex_unlock(&kvm->arch.config_lock); - unlock_all_vcpus(kvm); + kvm_unlock_all_vcpus(kvm); return ret; } @@ -366,11 +395,10 @@ int vgic_init(struct kvm *kvm) goto out; /* - * If we have GICv4.1 enabled, unconditionally request enable the - * v4 support so that we get HW-accelerated vSGIs. Otherwise, only - * enable it if we present a virtual ITS to the guest. + * Ensure vPEs are allocated if direct IRQ injection (e.g. vSGIs, + * vLPIs) is supported. */ - if (vgic_supports_direct_msis(kvm)) { + if (vgic_supports_direct_irqs(kvm)) { ret = vgic_v4_init(kvm); if (ret) goto out; @@ -384,15 +412,7 @@ int vgic_init(struct kvm *kvm) goto out; vgic_debug_init(kvm); - - /* - * If userspace didn't set the GIC implementation revision, - * default to the latest and greatest. You know want it. - */ - if (!dist->implementation_rev) - dist->implementation_rev = KVM_VGIC_IMP_REV_LATEST; dist->initialized = true; - out: return ret; } @@ -418,7 +438,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm) dist->vgic_cpu_base = VGIC_ADDR_UNDEF; } - if (vgic_supports_direct_msis(kvm)) + if (vgic_supports_direct_irqs(kvm)) vgic_v4_teardown(kvm); xa_destroy(&dist->lpi_xa); @@ -649,10 +669,12 @@ void kvm_vgic_init_cpu_hardware(void) * We want to make sure the list registers start out clear so that we * only have the program the used registers. 
*/ - if (kvm_vgic_global_state.type == VGIC_V2) + if (kvm_vgic_global_state.type == VGIC_V2) { vgic_v2_init_lrs(); - else + } else if (kvm_vgic_global_state.type == VGIC_V3 || + kvm_vgic_global_state.has_gcie_v3_compat) { kvm_call_hyp(__vgic_v3_init_lrs); + } } /** @@ -697,6 +719,9 @@ int kvm_vgic_hyp_init(void) kvm_info("GIC system register CPU interface enabled\n"); } break; + case GIC_V5: + ret = vgic_v5_probe(gic_kvm_info); + break; default: ret = -ENODEV; } diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index fb96802799c6f..7368c13f16b72 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -154,36 +154,6 @@ out_unlock: return irq; } -struct its_device { - struct list_head dev_list; - - /* the head for the list of ITTEs */ - struct list_head itt_head; - u32 num_eventid_bits; - gpa_t itt_addr; - u32 device_id; -}; - -#define COLLECTION_NOT_MAPPED ((u32)~0) - -struct its_collection { - struct list_head coll_list; - - u32 collection_id; - u32 target_addr; -}; - -#define its_is_collection_mapped(coll) ((coll) && \ - ((coll)->target_addr != COLLECTION_NOT_MAPPED)) - -struct its_ite { - struct list_head ite_list; - - struct vgic_irq *irq; - struct its_collection *collection; - u32 event_id; -}; - /** * struct vgic_its_abi - ITS abi ops and settings * @cte_esz: collection table entry size @@ -336,39 +306,34 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, } } - raw_spin_unlock_irqrestore(&irq->irq_lock, flags); - if (irq->hw) - return its_prop_update_vlpi(irq->host_irq, prop, needs_inv); + ret = its_prop_update_vlpi(irq->host_irq, prop, needs_inv); - return 0; + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); + return ret; } static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu) { - int ret = 0; - unsigned long flags; + struct its_vlpi_map map; + int ret; - raw_spin_lock_irqsave(&irq->irq_lock, flags); + guard(raw_spinlock_irqsave)(&irq->irq_lock); irq->target_vcpu = vcpu; - raw_spin_unlock_irqrestore(&irq->irq_lock, flags); - if (irq->hw) { - struct its_vlpi_map map; - - ret = its_get_vlpi(irq->host_irq, &map); - if (ret) - return ret; + if (!irq->hw) + return 0; - if (map.vpe) - atomic_dec(&map.vpe->vlpi_count); - map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; - atomic_inc(&map.vpe->vlpi_count); + ret = its_get_vlpi(irq->host_irq, &map); + if (ret) + return ret; - ret = its_map_vlpi(irq->host_irq, &map); - } + if (map.vpe) + atomic_dec(&map.vpe->vlpi_count); - return ret; + map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; + atomic_inc(&map.vpe->vlpi_count); + return its_map_vlpi(irq->host_irq, &map); } static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm, @@ -786,12 +751,17 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi) /* Requires the its_lock to be held. */ static void its_free_ite(struct kvm *kvm, struct its_ite *ite) { + struct vgic_irq *irq = ite->irq; list_del(&ite->ite_list); /* This put matches the get in vgic_add_lpi. 
*/ - if (ite->irq) { - if (ite->irq->hw) - WARN_ON(its_unmap_vlpi(ite->irq->host_irq)); + if (irq) { + scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) { + if (irq->hw) + its_unmap_vlpi(ite->irq->host_irq); + + irq->hw = false; + } vgic_put_irq(kvm, ite->irq); } @@ -1938,6 +1908,8 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev) mutex_lock(&its->its_lock); + vgic_its_debug_destroy(kvm_dev); + vgic_its_free_device_list(kvm, its); vgic_its_free_collection_list(kvm, its); vgic_its_invalidate_cache(its); @@ -1999,7 +1971,7 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev, mutex_lock(&dev->kvm->lock); - if (!lock_all_vcpus(dev->kvm)) { + if (kvm_trylock_all_vcpus(dev->kvm)) { mutex_unlock(&dev->kvm->lock); return -EBUSY; } @@ -2034,7 +2006,7 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev, } out: mutex_unlock(&dev->kvm->arch.config_lock); - unlock_all_vcpus(dev->kvm); + kvm_unlock_all_vcpus(dev->kvm); mutex_unlock(&dev->kvm->lock); return ret; } @@ -2704,7 +2676,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr) mutex_lock(&kvm->lock); - if (!lock_all_vcpus(kvm)) { + if (kvm_trylock_all_vcpus(kvm)) { mutex_unlock(&kvm->lock); return -EBUSY; } @@ -2722,11 +2694,14 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr) case KVM_DEV_ARM_ITS_RESTORE_TABLES: ret = abi->restore_tables(its); break; + default: + ret = -ENXIO; + break; } mutex_unlock(&its->its_lock); mutex_unlock(&kvm->arch.config_lock); - unlock_all_vcpus(kvm); + kvm_unlock_all_vcpus(kvm); mutex_unlock(&kvm->lock); return ret; } @@ -2771,7 +2746,12 @@ static int vgic_its_set_attr(struct kvm_device *dev, if (ret) return ret; - return vgic_register_its_iodev(dev->kvm, its, addr); + ret = vgic_register_its_iodev(dev->kvm, its, addr); + if (ret) + return ret; + + return vgic_its_debug_init(dev); + } case KVM_DEV_ARM_VGIC_GRP_CTRL: return vgic_its_ctrl(dev->kvm, its, attr->attr); diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index 359094f68c23e..3d1a776b716d7 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -5,6 +5,7 @@ * Copyright (C) 2015 ARM Ltd. 
* Author: Marc Zyngier <marc.zyngier@arm.com> */ +#include <linux/irqchip/arm-gic-v3.h> #include <linux/kvm_host.h> #include <kvm/arm_vgic.h> #include <linux/uaccess.h> @@ -268,7 +269,7 @@ static int vgic_set_common_attr(struct kvm_device *dev, return -ENXIO; mutex_lock(&dev->kvm->lock); - if (!lock_all_vcpus(dev->kvm)) { + if (kvm_trylock_all_vcpus(dev->kvm)) { mutex_unlock(&dev->kvm->lock); return -EBUSY; } @@ -276,7 +277,7 @@ static int vgic_set_common_attr(struct kvm_device *dev, mutex_lock(&dev->kvm->arch.config_lock); r = vgic_v3_save_pending_tables(dev->kvm); mutex_unlock(&dev->kvm->arch.config_lock); - unlock_all_vcpus(dev->kvm); + kvm_unlock_all_vcpus(dev->kvm); mutex_unlock(&dev->kvm->lock); return r; } @@ -303,12 +304,6 @@ static int vgic_get_common_attr(struct kvm_device *dev, VGIC_NR_PRIVATE_IRQS, uaddr); break; } - case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: { - u32 __user *uaddr = (u32 __user *)(long)attr->addr; - - r = put_user(dev->kvm->arch.vgic.mi_intid, uaddr); - break; - } } return r; @@ -390,7 +385,7 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev, mutex_lock(&dev->kvm->lock); - if (!lock_all_vcpus(dev->kvm)) { + if (kvm_trylock_all_vcpus(dev->kvm)) { mutex_unlock(&dev->kvm->lock); return -EBUSY; } @@ -415,7 +410,7 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev, out: mutex_unlock(&dev->kvm->arch.config_lock); - unlock_all_vcpus(dev->kvm); + kvm_unlock_all_vcpus(dev->kvm); mutex_unlock(&dev->kvm->lock); if (!ret && !is_write) @@ -510,6 +505,24 @@ int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr, } /* + * Allow access to certain ID-like registers prior to VGIC initialization, + * thereby allowing the VMM to provision the features / sizing of the VGIC. + */ +static bool reg_allowed_pre_init(struct kvm_device_attr *attr) +{ + if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) + return false; + + switch (attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK) { + case GICD_IIDR: + case GICD_TYPER2: + return true; + default: + return false; + } +} + +/* * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state * * @dev: kvm device handle @@ -523,7 +536,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev, struct vgic_reg_attr reg_attr; gpa_t addr; struct kvm_vcpu *vcpu; - bool uaccess, post_init = true; + bool uaccess; u32 val; int ret; @@ -539,9 +552,6 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev, /* Sysregs uaccess is performed by the sysreg handling code */ uaccess = false; break; - case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: - post_init = false; - fallthrough; default: uaccess = true; } @@ -554,14 +564,14 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev, mutex_lock(&dev->kvm->lock); - if (!lock_all_vcpus(dev->kvm)) { + if (kvm_trylock_all_vcpus(dev->kvm)) { mutex_unlock(&dev->kvm->lock); return -EBUSY; } mutex_lock(&dev->kvm->arch.config_lock); - if (post_init != vgic_initialized(dev->kvm)) { + if (!(vgic_initialized(dev->kvm) || reg_allowed_pre_init(attr))) { ret = -EBUSY; goto out; } @@ -591,19 +601,6 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev, } break; } - case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: - if (!is_write) { - val = dev->kvm->arch.vgic.mi_intid; - ret = 0; - break; - } - - ret = -EINVAL; - if ((val < VGIC_NR_PRIVATE_IRQS) && (val >= VGIC_NR_SGIS)) { - dev->kvm->arch.vgic.mi_intid = val; - ret = 0; - } - break; default: ret = -EINVAL; break; @@ -611,7 +608,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev, out: 
mutex_unlock(&dev->kvm->arch.config_lock); - unlock_all_vcpus(dev->kvm); + kvm_unlock_all_vcpus(dev->kvm); mutex_unlock(&dev->kvm->lock); if (!ret && uaccess && !is_write) { @@ -630,8 +627,24 @@ static int vgic_v3_set_attr(struct kvm_device *dev, case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: - case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: return vgic_v3_attr_regs_access(dev, attr, true); + case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: { + u32 __user *uaddr = (u32 __user *)attr->addr; + u32 val; + + if (get_user(val, uaddr)) + return -EFAULT; + + guard(mutex)(&dev->kvm->arch.config_lock); + if (vgic_initialized(dev->kvm)) + return -EBUSY; + + if (!irq_is_ppi(val)) + return -EINVAL; + + dev->kvm->arch.vgic.mi_intid = val; + return 0; + } default: return vgic_set_common_attr(dev, attr); } @@ -645,8 +658,13 @@ static int vgic_v3_get_attr(struct kvm_device *dev, case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: - case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: return vgic_v3_attr_regs_access(dev, attr, false); + case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: { + u32 __user *uaddr = (u32 __user *)(long)attr->addr; + + guard(mutex)(&dev->kvm->arch.config_lock); + return put_user(dev->kvm->arch.vgic.mi_intid, uaddr); + } default: return vgic_get_common_attr(dev, attr); } diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index ae4c0593d1145..a3ef185209e99 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -50,8 +50,17 @@ bool vgic_has_its(struct kvm *kvm) bool vgic_supports_direct_msis(struct kvm *kvm) { - return (kvm_vgic_global_state.has_gicv4_1 || - (kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm))); + return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm); +} + +bool system_supports_direct_sgis(void) +{ + return kvm_vgic_global_state.has_gicv4_1 && gic_cpuif_has_vsgi(); +} + +bool vgic_supports_direct_sgis(struct kvm *kvm) +{ + return kvm->arch.vgic.nassgicap; } /* @@ -86,7 +95,7 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu, } break; case GICD_TYPER2: - if (kvm_vgic_global_state.has_gicv4_1 && gic_cpuif_has_vsgi()) + if (vgic_supports_direct_sgis(vcpu->kvm)) value = GICD_TYPER2_nASSGIcap; break; case GICD_IIDR: @@ -119,7 +128,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, dist->enabled = val & GICD_CTLR_ENABLE_SS_G1; /* Not a GICv4.1? No HW SGIs */ - if (!kvm_vgic_global_state.has_gicv4_1 || !gic_cpuif_has_vsgi()) + if (!vgic_supports_direct_sgis(vcpu->kvm)) val &= ~GICD_CTLR_nASSGIreq; /* Dist stays enabled? 
nASSGIreq is RO */ @@ -133,7 +142,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, if (is_hwsgi != dist->nassgireq) vgic_v4_configure_vsgis(vcpu->kvm); - if (kvm_vgic_global_state.has_gicv4_1 && + if (vgic_supports_direct_sgis(vcpu->kvm) && was_enabled != dist->enabled) kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4); else if (!was_enabled && dist->enabled) @@ -159,8 +168,18 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu, switch (addr & 0x0c) { case GICD_TYPER2: - if (val != vgic_mmio_read_v3_misc(vcpu, addr, len)) + reg = vgic_mmio_read_v3_misc(vcpu, addr, len); + + if (reg == val) + return 0; + if (vgic_initialized(vcpu->kvm)) + return -EBUSY; + if ((reg ^ val) & ~GICD_TYPER2_nASSGIcap) return -EINVAL; + if (!system_supports_direct_sgis() && val) + return -EINVAL; + + dist->nassgicap = val & GICD_TYPER2_nASSGIcap; return 0; case GICD_IIDR: reg = vgic_mmio_read_v3_misc(vcpu, addr, len); @@ -178,7 +197,7 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu, } case GICD_CTLR: /* Not a GICv4.1? No HW SGIs */ - if (!kvm_vgic_global_state.has_gicv4_1) + if (!vgic_supports_direct_sgis(vcpu->kvm)) val &= ~GICD_CTLR_nASSGIreq; dist->enabled = val & GICD_CTLR_ENABLE_SS_G1; diff --git a/arch/arm64/kvm/vgic/vgic-v3-nested.c b/arch/arm64/kvm/vgic/vgic-v3-nested.c index bfa5bde1f1067..7f1259b49c505 100644 --- a/arch/arm64/kvm/vgic/vgic-v3-nested.c +++ b/arch/arm64/kvm/vgic/vgic-v3-nested.c @@ -36,6 +36,11 @@ struct shadow_if { static DEFINE_PER_CPU(struct shadow_if, shadow_if); +static int lr_map_idx_to_shadow_idx(struct shadow_if *shadow_if, int idx) +{ + return hweight16(shadow_if->lr_map & (BIT(idx) - 1)); +} + /* * Nesting GICv3 support * @@ -111,7 +116,7 @@ bool vgic_state_is_nested(struct kvm_vcpu *vcpu) { u64 xmo; - if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) { + if (is_nested_ctxt(vcpu)) { xmo = __vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_IMO | HCR_FMO); WARN_ONCE(xmo && xmo != (HCR_IMO | HCR_FMO), "Separate virtual IRQ/FIQ settings not supported\n"); @@ -209,6 +214,29 @@ u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu) return reg; } +static u64 translate_lr_pintid(struct kvm_vcpu *vcpu, u64 lr) +{ + struct vgic_irq *irq; + + if (!(lr & ICH_LR_HW)) + return lr; + + /* We have the HW bit set, check for validity of pINTID */ + irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr)); + /* If there was no real mapping, nuke the HW bit */ + if (!irq || !irq->hw || irq->intid > VGIC_MAX_SPI) + lr &= ~ICH_LR_HW; + + /* Translate the virtual mapping to the real one, even if invalid */ + if (irq) { + lr &= ~ICH_LR_PHYS_ID_MASK; + lr |= FIELD_PREP(ICH_LR_PHYS_ID_MASK, (u64)irq->hwintid); + vgic_put_irq(vcpu->kvm, irq); + } + + return lr; +} + /* * For LRs which have HW bit set such as timer interrupts, we modify them to * have the host hardware interrupt number instead of the virtual one programmed @@ -217,61 +245,37 @@ u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu) static void vgic_v3_create_shadow_lr(struct kvm_vcpu *vcpu, struct vgic_v3_cpu_if *s_cpu_if) { - unsigned long lr_map = 0; - int index = 0; + struct shadow_if *shadow_if; + + shadow_if = container_of(s_cpu_if, struct shadow_if, cpuif); + shadow_if->lr_map = 0; for (int i = 0; i < kvm_vgic_global_state.nr_lr; i++) { u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i)); - struct vgic_irq *irq; if (!(lr & ICH_LR_STATE)) - lr = 0; - - if (!(lr & ICH_LR_HW)) - goto next; - - /* We have the HW bit set, check for validity of pINTID */ - irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr)); 
- if (!irq || !irq->hw || irq->intid > VGIC_MAX_SPI ) { - /* There was no real mapping, so nuke the HW bit */ - lr &= ~ICH_LR_HW; - if (irq) - vgic_put_irq(vcpu->kvm, irq); - goto next; - } - - /* It is illegal to have the EOI bit set with HW */ - lr &= ~ICH_LR_EOI; - - /* Translate the virtual mapping to the real one */ - lr &= ~ICH_LR_PHYS_ID_MASK; - lr |= FIELD_PREP(ICH_LR_PHYS_ID_MASK, (u64)irq->hwintid); + continue; - vgic_put_irq(vcpu->kvm, irq); + lr = translate_lr_pintid(vcpu, lr); -next: - s_cpu_if->vgic_lr[index] = lr; - if (lr) { - lr_map |= BIT(i); - index++; - } + s_cpu_if->vgic_lr[hweight16(shadow_if->lr_map)] = lr; + shadow_if->lr_map |= BIT(i); } - container_of(s_cpu_if, struct shadow_if, cpuif)->lr_map = lr_map; - s_cpu_if->used_lrs = index; + s_cpu_if->used_lrs = hweight16(shadow_if->lr_map); } void vgic_v3_sync_nested(struct kvm_vcpu *vcpu) { struct shadow_if *shadow_if = get_shadow_if(); - int i, index = 0; + int i; for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) { u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i)); struct vgic_irq *irq; if (!(lr & ICH_LR_HW) || !(lr & ICH_LR_STATE)) - goto next; + continue; /* * If we had a HW lr programmed by the guest hypervisor, we @@ -280,15 +284,13 @@ void vgic_v3_sync_nested(struct kvm_vcpu *vcpu) */ irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr)); if (WARN_ON(!irq)) /* Shouldn't happen as we check on load */ - goto next; + continue; - lr = __gic_v3_get_lr(index); + lr = __gic_v3_get_lr(lr_map_idx_to_shadow_idx(shadow_if, i)); if (!(lr & ICH_LR_STATE)) irq->active = false; vgic_put_irq(vcpu->kvm, irq); - next: - index++; } } @@ -359,25 +361,23 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu) val = __vcpu_sys_reg(vcpu, ICH_HCR_EL2); val &= ~ICH_HCR_EL2_EOIcount_MASK; val |= (s_cpu_if->vgic_hcr & ICH_HCR_EL2_EOIcount_MASK); - __vcpu_sys_reg(vcpu, ICH_HCR_EL2) = val; - __vcpu_sys_reg(vcpu, ICH_VMCR_EL2) = s_cpu_if->vgic_vmcr; + __vcpu_assign_sys_reg(vcpu, ICH_HCR_EL2, val); + __vcpu_assign_sys_reg(vcpu, ICH_VMCR_EL2, s_cpu_if->vgic_vmcr); for (i = 0; i < 4; i++) { - __vcpu_sys_reg(vcpu, ICH_AP0RN(i)) = s_cpu_if->vgic_ap0r[i]; - __vcpu_sys_reg(vcpu, ICH_AP1RN(i)) = s_cpu_if->vgic_ap1r[i]; + __vcpu_assign_sys_reg(vcpu, ICH_AP0RN(i), s_cpu_if->vgic_ap0r[i]); + __vcpu_assign_sys_reg(vcpu, ICH_AP1RN(i), s_cpu_if->vgic_ap1r[i]); } for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) { val = __vcpu_sys_reg(vcpu, ICH_LRN(i)); val &= ~ICH_LR_STATE; - val |= s_cpu_if->vgic_lr[i] & ICH_LR_STATE; + val |= s_cpu_if->vgic_lr[lr_map_idx_to_shadow_idx(shadow_if, i)] & ICH_LR_STATE; - __vcpu_sys_reg(vcpu, ICH_LRN(i)) = val; - s_cpu_if->vgic_lr[i] = 0; + __vcpu_assign_sys_reg(vcpu, ICH_LRN(i), val); } - shadow_if->lr_map = 0; vcpu->arch.vgic_cpu.vgic_v3.used_lrs = 0; } @@ -401,9 +401,7 @@ void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu) { bool level; - level = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_En; - if (level) - level &= vgic_v3_get_misr(vcpu); + level = (__vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_En) && vgic_v3_get_misr(vcpu); kvm_vgic_inject_irq(vcpu->kvm, vcpu, vcpu->kvm->arch.vgic.mi_intid, level, vcpu); } diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index c7de6154627c4..4d9343d2b0b15 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -356,7 +356,7 @@ int vgic_v4_put(struct kvm_vcpu *vcpu) { struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; - if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident) + if 
(!vgic_supports_direct_irqs(vcpu->kvm) || !vpe->resident) return 0; return its_make_vpe_non_resident(vpe, vgic_v4_want_doorbell(vcpu)); @@ -367,7 +367,7 @@ int vgic_v4_load(struct kvm_vcpu *vcpu) struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; int err; - if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident) + if (!vgic_supports_direct_irqs(vcpu->kvm) || vpe->resident) return 0; if (vcpu_get_flag(vcpu, IN_WFI)) @@ -444,7 +444,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq, if (IS_ERR(its)) return 0; - mutex_lock(&its->its_lock); + guard(mutex)(&its->its_lock); /* * Perform the actual DevID/EventID -> LPI translation. @@ -455,11 +455,13 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq, */ if (vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid, irq_entry->msi.data, &irq)) - goto out; + return 0; + + raw_spin_lock_irqsave(&irq->irq_lock, flags); /* Silently exit if the vLPI is already mapped */ if (irq->hw) - goto out; + goto out_unlock_irq; /* * Emit the mapping request. If it fails, the ITS probably @@ -479,68 +481,72 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq, ret = its_map_vlpi(virq, &map); if (ret) - goto out; + goto out_unlock_irq; irq->hw = true; irq->host_irq = virq; atomic_inc(&map.vpe->vlpi_count); /* Transfer pending state */ - raw_spin_lock_irqsave(&irq->irq_lock, flags); - if (irq->pending_latch) { - ret = irq_set_irqchip_state(irq->host_irq, - IRQCHIP_STATE_PENDING, - irq->pending_latch); - WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq); + if (!irq->pending_latch) + goto out_unlock_irq; - /* - * Clear pending_latch and communicate this state - * change via vgic_queue_irq_unlock. - */ - irq->pending_latch = false; - vgic_queue_irq_unlock(kvm, irq, flags); - } else { - raw_spin_unlock_irqrestore(&irq->irq_lock, flags); - } + ret = irq_set_irqchip_state(irq->host_irq, IRQCHIP_STATE_PENDING, + irq->pending_latch); + WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq); -out: - mutex_unlock(&its->its_lock); + /* + * Clear pending_latch and communicate this state + * change via vgic_queue_irq_unlock. + */ + irq->pending_latch = false; + vgic_queue_irq_unlock(kvm, irq, flags); + return ret; + +out_unlock_irq: + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); return ret; } -int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq, - struct kvm_kernel_irq_routing_entry *irq_entry) +static struct vgic_irq *__vgic_host_irq_get_vlpi(struct kvm *kvm, int host_irq) { - struct vgic_its *its; struct vgic_irq *irq; - int ret; + unsigned long idx; - if (!vgic_supports_direct_msis(kvm)) - return 0; + guard(rcu)(); + xa_for_each(&kvm->arch.vgic.lpi_xa, idx, irq) { + if (!irq->hw || irq->host_irq != host_irq) + continue; - /* - * Get the ITS, and escape early on error (not a valid - * doorbell for any of our vITSs). 
- */ - its = vgic_get_its(kvm, irq_entry); - if (IS_ERR(its)) - return 0; + if (!vgic_try_get_irq_kref(irq)) + return NULL; - mutex_lock(&its->its_lock); + return irq; + } - ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid, - irq_entry->msi.data, &irq); - if (ret) - goto out; + return NULL; +} + +void kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq) +{ + struct vgic_irq *irq; + unsigned long flags; + + if (!vgic_supports_direct_msis(kvm)) + return; - WARN_ON(irq->hw && irq->host_irq != virq); + irq = __vgic_host_irq_get_vlpi(kvm, host_irq); + if (!irq) + return; + + raw_spin_lock_irqsave(&irq->irq_lock, flags); + WARN_ON(irq->hw && irq->host_irq != host_irq); if (irq->hw) { atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count); irq->hw = false; - ret = its_unmap_vlpi(virq); + its_unmap_vlpi(host_irq); } -out: - mutex_unlock(&its->its_lock); - return ret; + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); + vgic_put_irq(kvm, irq); } diff --git a/arch/arm64/kvm/vgic/vgic-v5.c b/arch/arm64/kvm/vgic/vgic-v5.c new file mode 100644 index 0000000000000..6bdbb221bcde7 --- /dev/null +++ b/arch/arm64/kvm/vgic/vgic-v5.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <kvm/arm_vgic.h> +#include <linux/irqchip/arm-vgic-info.h> + +#include "vgic.h" + +/* + * Probe for a vGICv5 compatible interrupt controller, returning 0 on success. + * Currently only supports GICv3-based VMs on a GICv5 host, and hence only + * registers a VGIC_V3 device. + */ +int vgic_v5_probe(const struct gic_kvm_info *info) +{ + u64 ich_vtr_el2; + int ret; + + if (!info->has_gcie_v3_compat) + return -ENODEV; + + kvm_vgic_global_state.type = VGIC_V5; + kvm_vgic_global_state.has_gcie_v3_compat = true; + + /* We only support v3 compat mode - use vGICv3 limits */ + kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS; + + kvm_vgic_global_state.vcpu_base = 0; + kvm_vgic_global_state.vctrl_base = NULL; + kvm_vgic_global_state.can_emulate_gicv2 = false; + kvm_vgic_global_state.has_gicv4 = false; + kvm_vgic_global_state.has_gicv4_1 = false; + + ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config); + kvm_vgic_global_state.ich_vtr_el2 = (u32)ich_vtr_el2; + + /* + * The ListRegs field is 5 bits, but there is an architectural + * maximum of 16 list registers. Just ignore bit 4... + */ + kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1; + + ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3); + if (ret) { + kvm_err("Cannot register GICv3-legacy KVM device.\n"); + return ret; + } + + static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif); + kvm_info("GCIE legacy system register CPU interface\n"); + + return 0; +} diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index 8f8096d489252..f5148b38120ad 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -951,7 +951,7 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) * can be directly injected (GICv4). 
*/ if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) && - !vgic_supports_direct_msis(vcpu->kvm)) + !vgic_supports_direct_irqs(vcpu->kvm)) return; DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); @@ -965,7 +965,7 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) if (can_access_vgic_from_kernel()) vgic_restore_state(vcpu); - if (vgic_supports_direct_msis(vcpu->kvm)) + if (vgic_supports_direct_irqs(vcpu->kvm)) vgic_v4_commit(vcpu); } diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index 0c5a63712702b..1384a04c07848 100644 --- a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -64,6 +64,24 @@ KVM_REG_ARM_VGIC_SYSREG_CRM_MASK | \ KVM_REG_ARM_VGIC_SYSREG_OP2_MASK) +#define KVM_ICC_SRE_EL2 (ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE | \ + ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB) +#define KVM_ICH_VTR_EL2_RES0 (ICH_VTR_EL2_DVIM | \ + ICH_VTR_EL2_A3V | \ + ICH_VTR_EL2_IDbits) +#define KVM_ICH_VTR_EL2_RES1 ICH_VTR_EL2_nV4 + +static inline u64 kvm_get_guest_vtr_el2(void) +{ + u64 vtr; + + vtr = kvm_vgic_global_state.ich_vtr_el2; + vtr &= ~KVM_ICH_VTR_EL2_RES0; + vtr |= KVM_ICH_VTR_EL2_RES1; + + return vtr; +} + /* * As per Documentation/virt/kvm/devices/arm-vgic-its.rst, * below macros are defined for ITS table entry encoding. @@ -172,6 +190,36 @@ struct vgic_reg_attr { gpa_t addr; }; +struct its_device { + struct list_head dev_list; + + /* the head for the list of ITTEs */ + struct list_head itt_head; + u32 num_eventid_bits; + gpa_t itt_addr; + u32 device_id; +}; + +#define COLLECTION_NOT_MAPPED ((u32)~0) + +struct its_collection { + struct list_head coll_list; + + u32 collection_id; + u32 target_addr; +}; + +#define its_is_collection_mapped(coll) ((coll) && \ + ((coll)->target_addr != COLLECTION_NOT_MAPPED)) + +struct its_ite { + struct list_head ite_list; + + struct vgic_irq *irq; + struct its_collection *collection; + u32 event_id; +}; + int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr, struct vgic_reg_attr *reg_attr); int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr, @@ -267,6 +315,7 @@ int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write, int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr, bool is_write); int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); +const struct sys_reg_desc *vgic_v3_get_sysreg_table(unsigned int *sz); int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write, u32 intid, u32 *val); int kvm_register_vgic_device(unsigned long type); @@ -278,6 +327,8 @@ int vgic_init(struct kvm *kvm); void vgic_debug_init(struct kvm *kvm); void vgic_debug_destroy(struct kvm *kvm); +int vgic_v5_probe(const struct gic_kvm_info *info); + static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu) { struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu; @@ -339,7 +390,23 @@ void vgic_its_invalidate_all_caches(struct kvm *kvm); int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq); int vgic_its_invall(struct kvm_vcpu *vcpu); +bool system_supports_direct_sgis(void); bool vgic_supports_direct_msis(struct kvm *kvm); +bool vgic_supports_direct_sgis(struct kvm *kvm); + +static inline bool vgic_supports_direct_irqs(struct kvm *kvm) +{ + /* + * Deliberately conflate vLPI and vSGI support on GICv4.1 hardware, + * indirectly allowing userspace to control whether or not vPEs are + * allocated for the VM. 
+ */ + if (system_supports_direct_sgis()) + return vgic_supports_direct_sgis(kvm); + + return vgic_supports_direct_msis(kvm); +} + int vgic_v4_init(struct kvm *kvm); void vgic_v4_teardown(struct kvm *kvm); void vgic_v4_configure_vsgis(struct kvm *kvm); @@ -359,4 +426,18 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu); void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu); void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu); +static inline bool vgic_is_v3_compat(struct kvm *kvm) +{ + return cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF) && + kvm_vgic_global_state.has_gcie_v3_compat; +} + +static inline bool vgic_is_v3(struct kvm *kvm) +{ + return kvm_vgic_global_state.type == VGIC_V3 || vgic_is_v3_compat(kvm); +} + +int vgic_its_debug_init(struct kvm_device *dev); +void vgic_its_debug_destroy(struct kvm_device *dev); + #endif
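
The open-coded TG/SCALE/NUM/BASE decode deleted from handle_ripas2e1is() is now centralised in decode_range_tlbi(). A stand-alone sketch of that decode, assuming the same field layout as the removed code; the in-kernel helper takes a third parameter (passed as NULL at this call site) which the sketch omits, so treat the signature as illustrative only:

#include <stdint.h>

/*
 * Decode a range-TLBI payload: TG in bits [47:46], SCALE in [45:44],
 * NUM in [43:39], BASE in [36:0].  Returns the base address and fills
 * in the byte length of the range, mirroring the logic that used to
 * live in handle_ripas2e1is().
 */
static uint64_t decode_range_tlbi_sketch(uint64_t regval, uint64_t *range)
{
	uint64_t tg    = (regval >> 46) & 0x3;
	uint64_t scale = (regval >> 44) & 0x3;
	uint64_t num   = (regval >> 39) & 0x1f;
	uint64_t base  = regval & ((1ULL << 37) - 1);
	unsigned int shift;

	switch (tg) {
	case 1:
		shift = 12;		/* 4kB granule */
		break;
	case 2:
		shift = 14;		/* 16kB granule */
		break;
	case 3:
	default:
		shift = 16;		/* IMPDEF: treat TG == 0 as 64kB */
		break;
	}

	/* The range encoding describes (NUM + 1) << (5 * SCALE + 1) pages. */
	*range = ((num + 1) << (5 * scale + 1)) << shift;
	return base << shift;
}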
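
For reference, the per-ITS debugfs file created by vgic_its_debug_init() is named "vgic-its-state@<ITS base>" and sits in the VM's KVM debugfs directory. Going by the seq_printf() format strings in vgic_its_debug_show(), its contents look roughly like the following; the device ID, event IDs, INTIDs and column spacing below are invented for illustration:

Device ID: 0x8, Event ID Range: [0 - 31]
EVENT_ID    INTID  HWINTID   TARGET   COL_ID  HW
-----------------------------------------------
       0     8192        0        0        0   0
       1     8193     2080        1        1   1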
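
Several hunks above (update_affinity(), its_free_ite(), kvm_vgic_v4_set_forwarding() and the MAINT_IRQ attribute accessors) replace explicit lock/unlock pairs with guard()/scoped_guard() from <linux/cleanup.h>, which drop the lock automatically when the enclosing scope is left. A stand-alone analogue of that mechanism using the compiler's cleanup attribute and a pthread mutex; it is illustrative only, not the kernel implementation, and set_mi_intid()/mi_intid are made-up stand-ins for the config_lock-protected state in the patch:

#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Take the lock now, drop it automatically when the scope ends. */
#define guard_mutex(m)							\
	pthread_mutex_t *__guard					\
		__attribute__((cleanup(unlock_cleanup))) = (m);		\
	pthread_mutex_lock(__guard)

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;
static int mi_intid;

static int set_mi_intid(int val)
{
	guard_mutex(&cfg_lock);

	if (initialized)
		return -1;	/* early return: cfg_lock is still released */

	mi_intid = val;
	return 0;
}

int main(void)
{
	printf("set_mi_intid: %d (mi_intid=%d)\n", set_mi_intid(27), mi_intid);
	return 0;
}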
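
In vgic-v3-nested.c the shadow LR array is now kept dense: only guest LRs carrying state get a shadow slot, lr_map records which guest LRs are in use, and lr_map_idx_to_shadow_idx() recovers the dense index as the population count of the set bits below the guest index. A small user-space sketch of that mapping, with hweight16()/BIT() replaced by plain C equivalents:

#include <stdint.h>
#include <stdio.h>

static int lr_map_idx_to_shadow_idx(uint16_t lr_map, int idx)
{
	/* Number of in-use guest LRs strictly below 'idx'. */
	return __builtin_popcount(lr_map & ((1u << idx) - 1));
}

int main(void)
{
	uint16_t lr_map = 0;

	/* Pretend guest LRs 1, 4 and 9 carry state. */
	lr_map |= 1u << 1;
	lr_map |= 1u << 4;
	lr_map |= 1u << 9;

	/* Prints "0 1 2": the dense shadow slots backing those LRs. */
	printf("%d %d %d\n",
	       lr_map_idx_to_shadow_idx(lr_map, 1),
	       lr_map_idx_to_shadow_idx(lr_map, 4),
	       lr_map_idx_to_shadow_idx(lr_map, 9));
	return 0;
}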