Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--	arch/arm64/include/asm/el2_setup.h	38
-rw-r--r--	arch/arm64/include/asm/kvm_host.h	50
-rw-r--r--	arch/arm64/include/asm/pgtable.h	3
-rw-r--r--	arch/arm64/include/asm/sysreg.h	11
4 files changed, 94 insertions, 8 deletions
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index b37da3ee8529..99a7c0235e6d 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -24,22 +24,48 @@
 	 * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
 	 * can reset into an UNKNOWN state and might not read as 1 until it has
 	 * been initialized explicitly.
-	 *
-	 * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
-	 * don't advertise it (they predate this relaxation).
-	 *
 	 * Initalize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
 	 * indicating whether the CPU is running in E2H mode.
 	 */
 	mrs_s	x1, SYS_ID_AA64MMFR4_EL1
 	sbfx	x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
 	cmp	x1, #0
-	b.ge	.LnVHE_\@
+	b.lt	.LnE2H0_\@
 
+	/*
+	 * Unfortunately, HCR_EL2.E2H can be RES1 even if not advertised
+	 * as such via ID_AA64MMFR4_EL1.E2H0:
+	 *
+	 * - Fruity CPUs predate the !FEAT_E2H0 relaxation, and seem to
+	 *   have HCR_EL2.E2H implemented as RAO/WI.
+	 *
+	 * - On CPUs that lack FEAT_FGT, a hypervisor can't trap guest
+	 *   reads of ID_AA64MMFR4_EL1 to advertise !FEAT_E2H0. NV
+	 *   guests on these hosts can write to HCR_EL2.E2H without
+	 *   trapping to the hypervisor, but these writes have no
+	 *   functional effect.
+	 *
+	 * Handle both cases by checking for an essential VHE property
+	 * (system register remapping) to decide whether we're
+	 * effectively VHE-only or not.
+	 */
+	msr_hcr_el2 x0		// Setup HCR_EL2 as nVHE
+	isb
+	mov	x1, #1		// Write something to FAR_EL1
+	msr	far_el1, x1
+	isb
+	mov	x1, #2		// Try to overwrite it via FAR_EL2
+	msr	far_el2, x1
+	isb
+	mrs	x1, far_el1	// If we see the latest write in FAR_EL1,
+	cmp	x1, #2		// we can safely assume we are VHE only.
+	b.ne	.LnVHE_\@	// Otherwise, we know that nVHE works.
+
+.LnE2H0_\@:
 	orr	x0, x0, #HCR_E2H
-.LnVHE_\@:
 	msr_hcr_el2 x0
 	isb
+.LnVHE_\@:
 .endm
 
 .macro __init_el2_sctlr
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index b763293281c8..64302c438355 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -816,6 +816,11 @@ struct kvm_vcpu_arch {
 	u64 hcrx_el2;
 	u64 mdcr_el2;
 
+	struct {
+		u64 r;
+		u64 w;
+	} fgt[__NR_FGT_GROUP_IDS__];
+
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
 
@@ -1600,6 +1605,51 @@ static inline bool kvm_arch_has_irq_bypass(void)
 void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt);
 void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1);
 void check_feature_map(void);
+void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu);
+
+static __always_inline enum fgt_group_id __fgt_reg_to_group_id(enum vcpu_sysreg reg)
+{
+	switch (reg) {
+	case HFGRTR_EL2:
+	case HFGWTR_EL2:
+		return HFGRTR_GROUP;
+	case HFGITR_EL2:
+		return HFGITR_GROUP;
+	case HDFGRTR_EL2:
+	case HDFGWTR_EL2:
+		return HDFGRTR_GROUP;
+	case HAFGRTR_EL2:
+		return HAFGRTR_GROUP;
+	case HFGRTR2_EL2:
+	case HFGWTR2_EL2:
+		return HFGRTR2_GROUP;
+	case HFGITR2_EL2:
+		return HFGITR2_GROUP;
+	case HDFGRTR2_EL2:
+	case HDFGWTR2_EL2:
+		return HDFGRTR2_GROUP;
+	default:
+		BUILD_BUG_ON(1);
+	}
+}
+#define vcpu_fgt(vcpu, reg)						\
+	({								\
+		enum fgt_group_id id = __fgt_reg_to_group_id(reg);	\
+		u64 *p;							\
+		switch (reg) {						\
+		case HFGWTR_EL2:					\
+		case HDFGWTR_EL2:					\
+		case HFGWTR2_EL2:					\
+		case HDFGWTR2_EL2:					\
+			p = &(vcpu)->arch.fgt[id].w;			\
+			break;						\
+		default:						\
+			p = &(vcpu)->arch.fgt[id].r;			\
+			break;						\
+		}							\
+									\
+		p;							\
+	})
 
 #endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index aa89c2e67ebc..0944e296dd4a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -293,7 +293,8 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
 static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
-	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
+	if (pte_sw_dirty(pte))
+		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
 	return pte;
 }
 
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6455db1b54fd..c231d2a3e515 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -1220,10 +1220,19 @@
 	__val;								\
 })
 
+/*
+ * The "Z" constraint combined with the "%x0" template should be enough
+ * to force XZR generation if (v) is a constant 0 value but LLVM does not
+ * yet understand that modifier/constraint combo so a conditional is required
+ * to nudge the compiler into using XZR as a source for a 0 constant value.
+ */
 #define write_sysreg_s(v, r) do {					\
 	u64 __val = (u64)(v);						\
 	u32 __maybe_unused __check_r = (u32)(r);			\
-	asm volatile(__msr_s(r, "%x0") : : "rZ" (__val));		\
+	if (__builtin_constant_p(__val) && __val == 0)			\
+		asm volatile(__msr_s(r, "xzr"));			\
+	else								\
+		asm volatile(__msr_s(r, "%x0") : : "r" (__val));	\
 } while (0)
 
 /*
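
The el2_setup.h probe above decides whether HCR_EL2.E2H is effectively RES1 by testing for VHE's system register remapping. The same idea can be written as a hypothetical C sketch; this is illustration only, since the real check runs in early assembly before any C environment exists, and hcr_e2h_is_effectively_set() is not a function the patch adds:

/*
 * Illustration only, not part of the patch (assumes <asm/sysreg.h> and
 * <asm/barrier.h>): with HCR_EL2.E2H clear, FAR_EL1 and FAR_EL2 are
 * distinct registers. If E2H is effectively RES1 (VHE-only CPU), EL1
 * register names are redirected to their EL2 counterparts, so a write
 * to FAR_EL2 becomes visible through FAR_EL1. The assembly performs
 * this check right after programming HCR_EL2 with E2H clear.
 */
static bool hcr_e2h_is_effectively_set(void)
{
	write_sysreg(1, far_el1);	/* seed FAR_EL1 */
	isb();
	write_sysreg(2, far_el2);	/* try to overwrite it via FAR_EL2 */
	isb();

	/* If FAR_EL1 reads back the FAR_EL2 value, the names alias: VHE only */
	return read_sysreg(far_el1) == 2;
}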
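
The vcpu_fgt() accessor added to kvm_host.h resolves a trap register name to a pointer into the new per-vCPU fgt[] shadow array: the write-trap registers (HFGWTR_EL2, HDFGWTR_EL2, HFGWTR2_EL2, HDFGWTR2_EL2) map to the .w slot of their group, everything else to .r, and kvm_vcpu_load_fgt() presumably programs these shadows into hardware on vcpu_load. A hypothetical caller, not taken from this diff (trap_bit stands in for a real HFG*TR_EL2 field definition):

/*
 * Hypothetical example, assuming the definitions above: OR a trap bit
 * into the shadow values that kvm_vcpu_load_fgt() later loads into the
 * HFGRTR_EL2/HFGWTR_EL2 registers.
 */
static void example_trap_reads_and_writes(struct kvm_vcpu *vcpu, u64 trap_bit)
{
	*vcpu_fgt(vcpu, HFGRTR_EL2) |= trap_bit;	/* updates fgt[HFGRTR_GROUP].r */
	*vcpu_fgt(vcpu, HFGWTR_EL2) |= trap_bit;	/* updates fgt[HFGRTR_GROUP].w */
}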
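
The pgtable.h change makes pte_mkwrite_novma() keep PTE_RDONLY set unless the pte is already software-dirty, so a clean pte becomes logically writable but stays write-protected in hardware until the first write faults and dirties it. A hedged sketch of the resulting behaviour, using the existing arm64 helpers pte_mkclean()/pte_mkdirty() (the example function itself is hypothetical):

/* Illustrative only, assuming the existing arm64 pte helpers. */
static pte_t example_make_writable(pte_t pte, bool already_dirty)
{
	if (already_dirty)
		pte = pte_mkdirty(pte);	/* sets the software PTE_DIRTY bit */
	else
		pte = pte_mkclean(pte);	/* clears PTE_DIRTY, sets PTE_RDONLY */

	/*
	 * New behaviour: PTE_WRITE is always set, but PTE_RDONLY is only
	 * cleared on the dirty path, so a clean pte remains read-only in
	 * hardware until the first write faults and marks it dirty.
	 */
	return pte_mkwrite_novma(pte);
}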
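
With the write_sysreg_s() change, a compile-time constant zero can be emitted as an MSR with XZR as the source operand, while any other value still goes through a general-purpose register via the "r" constraint. Hypothetical usage (SYS_OSDLR_EL1 is just an arbitrary example target):

/*
 * Hypothetical usage, assuming the updated macro above: the first write
 * can be assembled as "msr osdlr_el1, xzr", the second still
 * materialises the value in a GPR first.
 */
static void example_zero_and_value_writes(u64 val)
{
	write_sysreg_s(0, SYS_OSDLR_EL1);	/* constant 0: takes the xzr branch  */
	write_sysreg_s(val, SYS_OSDLR_EL1);	/* run-time value: takes the "r" path */
}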
