Diffstat (limited to 'arch/arm64/include/asm/el2_setup.h')
-rw-r--r--	arch/arm64/include/asm/el2_setup.h	66
1 file changed, 54 insertions(+), 12 deletions(-)
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 46033027510c..99a7c0235e6d 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -24,22 +24,48 @@
  * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
  * can reset into an UNKNOWN state and might not read as 1 until it has
  * been initialized explicitly.
- *
- * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
- * don't advertise it (they predate this relaxation).
- *
  * Initalize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
  * indicating whether the CPU is running in E2H mode.
  */
 
 	mrs_s	x1, SYS_ID_AA64MMFR4_EL1
 	sbfx	x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
 	cmp	x1, #0
-	b.ge	.LnVHE_\@
+	b.lt	.LnE2H0_\@
+
+	/*
+	 * Unfortunately, HCR_EL2.E2H can be RES1 even if not advertised
+	 * as such via ID_AA64MMFR4_EL1.E2H0:
+	 *
+	 * - Fruity CPUs predate the !FEAT_E2H0 relaxation, and seem to
+	 *   have HCR_EL2.E2H implemented as RAO/WI.
+	 *
+	 * - On CPUs that lack FEAT_FGT, a hypervisor can't trap guest
+	 *   reads of ID_AA64MMFR4_EL1 to advertise !FEAT_E2H0. NV
+	 *   guests on these hosts can write to HCR_EL2.E2H without
+	 *   trapping to the hypervisor, but these writes have no
+	 *   functional effect.
+	 *
+	 * Handle both cases by checking for an essential VHE property
+	 * (system register remapping) to decide whether we're
+	 * effectively VHE-only or not.
+	 */
+	msr_hcr_el2 x0			// Setup HCR_EL2 as nVHE
+	isb
+	mov	x1, #1			// Write something to FAR_EL1
+	msr	far_el1, x1
+	isb
+	mov	x1, #2			// Try to overwrite it via FAR_EL2
+	msr	far_el2, x1
+	isb
+	mrs	x1, far_el1		// If we see the latest write in FAR_EL1,
+	cmp	x1, #2			// we can safely assume we are VHE only.
+	b.ne	.LnVHE_\@		// Otherwise, we know that nVHE works.
+.LnE2H0_\@:
 	orr	x0, x0, #HCR_E2H
-.LnVHE_\@:
 	msr_hcr_el2 x0
 	isb
+.LnVHE_\@:
 .endm
 
 .macro __init_el2_sctlr
@@ -91,6 +117,14 @@
 	msr	cntvoff_el2, xzr	// Clear virtual offset
 .endm
 
+/* Branch to skip_label if SPE version is less than given version */
+.macro __spe_vers_imp skip_label, version, tmp
+	mrs	\tmp, id_aa64dfr0_el1
+	ubfx	\tmp, \tmp, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
+	cmp	\tmp, \version
+	b.lt	\skip_label
+.endm
+
 .macro __init_el2_debug
 	mrs	x1, id_aa64dfr0_el1
 	ubfx	x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
@@ -103,8 +137,7 @@
 	csel	x2, xzr, x0, eq		// all PMU counters from EL1
 
 	/* Statistical profiling */
-	ubfx	x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
-	cbz	x0, .Lskip_spe_\@	// Skip if SPE not present
+	__spe_vers_imp	.Lskip_spe_\@, ID_AA64DFR0_EL1_PMSVer_IMP, x0 // Skip if SPE not present
 
 	mrs_s	x0, SYS_PMBIDR_EL1	// If SPE available at EL2,
 	and	x0, x0, #(1 << PMBIDR_EL1_P_SHIFT)
@@ -263,10 +296,8 @@
 	mov	x0, xzr
 	mov	x2, xzr
 
-	mrs	x1, id_aa64dfr0_el1
-	ubfx	x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
-	cmp	x1, #3
-	b.lt	.Lskip_spe_fgt_\@
+	/* If SPEv1p2 is implemented, */
+	__spe_vers_imp	.Lskip_spe_fgt_\@, #ID_AA64DFR0_EL1_PMSVer_V1P2, x1
 	/* Disable PMSNEVFR_EL1 read and write traps */
 	orr	x0, x0, #HDFGRTR_EL2_nPMSNEVFR_EL1_MASK
 	orr	x2, x2, #HDFGWTR_EL2_nPMSNEVFR_EL1_MASK
@@ -387,6 +418,17 @@
 	orr	x0, x0, #HDFGRTR2_EL2_nPMICFILTR_EL0
 	orr	x0, x0, #HDFGRTR2_EL2_nPMUACR_EL1
 .Lskip_pmuv3p9_\@:
+	/* If SPE is implemented, */
+	__spe_vers_imp	.Lskip_spefds_\@, ID_AA64DFR0_EL1_PMSVer_IMP, x1
+	/* we can read PMSIDR and */
+	mrs_s	x1, SYS_PMSIDR_EL1
+	and	x1, x1, #PMSIDR_EL1_FDS
+	/* if FEAT_SPE_FDS is implemented, */
+	cbz	x1, .Lskip_spefds_\@
+	/* disable traps of PMSDSFR to EL2. */
+	orr	x0, x0, #HDFGRTR2_EL2_nPMSDSFR_EL1
+
+.Lskip_spefds_\@:
 	msr_s	SYS_HDFGRTR2_EL2, x0
 	msr_s	SYS_HDFGWTR2_EL2, x0
 	msr_s	SYS_HFGRTR2_EL2, xzr
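
For readers following the logic rather than the assembly, here is a minimal C sketch of the E2H decision in the first hunk. It is a model, not kernel code: register values are taken as plain integers, and the names e2h0(), vhe_only() and far_el1_readback are illustrative. The field position assumed for ID_AA64MMFR4_EL1.E2H0 (bits [27:24]) stands in for the ID_AA64MMFR4_EL1_E2H0_SHIFT/_WIDTH constants the patch uses.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* ID_AA64MMFR4_EL1.E2H0 is a signed 4-bit field at bits [27:24]. */
	#define E2H0_SHIFT	24
	#define E2H0_WIDTH	4

	/* Mirrors the patch's sbfx: extract and sign-extend the E2H0 field. */
	static int64_t e2h0(uint64_t id_aa64mmfr4)
	{
		return (int64_t)(id_aa64mmfr4 << (64 - E2H0_SHIFT - E2H0_WIDTH))
		       >> (64 - E2H0_WIDTH);
	}

	/*
	 * far_el1_readback stands in for the FAR_EL1 value read back after
	 * the probe wrote 1 to FAR_EL1 and then 2 to FAR_EL2 with HCR_EL2
	 * configured for nVHE.  Seeing the FAR_EL2 write in FAR_EL1 means
	 * the EL1 registers are remapped, i.e. E2H is effectively RES1.
	 */
	static bool vhe_only(uint64_t id_aa64mmfr4, uint64_t far_el1_readback)
	{
		if (e2h0(id_aa64mmfr4) < 0)	/* !FEAT_E2H0 advertised */
			return true;
		return far_el1_readback == 2;	/* RAO/WI caught by the probe */
	}

	int main(void)
	{
		/* E2H0 == 0b1110 (negative): VHE-only, no probe needed */
		printf("%d\n", vhe_only(0xeULL << E2H0_SHIFT, 1));	/* 1 */
		/* E2H0 == 0 and the FAR_EL2 write stayed out of FAR_EL1 */
		printf("%d\n", vhe_only(0, 1));				/* 0 */
		/* E2H0 == 0, but the fruity RAO/WI behaviour was detected */
		printf("%d\n", vhe_only(0, 2));				/* 1 */
		return 0;
	}

The SPE changes follow one pattern: __spe_vers_imp centralizes the "PMSVer at least X" check, and the last hunk chains it with PMSIDR_EL1.FDS before setting HDFGRTR2_EL2.nPMSDSFR_EL1 (PMSIDR_EL1 is only meaningful once SPE is known to be implemented). A sketch of that chain under the same caveats; it reuses the includes above, and the PMSIDR_EL1_FDS mask is passed in rather than hard-coded, since only its name appears in the patch:

	/* PMSVer values implied by the patch: IMP gates basic SPE, and
	 * V1P2 replaces the old literal "cmp x1, #3" for FEAT_SPEv1p2. */
	enum { PMSVER_IMP = 1, PMSVER_V1P2 = 3 };

	/* ID_AA64DFR0_EL1.PMSVer is the 4-bit field at bits [35:32]. */
	static unsigned int pmsver(uint64_t id_aa64dfr0)
	{
		return (id_aa64dfr0 >> 32) & 0xf;
	}

	/* C rendition of __spe_vers_imp: false means "branch to skip_label". */
	static bool spe_vers_imp(uint64_t id_aa64dfr0, unsigned int version)
	{
		return pmsver(id_aa64dfr0) >= version;
	}

	/* nPMSDSFR_EL1 is set only if SPE exists and PMSIDR_EL1.FDS is set. */
	static bool want_npmsdsfr(uint64_t id_aa64dfr0, uint64_t pmsidr,
				  uint64_t fds_mask)
	{
		return spe_vers_imp(id_aa64dfr0, PMSVER_IMP) && (pmsidr & fds_mask);
	}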