Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/cpu/bugs.c   | 285
 arch/x86/kernel/cpu/common.c |  86
 2 files changed, 258 insertions(+), 113 deletions(-)
| diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index af838b8d845c..36dcfc5105be 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -96,6 +96,9 @@ static void __init its_update_mitigation(void);  static void __init its_apply_mitigation(void);  static void __init tsa_select_mitigation(void);  static void __init tsa_apply_mitigation(void); +static void __init vmscape_select_mitigation(void); +static void __init vmscape_update_mitigation(void); +static void __init vmscape_apply_mitigation(void);  /* The base value of the SPEC_CTRL MSR without task-specific bits set */  u64 x86_spec_ctrl_base; @@ -105,6 +108,14 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);  DEFINE_PER_CPU(u64, x86_spec_ctrl_current);  EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current); +/* + * Set when the CPU has run a potentially malicious guest. An IBPB will + * be needed to before running userspace. That IBPB will flush the branch + * predictor content. + */ +DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user); +EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user); +  u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;  static u64 __ro_after_init x86_arch_cap_msr; @@ -262,6 +273,7 @@ void __init cpu_select_mitigations(void)  	its_select_mitigation();  	bhi_select_mitigation();  	tsa_select_mitigation(); +	vmscape_select_mitigation();  	/*  	 * After mitigations are selected, some may need to update their @@ -293,6 +305,7 @@ void __init cpu_select_mitigations(void)  	bhi_update_mitigation();  	/* srso_update_mitigation() depends on retbleed_update_mitigation(). */  	srso_update_mitigation(); +	vmscape_update_mitigation();  	spectre_v1_apply_mitigation();  	spectre_v2_apply_mitigation(); @@ -310,6 +323,7 @@ void __init cpu_select_mitigations(void)  	its_apply_mitigation();  	bhi_apply_mitigation();  	tsa_apply_mitigation(); +	vmscape_apply_mitigation();  }  /* @@ -2538,88 +2552,6 @@ static void update_mds_branch_idle(void)  	}  } -#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" -#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" -#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" - -void cpu_bugs_smt_update(void) -{ -	mutex_lock(&spec_ctrl_mutex); - -	if (sched_smt_active() && unprivileged_ebpf_enabled() && -	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) -		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); - -	switch (spectre_v2_user_stibp) { -	case SPECTRE_V2_USER_NONE: -		break; -	case SPECTRE_V2_USER_STRICT: -	case SPECTRE_V2_USER_STRICT_PREFERRED: -		update_stibp_strict(); -		break; -	case SPECTRE_V2_USER_PRCTL: -	case SPECTRE_V2_USER_SECCOMP: -		update_indir_branch_cond(); -		break; -	} - -	switch (mds_mitigation) { -	case MDS_MITIGATION_FULL: -	case MDS_MITIGATION_AUTO: -	case MDS_MITIGATION_VMWERV: -		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) -			pr_warn_once(MDS_MSG_SMT); -		update_mds_branch_idle(); -		break; -	case MDS_MITIGATION_OFF: -		break; -	} - -	switch (taa_mitigation) { -	case TAA_MITIGATION_VERW: -	case TAA_MITIGATION_AUTO: -	case TAA_MITIGATION_UCODE_NEEDED: -		if (sched_smt_active()) -			pr_warn_once(TAA_MSG_SMT); -		break; -	case TAA_MITIGATION_TSX_DISABLED: -	case TAA_MITIGATION_OFF: -		break; -	} - -	switch (mmio_mitigation) { -	case MMIO_MITIGATION_VERW: -	case MMIO_MITIGATION_AUTO: -	case MMIO_MITIGATION_UCODE_NEEDED: -		if (sched_smt_active()) -			pr_warn_once(MMIO_MSG_SMT); -		break; -	case MMIO_MITIGATION_OFF: -		break; -	} - -	switch (tsa_mitigation) { -	case TSA_MITIGATION_USER_KERNEL: -	case TSA_MITIGATION_VM: -	case TSA_MITIGATION_AUTO: -	case TSA_MITIGATION_FULL: -		/* -		 * TSA-SQ can potentially lead to info leakage between -		 * SMT threads. -		 */ -		if (sched_smt_active()) -			static_branch_enable(&cpu_buf_idle_clear); -		else -			static_branch_disable(&cpu_buf_idle_clear); -		break; -	case TSA_MITIGATION_NONE: -	case TSA_MITIGATION_UCODE_NEEDED: -		break; -	} - -	mutex_unlock(&spec_ctrl_mutex); -} -  #undef pr_fmt  #define pr_fmt(fmt)	"Speculative Store Bypass: " fmt @@ -3331,8 +3263,184 @@ static void __init srso_apply_mitigation(void)  }  #undef pr_fmt +#define pr_fmt(fmt)	"VMSCAPE: " fmt + +enum vmscape_mitigations { +	VMSCAPE_MITIGATION_NONE, +	VMSCAPE_MITIGATION_AUTO, +	VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER, +	VMSCAPE_MITIGATION_IBPB_ON_VMEXIT, +}; + +static const char * const vmscape_strings[] = { +	[VMSCAPE_MITIGATION_NONE]		= "Vulnerable", +	/* [VMSCAPE_MITIGATION_AUTO] */ +	[VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER]	= "Mitigation: IBPB before exit to userspace", +	[VMSCAPE_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT", +}; + +static enum vmscape_mitigations vmscape_mitigation __ro_after_init = +	IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? 
VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE; + +static int __init vmscape_parse_cmdline(char *str) +{ +	if (!str) +		return -EINVAL; + +	if (!strcmp(str, "off")) { +		vmscape_mitigation = VMSCAPE_MITIGATION_NONE; +	} else if (!strcmp(str, "ibpb")) { +		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER; +	} else if (!strcmp(str, "force")) { +		setup_force_cpu_bug(X86_BUG_VMSCAPE); +		vmscape_mitigation = VMSCAPE_MITIGATION_AUTO; +	} else { +		pr_err("Ignoring unknown vmscape=%s option.\n", str); +	} + +	return 0; +} +early_param("vmscape", vmscape_parse_cmdline); + +static void __init vmscape_select_mitigation(void) +{ +	if (cpu_mitigations_off() || +	    !boot_cpu_has_bug(X86_BUG_VMSCAPE) || +	    !boot_cpu_has(X86_FEATURE_IBPB)) { +		vmscape_mitigation = VMSCAPE_MITIGATION_NONE; +		return; +	} + +	if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO) +		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER; +} + +static void __init vmscape_update_mitigation(void) +{ +	if (!boot_cpu_has_bug(X86_BUG_VMSCAPE)) +		return; + +	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB || +	    srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT) +		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT; + +	pr_info("%s\n", vmscape_strings[vmscape_mitigation]); +} + +static void __init vmscape_apply_mitigation(void) +{ +	if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER) +		setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER); +} + +#undef pr_fmt  #define pr_fmt(fmt) fmt +#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" +#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" +#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" +#define VMSCAPE_MSG_SMT "VMSCAPE: SMT on, STIBP is required for full protection. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/vmscape.html for more details.\n" + +void cpu_bugs_smt_update(void) +{ +	mutex_lock(&spec_ctrl_mutex); + +	if (sched_smt_active() && unprivileged_ebpf_enabled() && +	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) +		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); + +	switch (spectre_v2_user_stibp) { +	case SPECTRE_V2_USER_NONE: +		break; +	case SPECTRE_V2_USER_STRICT: +	case SPECTRE_V2_USER_STRICT_PREFERRED: +		update_stibp_strict(); +		break; +	case SPECTRE_V2_USER_PRCTL: +	case SPECTRE_V2_USER_SECCOMP: +		update_indir_branch_cond(); +		break; +	} + +	switch (mds_mitigation) { +	case MDS_MITIGATION_FULL: +	case MDS_MITIGATION_AUTO: +	case MDS_MITIGATION_VMWERV: +		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) +			pr_warn_once(MDS_MSG_SMT); +		update_mds_branch_idle(); +		break; +	case MDS_MITIGATION_OFF: +		break; +	} + +	switch (taa_mitigation) { +	case TAA_MITIGATION_VERW: +	case TAA_MITIGATION_AUTO: +	case TAA_MITIGATION_UCODE_NEEDED: +		if (sched_smt_active()) +			pr_warn_once(TAA_MSG_SMT); +		break; +	case TAA_MITIGATION_TSX_DISABLED: +	case TAA_MITIGATION_OFF: +		break; +	} + +	switch (mmio_mitigation) { +	case MMIO_MITIGATION_VERW: +	case MMIO_MITIGATION_AUTO: +	case MMIO_MITIGATION_UCODE_NEEDED: +		if (sched_smt_active()) +			pr_warn_once(MMIO_MSG_SMT); +		break; +	case MMIO_MITIGATION_OFF: +		break; +	} + +	switch (tsa_mitigation) { +	case TSA_MITIGATION_USER_KERNEL: +	case TSA_MITIGATION_VM: +	case TSA_MITIGATION_AUTO: +	case TSA_MITIGATION_FULL: +		/* +		 * TSA-SQ can potentially lead to info leakage between +		 * SMT threads. +		 */ +		if (sched_smt_active()) +			static_branch_enable(&cpu_buf_idle_clear); +		else +			static_branch_disable(&cpu_buf_idle_clear); +		break; +	case TSA_MITIGATION_NONE: +	case TSA_MITIGATION_UCODE_NEEDED: +		break; +	} + +	switch (vmscape_mitigation) { +	case VMSCAPE_MITIGATION_NONE: +	case VMSCAPE_MITIGATION_AUTO: +		break; +	case VMSCAPE_MITIGATION_IBPB_ON_VMEXIT: +	case VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER: +		/* +		 * Hypervisors can be attacked across-threads, warn for SMT when +		 * STIBP is not already enabled system-wide. +		 * +		 * Intel eIBRS (!AUTOIBRS) implies STIBP on. 
+		 */ +		if (!sched_smt_active() || +		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || +		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED || +		    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && +		     !boot_cpu_has(X86_FEATURE_AUTOIBRS))) +			break; +		pr_warn_once(VMSCAPE_MSG_SMT); +		break; +	} + +	mutex_unlock(&spec_ctrl_mutex); +} +  #ifdef CONFIG_SYSFS  #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" @@ -3578,6 +3686,11 @@ static ssize_t tsa_show_state(char *buf)  	return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);  } +static ssize_t vmscape_show_state(char *buf) +{ +	return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]); +} +  static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,  			       char *buf, unsigned int bug)  { @@ -3644,6 +3757,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr  	case X86_BUG_TSA:  		return tsa_show_state(buf); +	case X86_BUG_VMSCAPE: +		return vmscape_show_state(buf); +  	default:  		break;  	} @@ -3735,6 +3851,11 @@ ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *bu  {  	return cpu_show_common(dev, attr, buf, X86_BUG_TSA);  } + +ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf) +{ +	return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE); +}  #endif  void __warn_thunk(void) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 34a054181c4d..f98ec9c7fc07 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1236,55 +1236,71 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {  #define ITS_NATIVE_ONLY	BIT(9)  /* CPU is affected by Transient Scheduler Attacks */  #define TSA		BIT(10) +/* CPU is affected by VMSCAPE */ +#define VMSCAPE		BIT(11)  static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { -	VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE,	     X86_STEP_MAX,	SRBDS), -	VULNBL_INTEL_STEPS(INTEL_HASWELL,	     X86_STEP_MAX,	SRBDS), -	VULNBL_INTEL_STEPS(INTEL_HASWELL_L,	     X86_STEP_MAX,	SRBDS), -	VULNBL_INTEL_STEPS(INTEL_HASWELL_G,	     X86_STEP_MAX,	SRBDS), -	VULNBL_INTEL_STEPS(INTEL_HASWELL_X,	     X86_STEP_MAX,	MMIO), -	VULNBL_INTEL_STEPS(INTEL_BROADWELL_D,	     X86_STEP_MAX,	MMIO), -	VULNBL_INTEL_STEPS(INTEL_BROADWELL_G,	     X86_STEP_MAX,	SRBDS), -	VULNBL_INTEL_STEPS(INTEL_BROADWELL_X,	     X86_STEP_MAX,	MMIO), -	VULNBL_INTEL_STEPS(INTEL_BROADWELL,	     X86_STEP_MAX,	SRBDS), -	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,		      0x5,	MMIO | RETBLEED | GDS), -	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS), -	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS), -	VULNBL_INTEL_STEPS(INTEL_SKYLAKE,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS), -	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,		      0xb,	MMIO | RETBLEED | GDS | SRBDS), -	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS), -	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,		      0xc,	MMIO | RETBLEED | GDS | SRBDS), -	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS), -	VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L,	     X86_STEP_MAX,	RETBLEED), +	VULNBL_INTEL_STEPS(INTEL_SANDYBRIDGE_X,	     X86_STEP_MAX,	VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_SANDYBRIDGE,	     X86_STEP_MAX,	VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE_X,	     X86_STEP_MAX,	VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE,	     X86_STEP_MAX,	SRBDS | 
VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_HASWELL,	     X86_STEP_MAX,	SRBDS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_HASWELL_L,	     X86_STEP_MAX,	SRBDS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_HASWELL_G,	     X86_STEP_MAX,	SRBDS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_HASWELL_X,	     X86_STEP_MAX,	MMIO | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_BROADWELL_D,	     X86_STEP_MAX,	MMIO | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_BROADWELL_X,	     X86_STEP_MAX,	MMIO | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_BROADWELL_G,	     X86_STEP_MAX,	SRBDS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_BROADWELL,	     X86_STEP_MAX,	SRBDS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,		      0x5,	MMIO | RETBLEED | GDS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_SKYLAKE,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,		      0xb,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,		      0xc,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L,	     X86_STEP_MAX,	RETBLEED | VMSCAPE),  	VULNBL_INTEL_STEPS(INTEL_ICELAKE_L,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),  	VULNBL_INTEL_STEPS(INTEL_ICELAKE_D,	     X86_STEP_MAX,	MMIO | GDS | ITS | ITS_NATIVE_ONLY),  	VULNBL_INTEL_STEPS(INTEL_ICELAKE_X,	     X86_STEP_MAX,	MMIO | GDS | ITS | ITS_NATIVE_ONLY), -	VULNBL_INTEL_STEPS(INTEL_COMETLAKE,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS), -	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,		      0x0,	MMIO | RETBLEED | ITS), -	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS), +	VULNBL_INTEL_STEPS(INTEL_COMETLAKE,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,		      0x0,	MMIO | RETBLEED | ITS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),  	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L,	     X86_STEP_MAX,	GDS | ITS | ITS_NATIVE_ONLY),  	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE,	     X86_STEP_MAX,	GDS | ITS | ITS_NATIVE_ONLY),  	VULNBL_INTEL_STEPS(INTEL_LAKEFIELD,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED),  	VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY), -	VULNBL_INTEL_TYPE(INTEL_ALDERLAKE,		     ATOM,	RFDS), -	VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L,	     X86_STEP_MAX,	RFDS), -	VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE,		     ATOM,	RFDS), -	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_P,	     X86_STEP_MAX,	RFDS), -	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_S,	     X86_STEP_MAX,	RFDS), -	VULNBL_INTEL_STEPS(INTEL_ATOM_GRACEMONT,     X86_STEP_MAX,	RFDS), +	VULNBL_INTEL_TYPE(INTEL_ALDERLAKE,		     ATOM,	RFDS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_ALDERLAKE,	     X86_STEP_MAX,	VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L,	     X86_STEP_MAX,	RFDS | VMSCAPE), +	VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE,		     ATOM,	RFDS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE,	     X86_STEP_MAX,	VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_P,	     X86_STEP_MAX,	RFDS | VMSCAPE), +	
VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_S,	     X86_STEP_MAX,	RFDS | VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_METEORLAKE_L,	     X86_STEP_MAX,	VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_ARROWLAKE_H,	     X86_STEP_MAX,	VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_ARROWLAKE,	     X86_STEP_MAX,	VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_ARROWLAKE_U,	     X86_STEP_MAX,	VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_LUNARLAKE_M,	     X86_STEP_MAX,	VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_SAPPHIRERAPIDS_X,   X86_STEP_MAX,	VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_GRANITERAPIDS_X,    X86_STEP_MAX,	VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_EMERALDRAPIDS_X,    X86_STEP_MAX,	VMSCAPE), +	VULNBL_INTEL_STEPS(INTEL_ATOM_GRACEMONT,     X86_STEP_MAX,	RFDS | VMSCAPE),  	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RFDS),  	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_D,     X86_STEP_MAX,	MMIO | RFDS),  	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_L,     X86_STEP_MAX,	MMIO | MMIO_SBDS | RFDS),  	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT,      X86_STEP_MAX,	RFDS),  	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_D,    X86_STEP_MAX,	RFDS),  	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEP_MAX,	RFDS), +	VULNBL_INTEL_STEPS(INTEL_ATOM_CRESTMONT_X,   X86_STEP_MAX,	VMSCAPE),  	VULNBL_AMD(0x15, RETBLEED),  	VULNBL_AMD(0x16, RETBLEED), -	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), -	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO), -	VULNBL_AMD(0x19, SRSO | TSA), -	VULNBL_AMD(0x1a, SRSO), +	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO | VMSCAPE), +	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO | VMSCAPE), +	VULNBL_AMD(0x19, SRSO | TSA | VMSCAPE), +	VULNBL_AMD(0x1a, SRSO | VMSCAPE),  	{}  }; @@ -1543,6 +1559,14 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)  		}  	} +	/* +	 * Set the bug only on bare-metal. A nested hypervisor should already be +	 * deploying IBPB to isolate itself from nested guests. +	 */ +	if (cpu_matches(cpu_vuln_blacklist, VMSCAPE) && +	    !boot_cpu_has(X86_FEATURE_HYPERVISOR)) +		setup_force_cpu_bug(X86_BUG_VMSCAPE); +  	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))  		return; | 
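
The hunks above introduce the per-CPU x86_ibpb_exit_to_user flag and the X86_FEATURE_IBPB_EXIT_TO_USER capability, but the code that consumes them on the return-to-userspace path is not part of this diff. The fragment below is a minimal sketch of such a consumer, assuming the flag's declaration and the standard IBPB/per-CPU helpers are visible in the including file; the helper name and its exact placement on the exit path are illustrative, not taken from this patch.

/*
 * Illustrative sketch (not part of the hunks above): on return to
 * userspace, if this CPU has run a potentially malicious guest since
 * the last exit, issue one IBPB to flush guest-trained branch
 * predictor state, then clear the flag so the barrier is not repeated
 * on every subsequent exit.
 */
#include <asm/cpufeature.h>
#include <asm/nospec-branch.h>	/* indirect_branch_prediction_barrier() */

static __always_inline void vmscape_ibpb_before_exit_to_user(void)	/* name is illustrative */
{
	if (!cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
		return;

	if (unlikely(this_cpu_read(x86_ibpb_exit_to_user))) {
		indirect_branch_prediction_barrier();
		this_cpu_write(x86_ibpb_exit_to_user, false);
	}
}

Behaviour is selected with the new vmscape= command line option parsed above (off, ibpb, or force) and reported through the vulnerabilities sysfs entry backed by cpu_show_vmscape().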

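For completeness, the producer side of the flag also lives outside this diff: once a CPU has executed guest code, the flag is raised so that the next transition to userspace takes the IBPB sketched above. A hedged sketch follows; where exactly such a hook would sit on the KVM vcpu-run path is an assumption here, and only the flag and the feature bit come from the patch itself.

/*
 * Illustrative sketch (not part of the hunks above): mark this CPU as
 * having run a potentially malicious guest.  The exact KVM call site
 * for this helper is an assumption of this sketch.
 */
static inline void vmscape_mark_guest_ran(void)	/* name is illustrative */
{
	if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
		this_cpu_write(x86_ibpb_exit_to_user, true);
}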