path: root/arch/x86/kernel/cpu/bugs.c
author    Paolo Bonzini <pbonzini@redhat.com>    2025-09-30 13:23:06 -0400
committer Paolo Bonzini <pbonzini@redhat.com>    2025-09-30 13:23:06 -0400
commit    8cbb0df2945a0fcb1f0b4384e65f13ec727baef4 (patch)
tree      984bffa305746880f623534f0dc3717d169bc595 /arch/x86/kernel/cpu/bugs.c
parent    68f6051098f9b12bf0e227f753cf60e2f751b03d (diff)
parent    e6157256ee1a6a500da42556e059d4dec2ade871 (diff)
Merge tag 'kvmarm-fixes-6.17-2' of https://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 changes for 6.17, round #3

 - Invalidate nested MMUs upon freeing the PGD to avoid WARNs when
   visiting from an MMU notifier

 - Fixes to the TLB match process and TLB invalidation range for
   managing the VNCR pseudo-TLB

 - Prevent SPE from erroneously profiling guests due to UNKNOWN reset
   values in PMSCR_EL1

 - Fix save/restore of host MDCR_EL2 to account for eagerly programming
   it at vcpu_load() on VHE systems

 - Correct lock ordering when dealing with VGIC LPIs, avoiding scenarios
   where an xarray's spinlock was nested with a *raw* spinlock

 - Permit stage-2 read permission aborts which are possible in the case
   of NV depending on the guest hypervisor's stage-2 translation

 - Call raw_spin_unlock() instead of the internal spinlock API

 - Fix parameter ordering when assigning VBAR_EL1

[Pull into kvm/master to fix conflicts. - Paolo]
Diffstat (limited to 'arch/x86/kernel/cpu/bugs.c')
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 9 +++++++++
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 49ef1b832c1a..af838b8d845c 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -416,6 +416,10 @@ static bool __init should_mitigate_vuln(unsigned int bug)
 		       cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
 		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
 		       (smt_mitigations != SMT_MITIGATIONS_OFF);
+
+	case X86_BUG_SPEC_STORE_BYPASS:
+		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER);
+
 	default:
 		WARN(1, "Unknown bug %x\n", bug);
 		return false;
@@ -2710,6 +2714,11 @@ static void __init ssb_select_mitigation(void)
 		ssb_mode = SPEC_STORE_BYPASS_DISABLE;
 		break;
 	case SPEC_STORE_BYPASS_CMD_AUTO:
+		if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS))
+			ssb_mode = SPEC_STORE_BYPASS_PRCTL;
+		else
+			ssb_mode = SPEC_STORE_BYPASS_NONE;
+		break;
 	case SPEC_STORE_BYPASS_CMD_PRCTL:
 		ssb_mode = SPEC_STORE_BYPASS_PRCTL;
 		break;
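
With this change, the AUTO command falls back to prctl-based control whenever the user->user attack vector is being mitigated, so tasks see SSB through the existing PR_GET/SET_SPECULATION_CTRL interface. A minimal userspace sketch (not part of this patch; assumes a Linux 4.17+ kernel and glibc headers) to query the per-task state:

/*
 * Sketch: query the per-task Speculative Store Bypass state a process
 * observes when ssb_mode is SPEC_STORE_BYPASS_PRCTL.  Not part of the
 * patch above.
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>	/* PR_SPEC_* flags */

int main(void)
{
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0) {
		perror("prctl(PR_GET_SPECULATION_CTRL)");
		return 1;
	}
	if (state == PR_SPEC_NOT_AFFECTED)
		puts("CPU not affected by speculative store bypass");
	else if (state & PR_SPEC_PRCTL)
		/* PR_SPEC_DISABLE set means the misfeature is disabled,
		 * i.e. the mitigation is active for this task. */
		printf("per-task control available, mitigation %s\n",
		       (state & PR_SPEC_DISABLE) ? "enabled" : "disabled");
	else
		puts("no per-task control (e.g. ssb_mode is NONE or DISABLE)");
	return 0;
}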