| author | Yazen Ghannam <yazen.ghannam@amd.com> | 2025-09-08 15:40:31 +0000 |
|---|---|---|
| committer | Borislav Petkov (AMD) <bp@alien8.de> | 2025-09-11 12:22:37 +0200 |
| commit | 669ce4984b729ad5b4c6249d4a8721ae52398bfb | |
| tree | 1667a3dcb6651faee642987319b431b4599e3fc9 | |
| parent | cfffcf97997bd35f4a59e035523d1762568bdbad | |
x86/mce: Define BSP-only init
Currently, MCA initialization is executed identically on each CPU as it
is brought online. However, a number of MCA initialization tasks only
need to be done once.

Define a function to collect all 'global' init tasks and call it from
the BSP only. Start with CPU features.
Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
Tested-by: Tony Luck <tony.luck@intel.com>
Link: https://lore.kernel.org/20250908-wip-mca-updates-v6-0-eef5d6c74b9c@amd.com
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/x86/include/asm/mce.h | 2 |
| -rw-r--r-- | arch/x86/kernel/cpu/common.c | 1 |
| -rw-r--r-- | arch/x86/kernel/cpu/mce/amd.c | 3 |
| -rw-r--r-- | arch/x86/kernel/cpu/mce/core.c | 28 |

4 files changed, 24 insertions, 10 deletions
```diff
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 3224f3862dc8..31e3cb550fb3 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -241,12 +241,14 @@ struct cper_ia_proc_ctx;
 
 #ifdef CONFIG_X86_MCE
 int mcheck_init(void);
+void mca_bsp_init(struct cpuinfo_x86 *c);
 void mcheck_cpu_init(struct cpuinfo_x86 *c);
 void mcheck_cpu_clear(struct cpuinfo_x86 *c);
 int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
 			       u64 lapic_id);
 #else
 static inline int mcheck_init(void) { return 0; }
+static inline void mca_bsp_init(struct cpuinfo_x86 *c) {}
 static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
 static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
 static inline int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 34a054181c4d..8bbfde05f04f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1784,6 +1784,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 		setup_clear_cpu_cap(X86_FEATURE_LA57);
 
 	detect_nopl();
+	mca_bsp_init(c);
 }
 
 void __init init_cpu_devs(void)
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index aa13c9304ad8..3c6c19eb0a18 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -653,9 +653,6 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 	u32 low = 0, high = 0, address = 0;
 	int offset = -1;
 
-	mce_flags.overflow_recov = cpu_feature_enabled(X86_FEATURE_OVERFLOW_RECOV);
-	mce_flags.succor	 = cpu_feature_enabled(X86_FEATURE_SUCCOR);
-	mce_flags.smca		 = cpu_feature_enabled(X86_FEATURE_SMCA);
 	mce_flags.amd_threshold	 = 1;
 
 	for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 9e31834b3542..79f3dd7f7851 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1837,13 +1837,6 @@ static void __mcheck_cpu_cap_init(void)
 	this_cpu_write(mce_num_banks, b);
 
 	__mcheck_cpu_mce_banks_init();
-
-	/* Use accurate RIP reporting if available. */
-	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
-		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
-
-	if (cap & MCG_SER_P)
-		mca_cfg.ser = 1;
 }
 
 static void __mcheck_cpu_init_generic(void)
@@ -2240,6 +2233,27 @@ DEFINE_IDTENTRY_RAW(exc_machine_check)
 }
 #endif
 
+void mca_bsp_init(struct cpuinfo_x86 *c)
+{
+	u64 cap;
+
+	if (!mce_available(c))
+		return;
+
+	mce_flags.overflow_recov = cpu_feature_enabled(X86_FEATURE_OVERFLOW_RECOV);
+	mce_flags.succor	 = cpu_feature_enabled(X86_FEATURE_SUCCOR);
+	mce_flags.smca		 = cpu_feature_enabled(X86_FEATURE_SMCA);
+
+	rdmsrq(MSR_IA32_MCG_CAP, cap);
+
+	/* Use accurate RIP reporting if available. */
+	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
+		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
+
+	if (cap & MCG_SER_P)
+		mca_cfg.ser = 1;
+}
+
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off:
```
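The pattern the patch establishes, detect and record system-wide MCA state once on the boot CPU and have every CPU's bring-up path only consume it, can be sketched in plain C as below. The names in this sketch (`mca_features`, `feature_enabled()`, `bsp_init()`, `cpu_init()`) are illustrative stand-ins rather than kernel definitions; only the mca_bsp_init()/mcheck_cpu_init() split they mirror comes from the patch itself.

```c
/*
 * Illustrative sketch of a BSP-only vs. per-CPU init split; not kernel code.
 * bsp_init() runs once on the boot CPU and fills global state,
 * cpu_init() runs on every CPU and only reads that state.
 */
#include <stdbool.h>
#include <stdio.h>

/* Global feature/config state: written once by the BSP, read-only afterwards. */
static struct {
	bool overflow_recov;
	bool succor;
	bool smca;
	bool ser;
} mca_features;	/* hypothetical stand-in for the kernel's mce_flags/mca_cfg */

static bool feature_enabled(const char *name)
{
	/* Stand-in for cpu_feature_enabled()/MSR probing. */
	(void)name;
	return true;
}

/* Runs exactly once, on the boot CPU (cf. mca_bsp_init() in the patch). */
static void bsp_init(void)
{
	mca_features.overflow_recov = feature_enabled("overflow_recov");
	mca_features.succor         = feature_enabled("succor");
	mca_features.smca           = feature_enabled("smca");
	mca_features.ser            = feature_enabled("ser");
}

/* Runs on every CPU as it comes online (cf. mcheck_cpu_init()). */
static void cpu_init(int cpu)
{
	/* Per-CPU setup consumes the global flags but never writes them. */
	printf("cpu%d: smca=%d ser=%d\n", cpu,
	       mca_features.smca, mca_features.ser);
}

int main(void)
{
	bsp_init();			/* once, before the other CPUs come up */
	for (int cpu = 0; cpu < 4; cpu++)
		cpu_init(cpu);		/* repeated for each online CPU */
	return 0;
}
```

The point mirrored from the commit message is that this global state only needs to be established once; the per-CPU online path can simply read it instead of re-deriving it on every CPU.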
