author		Sean Christopherson <seanjc@google.com>	2025-08-06 12:56:39 -0700
committer	Sean Christopherson <seanjc@google.com>	2025-09-18 12:56:44 -0700
commit		51f34b1e650fc5843530266cea4341750bd1ae37 (patch)
tree		726fb04360e99f7ad8bad81325d5327e49da39f8
parent		e3d1f2826da68c763cba1f29dba5cc81d3fdaaee (diff)
KVM: x86/pmu: Snapshot host (i.e. perf's) reported PMU capabilities
Take a snapshot of the unadulterated PMU capabilities provided by perf so
that KVM can compare guest vPMU capabilities against hardware capabilities
when determining whether or not to intercept PMU MSRs (and RDPMC).

Reviewed-by: Sandipan Das <sandipan.das@amd.com>
Tested-by: Xudong Hao <xudong.hao@intel.com>
Link: https://lore.kernel.org/r/20250806195706.1650976-18-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
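[Editor's sketch, not part of the patch: one way the raw snapshot could be consulted later when deciding whether PMU MSR/RDPMC interception is needed. The helper name and exact checks are illustrative assumptions; only the fields of perf's struct x86_pmu_capability and KVM's struct kvm_pmu are real, and kvm_host_pmu is file-local to pmu.c as added below.]

	/*
	 * Illustrative only: compare the guest's configured vPMU against the
	 * unmodified hardware snapshot.  kvm_vcpu_pmu_can_pass_through() is a
	 * hypothetical helper, not an upstream function.
	 */
	static bool kvm_vcpu_pmu_can_pass_through(struct kvm_vcpu *vcpu)
	{
		struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

		/* Don't pass through if the guest is promised more counters
		 * than hardware actually provides. */
		if (pmu->nr_arch_gp_counters > kvm_host_pmu.num_counters_gp ||
		    pmu->nr_arch_fixed_counters > kvm_host_pmu.num_counters_fixed)
			return false;

		/* A version 0 (disabled) vPMU never warrants pass-through. */
		return pmu->version && pmu->version <= kvm_host_pmu.version;
	}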
-rw-r--r--	arch/x86/kvm/pmu.c	15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 40b076a7a5f6..5205f0d9ced9 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -26,6 +26,10 @@
/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
+/* Unadulterated PMU capabilities of the host, i.e. of hardware. */
+static struct x86_pmu_capability __read_mostly kvm_host_pmu;
+
+/* KVM's PMU capabilities, i.e. the intersection of KVM and hardware support. */
struct x86_pmu_capability __read_mostly kvm_pmu_cap;
EXPORT_SYMBOL_GPL(kvm_pmu_cap);
@@ -104,6 +108,8 @@ void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;
+ perf_get_x86_pmu_capability(&kvm_host_pmu);
+
/*
* Hybrid PMUs don't play nice with virtualization without careful
* configuration by userspace, and KVM's APIs for reporting supported
@@ -114,18 +120,16 @@ void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
enable_pmu = false;
if (enable_pmu) {
- perf_get_x86_pmu_capability(&kvm_pmu_cap);
-
/*
* WARN if perf did NOT disable hardware PMU if the number of
* architecturally required GP counters aren't present, i.e. if
* there are a non-zero number of counters, but fewer than what
* is architecturally required.
*/
- if (!kvm_pmu_cap.num_counters_gp ||
- WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
+ if (!kvm_host_pmu.num_counters_gp ||
+ WARN_ON_ONCE(kvm_host_pmu.num_counters_gp < min_nr_gp_ctrs))
enable_pmu = false;
- else if (is_intel && !kvm_pmu_cap.version)
+ else if (is_intel && !kvm_host_pmu.version)
enable_pmu = false;
}
@@ -134,6 +138,7 @@ void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
return;
}
+ memcpy(&kvm_pmu_cap, &kvm_host_pmu, sizeof(kvm_host_pmu));
kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
pmu_ops->MAX_NR_GP_COUNTERS);
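[Editor's illustration, values assumed rather than taken from the patch: after kvm_init_pmu_capability() runs, the raw snapshot and the clamped copy can diverge.]

	/*
	 * Hypothetical Intel host: PMU version 5, 8 GP counters.
	 *
	 *   kvm_host_pmu.version          == 5   raw perf snapshot, unmodified
	 *   kvm_host_pmu.num_counters_gp  == 8
	 *
	 *   kvm_pmu_cap.version           == 2   min(5, 2)
	 *   kvm_pmu_cap.num_counters_gp   == min(8, pmu_ops->MAX_NR_GP_COUNTERS)
	 *
	 * KVM advertises the clamped kvm_pmu_cap to guests, while kvm_host_pmu
	 * preserves what hardware actually supports for interception decisions.
	 */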