author    Paolo Bonzini <pbonzini@redhat.com>  2024-10-06 03:59:22 -0400
committer Paolo Bonzini <pbonzini@redhat.com>  2024-10-06 03:59:22 -0400
commit    c8d430db8eec7d4fd13a6bea27b7086a54eda6da (patch)
tree      3c9b35bc9372232183e745cc2a03995a8d053ff6 /tools/perf/tests/shell/lib/perf_metric_validation.py
parent    2a5fe5a01668e831af1de3951718fbf88b9a9b9c (diff)
parent    a1d402abf8e3ff1d821e88993fc5331784fac0da (diff)
Merge tag 'kvmarm-fixes-6.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for 6.12, take #1

- Fix pKVM error path on init, making sure we do not change critical
  system registers as we're about to fail

- Make sure that the host's vector length is capped by a value common
  to all CPUs

- Fix kvm_has_feat*() handling of "negative" features, as the current
  code is pretty broken

- Promote Joey to the status of official reviewer, while James steps
  down -- hopefully only temporarily
Diffstat (limited to 'tools/perf/tests/shell/lib/perf_metric_validation.py')
-rw-r--r--  tools/perf/tests/shell/lib/perf_metric_validation.py | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index a2d235252183b..0b94216c9c46d 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -95,7 +95,7 @@ class Validator:
                   indent=4)
 
     def get_results(self, idx: int = 0):
-        return self.results[idx]
+        return self.results.get(idx)
 
     def get_bounds(self, lb, ub, error, alias={}, ridx: int = 0) -> list:
         """
@@ -173,7 +173,10 @@ class Validator:
         pcnt = 0
         tcnt = 0
         rerun = list()
-        for name, val in self.get_results().items():
+        results = self.get_results()
+        if not results:
+            return
+        for name, val in results.items():
             if val < 0:
                 negmetric[name] = val
                 rerun.append(name)
@@ -532,6 +535,9 @@ class Validator:
         '''
         if not self.collectlist:
             self.parse_perf_metrics()
+        if not self.metrics:
+            print("No metric found for testing")
+            return 0
         self.create_rules()
         for i in range(0, len(self.workloads)):
             self.wlidx = i
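
All three hunks apply one defensive pattern: look up optional state with dict.get() rather than subscripting, then bail out early when the container is empty, instead of letting a KeyError or a call on None surface deeper in the run. The following minimal standalone sketch shows that pattern in isolation; the Validator below is a stripped-down, hypothetical stand-in, not the real perf_metric_validation.py class.

class Validator:
    def __init__(self, results=None, metrics=None):
        # Hypothetical stand-ins for the real class state:
        # results maps a run index to a {metric_name: value} dict.
        self.results = results if results is not None else {}
        self.metrics = metrics if metrics is not None else []

    def get_results(self, idx: int = 0):
        # dict.get() returns None for a missing index instead of
        # raising KeyError the way self.results[idx] would.
        return self.results.get(idx)

    def check_metrics(self):
        results = self.get_results()
        if not results:
            # First guard from the patch: without it, results.items()
            # would raise TypeError once get_results() returns None.
            return
        for name, val in results.items():
            if val < 0:
                print(f"negative metric: {name} = {val}")

    def test(self) -> int:
        if not self.metrics:
            # Second guard from the patch: nothing was parsed, so
            # report and exit cleanly rather than fail later.
            print("No metric found for testing")
            return 0
        self.check_metrics()
        return 0

# An empty validator now exits cleanly instead of raising:
v = Validator()
v.check_metrics()   # returns silently, no TypeError
print(v.test())     # prints "No metric found for testing", then 0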