1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
|
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include "util/evsel.h"
#include "util/env.h"
#include "util/pmu.h"
#include "util/pmus.h"
#include "linux/string.h"
#include "topdown.h"
#include "evsel.h"
#include "util/debug.h"
#include "env.h"
#define IBS_FETCH_L3MISSONLY (1ULL << 59)
#define IBS_OP_L3MISSONLY (1ULL << 16)
/*
 * x86 override: sample the full WEIGHT_STRUCT (latency/instruction-width
 * fields) rather than the plain scalar WEIGHT used by the generic code.
 */
void arch_evsel__set_sample_weight(struct evsel *evsel)
{
	evsel__set_sample_bit(evsel, WEIGHT_STRUCT);
}
/* Does the PMU backing this evsel support the Intel perf-metrics feature? */
bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
{
	struct perf_pmu *core_pmu;

	/* Fast global check: no slots event means no perf metrics anywhere. */
	if (!topdown_sys_has_perf_metrics())
		return false;

	/*
	 * PERF_TYPE_RAW identifies the core PMU ("cpu" on non-hybrid,
	 * "cpu_core" on hybrid). Since topdown_sys_has_perf_metrics() already
	 * verified the slots event exists on the core PMU, matching the type
	 * here is sufficient to confirm the feature for this evsel.
	 */
	core_pmu = evsel__find_pmu(evsel);
	if (!core_pmu)
		return false;

	return core_pmu->type == PERF_TYPE_RAW;
}
/*
 * Topdown slots/metrics events only work when grouped with each other,
 * so they must stay inside their group on PMUs that have perf metrics.
 */
bool arch_evsel__must_be_in_group(const struct evsel *evsel)
{
	return evsel__sys_has_perf_metrics(evsel) &&
	       (arch_is_topdown_metrics(evsel) || arch_is_topdown_slots(evsel));
}
/*
 * Format the name of a generic hardware event into @bf.
 *
 * The config encodes the event index in the low bits and, on hybrid
 * systems, the PMU type in the high bits (PERF_PMU_TYPE_SHIFT).
 * Returns the number of characters written (scnprintf semantics).
 */
int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	u64 event = evsel->core.attr.config & PERF_HW_EVENT_MASK;
	u64 pmu_type = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
	const char *name = "unknown-hardware";

	if (event < PERF_COUNT_HW_MAX && evsel__hw_names[event])
		name = evsel__hw_names[event];

	/* Non-hybrid: no PMU type encoded, print the bare event name. */
	if (!pmu_type)
		return scnprintf(bf, size, "%s", name);

	/* Hybrid: qualify with the PMU name, e.g. "cpu_core/cycles/". */
	return scnprintf(bf, size, "%s/%s/",
			 evsel->pmu ? evsel->pmu->name : "cpu",
			 name);
}
/* One-shot warning emitted when AMD IBS L3-miss filtering is requested. */
static void ibs_l3miss_warn(void)
{
	pr_warning(
"WARNING: Hw internally resets sampling period when L3 Miss Filtering is enabled\n"
"and tagged operation does not cause L3 Miss. This causes sampling period skew.\n");
}
void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
{
struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu;
static int warned_once;
if (warned_once || !x86__is_amd_cpu())
return;
evsel_pmu = evsel__find_pmu(evsel);
if (!evsel_pmu)
return;
ibs_fetch_pmu = perf_pmus__find("ibs_fetch");
ibs_op_pmu = perf_pmus__find("ibs_op");
if (ibs_fetch_pmu && ibs_fetch_pmu->type == evsel_pmu->type) {
if (attr->config & IBS_FETCH_L3MISSONLY) {
ibs_l3miss_warn();
warned_once = 1;
}
} else if (ibs_op_pmu && ibs_op_pmu->type == evsel_pmu->type) {
if (attr->config & IBS_OP_L3MISSONLY) {
ibs_l3miss_warn();
warned_once = 1;
}
}
}
/*
 * Produce a friendlier perf_event_open() error message for AMD IBS events.
 * Writes into @msg and returns its length, or returns 0 to fall back to
 * the generic error text.
 */
int arch_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
{
	bool is_ibs;

	if (!x86__is_amd_cpu())
		return 0;

	/* IBS events: either precise_ip, or opened directly on an ibs_* PMU. */
	is_ibs = evsel->core.attr.precise_ip ||
		 (evsel->pmu && !strncmp(evsel->pmu->name, "ibs", 3));
	if (!is_ibs)
		return 0;

	/* IBS cannot filter by privilege level; any exclude_* bit is fatal. */
	if (!(evsel->core.attr.exclude_kernel || evsel->core.attr.exclude_user ||
	      evsel->core.attr.exclude_hv || evsel->core.attr.exclude_idle ||
	      evsel->core.attr.exclude_host || evsel->core.attr.exclude_guest))
		return 0;

	return scnprintf(msg, size, "AMD IBS doesn't support privilege filtering. Try "
			 "again without the privilege modifiers (like 'k') at the end.");
}
|