author     Remy Noel <mocramis@gmail.com>    2018-04-06 20:01:46 +0200
committer  Remy Noel <mocramis@gmail.com>    2018-04-21 00:04:20 +0200
commit     c59b444cf25e01de7911514af9ebc15d342197b5 (patch)
tree       ec997d4d1eb6433d8c977cb69a38394c409a3efa /kern/perfmon.c
parent     0274c8726015e83c989aac905b6626e40397495c (diff)
kern/perfmon: add perfmon_cpu_pmc get/setters.
Diffstat (limited to 'kern/perfmon.c')
-rw-r--r--    kern/perfmon.c    53
1 file changed, 48 insertions(+), 5 deletions(-)
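This commit wraps the per-CPU PMC state behind pmc_id-based accessors instead of letting callers reach into struct perfmon_cpu_pmc directly. A minimal sketch of how an overflow interrupt path might consume them; the perfmon_overflow_intr handler and its wrap handling are hypothetical, only the two accessors themselves come from this commit:

/*
 * Hypothetical sketch: on a PMC overflow interrupt, record one more wrap
 * of the hardware counter so that perfmon_event_sync() can later detect
 * it through the overflow_id mismatch.
 */
static void
perfmon_overflow_intr(unsigned int pmc_id)
{
    uint64_t prev;

    /* Snapshot taken the last time this counter was synced. */
    prev = perfmon_cpu_pmc_get_prev(pmc_id);
    (void)prev; /* A real handler would use this to compute the delta. */

    /* Bump the per-CPU overflow generation for this counter. */
    perfmon_cpu_pmc_inc_of(pmc_id);
}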
diff --git a/kern/perfmon.c b/kern/perfmon.c
index d5d6589..ed848e3 100644
--- a/kern/perfmon.c
+++ b/kern/perfmon.c
@@ -382,23 +382,63 @@ perfmon_cpu_pmu_init(struct perfmon_cpu_pmu *cpu_pmu)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(cpu_pmu->pmcs); i++) {
- struct perfmon_cpu_pmu *cpu_pmc;
+ struct perfmon_cpu_pmc *pmc;
- cpu_pmu = &cpu_pmu->pmcs[i];
+ pmc = &cpu_pmu->pmcs[i];
- pmc.nr_refs = 0;
+ pmc->nr_refs = 0;
pmc->prev_value = pmu_driver.read(perfmon_pmu.pmcs[i].id);
pmc->overflow_id = 0;
}
}
+static struct perfmon_cpu_pmc *
+perfmon_cpu_pmu_get_pmc_from_id(unsigned int pmc_id)
+{
+ unsigned int pmc_index;
+ struct perfmon_cpu_pmu *cpu_pmu;
+ struct perfmon_cpu_pmc *cpu_pmc;
+
+ assert(perfmon_pmc_id_to_index[pmc_id] != UINT32_MAX);
+ pmc_index = perfmon_pmc_id_to_index[pmc_id];
+
+ /* TODO: this may be called many times in a row, in which case the
+  * caller could pass the cpu_pmu pointer instead of recomputing it here.
+  */
+ cpu_pmu = cpu_local_ptr(perfmon_cpu_pmu);
+ cpu_pmc = &cpu_pmu->pmcs[pmc_index];
+
+ assert(cpu_pmc->nr_refs != 0);
+
+ return cpu_pmc;
+}
+
+uint64_t
+perfmon_cpu_pmc_get_prev(unsigned int pmc_id)
+{
+ struct perfmon_cpu_pmc *cpu_pmc;
+
+ cpu_pmc = perfmon_cpu_pmu_get_pmc_from_id(pmc_id);
+
+ return cpu_pmc->prev_value;
+}
+
+void
+perfmon_cpu_pmc_inc_of(unsigned int pmc_id)
+{
+ struct perfmon_cpu_pmc *cpu_pmc;
+
+ cpu_pmc = perfmon_cpu_pmu_get_pmc_from_id(pmc_id);
+ cpu_pmc->overflow_id++;
+}
+
static void
perfmon_cpu_pmu_load(struct perfmon_cpu_pmu *cpu_pmu, unsigned int pmc_index)
{
struct perfmon_cpu_pmc *cpu_pmc;
- cpu_pmc = cpu_pmu->pmcs[pmc_index];
+ cpu_pmc = &cpu_pmu->pmcs[pmc_index];
if (cpu_pmc->nr_refs == 0) {
pmu_driver.start(perfmon_pmu.pmcs[pmc_index].id,
@@ -564,7 +604,7 @@ perfmon_event_sync(struct perfmon_cpu_pmu *cpu_pmu,
uint64_t count;
pmc = perfmon_pmc_from_index(event->pmc_index);
- cpu_pmc = cpu_pmu->pmcs[event->pmc_index];
+ cpu_pmc = &cpu_pmu->pmcs[event->pmc_index];
count = pmu_driver.read(pmc->id);
if (unlikely(event->overflow_id != cpu_pmc->overflow_id)) {
@@ -1018,6 +1058,9 @@ static void
perfmon_group_sync_local(struct perfmon_group *group)
{
struct perfmon_event *event;
+ struct perfmon_cpu_pmu *cpu_pmu;
+
+ cpu_pmu = cpu_local_ptr(perfmon_cpu_pmu);
/* The group sync duration *should be* bounded, as a group may only contain
 * a limited number of *different* events.