summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--kern/perfmon.c53
-rw-r--r--kern/perfmon.h10
2 files changed, 58 insertions, 5 deletions
diff --git a/kern/perfmon.c b/kern/perfmon.c
index d5d6589..ed848e3 100644
--- a/kern/perfmon.c
+++ b/kern/perfmon.c
@@ -382,23 +382,63 @@ perfmon_cpu_pmu_init(struct perfmon_cpu_pmu *cpu_pmu)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(cpu_pmu->pmcs); i++) {
- struct perfmon_cpu_pmu *cpu_pmc;
+ struct perfmon_cpu_pmc *pmc;
- cpu_pmu = &cpu_pmu->pmcs[i];
+ pmc = &cpu_pmu->pmcs[i];
- pmc.nr_refs = 0;
+ pmc->nr_refs = 0;
pmc->prev_value = pmu_driver.read(perfmon_pmu.pmcs[i].id);
pmc->overflow_id = 0;
}
}
+static struct perfmon_cpu_pmc *
+perfmon_cpu_pmu_get_pmc_from_id(unsigned int pmc_id)
+{
+ unsigned int pmc_index;
+ struct perfmon_cpu_pmu *cpu_pmu;
+ struct perfmon_cpu_pmc *cpu_pmc;
+
+ assert(perfmon_pmc_id_to_index[pmc_id] != UINT32_MAX);
+ pmc_index = perfmon_pmc_id_to_index[pmc_id];
+
+    /* TODO: this lookup of the local cpu_pmu may be repeated many times in a
+     * row. We may want to have the cpu_pmu pointer passed in by the caller
+     * instead.
+     */
+ cpu_pmu = cpu_local_ptr(perfmon_cpu_pmu);
+ cpu_pmc = &cpu_pmu->pmcs[pmc_index];
+
+ assert(cpu_pmc->nr_refs != 0);
+
+ return cpu_pmc;
+}
+
+uint64_t
+perfmon_cpu_pmc_get_prev(unsigned int pmc_id)
+{
+ struct perfmon_cpu_pmc *cpu_pmc;
+
+ cpu_pmc = perfmon_cpu_pmu_get_pmc_from_id(pmc_id);
+
+ return cpu_pmc->prev_value;
+}
+
+void
+perfmon_cpu_pmc_inc_of(unsigned int pmc_id)
+{
+ struct perfmon_cpu_pmc *cpu_pmc;
+
+ cpu_pmc = perfmon_cpu_pmu_get_pmc_from_id(pmc_id);
+ cpu_pmc->overflow_id++;
+}
+
static void
perfmon_cpu_pmu_load(struct perfmon_cpu_pmu *cpu_pmu, unsigned int pmc_index)
{
struct perfmon_cpu_pmc *cpu_pmc;
- cpu_pmc = cpu_pmu->pmcs[pmc_index];
+ cpu_pmc = &cpu_pmu->pmcs[pmc_index];
if (cpu_pmc->nr_refs == 0) {
pmu_driver.start(perfmon_pmu.pmcs[pmc_index].id,
@@ -564,7 +604,7 @@ perfmon_event_sync(struct perfmon_cpu_pmu *cpu_pmu,
uint64_t count;
pmc = perfmon_pmc_from_index(event->pmc_index);
- cpu_pmc = cpu_pmu->pmcs[event->pmc_index];
+ cpu_pmc = &cpu_pmu->pmcs[event->pmc_index];
count = pmu_driver.read(pmc->id);
if (unlikely(event->overflow_id != cpu_pmc->overflow_id)) {
@@ -1018,6 +1058,9 @@ static void
perfmon_group_sync_local(struct perfmon_group *group)
{
struct perfmon_event *event;
+ struct perfmon_cpu_pmu *cpu_pmu;
+
+ cpu_pmu = cpu_local_ptr(perfmon_cpu_pmu);
/* The group sync duration *should be* limited as a group may only have a
 * limited number of *different* events.
diff --git a/kern/perfmon.h b/kern/perfmon.h
index 6a2d9c6..0e5cd19 100644
--- a/kern/perfmon.h
+++ b/kern/perfmon.h
@@ -223,6 +223,16 @@ int
perfmon_pmu_register(struct perfmon_pmu_ops *driver);
/*
+ * Get the last value of given pmc.
+ */
+uint64_t perfmon_cpu_pmc_get_prev(unsigned int pmc_id);
+
+/*
+ * Increment overflow counter for given pmc.
+ */
+void perfmon_cpu_pmc_inc_of(unsigned int pmc_id);
+
+/*
 * PMU init module
* - module fully initialized
*/