summaryrefslogtreecommitdiff
path: root/kern
diff options
context:
space:
mode:
authorRemy Noel <mocramis@gmail.com>2018-01-20 11:41:39 +0100
committerRemy Noel <mocramis@gmail.com>2018-02-22 23:33:24 +0100
commit652168fe3d867eec17ac7fa318c8743d524ef40f (patch)
tree0d16e8f3f07c99f0f5b78c74794686651cde4aed /kern
parente363294da50ad602791ceccae7182a7d799bf032 (diff)
perfmon: split pmu into amd and intel architectures.
pmu_driver handling is now perfmon's responsibility.
Diffstat (limited to 'kern')
-rw-r--r--kern/perfmon.c55
-rw-r--r--kern/perfmon.h28
2 files changed, 70 insertions, 13 deletions
diff --git a/kern/perfmon.c b/kern/perfmon.c
index 2122710..59b7740 100644
--- a/kern/perfmon.c
+++ b/kern/perfmon.c
@@ -35,6 +35,7 @@
#include <kern/init.h>
#include <kern/kmem.h>
#include <kern/list.h>
+#include <kern/log.h>
#include <kern/macros.h>
#include <kern/panic.h>
#include <kern/percpu.h>
@@ -43,7 +44,6 @@
#include <kern/thread.h>
#include <kern/xcall.h>
#include <machine/cpu.h>
-#include <machine/pmu.h>
/*
* Performance monitoring event.
@@ -180,6 +180,8 @@ struct perfmon_cpu_pmu {
struct perfmon_cpu_pmc pmcs[PERFMON_MAX_PMCS];
};
+static struct perfmon_pmu_ops pmu_driver __read_mostly = {};
+
static struct perfmon_pmu perfmon_pmu;
static struct perfmon_cpu_pmu perfmon_cpu_pmu __percpu;
@@ -202,7 +204,7 @@ perfmon_translate(unsigned int *raw_event_idp, unsigned int event_type,
*raw_event_idp = event_id;
return 0;
case PERFMON_ET_GENERIC:
- return pmu_translate(raw_event_idp, event_id);
+ return pmu_driver.translate(raw_event_idp, event_id);
default:
panic("perfmon: unsupported event type");
}
@@ -226,7 +228,7 @@ perfmon_pmc_alloc(struct perfmon_pmc **pmcp, unsigned int raw_event_id)
}
assert(i < ARRAY_SIZE(perfmon_pmu.pmcs));
- error = pmu_alloc(&pmc->id, raw_event_id);
+ error = pmu_driver.alloc(&pmc->id, raw_event_id);
if (error)
return error;
@@ -307,8 +309,9 @@ perfmon_pmc_put(struct perfmon_pmc *pmc)
assert(pmc->nr_refs != 0);
pmc->nr_refs--;
- if (pmc->nr_refs == 0)
- pmu_free(pmc->id);
+ if (pmc->nr_refs == 0) {
+ pmu_driver.free(pmc->id);
+ }
spinlock_unlock(&perfmon_pmu.lock);
}
@@ -375,9 +378,10 @@ perfmon_cpu_pmu_load(struct perfmon_cpu_pmu *cpu_pmu, unsigned int pmc_index)
cpu_pmc = perfmon_cpu_pmu_get_pmc(cpu_pmu, pmc_index);
- if (cpu_pmc->nr_refs == 0)
- pmu_start(perfmon_pmu.pmcs[pmc_index].id,
- perfmon_pmu.pmcs[pmc_index].raw_event_id);
+ if (cpu_pmc->nr_refs == 0) {
+ pmu_driver.start(perfmon_pmu.pmcs[pmc_index].id,
+ perfmon_pmu.pmcs[pmc_index].raw_event_id);
+ }
cpu_pmc->nr_refs++;
}
@@ -391,9 +395,27 @@ perfmon_cpu_pmu_unload(struct perfmon_cpu_pmu *cpu_pmu, unsigned int pmc_index)
assert(cpu_pmc->nr_refs != 0);
cpu_pmc->nr_refs--;
- if (cpu_pmc->nr_refs == 0)
- pmu_stop(perfmon_pmu.pmcs[pmc_index].id);
+ if (cpu_pmc->nr_refs == 0) {
+ pmu_driver.stop(perfmon_pmu.pmcs[pmc_index].id);
+ }
}
+
+int
+perfmon_pmu_register(struct perfmon_pmu_ops *driver)
+{
+ assert(driver->info && driver->translate && driver->alloc
+ && driver->free && driver->start && driver->stop);
+
+ if (pmu_driver.info) {
+ /* Already initialized */
+ assert(0);
+ return ERROR_INVAL;
+ }
+ pmu_driver = *driver;
+
+ return 0;
+}
+
static int __init
perfmon_bootstrap(void)
{
@@ -430,6 +452,12 @@ perfmon_setup(void)
percpu_var(perfmon_cpu_grouplist, i) = grouplist;
}
+
+ if (!pmu_driver.info) {
+ log_err("unable to start perfmon: no compatible pmu driver available");
+ return ERROR_NODEV;
+ }
+ pmu_driver.info();
return 0;
}
@@ -440,7 +468,8 @@ INIT_OP_DEFINE(perfmon_setup,
INIT_OP_DEP(panic_setup, true),
INIT_OP_DEP(percpu_setup, true),
INIT_OP_DEP(perfmon_bootstrap, true),
- INIT_OP_DEP(pmu_setup, true),
+ INIT_OP_DEP(pmu_intel_setup, false),
+ INIT_OP_DEP(pmu_amd_setup, false),
INIT_OP_DEP(spinlock_setup, true),
INIT_OP_DEP(thread_setup, true));
@@ -503,7 +532,7 @@ perfmon_event_sync(struct perfmon_event *event)
uint64_t count;
pmc = perfmon_pmc_from_index(event->pmc_index);
- count = pmu_read(pmc->id);
+ count = pmu_driver.read(pmc->id);
/* TODO: overflow management. */
event->count += (count - event->prev);
event->prev = count;
@@ -768,7 +797,7 @@ perfmon_group_load(struct perfmon_group *group)
list_for_each_entry(&group->events, event, node) {
perfmon_cpu_pmu_load(cpu_pmu, event->pmc_index);
- event->prev = pmu_read(perfmon_pmu.pmcs[event->pmc_index].id);
+ event->prev = pmu_driver.read(perfmon_pmu.pmcs[event->pmc_index].id);
}
group->cpu = cpu_id();
diff --git a/kern/perfmon.h b/kern/perfmon.h
index 89fdd3d..1ab24be 100644
--- a/kern/perfmon.h
+++ b/kern/perfmon.h
@@ -52,6 +52,21 @@
#define PERFMON_EF_MASK (PERFMON_EF_KERN | PERFMON_EF_USER)
/*
+ * Pmu operations.
+ *
+ * Set by calling perfmon_pmu_register.
+ */
+struct perfmon_pmu_ops {
+ void (*info)(void);
+ int (*translate)(unsigned int *raw_event_idp, unsigned int event_id);
+ int (*alloc)(unsigned int *pmc_idp, unsigned int raw_event_id);
+ void (*free)(unsigned int pmc_id);
+ void (*start)(unsigned int pmc_id, unsigned int raw_event_id);
+ void (*stop)(unsigned int pmc_id);
+ uint64_t (*read)(unsigned int pmc_id);
+};
+
+/*
* Performance monitoring event.
*
* An event describes a single, well-defined state and records its
@@ -200,4 +215,17 @@ INIT_OP_DECLARE(perfmon_bootstrap);
*/
INIT_OP_DECLARE(perfmon_setup);
+/*
+ * Register an architecture-specific driver.
+ */
+int
+perfmon_pmu_register(struct perfmon_pmu_ops *driver);
+
+/*
+ * PMU driver module initialization operations:
+ * - module fully initialized
+ */
+INIT_OP_DECLARE(pmu_intel_setup);
+INIT_OP_DECLARE(pmu_amd_setup);
+
#endif /* _KERN_PERFMON_H */