summaryrefslogtreecommitdiff
path: root/kern
diff options
context:
space:
mode:
Diffstat (limited to 'kern')
-rw-r--r--kern/perfmon.c64
-rw-r--r--kern/perfmon.h17
2 files changed, 53 insertions, 28 deletions
diff --git a/kern/perfmon.c b/kern/perfmon.c
index 156ec7a..17175ca 100644
--- a/kern/perfmon.c
+++ b/kern/perfmon.c
@@ -16,7 +16,16 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
- * TODO Description.
+ * The performance monitoring module allows managing performance monitoring as
+ * event groups. Each physical performance monitoring counter (pmc) may be
+ * referenced by perfmon events, which are themselves grouped in perfmon
+ * groups. Groups can then be attached to either threads or cpus into perfmon
+ * grouplists.
+ *
+ * In order to guarantee that thread relocation is properly handled, event
+ * types are reserved on the performance monitoring units (pmu) of all cpus for
+ * every event of a group when it is attached. Therefore a group attach may
+ * fail if no compatible pmc is available globally.
*
* Locking order : interrupts -> thread runq -> grouplist -> group
*
@@ -188,9 +197,11 @@ struct perfmon_cpu_pmc {
*/
struct perfmon_cpu_pmu {
struct perfmon_cpu_pmc pmcs[PERFMON_MAX_PMCS];
+ struct timer of_timer;
+ unsigned int cpu_id;
};
-static struct perfmon_pmu_ops pmu_driver __read_mostly;
+static struct perfmon_pmu_driver pmu_driver __read_mostly;
static struct perfmon_pmu perfmon_pmu;
static unsigned int perfmon_pmc_id_to_index[PERFMON_MAX_PMCS];
@@ -216,7 +227,7 @@ perfmon_translate(unsigned int *raw_event_idp, unsigned int event_type,
*raw_event_idp = event_id;
return 0;
case PERFMON_ET_GENERIC:
- return pmu_driver.translate(raw_event_idp, event_id);
+ return pmu_driver.ops.translate(raw_event_idp, event_id);
default:
panic("perfmon: unsupported event type");
}
@@ -242,7 +253,7 @@ perfmon_pmc_alloc(struct perfmon_pmc **pmcp, unsigned int raw_event_id)
}
assert(i < ARRAY_SIZE(perfmon_pmu.pmcs));
- error = pmu_driver.alloc(&pmc->id, raw_event_id);
+ error = pmu_driver.ops.alloc(&pmc->id, raw_event_id);
if (error) {
return error;
@@ -345,7 +356,7 @@ perfmon_pmc_put(struct perfmon_pmc *pmc)
pmc->nr_refs--;
if (pmc->nr_refs == 0) {
- pmu_driver.free(pmc->id);
+ pmu_driver.ops.free(pmc->id);
assert(perfmon_pmc_id_to_index[pmc->id] != UINT32_MAX);
perfmon_pmc_id_to_index[pmc->id] = UINT32_MAX;
}
@@ -394,7 +405,7 @@ perfmon_cpu_pmu_init(struct perfmon_cpu_pmu *cpu_pmu)
pmc = &cpu_pmu->pmcs[i];
pmc->nr_refs = 0;
- pmc->prev_value = pmu_driver.read(perfmon_pmu.pmcs[i].id);
+ pmc->prev_value = pmu_driver.ops.read(perfmon_pmu.pmcs[i].id);
pmc->overflow_id = 0;
}
@@ -458,8 +469,8 @@ perfmon_cpu_pmu_load(struct perfmon_cpu_pmu *cpu_pmu, unsigned int pmc_index)
cpu_pmc = &cpu_pmu->pmcs[pmc_index];
if (cpu_pmc->nr_refs == 0) {
- pmu_driver.start(perfmon_pmu.pmcs[pmc_index].id,
- perfmon_pmu.pmcs[pmc_index].raw_event_id);
+ pmu_driver.ops.start(perfmon_pmu.pmcs[pmc_index].id,
+ perfmon_pmu.pmcs[pmc_index].raw_event_id);
}
cpu_pmc->nr_refs++;
@@ -475,24 +486,26 @@ perfmon_cpu_pmu_unload(struct perfmon_cpu_pmu *cpu_pmu, unsigned int pmc_index)
cpu_pmc->nr_refs--;
if (cpu_pmc->nr_refs == 0) {
- pmu_driver.stop(perfmon_pmu.pmcs[pmc_index].id);
+ pmu_driver.ops.stop(perfmon_pmu.pmcs[pmc_index].id);
}
}
void
perfmon_of_intr(void)
{
- pmu_driver.handle_of_intr();
+ pmu_driver.ops.handle_of_intr();
}
int
-perfmon_pmu_register(struct perfmon_pmu_ops *driver)
+perfmon_pmu_register(struct perfmon_pmu_driver *driver)
{
- assert(driver->info && driver->translate && driver->alloc
- && driver->free && driver->start && driver->stop
- && driver->get_pmc_width);
+ struct perfmon_pmu_ops *ops = &driver->ops;
+
+ assert(ops->info && ops->translate && ops->alloc
+ && ops->free && ops->start && ops->stop);
+ assert(!ops->handle_of_intr != !driver->of_max_ticks);
- if (pmu_driver.info) {
+ if (pmu_driver.ops.info) {
/* Already initialized */
assert(0);
return EINVAL;
@@ -545,15 +558,14 @@ perfmon_setup(void)
percpu_var(perfmon_cpu_grouplist, i) = grouplist;
}
- if (!pmu_driver.info) {
+ if (!pmu_driver.ops.info) {
log_err("unable to start perfmon: no compatible pmu driver available");
return ENODEV;
}
- pmu_driver.info();
- if (pmu_driver.handle_of_intr) {
+ pmu_driver.ops.info();
+ if (pmu_driver.ops.handle_of_intr) {
+ /* FIXME: this should not require an architectural api call. */
trap_register(TRAP_LAPIC_PMC_OF, lapic_pmc_of_intr);
- } else {
- log_warning("registered pmu does not handle overflow\n");
}
return 0;
@@ -623,7 +635,7 @@ perfmon_event_read(const struct perfmon_event *event)
int
perfmon_event_write(struct perfmon_event *event, uint64_t value)
{
- if (!pmu_driver.write) {
+ if (!pmu_driver.ops.write) {
return ENODEV;
}
event->value = value;
@@ -635,7 +647,7 @@ perfmon_event_write(struct perfmon_event *event, uint64_t value)
int
perfmon_get_pmc_width(void)
{
- return pmu_driver.get_pmc_width();
+ return pmu_driver.pmc_width;
}
#endif /* CONFIG_PERFMON_TEST */
@@ -657,14 +669,14 @@ perfmon_event_sync(struct perfmon_cpu_pmu *cpu_pmu,
pmc = perfmon_pmc_from_index(event->pmc_index);
cpu_pmc = &cpu_pmu->pmcs[event->pmc_index];
- count = pmu_driver.read(pmc->id);
+ count = pmu_driver.ops.read(pmc->id);
if (unlikely(event->overflow_id != cpu_pmc->overflow_id)) {
assert(cpu_pmc->overflow_id > event->overflow_id);
diff = cpu_pmc->overflow_id > event->overflow_id;
/* diff is very likely 1. */
- event->count += (1UL << pmu_driver.get_pmc_width()) * diff
+ event->count += (1UL << pmu_driver.pmc_width) * diff
- event->prev + count;
event->overflow_id = cpu_pmc->overflow_id;
} else {
@@ -949,14 +961,14 @@ perfmon_group_load(struct perfmon_group *group)
continue;
}
pmc = perfmon_pmc_from_index(event->pmc_index);
- pmu_driver.write(pmc->id, event->value);
+ pmu_driver.ops.write(pmc->id, event->value);
event->set_value = false;
}
#endif
list_for_each_entry(&group->events, event, node) {
perfmon_cpu_pmu_load(cpu_pmu, event->pmc_index);
- event->prev = pmu_driver.read(perfmon_pmu.pmcs[event->pmc_index].id);
+ event->prev = pmu_driver.ops.read(perfmon_pmu.pmcs[event->pmc_index].id);
event->overflow_id = cpu_pmu->pmcs[event->pmc_index].overflow_id;
}
diff --git a/kern/perfmon.h b/kern/perfmon.h
index b9a3882..cd7eb3a 100644
--- a/kern/perfmon.h
+++ b/kern/perfmon.h
@@ -65,11 +65,24 @@ struct perfmon_pmu_ops {
void (*stop)(unsigned int pmc_id);
uint64_t (*read)(unsigned int pmc_id);
void (*write)(unsigned int pmc_id, uint64_t value);
- uint8_t (*get_pmc_width)(void);
+ /* If handle_of_intr is set, of_max_ticks must be set to 0. */
void (*handle_of_intr)(void);
};
/*
+ * Pmu device description.
+ */
+struct perfmon_pmu_driver {
+ uint8_t pmc_width; /* width in bits of a pmc */
+ /*
+ * Maximum number of clock ticks between two overflow checks.
+ * Should be set to 0 if handle_of_intr is set.
+ */
+ uint64_t of_max_ticks;
+ struct perfmon_pmu_ops ops;
+};
+
+/*
* Performance monitoring event.
*
* An event describes a single, well-defined state and records its
@@ -226,7 +239,7 @@ void perfmon_of_intr(void);
/*
* Register an architecture-specific driver.
*/
-int perfmon_pmu_register(struct perfmon_pmu_ops *driver);
+int perfmon_pmu_register(struct perfmon_pmu_driver *driver);
/*
* Get the last value of given pmc.