diff options
author | Remy Noel <mocramis@gmail.com> | 2018-04-18 19:21:42 +0200 |
---|---|---|
committer | Remy Noel <mocramis@gmail.com> | 2018-04-21 00:04:20 +0200 |
commit | 75924457863e9dc90475b5cbd9bcabfff87cfc0d (patch) | |
tree | 8c23f70d8c41a5cb17e370735f5026f7c2c94656 | |
parent | b6ee3f35eb82e826e800cdc4ba483c1428a6e6d6 (diff) |
perfmon: Add overflow handling.
Only architectural version 1 is handled so far.
-rw-r--r-- | arch/x86/machine/pmu_intel.c | 36 | ||||
-rw-r--r-- | arch/x86/machine/trap.c | 2 | ||||
-rw-r--r-- | kern/perfmon.c | 36 | ||||
-rw-r--r-- | kern/perfmon.h | 16 |
4 files changed, 76 insertions, 14 deletions
diff --git a/arch/x86/machine/pmu_intel.c b/arch/x86/machine/pmu_intel.c index fcebb06..6439282 100644 --- a/arch/x86/machine/pmu_intel.c +++ b/arch/x86/machine/pmu_intel.c @@ -49,7 +49,11 @@ */ #define PMU_INTEL_EVTSEL_USR 0x00010000 #define PMU_INTEL_EVTSEL_OS 0x00020000 +#define PMU_INTEL_EVTSEL_EDGE 0x00040000 +#define PMU_INTEL_EVTSEL_PC 0x00080000 +#define PMU_INTEL_EVTSEL_INT 0x00100000 #define PMU_INTEL_EVTSEL_EN 0x00400000 +#define PMU_INTEL_EVTSEL_INV 0x00800000 #define PMU_INTEL_ID_VERSION_MASK 0x000000ff #define PMU_INTEL_ID_NR_PMCS_MASK 0x0000ff00 @@ -209,6 +213,7 @@ pmu_intel_start(unsigned int pmc_id, unsigned int raw_event_id) evtsel = PMU_INTEL_EVTSEL_EN | PMU_INTEL_EVTSEL_OS | PMU_INTEL_EVTSEL_USR + | PMU_INTEL_EVTSEL_INT | (code->umask << 8) | code->event_select; cpu_set_msr(PMU_INTEL_MSR_EVTSEL0 + pmc_id, 0, evtsel); @@ -226,6 +231,36 @@ pmu_intel_read(unsigned int pmc_id) return cpu_get_msr64(PMU_INTEL_MSR_PMC0 + pmc_id); } +static void +pmu_intel_handle_of_intr_v1(struct trap_frame *frame) +{ + struct pmu_intel *pmu; + unsigned int mask; + uint64_t value; + uint64_t prev; + + (void)frame; + + pmu = pmu_intel_get(); + + for (unsigned int pmc_id = 0; pmc_id != pmu->nr_pmcs; pmc_id++) { + mask = (1U << pmc_id); + if (pmu->pmc_bm & mask) { + /* counter not enabled: can't overflow. 
*/ + continue; + } + + value = pmu_intel_read(pmc_id); + prev = perfmon_cpu_pmc_get_prev(pmc_id); + if (prev > value) { + /* Overflow */ + perfmon_cpu_pmc_inc_of(pmc_id); + /* Prevents us from overflowing twice */ + perfmon_cpu_pmc_set_prev(pmc_id, value); + } + } +} + static uint8_t pmu_intel_get_pmc_width(void) { @@ -282,6 +317,7 @@ pmu_intel_setup(void) pmu_driver.stop = pmu_intel_stop; pmu_driver.read = pmu_intel_read; pmu_driver.get_pmc_width = pmu_intel_get_pmc_width; + pmu_driver.handle_of_intr = pmu_intel_handle_of_intr_v1; return perfmon_pmu_register(&pmu_driver); } diff --git a/arch/x86/machine/trap.c b/arch/x86/machine/trap.c index 0e98015..101adf8 100644 --- a/arch/x86/machine/trap.c +++ b/arch/x86/machine/trap.c @@ -210,7 +210,7 @@ trap_setup(void) trap_install(TRAP_XCALL, TRAP_HF_INTR, cpu_xcall_intr); trap_install(TRAP_THREAD_SCHEDULE, TRAP_HF_INTR, cpu_thread_schedule_intr); trap_install(TRAP_CPU_HALT, TRAP_HF_INTR, cpu_halt_intr); - trap_install(TRAP_LAPIC_PMC_OF, TRAP_HF_INTR, lapic_pmc_of_intr); + trap_install(TRAP_LAPIC_PMC_OF, TRAP_HF_INTR, trap_default); trap_install(TRAP_LAPIC_TIMER, TRAP_HF_INTR, lapic_timer_intr); trap_install(TRAP_LAPIC_ERROR, TRAP_HF_INTR, lapic_error_intr); trap_install(TRAP_LAPIC_SPURIOUS, TRAP_HF_INTR, lapic_spurious_intr); diff --git a/kern/perfmon.c b/kern/perfmon.c index ed848e3..e91f3cd 100644 --- a/kern/perfmon.c +++ b/kern/perfmon.c @@ -44,6 +44,7 @@ #include <kern/thread.h> #include <kern/xcall.h> #include <machine/cpu.h> +#include <machine/trap.h> /* * Performance monitoring event. 
@@ -425,6 +426,16 @@ perfmon_cpu_pmc_get_prev(unsigned int pmc_id) } void +perfmon_cpu_pmc_set_prev(unsigned int pmc_id, uint64_t prev) +{ + struct perfmon_cpu_pmc *cpu_pmc; + + cpu_pmc = perfmon_cpu_pmu_get_pmc_from_id(pmc_id); + + cpu_pmc->prev_value = prev; +} + +void perfmon_cpu_pmc_inc_of(unsigned int pmc_id) { struct perfmon_cpu_pmc *cpu_pmc; @@ -462,6 +473,12 @@ perfmon_cpu_pmu_unload(struct perfmon_cpu_pmu *cpu_pmu, unsigned int pmc_index) } } +void +perfmon_handle_of_intr(struct trap_frame *frame) +{ + pmu_driver.handle_of_intr(frame); +} + int perfmon_pmu_register(struct perfmon_pmu_ops *driver) { @@ -527,6 +544,11 @@ perfmon_setup(void) return ENODEV; } pmu_driver.info(); + if (pmu_driver.handle_of_intr) { + trap_register(TRAP_LAPIC_PMC_OF, lapic_pmc_of_intr); + } else { + log_warning("registered pmu does not handle overflow\n"); + } return 0; } @@ -540,7 +562,8 @@ INIT_OP_DEFINE(perfmon_setup, INIT_OP_DEP(pmu_intel_setup, false), INIT_OP_DEP(pmu_amd_setup, false), INIT_OP_DEP(spinlock_setup, true), - INIT_OP_DEP(thread_setup, true)); + INIT_OP_DEP(thread_setup, true), + INIT_OP_DEP(trap_setup, true)); static void perfmon_check_event_args(unsigned int type, unsigned int id, int flags) @@ -618,14 +641,9 @@ perfmon_event_sync(struct perfmon_cpu_pmu *cpu_pmu, - event->prev + count; event->overflow_id = cpu_pmc->overflow_id; } else { - event->count += (count - event->prev); + event->count += count - event->prev; } event->prev = count; - - /* Update per cpu prev value. we should use a callback "on_sync" or so - * instead as it is only necessary for certain archtectural overflow - * management.*/ - cpu_pmc->prev_value = count; } static inline int @@ -896,10 +914,6 @@ perfmon_group_load(struct perfmon_group *group) perfmon_cpu_pmu_load(cpu_pmu, event->pmc_index); event->prev = pmu_driver.read(perfmon_pmu.pmcs[event->pmc_index].id); event->overflow_id = cpu_pmu->pmcs[event->pmc_index].overflow_id; - /* Update per cpu prev value. 
we should use a callback "on_sync" or so - * instead as it is only necessary for certain archtectural overflow - * management.*/ - cpu_pmu->pmcs[event->pmc_index].prev_value = event->prev; } group->cpu = cpu_id(); diff --git a/kern/perfmon.h b/kern/perfmon.h index 0e5cd19..60e40e0 100644 --- a/kern/perfmon.h +++ b/kern/perfmon.h @@ -65,6 +65,7 @@ struct perfmon_pmu_ops { void (*stop)(unsigned int pmc_id); uint64_t (*read)(unsigned int pmc_id); uint8_t (*get_pmc_width)(void); + void (*handle_of_intr)(struct trap_frame *frame); }; /* @@ -217,10 +218,16 @@ INIT_OP_DECLARE(perfmon_bootstrap); INIT_OP_DECLARE(perfmon_setup); /* + * Handle overflow interrupt. + */ +void perfmon_handle_of_intr(struct trap_frame *frame); + +int perfmon_on_overflow(struct perfmon_pmu_ops *driver); + +/* * Register an architecture-specific driver. */ -int -perfmon_pmu_register(struct perfmon_pmu_ops *driver); +int perfmon_pmu_register(struct perfmon_pmu_ops *driver); /* * Get the last value of given pmc. @@ -228,6 +235,11 @@ perfmon_pmu_register(struct perfmon_pmu_ops *driver); uint64_t perfmon_cpu_pmc_get_prev(unsigned int pmc_id); /* + * Set the last value of given pmc. + */ +void perfmon_cpu_pmc_set_prev(unsigned int pmc_id, uint64_t prev); + +/* * Increment overflow counter for given pmc. */ void perfmon_cpu_pmc_inc_of(unsigned int pmc_id); |