summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--arch/x86/Makefile3
-rw-r--r--arch/x86/machine/pmu.h104
-rw-r--r--arch/x86/machine/pmu_amd.c231
-rw-r--r--arch/x86/machine/pmu_intel.c (renamed from arch/x86/machine/pmu.c)291
-rw-r--r--kern/perfmon.c55
-rw-r--r--kern/perfmon.h28
6 files changed, 329 insertions, 383 deletions
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 581941f..e20a711 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -53,4 +53,5 @@ x15_SOURCES-y += \
arch/x86/machine/trap.c \
arch/x86/machine/uart.c
-x15_SOURCES-$(CONFIG_PERFMON) += arch/x86/machine/pmu.c
+x15_SOURCES-$(CONFIG_PERFMON) += arch/x86/machine/pmu_amd.c
+x15_SOURCES-$(CONFIG_PERFMON) += arch/x86/machine/pmu_intel.c
diff --git a/arch/x86/machine/pmu.h b/arch/x86/machine/pmu.h
deleted file mode 100644
index 7d0e10a..0000000
--- a/arch/x86/machine/pmu.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2014 Remy Noel.
- * Copyright (c) 2014 Richard Braun.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * PMU (Processing Monitoring Unit) interface module.
- * Provides acces to harware PMCs(Performance Monitoring Counters).
- *
- * TODO Overflow handling.
- */
-
-#ifndef _X86_PMU_H
-#define _X86_PMU_H
-
-#include <stdint.h>
-
-#include <kern/init.h>
-
-/*
- * AMD raw event IDs.
- */
-#define PMU_AMD_RE_CYCLE 0
-#define PMU_AMD_RE_INSTRUCTION 1
-#define PMU_AMD_RE_CACHE_REF 2
-#define PMU_AMD_RE_CACHE_MISS 3
-#define PMU_AMD_RE_BRANCH 4
-#define PMU_AMD_RE_BRANCH_MISS 5
-#define PMU_AMD_RE_DCACHE_REF 6
-#define PMU_AMD_RE_DCACHE_MISS 7
-#define PMU_AMD_RE_IFETCH_STALL 8
-
-/*
- * Intel raw event IDs.
- */
-#define PMU_INTEL_RE_CYCLE 0
-#define PMU_INTEL_RE_REF_CYCLE 1
-#define PMU_INTEL_RE_INSTRUCTION 2
-#define PMU_INTEL_RE_CACHE_REF 3
-#define PMU_INTEL_RE_CACHE_MISS 4
-#define PMU_INTEL_RE_BRANCH 5
-#define PMU_INTEL_RE_BRANCH_MISS 6
-
-/*
- * Translate a generic event ID into a raw event ID.
- *
- * Translations are invariant.
- */
-int pmu_translate(unsigned int *raw_event_idp, unsigned int event_id);
-
-/*
- * Allocate a performance monitoring counter (PMC).
- *
- * The allocation is global to the whole system. It is the responsibility
- * of the caller to serialize calls to this function.
- */
-int pmu_alloc(unsigned int *pmc_idp, unsigned int raw_event_id);
-
-/*
- * Free a PMC.
- *
- * As for allocations, the caller must serialize calls to this function.
- */
-void pmu_free(unsigned int pmc_id);
-
-/*
- * Start a PMC.
- *
- * This function only affects the current processor. Preemption must be
- * disabled when calling this function.
- */
-void pmu_start(unsigned int pmc_id, unsigned int raw_event_id);
-
-/*
- * Stop a PMC.
- *
- * This function only affects the current processor. Preemption must be
- * disabled when calling this function.
- */
-void pmu_stop(unsigned int pmc_id);
-
-/*
- * Return the current value of the given PMC.
- */
-uint64_t pmu_read(unsigned int pmc_id);
-
-/*
- * This init operation provides :
- * - module fully initialized
- */
-INIT_OP_DECLARE(pmu_setup);
-
-#endif /* _X86_PMU_H */
diff --git a/arch/x86/machine/pmu_amd.c b/arch/x86/machine/pmu_amd.c
new file mode 100644
index 0000000..815f403
--- /dev/null
+++ b/arch/x86/machine/pmu_amd.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2014 Remy Noel.
+ * Copyright (c) 2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * AMD PMU driver module.
+ */
+
+#include <stdint.h>
+
+#include <include/assert.h>
+#include <kern/error.h>
+#include <kern/init.h>
+#include <kern/log.h>
+#include <machine/cpu.h>
+#include <kern/perfmon.h>
+
+/*
+ * AMD raw event IDs.
+ */
+#define PMU_AMD_RE_CYCLE 0
+#define PMU_AMD_RE_INSTRUCTION 1
+#define PMU_AMD_RE_CACHE_REF 2
+#define PMU_AMD_RE_CACHE_MISS 3
+#define PMU_AMD_RE_BRANCH 4
+#define PMU_AMD_RE_BRANCH_MISS 5
+#define PMU_AMD_RE_DCACHE_REF 6
+#define PMU_AMD_RE_DCACHE_MISS 7
+#define PMU_AMD_RE_IFETCH_STALL 8
+
+
+/*
+ * PMU MSR addresses
+ */
+#define PMU_AMD_MSR_PERFEVTSEL0 0xc0010000
+#define PMU_AMD_MSR_PERCTR0 0xc0010004
+
+/*
+ * Event Select Register flags
+ */
+#define PMU_AMD_EVTSEL_USR 0x00010000
+#define PMU_AMD_EVTSEL_OS 0x00020000
+#define PMU_AMD_EVTSEL_EN 0x00400000
+
+/*
+ * AMD PMU properties seem to be identical across all processors despite
+ * many of them being implementation-specific.
+ */
+#define PMU_AMD_NR_PMCS 4
+#define PMU_AMD_PMC_WIDTH 48
+
+struct pmu_amd {
+ unsigned int pmc_bm;
+};
+
+struct pmu_amd_event_code {
+ unsigned short event_select;
+ unsigned short umask;
+};
+
+static struct pmu_amd pmu_amd;
+
+/*
+ * TODO Per-family/model event availability database.
+ */
+static const struct pmu_amd_event_code pmu_amd_event_codes[] = {
+ [PMU_AMD_RE_CYCLE] = { 0x76, 0x00 },
+ [PMU_AMD_RE_INSTRUCTION] = { 0xc0, 0x00 },
+ [PMU_AMD_RE_CACHE_REF] = { 0x80, 0x00 },
+ [PMU_AMD_RE_CACHE_MISS] = { 0x81, 0x00 },
+ [PMU_AMD_RE_BRANCH] = { 0xc2, 0x00 },
+ [PMU_AMD_RE_BRANCH_MISS] = { 0xc3, 0x00 },
+ [PMU_AMD_RE_DCACHE_REF] = { 0x40, 0x00 },
+ [PMU_AMD_RE_DCACHE_MISS] = { 0x41, 0x00 },
+ [PMU_AMD_RE_IFETCH_STALL] = { 0x87, 0x00 },
+};
+
+#define PMU_AMD_RE_INVALID ((unsigned int)-1)
+
+static const unsigned int pmu_amd_generic_events[] = {
+ [PERFMON_EV_CYCLE] = PMU_AMD_RE_CYCLE,
+ [PERFMON_EV_REF_CYCLE] = PMU_AMD_RE_INVALID,
+ [PERFMON_EV_INSTRUCTION] = PMU_AMD_RE_INSTRUCTION,
+ [PERFMON_EV_CACHE_REF] = PMU_AMD_RE_CACHE_REF,
+ [PERFMON_EV_CACHE_MISS] = PMU_AMD_RE_CACHE_MISS,
+ [PERFMON_EV_BRANCH] = PMU_AMD_RE_BRANCH,
+ [PERFMON_EV_BRANCH_MISS] = PMU_AMD_RE_BRANCH_MISS,
+};
+
+static struct pmu_amd *
+pmu_amd_get(void)
+{
+ return &pmu_amd;
+}
+
+static void
+pmu_amd_info(void)
+{
+ log_info("pmu: driver: amd, nr_pmcs: %u, pmc_width: %u\n",
+ PMU_AMD_NR_PMCS, PMU_AMD_PMC_WIDTH);
+}
+
+static int
+pmu_amd_translate(unsigned int *raw_event_idp, unsigned int event_id)
+{
+ assert(event_id < ARRAY_SIZE(pmu_amd_generic_events));
+ *raw_event_idp = pmu_amd_generic_events[event_id];
+ return 0;
+}
+
+static int
+pmu_amd_alloc(unsigned int *pmc_idp, unsigned int raw_event_id)
+{
+ struct pmu_amd *pmu;
+ unsigned int pmc_id;
+
+ /* TODO Check raw event availability */
+ (void)raw_event_id;
+
+ pmu = pmu_amd_get();
+
+ if (pmu->pmc_bm == 0)
+ return ERROR_AGAIN;
+
+ pmc_id = __builtin_ffs(pmu->pmc_bm) - 1;
+ pmu->pmc_bm &= ~(1U << pmc_id);
+ *pmc_idp = pmc_id;
+ return 0;
+}
+
+static void
+pmu_amd_free(unsigned int pmc_id)
+{
+ struct pmu_amd *pmu;
+ unsigned int mask;
+
+ assert(pmc_id < PMU_AMD_NR_PMCS);
+
+ pmu = pmu_amd_get();
+ mask = (1U << pmc_id);
+ assert(!(pmu->pmc_bm & mask));
+ pmu->pmc_bm |= mask;
+}
+
+static void
+pmu_amd_start(unsigned int pmc_id, unsigned int raw_event_id)
+{
+ const struct pmu_amd_event_code *code;
+ uint32_t high, low;
+
+ assert(pmc_id < PMU_AMD_NR_PMCS);
+ assert(raw_event_id < ARRAY_SIZE(pmu_amd_event_codes));
+
+ code = &pmu_amd_event_codes[raw_event_id];
+
+ /* TODO Handle PERFMON_EF_KERN/PERFMON_EF_USER */
+ high = code->event_select >> 8;
+ low = PMU_AMD_EVTSEL_EN
+ | PMU_AMD_EVTSEL_OS
+ | PMU_AMD_EVTSEL_USR
+ | (code->umask << 8)
+ | (code->event_select & 0xff);
+ cpu_set_msr(PMU_AMD_MSR_PERFEVTSEL0 + pmc_id, high, low);
+}
+
+static void
+pmu_amd_stop(unsigned int pmc_id)
+{
+ assert(pmc_id < PMU_AMD_NR_PMCS);
+ cpu_set_msr(PMU_AMD_MSR_PERFEVTSEL0 + pmc_id, 0, 0);
+}
+
+static uint64_t
+pmu_amd_read(unsigned int pmc_id)
+{
+ uint32_t high, low;
+
+ assert(pmc_id < PMU_AMD_NR_PMCS);
+
+ cpu_get_msr(PMU_AMD_MSR_PERCTR0 + pmc_id, &high, &low);
+ return (((uint64_t)high << 32) | low);
+}
+
+static int __init
+pmu_amd_setup(void)
+{
+ const struct cpu *cpu;
+ struct pmu_amd *pmu;
+ struct perfmon_pmu_ops pmu_driver;
+
+ cpu = cpu_current();
+
+ if (cpu->vendor_id != CPU_VENDOR_AMD) {
+ return 0;
+ }
+
+ /* Support AMD Family 10h processors and later */
+ if (cpu->family < 16)
+ return ERROR_NODEV;
+
+ pmu = pmu_amd_get();
+ pmu->pmc_bm = (1U << PMU_AMD_NR_PMCS) - 1;
+
+ pmu_driver.info = pmu_amd_info;
+ pmu_driver.translate = pmu_amd_translate;
+ pmu_driver.alloc = pmu_amd_alloc;
+ pmu_driver.free = pmu_amd_free;
+ pmu_driver.start = pmu_amd_start;
+ pmu_driver.stop = pmu_amd_stop;
+ pmu_driver.read = pmu_amd_read;
+
+ return perfmon_pmu_register(&pmu_driver);
+}
+
+INIT_OP_DEFINE(pmu_amd_setup,
+ INIT_OP_DEP(perfmon_bootstrap, true),
+ INIT_OP_DEP(cpu_setup, true),
+ INIT_OP_DEP(log_setup, true));
+
diff --git a/arch/x86/machine/pmu.c b/arch/x86/machine/pmu_intel.c
index 9274095..e4d3f21 100644
--- a/arch/x86/machine/pmu.c
+++ b/arch/x86/machine/pmu_intel.c
@@ -14,6 +14,8 @@
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Intel PMU driver module.
*/
#include <stdint.h>
@@ -22,202 +24,29 @@
#include <kern/error.h>
#include <kern/init.h>
#include <kern/log.h>
-#include <kern/perfmon.h>
-#include <kern/thread.h>
#include <machine/cpu.h>
-#include <machine/page.h>
-#include <machine/pmu.h>
-
-struct pmu_driver {
- void (*info)(void);
- int (*translate)(unsigned int *raw_event_idp, unsigned int event_id);
- int (*alloc)(unsigned int *pmc_idp, unsigned int raw_event_id);
- void (*free)(unsigned int pmc_id);
- void (*start)(unsigned int pmc_id, unsigned int raw_event_id);
- void (*stop)(unsigned int pmc_id);
- uint64_t (*read)(unsigned int pmc_id);
-};
-
-static struct pmu_driver pmu_driver __read_mostly;
-
-
-/* AMD driver */
-
-#define PMU_AMD_MSR_PERFEVTSEL0 0xc0010000
-#define PMU_AMD_MSR_PERCTR0 0xc0010004
-
-#define PMU_AMD_EVTSEL_USR 0x00010000
-#define PMU_AMD_EVTSEL_OS 0x00020000
-#define PMU_AMD_EVTSEL_EN 0x00400000
+#include <kern/perfmon.h>
/*
- * AMD PMU properties seem to be identical across all processors despite
- * many of them being implementation-specific.
+ * Intel raw event IDs.
*/
-#define PMU_AMD_NR_PMCS 4
-#define PMU_AMD_PMC_WIDTH 48
-
-struct pmu_amd {
- unsigned int pmc_bm;
-};
-
-struct pmu_amd_event_code {
- unsigned short event_select;
- unsigned short umask;
-};
-
-static struct pmu_amd pmu_amd;
+#define PMU_INTEL_RE_CYCLE 0
+#define PMU_INTEL_RE_REF_CYCLE 1
+#define PMU_INTEL_RE_INSTRUCTION 2
+#define PMU_INTEL_RE_CACHE_REF 3
+#define PMU_INTEL_RE_CACHE_MISS 4
+#define PMU_INTEL_RE_BRANCH 5
+#define PMU_INTEL_RE_BRANCH_MISS 6
/*
- * TODO Per-family/model event availability database.
+ * PMU MSR addresses
*/
-static const struct pmu_amd_event_code pmu_amd_event_codes[] = {
- [PMU_AMD_RE_CYCLE] = { 0x76, 0x00 },
- [PMU_AMD_RE_INSTRUCTION] = { 0xc0, 0x00 },
- [PMU_AMD_RE_CACHE_REF] = { 0x80, 0x00 },
- [PMU_AMD_RE_CACHE_MISS] = { 0x81, 0x00 },
- [PMU_AMD_RE_BRANCH] = { 0xc2, 0x00 },
- [PMU_AMD_RE_BRANCH_MISS] = { 0xc3, 0x00 },
- [PMU_AMD_RE_DCACHE_REF] = { 0x40, 0x00 },
- [PMU_AMD_RE_DCACHE_MISS] = { 0x41, 0x00 },
- [PMU_AMD_RE_IFETCH_STALL] = { 0x87, 0x00 },
-};
-
-#define PMU_AMD_RE_INVALID ((unsigned int)-1)
-
-static const unsigned int pmu_amd_generic_events[] = {
- [PERFMON_EV_CYCLE] = PMU_AMD_RE_CYCLE,
- [PERFMON_EV_REF_CYCLE] = PMU_AMD_RE_INVALID,
- [PERFMON_EV_INSTRUCTION] = PMU_AMD_RE_INSTRUCTION,
- [PERFMON_EV_CACHE_REF] = PMU_AMD_RE_CACHE_REF,
- [PERFMON_EV_CACHE_MISS] = PMU_AMD_RE_CACHE_MISS,
- [PERFMON_EV_BRANCH] = PMU_AMD_RE_BRANCH,
- [PERFMON_EV_BRANCH_MISS] = PMU_AMD_RE_BRANCH_MISS,
-};
-
-static struct pmu_amd *
-pmu_amd_get(void)
-{
- return &pmu_amd;
-}
-
-static void
-pmu_amd_info(void)
-{
- log_info("pmu: driver: amd, nr_pmcs: %u, pmc_width: %u\n",
- PMU_AMD_NR_PMCS, PMU_AMD_PMC_WIDTH);
-}
-
-static int
-pmu_amd_translate(unsigned int *raw_event_idp, unsigned int event_id)
-{
- assert(event_id < ARRAY_SIZE(pmu_amd_generic_events));
- *raw_event_idp = pmu_amd_generic_events[event_id];
- return 0;
-}
-
-static int
-pmu_amd_alloc(unsigned int *pmc_idp, unsigned int raw_event_id)
-{
- struct pmu_amd *pmu;
- unsigned int pmc_id;
-
- /* TODO Check raw event availability */
- (void)raw_event_id;
-
- pmu = pmu_amd_get();
-
- if (pmu->pmc_bm == 0)
- return ERROR_AGAIN;
-
- pmc_id = __builtin_ffs(pmu->pmc_bm) - 1;
- pmu->pmc_bm &= ~(1U << pmc_id);
- *pmc_idp = pmc_id;
- return 0;
-}
-
-static void
-pmu_amd_free(unsigned int pmc_id)
-{
- struct pmu_amd *pmu;
- unsigned int mask;
-
- assert(pmc_id < PMU_AMD_NR_PMCS);
-
- pmu = pmu_amd_get();
- mask = (1U << pmc_id);
- assert(!(pmu->pmc_bm & mask));
- pmu->pmc_bm |= mask;
-}
-
-static void
-pmu_amd_start(unsigned int pmc_id, unsigned int raw_event_id)
-{
- const struct pmu_amd_event_code *code;
- uint32_t high, low;
-
- assert(pmc_id < PMU_AMD_NR_PMCS);
- assert(raw_event_id < ARRAY_SIZE(pmu_amd_event_codes));
-
- code = &pmu_amd_event_codes[raw_event_id];
-
- /* TODO Handle PERFMON_EF_KERN/PERFMON_EF_USER */
- high = code->event_select >> 8;
- low = PMU_AMD_EVTSEL_EN
- | PMU_AMD_EVTSEL_OS
- | PMU_AMD_EVTSEL_USR
- | (code->umask << 8)
- | (code->event_select & 0xff);
- cpu_set_msr(PMU_AMD_MSR_PERFEVTSEL0 + pmc_id, high, low);
-}
-
-static void
-pmu_amd_stop(unsigned int pmc_id)
-{
- assert(pmc_id < PMU_AMD_NR_PMCS);
- cpu_set_msr(PMU_AMD_MSR_PERFEVTSEL0 + pmc_id, 0, 0);
-}
-
-static uint64_t
-pmu_amd_read(unsigned int pmc_id)
-{
- uint32_t high, low;
-
- assert(pmc_id < PMU_AMD_NR_PMCS);
-
- cpu_get_msr(PMU_AMD_MSR_PERCTR0 + pmc_id, &high, &low);
- return (((uint64_t)high << 32) | low);
-}
-
-static int __init
-pmu_amd_setup(const struct cpu *cpu)
-{
- struct pmu_amd *pmu;
-
- /* Support AMD Family 10h processors and later */
- if (cpu->family < 16)
- return ERROR_NODEV;
-
- pmu = pmu_amd_get();
- pmu->pmc_bm = (1U << PMU_AMD_NR_PMCS) - 1;
-
- pmu_driver.info = pmu_amd_info;
- pmu_driver.translate = pmu_amd_translate;
- pmu_driver.alloc = pmu_amd_alloc;
- pmu_driver.free = pmu_amd_free;
- pmu_driver.start = pmu_amd_start;
- pmu_driver.stop = pmu_amd_stop;
- pmu_driver.read = pmu_amd_read;
-
- return 0;
-}
-
-
-/* Intel driver */
-
#define PMU_INTEL_MSR_PMC0 0x0c1
#define PMU_INTEL_MSR_EVTSEL0 0x186
+/*
+ * Event Select Register flags
+ */
#define PMU_INTEL_EVTSEL_USR 0x00010000
#define PMU_INTEL_EVTSEL_OS 0x00020000
#define PMU_INTEL_EVTSEL_EN 0x00400000
@@ -377,13 +206,20 @@ pmu_intel_read(unsigned int pmc_id)
}
static int __init
-pmu_intel_setup(const struct cpu *cpu)
+pmu_intel_setup(void)
{
+ const struct cpu *cpu;
struct pmu_intel *pmu;
+ struct perfmon_pmu_ops pmu_driver;
unsigned int eax, ebx, ecx, edx, ev_len;
+ cpu = cpu_current();
eax = 0xa;
+ if (cpu->vendor_id != CPU_VENDOR_INTEL) {
+ return 0;
+ }
+
if (cpu->cpuid_max_basic < eax)
return ERROR_NODEV;
@@ -410,86 +246,11 @@ pmu_intel_setup(const struct cpu *cpu)
pmu_driver.start = pmu_intel_start;
pmu_driver.stop = pmu_intel_stop;
pmu_driver.read = pmu_intel_read;
- return 0;
-}
-
-/* Common code */
-
-static int __init
-pmu_setup(void)
-{
- const struct cpu *cpu;
- int error;
-
- cpu = cpu_current();
-
- switch (cpu->vendor_id) {
- case CPU_VENDOR_AMD:
- error = pmu_amd_setup(cpu);
- break;
- case CPU_VENDOR_INTEL:
- error = pmu_intel_setup(cpu);
- break;
- default:
- error = ERROR_NODEV;
- }
-
- if (error) {
- return error;
- }
-
- pmu_driver.info();
-
- return 0;
+ return perfmon_pmu_register(&pmu_driver);
}
-INIT_OP_DEFINE(pmu_setup,
+INIT_OP_DEFINE(pmu_intel_setup,
+ INIT_OP_DEP(perfmon_bootstrap, true),
INIT_OP_DEP(cpu_setup, true),
INIT_OP_DEP(log_setup, true));
-
-int
-pmu_translate(unsigned int *raw_event_idp, unsigned int event_id)
-{
- if (pmu_driver.translate == NULL)
- return ERROR_NODEV;
-
- return pmu_driver.translate(raw_event_idp, event_id);
-}
-
-int
-pmu_alloc(unsigned int *pmc_idp, unsigned int raw_event_id)
-{
- assert(pmu_driver.alloc != NULL);
- return pmu_driver.alloc(pmc_idp, raw_event_id);
-}
-
-void
-pmu_free(unsigned int pmc_id)
-{
- assert(pmu_driver.free != NULL);
- pmu_driver.free(pmc_id);
-}
-
-void
-pmu_start(unsigned int pmc_id, unsigned int raw_event_id)
-{
- assert(pmu_driver.start != NULL);
- assert(!thread_preempt_enabled());
- pmu_driver.start(pmc_id, raw_event_id);
-}
-
-void
-pmu_stop(unsigned int pmc_id)
-{
- assert(pmu_driver.stop != NULL);
- assert(!thread_preempt_enabled());
- pmu_driver.stop(pmc_id);
-}
-
-uint64_t
-pmu_read(unsigned int pmc_id)
-{
- assert(pmu_driver.read != NULL);
- return pmu_driver.read(pmc_id);
-}
diff --git a/kern/perfmon.c b/kern/perfmon.c
index 2122710..59b7740 100644
--- a/kern/perfmon.c
+++ b/kern/perfmon.c
@@ -35,6 +35,7 @@
#include <kern/init.h>
#include <kern/kmem.h>
#include <kern/list.h>
+#include <kern/log.h>
#include <kern/macros.h>
#include <kern/panic.h>
#include <kern/percpu.h>
@@ -43,7 +44,6 @@
#include <kern/thread.h>
#include <kern/xcall.h>
#include <machine/cpu.h>
-#include <machine/pmu.h>
/*
* Performance monitoring event.
@@ -180,6 +180,8 @@ struct perfmon_cpu_pmu {
struct perfmon_cpu_pmc pmcs[PERFMON_MAX_PMCS];
};
+static struct perfmon_pmu_ops pmu_driver __read_mostly = {};
+
static struct perfmon_pmu perfmon_pmu;
static struct perfmon_cpu_pmu perfmon_cpu_pmu __percpu;
@@ -202,7 +204,7 @@ perfmon_translate(unsigned int *raw_event_idp, unsigned int event_type,
*raw_event_idp = event_id;
return 0;
case PERFMON_ET_GENERIC:
- return pmu_translate(raw_event_idp, event_id);
+ return pmu_driver.translate(raw_event_idp, event_id);
default:
panic("perfmon: unsupported event type");
}
@@ -226,7 +228,7 @@ perfmon_pmc_alloc(struct perfmon_pmc **pmcp, unsigned int raw_event_id)
}
assert(i < ARRAY_SIZE(perfmon_pmu.pmcs));
- error = pmu_alloc(&pmc->id, raw_event_id);
+ error = pmu_driver.alloc(&pmc->id, raw_event_id);
if (error)
return error;
@@ -307,8 +309,9 @@ perfmon_pmc_put(struct perfmon_pmc *pmc)
assert(pmc->nr_refs != 0);
pmc->nr_refs--;
- if (pmc->nr_refs == 0)
- pmu_free(pmc->id);
+ if (pmc->nr_refs == 0) {
+ pmu_driver.free(pmc->id);
+ }
spinlock_unlock(&perfmon_pmu.lock);
}
@@ -375,9 +378,10 @@ perfmon_cpu_pmu_load(struct perfmon_cpu_pmu *cpu_pmu, unsigned int pmc_index)
cpu_pmc = perfmon_cpu_pmu_get_pmc(cpu_pmu, pmc_index);
- if (cpu_pmc->nr_refs == 0)
- pmu_start(perfmon_pmu.pmcs[pmc_index].id,
- perfmon_pmu.pmcs[pmc_index].raw_event_id);
+ if (cpu_pmc->nr_refs == 0) {
+ pmu_driver.start(perfmon_pmu.pmcs[pmc_index].id,
+ perfmon_pmu.pmcs[pmc_index].raw_event_id);
+ }
cpu_pmc->nr_refs++;
}
@@ -391,9 +395,27 @@ perfmon_cpu_pmu_unload(struct perfmon_cpu_pmu *cpu_pmu, unsigned int pmc_index)
assert(cpu_pmc->nr_refs != 0);
cpu_pmc->nr_refs--;
- if (cpu_pmc->nr_refs == 0)
- pmu_stop(perfmon_pmu.pmcs[pmc_index].id);
+ if (cpu_pmc->nr_refs == 0) {
+ pmu_driver.stop(perfmon_pmu.pmcs[pmc_index].id);
+ }
}
+
+int
+perfmon_pmu_register(struct perfmon_pmu_ops *driver)
+{
+ assert(driver->info && driver->translate && driver->alloc
+ && driver->free && driver->start && driver->stop);
+
+ if (pmu_driver.info) {
+ /* Already initialized */
+ assert(0);
+ return ERROR_INVAL;
+ }
+ pmu_driver = *driver;
+
+ return 0;
+}
+
static int __init
perfmon_bootstrap(void)
{
@@ -430,6 +452,12 @@ perfmon_setup(void)
percpu_var(perfmon_cpu_grouplist, i) = grouplist;
}
+
+ if (!pmu_driver.info) {
+ log_err("unable to start perfmon: no compatible pmu driver available");
+ return ERROR_NODEV;
+ }
+ pmu_driver.info();
return 0;
}
@@ -440,7 +468,8 @@ INIT_OP_DEFINE(perfmon_setup,
INIT_OP_DEP(panic_setup, true),
INIT_OP_DEP(percpu_setup, true),
INIT_OP_DEP(perfmon_bootstrap, true),
- INIT_OP_DEP(pmu_setup, true),
+ INIT_OP_DEP(pmu_intel_setup, false),
+ INIT_OP_DEP(pmu_amd_setup, false),
INIT_OP_DEP(spinlock_setup, true),
INIT_OP_DEP(thread_setup, true));
@@ -503,7 +532,7 @@ perfmon_event_sync(struct perfmon_event *event)
uint64_t count;
pmc = perfmon_pmc_from_index(event->pmc_index);
- count = pmu_read(pmc->id);
+ count = pmu_driver.read(pmc->id);
/* TODO: overflow managment. */
event->count += (count - event->prev);
event->prev = count;
@@ -768,7 +797,7 @@ perfmon_group_load(struct perfmon_group *group)
list_for_each_entry(&group->events, event, node) {
perfmon_cpu_pmu_load(cpu_pmu, event->pmc_index);
- event->prev = pmu_read(perfmon_pmu.pmcs[event->pmc_index].id);
+ event->prev = pmu_driver.read(perfmon_pmu.pmcs[event->pmc_index].id);
}
group->cpu = cpu_id();
diff --git a/kern/perfmon.h b/kern/perfmon.h
index 89fdd3d..1ab24be 100644
--- a/kern/perfmon.h
+++ b/kern/perfmon.h
@@ -52,6 +52,21 @@
#define PERFMON_EF_MASK (PERFMON_EF_KERN | PERFMON_EF_USER)
/*
+ * PMU operations.
+ *
+ * Set by calling perfmon_pmu_register.
+ */
+struct perfmon_pmu_ops {
+ void (*info)(void);
+ int (*translate)(unsigned int *raw_event_idp, unsigned int event_id);
+ int (*alloc)(unsigned int *pmc_idp, unsigned int raw_event_id);
+ void (*free)(unsigned int pmc_id);
+ void (*start)(unsigned int pmc_id, unsigned int raw_event_id);
+ void (*stop)(unsigned int pmc_id);
+ uint64_t (*read)(unsigned int pmc_id);
+};
+
+/*
* Performance monitoring event.
*
* An event describes a single, well-defined state and records its
@@ -200,4 +215,17 @@ INIT_OP_DECLARE(perfmon_bootstrap);
*/
INIT_OP_DECLARE(perfmon_setup);
+/*
+ * Register an architecture-specific driver.
+ */
+int
+perfmon_pmu_register(struct perfmon_pmu_ops *driver);
+
+/*
+ * PMU driver init operations provide:
+ * - module fully initialized
+ */
+INIT_OP_DECLARE(pmu_intel_setup);
+INIT_OP_DECLARE(pmu_amd_setup);
+
#endif /* _KERN_PERFMON_H */