summaryrefslogtreecommitdiff
path: root/arch/x86/machine/pmu_intel.c
diff options
context:
space:
mode:
authorRemy Noel <mocramis@gmail.com>2018-01-20 11:41:39 +0100
committerRemy Noel <mocramis@gmail.com>2018-02-22 23:33:24 +0100
commit652168fe3d867eec17ac7fa318c8743d524ef40f (patch)
tree0d16e8f3f07c99f0f5b78c74794686651cde4aed /arch/x86/machine/pmu_intel.c
parente363294da50ad602791ceccae7182a7d799bf032 (diff)
perfmon: split pmu into amd and intel architectures.
pmu_driver handling is now perfmon's responsibility.
Diffstat (limited to 'arch/x86/machine/pmu_intel.c')
-rw-r--r--arch/x86/machine/pmu_intel.c256
1 file changed, 256 insertions, 0 deletions
diff --git a/arch/x86/machine/pmu_intel.c b/arch/x86/machine/pmu_intel.c
new file mode 100644
index 0000000..e4d3f21
--- /dev/null
+++ b/arch/x86/machine/pmu_intel.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2014 Remy Noel.
+ * Copyright (c) 2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * INTEL PMU driver module.
+ */
+
+#include <stdint.h>
+
+#include <include/assert.h>
+#include <kern/error.h>
+#include <kern/init.h>
+#include <kern/log.h>
+#include <machine/cpu.h>
+#include <kern/perfmon.h>
+
+/*
+ * Intel raw event IDs.
+ */
+#define PMU_INTEL_RE_CYCLE 0
+#define PMU_INTEL_RE_REF_CYCLE 1
+#define PMU_INTEL_RE_INSTRUCTION 2
+#define PMU_INTEL_RE_CACHE_REF 3
+#define PMU_INTEL_RE_CACHE_MISS 4
+#define PMU_INTEL_RE_BRANCH 5
+#define PMU_INTEL_RE_BRANCH_MISS 6
+
+/*
+ * PMU MSR addresses
+ */
+#define PMU_INTEL_MSR_PMC0 0x0c1
+#define PMU_INTEL_MSR_EVTSEL0 0x186
+
+/*
+ * Event Select Register addresses
+ */
+#define PMU_INTEL_EVTSEL_USR 0x00010000
+#define PMU_INTEL_EVTSEL_OS 0x00020000
+#define PMU_INTEL_EVTSEL_EN 0x00400000
+
+#define PMU_INTEL_ID_VERSION_MASK 0x000000ff
+#define PMU_INTEL_ID_NR_PMCS_MASK 0x0000ff00
+#define PMU_INTEL_ID_NR_PMCS_OFFSET 8
+#define PMU_INTEL_ID_PMC_WIDTH_MASK 0x00ff0000
+#define PMU_INTEL_ID_PMC_WIDTH_OFFSET 16
+#define PMU_INTEL_ID_EVLEN_MASK 0xff000000
+#define PMU_INTEL_ID_EVLEN_OFFSET 24
+#define PMU_INTEL_ID_EVLEN_MAX 7
+
/*
 * Global PMU state, filled at setup time from CPUID leaf 0xa.
 */
struct pmu_intel {
    unsigned int version;    /* architectural PMU version (1 to 3 supported) */
    unsigned int nr_pmcs;    /* number of general-purpose counters */
    unsigned int pmc_bm;     /* bitmap of free counters (1 bit = free) */
    unsigned int pmc_width;  /* counter width, in bits */
    unsigned int events;     /* bitmap of supported PMU_INTEL_EVENT_* events */
};

/*
 * Hardware encoding of a raw event: the availability bit it depends on,
 * and the event select / unit mask fields written to the EVTSEL MSR.
 */
struct pmu_intel_event_code {
    unsigned int hw_event_id;
    unsigned short event_select;
    unsigned short umask;
};

/* Single, statically allocated PMU descriptor. */
static struct pmu_intel pmu_intel;
+
+/*
+ * Intel hardware events.
+ */
+#define PMU_INTEL_EVENT_CYCLE 0x01
+#define PMU_INTEL_EVENT_INSTRUCTION 0x02
+#define PMU_INTEL_EVENT_REF_CYCLE 0x04
+#define PMU_INTEL_EVENT_CACHE_REF 0x08
+#define PMU_INTEL_EVENT_CACHE_MISS 0x10
+#define PMU_INTEL_EVENT_BRANCH 0x20
+#define PMU_INTEL_EVENT_BRANCH_MISS 0x40
+
/* Map generic PERFMON_EV_* IDs to Intel raw event IDs. */
static const unsigned int pmu_intel_raw_events[] = {
    [PERFMON_EV_CYCLE] = PMU_INTEL_RE_CYCLE,
    [PERFMON_EV_REF_CYCLE] = PMU_INTEL_RE_REF_CYCLE,
    [PERFMON_EV_INSTRUCTION] = PMU_INTEL_RE_INSTRUCTION,
    [PERFMON_EV_CACHE_REF] = PMU_INTEL_RE_CACHE_REF,
    [PERFMON_EV_CACHE_MISS] = PMU_INTEL_RE_CACHE_MISS,
    [PERFMON_EV_BRANCH] = PMU_INTEL_RE_BRANCH,
    [PERFMON_EV_BRANCH_MISS] = PMU_INTEL_RE_BRANCH_MISS,
};

/*
 * Per-raw-event hardware encodings: availability bit, event select and
 * unit mask. NOTE(review): the select/umask values appear to follow the
 * Intel architectural (pre-defined) events — verify against the SDM.
 */
static const struct pmu_intel_event_code pmu_intel_event_codes[] = {
    [PMU_INTEL_RE_CYCLE] = { PMU_INTEL_EVENT_CYCLE, 0x3c, 0x00 },
    [PMU_INTEL_RE_REF_CYCLE] = { PMU_INTEL_EVENT_REF_CYCLE, 0x3c, 0x01 },
    [PMU_INTEL_RE_INSTRUCTION] = { PMU_INTEL_EVENT_INSTRUCTION, 0xc0, 0x00 },
    [PMU_INTEL_RE_CACHE_REF] = { PMU_INTEL_EVENT_CACHE_REF, 0x2e, 0x4f },
    [PMU_INTEL_RE_CACHE_MISS] = { PMU_INTEL_EVENT_CACHE_MISS, 0x2e, 0x41 },
    [PMU_INTEL_RE_BRANCH] = { PMU_INTEL_EVENT_BRANCH, 0xc4, 0x00 },
    [PMU_INTEL_RE_BRANCH_MISS] = { PMU_INTEL_EVENT_BRANCH_MISS, 0xc5, 0x00 },
};
+
/*
 * Return the single, global PMU descriptor.
 */
static struct pmu_intel *
pmu_intel_get(void)
{
    return &pmu_intel;
}
+
/*
 * Log a summary of the detected PMU: counter count and width, and the
 * bitmap of supported events.
 */
static void
pmu_intel_info(void)
{
    const struct pmu_intel *pmu;
    unsigned int nr_events;

    pmu = pmu_intel_get();
    /* One bit per supported event, so popcount gives the event count. */
    nr_events = __builtin_popcount(pmu->events);
    log_info("pmu: driver: intel, architectural v1\n"
        "pmu: nr_pmcs: %u, pmc_width: %u, events: %#x, nr_events: %u\n",
        pmu->nr_pmcs, pmu->pmc_width, pmu->events, nr_events);
}
+
+static int
+pmu_intel_translate(unsigned int *raw_event_idp, unsigned event_id)
+{
+ if (event_id >= ARRAY_SIZE(pmu_intel_raw_events))
+ return ERROR_INVAL;
+
+ *raw_event_idp = pmu_intel_raw_events[event_id];
+
+ return 0;
+}
+
+static int
+pmu_intel_alloc(unsigned int *pmc_idp, unsigned int raw_event_id)
+{
+ struct pmu_intel *pmu;
+ unsigned int pmc_id;
+ unsigned int hw_event_id;
+
+ pmu = pmu_intel_get();
+ assert(raw_event_id < ARRAY_SIZE(pmu_intel_event_codes));
+ hw_event_id = pmu_intel_event_codes[raw_event_id].hw_event_id;
+
+ if (!(pmu->events & hw_event_id))
+ return ERROR_INVAL;
+
+ if (pmu->pmc_bm == 0)
+ return ERROR_AGAIN;
+
+ pmc_id = __builtin_ffs(pmu->pmc_bm) - 1;
+ pmu->pmc_bm &= ~(1U << pmc_id);
+ *pmc_idp = pmc_id;
+ return 0;
+}
+
+static void
+pmu_intel_free(unsigned int pmc_id)
+{
+ struct pmu_intel *pmu;
+ unsigned int mask;
+
+ pmu = pmu_intel_get();
+ mask = (1U << pmc_id);
+ assert(!(pmu->pmc_bm & mask));
+ pmu->pmc_bm |= mask;
+}
+
/*
 * Program and enable the given counter to count the given raw event.
 *
 * Counting is unconditionally enabled for both user and kernel mode
 * (USR and OS bits set).
 */
static void
pmu_intel_start(unsigned int pmc_id, unsigned int raw_event_id)
{
    const struct pmu_intel_event_code *code;
    uint32_t evtsel;

    assert(raw_event_id < ARRAY_SIZE(pmu_intel_event_codes));
    code = &pmu_intel_event_codes[raw_event_id];

    /* TODO Handle PERFMON_EF_KERN/PERFMON_EF_USER */
    evtsel = PMU_INTEL_EVTSEL_EN
        | PMU_INTEL_EVTSEL_OS
        | PMU_INTEL_EVTSEL_USR
        | (code->umask << 8)    /* unit mask occupies bits 15:8 of EVTSEL */
        | code->event_select;
    /*
     * NOTE(review): assumes cpu_set_msr(msr, high, low), with the high
     * 32 bits written as 0 — confirm against machine/cpu.h.
     */
    cpu_set_msr(PMU_INTEL_MSR_EVTSEL0 + pmc_id, 0, evtsel);
}
+
/*
 * Stop the given counter by clearing its event select register, which
 * clears the EN (enable) bit among others.
 */
static void
pmu_intel_stop(unsigned int pmc_id)
{
    cpu_set_msr(PMU_INTEL_MSR_EVTSEL0 + pmc_id, 0, 0);
}
+
/*
 * Read the current value of the given counter.
 */
static uint64_t
pmu_intel_read(unsigned int pmc_id)
{
    uint32_t high, low;

    cpu_get_msr(PMU_INTEL_MSR_PMC0 + pmc_id, &high, &low);
    /* Recombine the two 32-bit MSR halves into a 64-bit value. */
    return (((uint64_t)high << 32) | low);
}
+
+static int __init
+pmu_intel_setup(void)
+{
+ const struct cpu *cpu;
+ struct pmu_intel *pmu;
+ struct perfmon_pmu_ops pmu_driver;
+ unsigned int eax, ebx, ecx, edx, ev_len;
+
+ cpu = cpu_current();
+ eax = 0xa;
+
+ if (cpu->vendor_id != CPU_VENDOR_INTEL) {
+ return 0;
+ }
+
+ if (cpu->cpuid_max_basic < eax)
+ return ERROR_NODEV;
+
+ pmu = pmu_intel_get();
+ cpu_cpuid(&eax, &ebx, &ecx, &edx);
+ pmu->version = eax & PMU_INTEL_ID_VERSION_MASK;
+
+ if ((pmu->version == 0) || (pmu->version > 3))
+ return ERROR_NODEV;
+
+ pmu->nr_pmcs = (eax & PMU_INTEL_ID_NR_PMCS_MASK)
+ >> PMU_INTEL_ID_NR_PMCS_OFFSET;
+ pmu->pmc_bm = (1U << pmu->nr_pmcs ) - 1;
+ pmu->pmc_width = (eax & PMU_INTEL_ID_PMC_WIDTH_MASK)
+ >> PMU_INTEL_ID_PMC_WIDTH_OFFSET;
+ ev_len = (eax & PMU_INTEL_ID_EVLEN_MASK) >> PMU_INTEL_ID_EVLEN_OFFSET;
+ assert(ev_len <= PMU_INTEL_ID_EVLEN_MAX);
+ pmu->events = ~ebx & ((1U << ev_len) - 1);
+
+ pmu_driver.info = pmu_intel_info;
+ pmu_driver.translate = pmu_intel_translate;
+ pmu_driver.alloc = pmu_intel_alloc;
+ pmu_driver.free = pmu_intel_free;
+ pmu_driver.start = pmu_intel_start;
+ pmu_driver.stop = pmu_intel_stop;
+ pmu_driver.read = pmu_intel_read;
+
+ return perfmon_pmu_register(&pmu_driver);
+}
+
/*
 * Register setup with the init system: it calls into perfmon, reads CPU
 * state and logs, so it must run after all three dependencies.
 */
INIT_OP_DEFINE(pmu_intel_setup,
    INIT_OP_DEP(perfmon_bootstrap, true),
    INIT_OP_DEP(cpu_setup, true),
    INIT_OP_DEP(log_setup, true));