author    Remy Noel <mocramis@gmail.com>    2017-10-01 15:09:38 +0200
committer Remy Noel <mocramis@gmail.com>    2017-10-01 17:17:10 +0200
commit    3affdd390f5e12731c92021952917bfc4843b2f7 (patch)
tree      10f39eb5ba58543658dc485388662a4a18e4d42d /arch/x86/machine
parent    5c209f9abd290eaf9b29f98cfd4102ed2a93507f (diff)
Perfmon module prototype
Diffstat (limited to 'arch/x86/machine')
-rw-r--r--    arch/x86/machine/cpu.c    |  52
-rw-r--r--    arch/x86/machine/cpu.h    |  23
-rw-r--r--    arch/x86/machine/pmu.c    | 495
-rw-r--r--    arch/x86/machine/pmu.h    | 104
4 files changed, 662 insertions, 12 deletions
diff --git a/arch/x86/machine/cpu.c b/arch/x86/machine/cpu.c
index e971d99..c733daa 100644
--- a/arch/x86/machine/cpu.c
+++ b/arch/x86/machine/cpu.c
@@ -69,6 +69,11 @@
#define CPU_INVALID_APIC_ID ((unsigned int)-1)
+struct cpu_vendor {
+ unsigned int id;
+ const char *str;
+};
+
/*
* MP related CMOS ports, registers and values.
*/
@@ -173,6 +178,11 @@ cpu_delay(unsigned long usecs)
} while (total > 0);
}
+static const struct cpu_vendor cpu_vendors[] = {
+ { CPU_VENDOR_INTEL, "GenuineIntel" },
+ { CPU_VENDOR_AMD, "AuthenticAMD" },
+};
+
void * __init
cpu_get_boot_stack(void)
{
@@ -182,10 +192,9 @@ cpu_get_boot_stack(void)
static void __init
cpu_preinit(struct cpu *cpu, unsigned int id, unsigned int apic_id)
{
+ memset(cpu, 0, sizeof(*cpu));
cpu->id = id;
cpu->apic_id = apic_id;
- cpu->state = CPU_STATE_OFF;
- cpu->boot_stack = NULL;
}
static void
@@ -430,6 +439,31 @@ cpu_load_idt(const void *idt, size_t size)
asm volatile("lidt %0" : : "m" (idtr));
}
+static const struct cpu_vendor *
+cpu_vendor_lookup(const char *str)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(cpu_vendors); i++)
+ if (strcmp(str, cpu_vendors[i].str) == 0)
+ return &cpu_vendors[i];
+
+ return NULL;
+}
+
+static void __init
+cpu_init_vendor_id(struct cpu *cpu)
+{
+ const struct cpu_vendor *vendor;
+
+ vendor = cpu_vendor_lookup(cpu->vendor_str);
+
+ if (vendor == NULL)
+ return;
+
+ cpu->vendor_id = vendor->id;
+}
+
/*
* Initialize the given cpu structure for the current processor.
*/
@@ -456,10 +490,12 @@ cpu_init(struct cpu *cpu)
eax = 0;
cpu_cpuid(&eax, &ebx, &ecx, &edx);
max_basic = eax;
- memcpy(cpu->vendor_id, &ebx, sizeof(ebx));
- memcpy(cpu->vendor_id + 4, &edx, sizeof(edx));
- memcpy(cpu->vendor_id + 8, &ecx, sizeof(ecx));
- cpu->vendor_id[sizeof(cpu->vendor_id) - 1] = '\0';
+ cpu->cpuid_max_basic = max_basic;
+ memcpy(cpu->vendor_str, &ebx, sizeof(ebx));
+ memcpy(cpu->vendor_str + 4, &edx, sizeof(edx));
+ memcpy(cpu->vendor_str + 8, &ecx, sizeof(ecx));
+ cpu->vendor_str[sizeof(cpu->vendor_str) - 1] = '\0';
+ cpu_init_vendor_id(cpu);
/* Some fields are only initialized if supported by the processor */
cpu->model_name[0] = '\0';
@@ -500,6 +536,8 @@ cpu_init(struct cpu *cpu)
max_extended = eax;
}
+ cpu->cpuid_max_extended = max_extended;
+
if (max_extended < 0x80000001) {
cpu->features3 = 0;
cpu->features4 = 0;
@@ -619,7 +657,7 @@ void
cpu_log_info(const struct cpu *cpu)
{
log_info("cpu%u: %s, type %u, family %u, model %u, stepping %u",
- cpu->id, cpu->vendor_id, cpu->type, cpu->family, cpu->model,
+ cpu->id, cpu->vendor_str, cpu->type, cpu->family, cpu->model,
cpu->stepping);
if (strlen(cpu->model_name) > 0) {
diff --git a/arch/x86/machine/cpu.h b/arch/x86/machine/cpu.h
index 39f17f8..5299013 100644
--- a/arch/x86/machine/cpu.h
+++ b/arch/x86/machine/cpu.h
@@ -215,9 +215,13 @@ struct cpu_tss {
uint16_t iobp_base;
} __packed;
-#define CPU_VENDOR_ID_SIZE 13
+#define CPU_VENDOR_STR_SIZE 13
#define CPU_MODEL_NAME_SIZE 49
+#define CPU_VENDOR_UNKNOWN 0
+#define CPU_VENDOR_INTEL 1
+#define CPU_VENDOR_AMD 2
+
/*
* CPU states.
*/
@@ -227,8 +231,11 @@ struct cpu_tss {
struct cpu {
unsigned int id;
unsigned int apic_id;
- char vendor_id[CPU_VENDOR_ID_SIZE];
+ char vendor_str[CPU_VENDOR_STR_SIZE];
char model_name[CPU_MODEL_NAME_SIZE];
+ unsigned int cpuid_max_basic;
+ unsigned int cpuid_max_extended;
+ unsigned int vendor_id;
unsigned int type;
unsigned int family;
unsigned int model;
@@ -534,16 +541,22 @@ cpu_cpuid(unsigned int *eax, unsigned int *ebx, unsigned int *ecx,
: : "memory");
}
+/*
+ * Implies a compiler barrier.
+ */
static __always_inline void
cpu_get_msr(uint32_t msr, uint32_t *high, uint32_t *low)
{
- asm volatile("rdmsr" : "=a" (*low), "=d" (*high) : "c" (msr));
+ asm volatile("rdmsr" : "=a" (*low), "=d" (*high) : "c" (msr) : "memory");
}
-static __always_inline void
+/*
+ * Implies a full memory barrier.
+ */
+static inline void
cpu_set_msr(uint32_t msr, uint32_t high, uint32_t low)
{
- asm volatile("wrmsr" : : "c" (msr), "a" (low), "d" (high));
+ asm volatile("wrmsr" : : "c" (msr), "a" (low), "d" (high) : "memory");
}
static __always_inline uint64_t
diff --git a/arch/x86/machine/pmu.c b/arch/x86/machine/pmu.c
new file mode 100644
index 0000000..9274095
--- /dev/null
+++ b/arch/x86/machine/pmu.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright (c) 2014 Remy Noel.
+ * Copyright (c) 2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdint.h>
+
+#include <include/assert.h>
+#include <kern/error.h>
+#include <kern/init.h>
+#include <kern/log.h>
+#include <kern/perfmon.h>
+#include <kern/thread.h>
+#include <machine/cpu.h>
+#include <machine/page.h>
+#include <machine/pmu.h>
+
+struct pmu_driver {
+ void (*info)(void);
+ int (*translate)(unsigned int *raw_event_idp, unsigned int event_id);
+ int (*alloc)(unsigned int *pmc_idp, unsigned int raw_event_id);
+ void (*free)(unsigned int pmc_id);
+ void (*start)(unsigned int pmc_id, unsigned int raw_event_id);
+ void (*stop)(unsigned int pmc_id);
+ uint64_t (*read)(unsigned int pmc_id);
+};
+
+static struct pmu_driver pmu_driver __read_mostly;
+
+
+/* AMD driver */
+
+#define PMU_AMD_MSR_PERFEVTSEL0 0xc0010000
+#define PMU_AMD_MSR_PERCTR0 0xc0010004
+
+#define PMU_AMD_EVTSEL_USR 0x00010000
+#define PMU_AMD_EVTSEL_OS 0x00020000
+#define PMU_AMD_EVTSEL_EN 0x00400000
+
+/*
+ * AMD PMU properties seem to be identical across all processors, despite
+ * many of these properties being implementation-specific.
+ */
+#define PMU_AMD_NR_PMCS 4
+#define PMU_AMD_PMC_WIDTH 48
+
+struct pmu_amd {
+ unsigned int pmc_bm;
+};
+
+struct pmu_amd_event_code {
+ unsigned short event_select;
+ unsigned short umask;
+};
+
+static struct pmu_amd pmu_amd;
+
+/*
+ * TODO Per-family/model event availability database.
+ */
+static const struct pmu_amd_event_code pmu_amd_event_codes[] = {
+ [PMU_AMD_RE_CYCLE] = { 0x76, 0x00 },
+ [PMU_AMD_RE_INSTRUCTION] = { 0xc0, 0x00 },
+ [PMU_AMD_RE_CACHE_REF] = { 0x80, 0x00 },
+ [PMU_AMD_RE_CACHE_MISS] = { 0x81, 0x00 },
+ [PMU_AMD_RE_BRANCH] = { 0xc2, 0x00 },
+ [PMU_AMD_RE_BRANCH_MISS] = { 0xc3, 0x00 },
+ [PMU_AMD_RE_DCACHE_REF] = { 0x40, 0x00 },
+ [PMU_AMD_RE_DCACHE_MISS] = { 0x41, 0x00 },
+ [PMU_AMD_RE_IFETCH_STALL] = { 0x87, 0x00 },
+};
+
+#define PMU_AMD_RE_INVALID ((unsigned int)-1)
+
+static const unsigned int pmu_amd_generic_events[] = {
+ [PERFMON_EV_CYCLE] = PMU_AMD_RE_CYCLE,
+ [PERFMON_EV_REF_CYCLE] = PMU_AMD_RE_INVALID,
+ [PERFMON_EV_INSTRUCTION] = PMU_AMD_RE_INSTRUCTION,
+ [PERFMON_EV_CACHE_REF] = PMU_AMD_RE_CACHE_REF,
+ [PERFMON_EV_CACHE_MISS] = PMU_AMD_RE_CACHE_MISS,
+ [PERFMON_EV_BRANCH] = PMU_AMD_RE_BRANCH,
+ [PERFMON_EV_BRANCH_MISS] = PMU_AMD_RE_BRANCH_MISS,
+};
+
+static struct pmu_amd *
+pmu_amd_get(void)
+{
+ return &pmu_amd;
+}
+
+static void
+pmu_amd_info(void)
+{
+ log_info("pmu: driver: amd, nr_pmcs: %u, pmc_width: %u\n",
+ PMU_AMD_NR_PMCS, PMU_AMD_PMC_WIDTH);
+}
+
+static int
+pmu_amd_translate(unsigned int *raw_event_idp, unsigned int event_id)
+{
+ assert(event_id < ARRAY_SIZE(pmu_amd_generic_events));
+ *raw_event_idp = pmu_amd_generic_events[event_id];
+ return 0;
+}
+
+static int
+pmu_amd_alloc(unsigned int *pmc_idp, unsigned int raw_event_id)
+{
+ struct pmu_amd *pmu;
+ unsigned int pmc_id;
+
+ /* TODO Check raw event availability */
+ (void)raw_event_id;
+
+ pmu = pmu_amd_get();
+
+ if (pmu->pmc_bm == 0)
+ return ERROR_AGAIN;
+
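+ /* The bitmap has one bit set per free counter; pick the lowest one */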
+ pmc_id = __builtin_ffs(pmu->pmc_bm) - 1;
+ pmu->pmc_bm &= ~(1U << pmc_id);
+ *pmc_idp = pmc_id;
+ return 0;
+}
+
+static void
+pmu_amd_free(unsigned int pmc_id)
+{
+ struct pmu_amd *pmu;
+ unsigned int mask;
+
+ assert(pmc_id < PMU_AMD_NR_PMCS);
+
+ pmu = pmu_amd_get();
+ mask = (1U << pmc_id);
+ assert(!(pmu->pmc_bm & mask));
+ pmu->pmc_bm |= mask;
+}
+
+static void
+pmu_amd_start(unsigned int pmc_id, unsigned int raw_event_id)
+{
+ const struct pmu_amd_event_code *code;
+ uint32_t high, low;
+
+ assert(pmc_id < PMU_AMD_NR_PMCS);
+ assert(raw_event_id < ARRAY_SIZE(pmu_amd_event_codes));
+
+ code = &pmu_amd_event_codes[raw_event_id];
+
+ /* TODO Handle PERFMON_EF_KERN/PERFMON_EF_USER */
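+ /*
+  * The event select field is 12 bits wide: bits 7:0 are written to the
+  * low word of PERFEVTSEL together with the unit mask, while bits 11:8
+  * go in the high word (MSR bits 35:32), hence the shift below.
+  */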
+ high = code->event_select >> 8;
+ low = PMU_AMD_EVTSEL_EN
+ | PMU_AMD_EVTSEL_OS
+ | PMU_AMD_EVTSEL_USR
+ | (code->umask << 8)
+ | (code->event_select & 0xff);
+ cpu_set_msr(PMU_AMD_MSR_PERFEVTSEL0 + pmc_id, high, low);
+}
+
+static void
+pmu_amd_stop(unsigned int pmc_id)
+{
+ assert(pmc_id < PMU_AMD_NR_PMCS);
+ cpu_set_msr(PMU_AMD_MSR_PERFEVTSEL0 + pmc_id, 0, 0);
+}
+
+static uint64_t
+pmu_amd_read(unsigned int pmc_id)
+{
+ uint32_t high, low;
+
+ assert(pmc_id < PMU_AMD_NR_PMCS);
+
+ cpu_get_msr(PMU_AMD_MSR_PERCTR0 + pmc_id, &high, &low);
+ return (((uint64_t)high << 32) | low);
+}
+
+static int __init
+pmu_amd_setup(const struct cpu *cpu)
+{
+ struct pmu_amd *pmu;
+
+ /* Support AMD Family 10h processors and later */
+ if (cpu->family < 16)
+ return ERROR_NODEV;
+
+ pmu = pmu_amd_get();
+ pmu->pmc_bm = (1U << PMU_AMD_NR_PMCS) - 1;
+
+ pmu_driver.info = pmu_amd_info;
+ pmu_driver.translate = pmu_amd_translate;
+ pmu_driver.alloc = pmu_amd_alloc;
+ pmu_driver.free = pmu_amd_free;
+ pmu_driver.start = pmu_amd_start;
+ pmu_driver.stop = pmu_amd_stop;
+ pmu_driver.read = pmu_amd_read;
+
+ return 0;
+}
+
+
+/* Intel driver */
+
+#define PMU_INTEL_MSR_PMC0 0x0c1
+#define PMU_INTEL_MSR_EVTSEL0 0x186
+
+#define PMU_INTEL_EVTSEL_USR 0x00010000
+#define PMU_INTEL_EVTSEL_OS 0x00020000
+#define PMU_INTEL_EVTSEL_EN 0x00400000
+
+#define PMU_INTEL_ID_VERSION_MASK 0x000000ff
+#define PMU_INTEL_ID_NR_PMCS_MASK 0x0000ff00
+#define PMU_INTEL_ID_NR_PMCS_OFFSET 8
+#define PMU_INTEL_ID_PMC_WIDTH_MASK 0x00ff0000
+#define PMU_INTEL_ID_PMC_WIDTH_OFFSET 16
+#define PMU_INTEL_ID_EVLEN_MASK 0xff000000
+#define PMU_INTEL_ID_EVLEN_OFFSET 24
+#define PMU_INTEL_ID_EVLEN_MAX 7
+
+struct pmu_intel {
+ unsigned int version;
+ unsigned int nr_pmcs;
+ unsigned int pmc_bm;
+ unsigned int pmc_width;
+ unsigned int events;
+};
+
+struct pmu_intel_event_code {
+ unsigned int hw_event_id;
+ unsigned short event_select;
+ unsigned short umask;
+};
+
+static struct pmu_intel pmu_intel;
+
+/*
+ * Intel hardware events.
+ */
+#define PMU_INTEL_EVENT_CYCLE 0x01
+#define PMU_INTEL_EVENT_INSTRUCTION 0x02
+#define PMU_INTEL_EVENT_REF_CYCLE 0x04
+#define PMU_INTEL_EVENT_CACHE_REF 0x08
+#define PMU_INTEL_EVENT_CACHE_MISS 0x10
+#define PMU_INTEL_EVENT_BRANCH 0x20
+#define PMU_INTEL_EVENT_BRANCH_MISS 0x40
+
+static const unsigned int pmu_intel_raw_events[] = {
+ [PERFMON_EV_CYCLE] = PMU_INTEL_RE_CYCLE,
+ [PERFMON_EV_REF_CYCLE] = PMU_INTEL_RE_REF_CYCLE,
+ [PERFMON_EV_INSTRUCTION] = PMU_INTEL_RE_INSTRUCTION,
+ [PERFMON_EV_CACHE_REF] = PMU_INTEL_RE_CACHE_REF,
+ [PERFMON_EV_CACHE_MISS] = PMU_INTEL_RE_CACHE_MISS,
+ [PERFMON_EV_BRANCH] = PMU_INTEL_RE_BRANCH,
+ [PERFMON_EV_BRANCH_MISS] = PMU_INTEL_RE_BRANCH_MISS,
+};
+
+static const struct pmu_intel_event_code pmu_intel_event_codes[] = {
+ [PMU_INTEL_RE_CYCLE] = { PMU_INTEL_EVENT_CYCLE, 0x3c, 0x00 },
+ [PMU_INTEL_RE_REF_CYCLE] = { PMU_INTEL_EVENT_REF_CYCLE, 0x3c, 0x01 },
+ [PMU_INTEL_RE_INSTRUCTION] = { PMU_INTEL_EVENT_INSTRUCTION, 0xc0, 0x00 },
+ [PMU_INTEL_RE_CACHE_REF] = { PMU_INTEL_EVENT_CACHE_REF, 0x2e, 0x4f },
+ [PMU_INTEL_RE_CACHE_MISS] = { PMU_INTEL_EVENT_CACHE_MISS, 0x2e, 0x41 },
+ [PMU_INTEL_RE_BRANCH] = { PMU_INTEL_EVENT_BRANCH, 0xc4, 0x00 },
+ [PMU_INTEL_RE_BRANCH_MISS] = { PMU_INTEL_EVENT_BRANCH_MISS, 0xc5, 0x00 },
+};
+
+static struct pmu_intel *
+pmu_intel_get(void)
+{
+ return &pmu_intel;
+}
+
+static void
+pmu_intel_info(void)
+{
+ const struct pmu_intel *pmu;
+ unsigned int nr_events;
+
+ pmu = pmu_intel_get();
+ nr_events = __builtin_popcount(pmu->events);
+ log_info("pmu: driver: intel, architectural v1\n"
+ "pmu: nr_pmcs: %u, pmc_width: %u, events: %#x, nr_events: %u\n",
+ pmu->nr_pmcs, pmu->pmc_width, pmu->events, nr_events);
+}
+
+static int
+pmu_intel_translate(unsigned int *raw_event_idp, unsigned int event_id)
+{
+ if (event_id >= ARRAY_SIZE(pmu_intel_raw_events))
+ return ERROR_INVAL;
+
+ *raw_event_idp = pmu_intel_raw_events[event_id];
+
+ return 0;
+}
+
+static int
+pmu_intel_alloc(unsigned int *pmc_idp, unsigned int raw_event_id)
+{
+ struct pmu_intel *pmu;
+ unsigned int pmc_id;
+ unsigned int hw_event_id;
+
+ pmu = pmu_intel_get();
+ assert(raw_event_id < ARRAY_SIZE(pmu_intel_event_codes));
+ hw_event_id = pmu_intel_event_codes[raw_event_id].hw_event_id;
+
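+ /*
+  * Each hardware event ID is a single bit in the availability mask
+  * computed from CPUID leaf 0xa in pmu_intel_setup().
+  */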
+ if (!(pmu->events & hw_event_id))
+ return ERROR_INVAL;
+
+ if (pmu->pmc_bm == 0)
+ return ERROR_AGAIN;
+
+ pmc_id = __builtin_ffs(pmu->pmc_bm) - 1;
+ pmu->pmc_bm &= ~(1U << pmc_id);
+ *pmc_idp = pmc_id;
+ return 0;
+}
+
+static void
+pmu_intel_free(unsigned int pmc_id)
+{
+ struct pmu_intel *pmu;
+ unsigned int mask;
+
+ pmu = pmu_intel_get();
+ mask = (1U << pmc_id);
+ assert(!(pmu->pmc_bm & mask));
+ pmu->pmc_bm |= mask;
+}
+
+static void
+pmu_intel_start(unsigned int pmc_id, unsigned int raw_event_id)
+{
+ const struct pmu_intel_event_code *code;
+ uint32_t evtsel;
+
+ assert(raw_event_id < ARRAY_SIZE(pmu_intel_event_codes));
+ code = &pmu_intel_event_codes[raw_event_id];
+
+ /* TODO Handle PERFMON_EF_KERN/PERFMON_EF_USER */
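+ /*
+  * All architectural IA32_PERFEVTSELx fields fit in the low 32 bits,
+  * so the high word of the MSR is left as zero.
+  */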
+ evtsel = PMU_INTEL_EVTSEL_EN
+ | PMU_INTEL_EVTSEL_OS
+ | PMU_INTEL_EVTSEL_USR
+ | (code->umask << 8)
+ | code->event_select;
+ cpu_set_msr(PMU_INTEL_MSR_EVTSEL0 + pmc_id, 0, evtsel);
+}
+
+static void
+pmu_intel_stop(unsigned int pmc_id)
+{
+ cpu_set_msr(PMU_INTEL_MSR_EVTSEL0 + pmc_id, 0, 0);
+}
+
+static uint64_t
+pmu_intel_read(unsigned int pmc_id)
+{
+ uint32_t high, low;
+
+ cpu_get_msr(PMU_INTEL_MSR_PMC0 + pmc_id, &high, &low);
+ return (((uint64_t)high << 32) | low);
+}
+
+static int __init
+pmu_intel_setup(const struct cpu *cpu)
+{
+ struct pmu_intel *pmu;
+ unsigned int eax, ebx, ecx, edx, ev_len;
+
+ eax = 0xa;
+
+ if (cpu->cpuid_max_basic < eax)
+ return ERROR_NODEV;
+
+ pmu = pmu_intel_get();
+ cpu_cpuid(&eax, &ebx, &ecx, &edx);
+ pmu->version = eax & PMU_INTEL_ID_VERSION_MASK;
+
+ if ((pmu->version == 0) || (pmu->version > 3))
+ return ERROR_NODEV;
+
+ pmu->nr_pmcs = (eax & PMU_INTEL_ID_NR_PMCS_MASK)
+ >> PMU_INTEL_ID_NR_PMCS_OFFSET;
+ pmu->pmc_bm = (1U << pmu->nr_pmcs) - 1;
+ pmu->pmc_width = (eax & PMU_INTEL_ID_PMC_WIDTH_MASK)
+ >> PMU_INTEL_ID_PMC_WIDTH_OFFSET;
+ ev_len = (eax & PMU_INTEL_ID_EVLEN_MASK) >> PMU_INTEL_ID_EVLEN_OFFSET;
+ assert(ev_len <= PMU_INTEL_ID_EVLEN_MAX);
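+ /*
+  * CPUID reports a set bit in EBX for each architectural event that is
+  * _not_ available, hence the inversion.
+  */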
+ pmu->events = ~ebx & ((1U << ev_len) - 1);
+
+ pmu_driver.info = pmu_intel_info;
+ pmu_driver.translate = pmu_intel_translate;
+ pmu_driver.alloc = pmu_intel_alloc;
+ pmu_driver.free = pmu_intel_free;
+ pmu_driver.start = pmu_intel_start;
+ pmu_driver.stop = pmu_intel_stop;
+ pmu_driver.read = pmu_intel_read;
+ return 0;
+}
+
+
+/* Common code */
+
+static int __init
+pmu_setup(void)
+{
+ const struct cpu *cpu;
+ int error;
+
+ cpu = cpu_current();
+
+ switch (cpu->vendor_id) {
+ case CPU_VENDOR_AMD:
+ error = pmu_amd_setup(cpu);
+ break;
+ case CPU_VENDOR_INTEL:
+ error = pmu_intel_setup(cpu);
+ break;
+ default:
+ error = ERROR_NODEV;
+ }
+
+ if (error) {
+ return error;
+ }
+
+ pmu_driver.info();
+
+ return 0;
+}
+
+INIT_OP_DEFINE(pmu_setup,
+ INIT_OP_DEP(cpu_setup, true),
+ INIT_OP_DEP(log_setup, true));
+
+int
+pmu_translate(unsigned int *raw_event_idp, unsigned int event_id)
+{
+ if (pmu_driver.translate == NULL)
+ return ERROR_NODEV;
+
+ return pmu_driver.translate(raw_event_idp, event_id);
+}
+
+int
+pmu_alloc(unsigned int *pmc_idp, unsigned int raw_event_id)
+{
+ assert(pmu_driver.alloc != NULL);
+ return pmu_driver.alloc(pmc_idp, raw_event_id);
+}
+
+void
+pmu_free(unsigned int pmc_id)
+{
+ assert(pmu_driver.free != NULL);
+ pmu_driver.free(pmc_id);
+}
+
+void
+pmu_start(unsigned int pmc_id, unsigned int raw_event_id)
+{
+ assert(pmu_driver.start != NULL);
+ assert(!thread_preempt_enabled());
+ pmu_driver.start(pmc_id, raw_event_id);
+}
+
+void
+pmu_stop(unsigned int pmc_id)
+{
+ assert(pmu_driver.stop != NULL);
+ assert(!thread_preempt_enabled());
+ pmu_driver.stop(pmc_id);
+}
+
+uint64_t
+pmu_read(unsigned int pmc_id)
+{
+ assert(pmu_driver.read != NULL);
+ return pmu_driver.read(pmc_id);
+}
diff --git a/arch/x86/machine/pmu.h b/arch/x86/machine/pmu.h
new file mode 100644
index 0000000..7d0e10a
--- /dev/null
+++ b/arch/x86/machine/pmu.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2014 Remy Noel.
+ * Copyright (c) 2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * PMU (Performance Monitoring Unit) interface module.
+ * Provides access to hardware PMCs (Performance Monitoring Counters).
+ *
+ * TODO Overflow handling.
+ */
+
+#ifndef _X86_PMU_H
+#define _X86_PMU_H
+
+#include <stdint.h>
+
+#include <kern/init.h>
+
+/*
+ * AMD raw event IDs.
+ */
+#define PMU_AMD_RE_CYCLE 0
+#define PMU_AMD_RE_INSTRUCTION 1
+#define PMU_AMD_RE_CACHE_REF 2
+#define PMU_AMD_RE_CACHE_MISS 3
+#define PMU_AMD_RE_BRANCH 4
+#define PMU_AMD_RE_BRANCH_MISS 5
+#define PMU_AMD_RE_DCACHE_REF 6
+#define PMU_AMD_RE_DCACHE_MISS 7
+#define PMU_AMD_RE_IFETCH_STALL 8
+
+/*
+ * Intel raw event IDs.
+ */
+#define PMU_INTEL_RE_CYCLE 0
+#define PMU_INTEL_RE_REF_CYCLE 1
+#define PMU_INTEL_RE_INSTRUCTION 2
+#define PMU_INTEL_RE_CACHE_REF 3
+#define PMU_INTEL_RE_CACHE_MISS 4
+#define PMU_INTEL_RE_BRANCH 5
+#define PMU_INTEL_RE_BRANCH_MISS 6
+
+/*
+ * Translate a generic event ID into a raw event ID.
+ *
+ * Translations are invariant.
+ */
+int pmu_translate(unsigned int *raw_event_idp, unsigned int event_id);
+
+/*
+ * Allocate a performance monitoring counter (PMC).
+ *
+ * The allocation is global to the whole system. It is the responsibility
+ * of the caller to serialize calls to this function.
+ */
+int pmu_alloc(unsigned int *pmc_idp, unsigned int raw_event_id);
+
+/*
+ * Free a PMC.
+ *
+ * As for allocations, the caller must serialize calls to this function.
+ */
+void pmu_free(unsigned int pmc_id);
+
+/*
+ * Start a PMC.
+ *
+ * This function only affects the current processor. Preemption must be
+ * disabled when calling this function.
+ */
+void pmu_start(unsigned int pmc_id, unsigned int raw_event_id);
+
+/*
+ * Stop a PMC.
+ *
+ * This function only affects the current processor. Preemption must be
+ * disabled when calling this function.
+ */
+void pmu_stop(unsigned int pmc_id);
+
+/*
+ * Return the current value of the given PMC.
+ */
+uint64_t pmu_read(unsigned int pmc_id);
+
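+/*
+ * Typical usage sketch (illustrative only; error checking is omitted and
+ * the local variable names are hypothetical):
+ *
+ *  unsigned int raw_id, pmc_id;
+ *  uint64_t count;
+ *
+ *  pmu_translate(&raw_id, PERFMON_EV_CYCLE);
+ *  pmu_alloc(&pmc_id, raw_id);
+ *  thread_preempt_disable();
+ *  pmu_start(pmc_id, raw_id);
+ *  ... code being measured ...
+ *  count = pmu_read(pmc_id);
+ *  pmu_stop(pmc_id);
+ *  thread_preempt_enable();
+ *  pmu_free(pmc_id);
+ */
+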
+/*
+ * This init operation provides:
+ * - module fully initialized
+ */
+INIT_OP_DECLARE(pmu_setup);
+
+#endif /* _X86_PMU_H */