diff options
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/Makefile             |   3 +
-rw-r--r--  arch/x86/machine/cpu.c        |  52 ++++-
-rw-r--r--  arch/x86/machine/cpu.h        |  23 ++-
-rw-r--r--  arch/x86/machine/pmu_amd.c    | 240 ++++++++++++++++++
-rw-r--r--  arch/x86/machine/pmu_intel.c  | 268 ++++++++++++++++++++
5 files changed, 574 insertions(+), 12 deletions(-)
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 9866d93..002ed44 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -61,3 +61,6 @@ x15_SOURCES-y += \ arch/x86/machine/trap_asm.S \ arch/x86/machine/trap.c \ arch/x86/machine/uart.c + +x15_SOURCES-$(CONFIG_PERFMON) += arch/x86/machine/pmu_amd.c +x15_SOURCES-$(CONFIG_PERFMON) += arch/x86/machine/pmu_intel.c diff --git a/arch/x86/machine/cpu.c b/arch/x86/machine/cpu.c index 98d3680..54a2676 100644 --- a/arch/x86/machine/cpu.c +++ b/arch/x86/machine/cpu.c @@ -69,6 +69,11 @@ #define CPU_INVALID_APIC_ID ((unsigned int)-1) +struct cpu_vendor { + unsigned int id; + const char *str; +}; + /* * MP related CMOS ports, registers and values. */ @@ -173,6 +178,11 @@ cpu_delay(unsigned long usecs) } while (total > 0); } +static const struct cpu_vendor cpu_vendors[] = { + { CPU_VENDOR_INTEL, "GenuineIntel" }, + { CPU_VENDOR_AMD, "AuthenticAMD" }, +}; + void * __init cpu_get_boot_stack(void) { @@ -182,10 +192,9 @@ cpu_get_boot_stack(void) static void __init cpu_preinit(struct cpu *cpu, unsigned int id, unsigned int apic_id) { + memset(cpu, 0, sizeof(*cpu)); cpu->id = id; cpu->apic_id = apic_id; - cpu->state = CPU_STATE_OFF; - cpu->boot_stack = NULL; } static void @@ -430,6 +439,31 @@ cpu_load_idt(const void *idt, size_t size) asm volatile("lidt %0" : : "m" (idtr)); } +static const struct cpu_vendor * +cpu_vendor_lookup(const char *str) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(cpu_vendors); i++) + if (strcmp(str, cpu_vendors[i].str) == 0) + return &cpu_vendors[i]; + + return NULL; +} + +static void __init +cpu_init_vendor_id(struct cpu *cpu) +{ + const struct cpu_vendor *vendor; + + vendor = cpu_vendor_lookup(cpu->vendor_str); + + if (vendor == NULL) + return; + + cpu->vendor_id = vendor->id; +} + /* * Initialize the given cpu structure for the current processor. 
*/ @@ -456,10 +490,12 @@ cpu_init(struct cpu *cpu) eax = 0; cpu_cpuid(&eax, &ebx, &ecx, &edx); max_basic = eax; - memcpy(cpu->vendor_id, &ebx, sizeof(ebx)); - memcpy(cpu->vendor_id + 4, &edx, sizeof(edx)); - memcpy(cpu->vendor_id + 8, &ecx, sizeof(ecx)); - cpu->vendor_id[sizeof(cpu->vendor_id) - 1] = '\0'; + cpu->cpuid_max_basic = max_basic; + memcpy(cpu->vendor_str, &ebx, sizeof(ebx)); + memcpy(cpu->vendor_str + 4, &edx, sizeof(edx)); + memcpy(cpu->vendor_str + 8, &ecx, sizeof(ecx)); + cpu->vendor_str[sizeof(cpu->vendor_str) - 1] = '\0'; + cpu_init_vendor_id(cpu); /* Some fields are only initialized if supported by the processor */ cpu->model_name[0] = '\0'; @@ -498,6 +534,8 @@ cpu_init(struct cpu *cpu) max_extended = eax; } + cpu->cpuid_max_extended = max_extended; + if (max_extended < 0x80000001) { cpu->features3 = 0; cpu->features4 = 0; @@ -617,7 +655,7 @@ void cpu_log_info(const struct cpu *cpu) { log_info("cpu%u: %s, type %u, family %u, model %u, stepping %u", - cpu->id, cpu->vendor_id, cpu->type, cpu->family, cpu->model, + cpu->id, cpu->vendor_str, cpu->type, cpu->family, cpu->model, cpu->stepping); if (strlen(cpu->model_name) > 0) { diff --git a/arch/x86/machine/cpu.h b/arch/x86/machine/cpu.h index e1f8b8e..3b7db61 100644 --- a/arch/x86/machine/cpu.h +++ b/arch/x86/machine/cpu.h @@ -218,9 +218,13 @@ struct cpu_tss { uint16_t iobp_base; } __packed; -#define CPU_VENDOR_ID_SIZE 13 +#define CPU_VENDOR_STR_SIZE 13 #define CPU_MODEL_NAME_SIZE 49 +#define CPU_VENDOR_UNKNOWN 0 +#define CPU_VENDOR_INTEL 1 +#define CPU_VENDOR_AMD 2 + /* * CPU states. 
*/ @@ -230,8 +234,11 @@ struct cpu_tss { struct cpu { unsigned int id; unsigned int apic_id; - char vendor_id[CPU_VENDOR_ID_SIZE]; + char vendor_str[CPU_VENDOR_STR_SIZE]; char model_name[CPU_MODEL_NAME_SIZE]; + unsigned int cpuid_max_basic; + unsigned int cpuid_max_extended; + unsigned int vendor_id; unsigned int type; unsigned int family; unsigned int model; @@ -537,16 +544,22 @@ cpu_cpuid(unsigned int *eax, unsigned int *ebx, unsigned int *ecx, : : "memory"); } +/* + * Implies a compiler barrier. + */ static __always_inline void cpu_get_msr(uint32_t msr, uint32_t *high, uint32_t *low) { - asm volatile("rdmsr" : "=a" (*low), "=d" (*high) : "c" (msr)); + asm volatile("rdmsr" : "=a" (*low), "=d" (*high) : "c" (msr) : "memory"); } -static __always_inline void +/* + * Implies a full memory barrier. + */ +static inline void cpu_set_msr(uint32_t msr, uint32_t high, uint32_t low) { - asm volatile("wrmsr" : : "c" (msr), "a" (low), "d" (high)); + asm volatile("wrmsr" : : "c" (msr), "a" (low), "d" (high) : "memory"); } static __always_inline uint64_t diff --git a/arch/x86/machine/pmu_amd.c b/arch/x86/machine/pmu_amd.c new file mode 100644 index 0000000..49b8d7e --- /dev/null +++ b/arch/x86/machine/pmu_amd.c @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2014 Remy Noel. + * Copyright (c) 2014 Richard Braun. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ * + * AMD PMU driver module. + */ + +#include <stdint.h> + +#include <include/assert.h> +#include <kern/error.h> +#include <kern/init.h> +#include <kern/log.h> +#include <machine/cpu.h> +#include <kern/perfmon.h> + +/* + * AMD raw event IDs. + */ +#define PMU_AMD_RE_CYCLE 0 +#define PMU_AMD_RE_INSTRUCTION 1 +#define PMU_AMD_RE_CACHE_REF 2 +#define PMU_AMD_RE_CACHE_MISS 3 +#define PMU_AMD_RE_BRANCH 4 +#define PMU_AMD_RE_BRANCH_MISS 5 +#define PMU_AMD_RE_DCACHE_REF 6 +#define PMU_AMD_RE_DCACHE_MISS 7 +#define PMU_AMD_RE_IFETCH_STALL 8 + + +/* + * PMU MSR addresses + */ +#define PMU_AMD_MSR_PERFEVTSEL0 0xc0010000 +#define PMU_AMD_MSR_PERCTR0 0xc0010004 + +/* + * Event Select Register addresses + */ +#define PMU_AMD_EVTSEL_USR 0x00010000 +#define PMU_AMD_EVTSEL_OS 0x00020000 +#define PMU_AMD_EVTSEL_EN 0x00400000 + +/* + * AMD PMU properties seem to be identical across all processors despite + * many of them being implementation-specific. + */ +#define PMU_AMD_NR_PMCS 4 +#define PMU_AMD_PMC_WIDTH 48 + +struct pmu_amd { + unsigned int pmc_bm; +}; + +struct pmu_amd_event_code { + unsigned short event_select; + unsigned short umask; +}; + +static struct pmu_amd pmu_amd; + +/* + * TODO Per-family/model event availability database. 
+ */ +static const struct pmu_amd_event_code pmu_amd_event_codes[] = { + [PMU_AMD_RE_CYCLE] = { 0x76, 0x00 }, + [PMU_AMD_RE_INSTRUCTION] = { 0xc0, 0x00 }, + [PMU_AMD_RE_CACHE_REF] = { 0x80, 0x00 }, + [PMU_AMD_RE_CACHE_MISS] = { 0x81, 0x00 }, + [PMU_AMD_RE_BRANCH] = { 0xc2, 0x00 }, + [PMU_AMD_RE_BRANCH_MISS] = { 0xc3, 0x00 }, + [PMU_AMD_RE_DCACHE_REF] = { 0x40, 0x00 }, + [PMU_AMD_RE_DCACHE_MISS] = { 0x41, 0x00 }, + [PMU_AMD_RE_IFETCH_STALL] = { 0x87, 0x00 }, +}; + +#define PMU_AMD_RE_INVALID ((unsigned int)-1) + +static const unsigned int pmu_amd_generic_events[] = { + [PERFMON_EV_CYCLE] = PMU_AMD_RE_CYCLE, + [PERFMON_EV_REF_CYCLE] = PMU_AMD_RE_INVALID, + [PERFMON_EV_INSTRUCTION] = PMU_AMD_RE_INSTRUCTION, + [PERFMON_EV_CACHE_REF] = PMU_AMD_RE_CACHE_REF, + [PERFMON_EV_CACHE_MISS] = PMU_AMD_RE_CACHE_MISS, + [PERFMON_EV_BRANCH] = PMU_AMD_RE_BRANCH, + [PERFMON_EV_BRANCH_MISS] = PMU_AMD_RE_BRANCH_MISS, +}; + +static struct pmu_amd * +pmu_amd_get(void) +{ + return &pmu_amd; +} + +static void +pmu_amd_info(void) +{ + log_info("pmu: driver: amd, nr_pmcs: %u, pmc_width: %u\n", + PMU_AMD_NR_PMCS, PMU_AMD_PMC_WIDTH); +} + +static int +pmu_amd_translate(unsigned int *raw_event_idp, unsigned int event_id) +{ + assert(event_id < ARRAY_SIZE(pmu_amd_generic_events)); + + *raw_event_idp = pmu_amd_generic_events[event_id]; + + return 0; +} + +static int +pmu_amd_alloc(unsigned int *pmc_idp, unsigned int raw_event_id) +{ + struct pmu_amd *pmu; + unsigned int pmc_id; + + /* TODO Check raw event availability */ + (void)raw_event_id; + + pmu = pmu_amd_get(); + + if (pmu->pmc_bm == 0) { + return EAGAIN; + } + + pmc_id = __builtin_ffs(pmu->pmc_bm) - 1; + pmu->pmc_bm &= ~(1U << pmc_id); + *pmc_idp = pmc_id; + + return 0; +} + +static void +pmu_amd_free(unsigned int pmc_id) +{ + struct pmu_amd *pmu; + unsigned int mask; + + assert(pmc_id < PMU_AMD_NR_PMCS); + + pmu = pmu_amd_get(); + mask = (1U << pmc_id); + + assert(!(pmu->pmc_bm & mask)); + + pmu->pmc_bm |= mask; +} + +static void 
+pmu_amd_start(unsigned int pmc_id, unsigned int raw_event_id) +{ + const struct pmu_amd_event_code *code; + uint32_t high, low; + + assert(pmc_id < PMU_AMD_NR_PMCS); + assert(raw_event_id < ARRAY_SIZE(pmu_amd_event_codes)); + + code = &pmu_amd_event_codes[raw_event_id]; + + /* TODO Handle PERFMON_EF_KERN/PERFMON_EF_USER */ + high = code->event_select >> 8; + low = PMU_AMD_EVTSEL_EN + | PMU_AMD_EVTSEL_OS + | PMU_AMD_EVTSEL_USR + | (code->umask << 8) + | (code->event_select & 0xff); + cpu_set_msr(PMU_AMD_MSR_PERFEVTSEL0 + pmc_id, high, low); +} + +static void +pmu_amd_stop(unsigned int pmc_id) +{ + assert(pmc_id < PMU_AMD_NR_PMCS); + + cpu_set_msr(PMU_AMD_MSR_PERFEVTSEL0 + pmc_id, 0, 0); +} + +static uint64_t +pmu_amd_read(unsigned int pmc_id) +{ + uint32_t high, low; + + assert(pmc_id < PMU_AMD_NR_PMCS); + + cpu_get_msr(PMU_AMD_MSR_PERCTR0 + pmc_id, &high, &low); + + return (((uint64_t)high << 32) | low); +} + +static int __init +pmu_amd_setup(void) +{ + const struct cpu *cpu; + struct pmu_amd *pmu; + struct perfmon_pmu_ops pmu_driver; + + cpu = cpu_current(); + + if (cpu->vendor_id != CPU_VENDOR_AMD) { + return 0; + } + + /* Support AMD Family 10h processors and later */ + if (cpu->family < 16) { + return ENODEV; + } + + pmu = pmu_amd_get(); + pmu->pmc_bm = (1U << PMU_AMD_NR_PMCS) - 1; + + pmu_driver.info = pmu_amd_info; + pmu_driver.translate = pmu_amd_translate; + pmu_driver.alloc = pmu_amd_alloc; + pmu_driver.free = pmu_amd_free; + pmu_driver.start = pmu_amd_start; + pmu_driver.stop = pmu_amd_stop; + pmu_driver.read = pmu_amd_read; + + return perfmon_pmu_register(&pmu_driver); +} + +INIT_OP_DEFINE(pmu_amd_setup, + INIT_OP_DEP(perfmon_bootstrap, true), + INIT_OP_DEP(cpu_setup, true), + INIT_OP_DEP(log_setup, true)); + diff --git a/arch/x86/machine/pmu_intel.c b/arch/x86/machine/pmu_intel.c new file mode 100644 index 0000000..20d7aa2 --- /dev/null +++ b/arch/x86/machine/pmu_intel.c @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2014 Remy Noel. 
+ * Copyright (c) 2014 Richard Braun. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * INTEL PMU driver module. + */ + +#include <stdint.h> + +#include <include/assert.h> +#include <kern/error.h> +#include <kern/init.h> +#include <kern/log.h> +#include <machine/cpu.h> +#include <kern/perfmon.h> + +/* + * Intel raw event IDs. + */ +#define PMU_INTEL_RE_CYCLE 0 +#define PMU_INTEL_RE_REF_CYCLE 1 +#define PMU_INTEL_RE_INSTRUCTION 2 +#define PMU_INTEL_RE_CACHE_REF 3 +#define PMU_INTEL_RE_CACHE_MISS 4 +#define PMU_INTEL_RE_BRANCH 5 +#define PMU_INTEL_RE_BRANCH_MISS 6 + +/* + * PMU MSR addresses + */ +#define PMU_INTEL_MSR_PMC0 0x0c1 +#define PMU_INTEL_MSR_EVTSEL0 0x186 + +/* + * Event Select Register addresses + */ +#define PMU_INTEL_EVTSEL_USR 0x00010000 +#define PMU_INTEL_EVTSEL_OS 0x00020000 +#define PMU_INTEL_EVTSEL_EN 0x00400000 + +#define PMU_INTEL_ID_VERSION_MASK 0x000000ff +#define PMU_INTEL_ID_NR_PMCS_MASK 0x0000ff00 +#define PMU_INTEL_ID_NR_PMCS_OFFSET 8 +#define PMU_INTEL_ID_PMC_WIDTH_MASK 0x00ff0000 +#define PMU_INTEL_ID_PMC_WIDTH_OFFSET 16 +#define PMU_INTEL_ID_EVLEN_MASK 0xff000000 +#define PMU_INTEL_ID_EVLEN_OFFSET 24 +#define PMU_INTEL_ID_EVLEN_MAX 7 + +struct pmu_intel { + unsigned int version; + unsigned int nr_pmcs; + unsigned int pmc_bm; + unsigned int pmc_width; + unsigned int events; +}; + +struct pmu_intel_event_code { + unsigned int 
hw_event_id; + unsigned short event_select; + unsigned short umask; +}; + +static struct pmu_intel pmu_intel; + +/* + * Intel hardware events. + */ +#define PMU_INTEL_EVENT_CYCLE 0x01 +#define PMU_INTEL_EVENT_INSTRUCTION 0x02 +#define PMU_INTEL_EVENT_REF_CYCLE 0x04 +#define PMU_INTEL_EVENT_CACHE_REF 0x08 +#define PMU_INTEL_EVENT_CACHE_MISS 0x10 +#define PMU_INTEL_EVENT_BRANCH 0x20 +#define PMU_INTEL_EVENT_BRANCH_MISS 0x40 + +static const unsigned int pmu_intel_raw_events[] = { + [PERFMON_EV_CYCLE] = PMU_INTEL_RE_CYCLE, + [PERFMON_EV_REF_CYCLE] = PMU_INTEL_RE_REF_CYCLE, + [PERFMON_EV_INSTRUCTION] = PMU_INTEL_RE_INSTRUCTION, + [PERFMON_EV_CACHE_REF] = PMU_INTEL_RE_CACHE_REF, + [PERFMON_EV_CACHE_MISS] = PMU_INTEL_RE_CACHE_MISS, + [PERFMON_EV_BRANCH] = PMU_INTEL_RE_BRANCH, + [PERFMON_EV_BRANCH_MISS] = PMU_INTEL_RE_BRANCH_MISS, +}; + +static const struct pmu_intel_event_code pmu_intel_event_codes[] = { + [PMU_INTEL_RE_CYCLE] = { PMU_INTEL_EVENT_CYCLE, 0x3c, 0x00 }, + [PMU_INTEL_RE_REF_CYCLE] = { PMU_INTEL_EVENT_REF_CYCLE, 0x3c, 0x01 }, + [PMU_INTEL_RE_INSTRUCTION] = { PMU_INTEL_EVENT_INSTRUCTION, 0xc0, 0x00 }, + [PMU_INTEL_RE_CACHE_REF] = { PMU_INTEL_EVENT_CACHE_REF, 0x2e, 0x4f }, + [PMU_INTEL_RE_CACHE_MISS] = { PMU_INTEL_EVENT_CACHE_MISS, 0x2e, 0x41 }, + [PMU_INTEL_RE_BRANCH] = { PMU_INTEL_EVENT_BRANCH, 0xc4, 0x00 }, + [PMU_INTEL_RE_BRANCH_MISS] = { PMU_INTEL_EVENT_BRANCH_MISS, 0xc5, 0x00 }, +}; + +static struct pmu_intel * +pmu_intel_get(void) +{ + return &pmu_intel; +} + +static void +pmu_intel_info(void) +{ + const struct pmu_intel *pmu; + unsigned int nr_events; + + pmu = pmu_intel_get(); + nr_events = __builtin_popcount(pmu->events); + log_info("pmu: driver: intel, architectural v1\n" + "pmu: nr_pmcs: %u, pmc_width: %u, events: %#x, nr_events: %u\n", + pmu->nr_pmcs, pmu->pmc_width, pmu->events, nr_events); +} + +static int +pmu_intel_translate(unsigned int *raw_event_idp, unsigned event_id) +{ + if (event_id >= ARRAY_SIZE(pmu_intel_raw_events)) { + return EINVAL; 
+ } + + *raw_event_idp = pmu_intel_raw_events[event_id]; + + return 0; +} + +static int +pmu_intel_alloc(unsigned int *pmc_idp, unsigned int raw_event_id) +{ + struct pmu_intel *pmu; + unsigned int pmc_id; + unsigned int hw_event_id; + + assert(raw_event_id < ARRAY_SIZE(pmu_intel_event_codes)); + + pmu = pmu_intel_get(); + hw_event_id = pmu_intel_event_codes[raw_event_id].hw_event_id; + + if (!(pmu->events & hw_event_id)) { + return EINVAL; + } + + if (pmu->pmc_bm == 0) { + return EAGAIN; + } + + pmc_id = __builtin_ffs(pmu->pmc_bm) - 1; + pmu->pmc_bm &= ~(1U << pmc_id); + *pmc_idp = pmc_id; + return 0; +} + +static void +pmu_intel_free(unsigned int pmc_id) +{ + struct pmu_intel *pmu; + unsigned int mask; + + pmu = pmu_intel_get(); + mask = (1U << pmc_id); + + assert(!(pmu->pmc_bm & mask)); + + pmu->pmc_bm |= mask; +} + +static void +pmu_intel_start(unsigned int pmc_id, unsigned int raw_event_id) +{ + const struct pmu_intel_event_code *code; + uint32_t evtsel; + + assert(raw_event_id < ARRAY_SIZE(pmu_intel_event_codes)); + + code = &pmu_intel_event_codes[raw_event_id]; + + /* TODO Handle PERFMON_EF_KERN/PERFMON_EF_USER */ + evtsel = PMU_INTEL_EVTSEL_EN + | PMU_INTEL_EVTSEL_OS + | PMU_INTEL_EVTSEL_USR + | (code->umask << 8) + | code->event_select; + cpu_set_msr(PMU_INTEL_MSR_EVTSEL0 + pmc_id, 0, evtsel); +} + +static void +pmu_intel_stop(unsigned int pmc_id) +{ + cpu_set_msr(PMU_INTEL_MSR_EVTSEL0 + pmc_id, 0, 0); +} + +static uint64_t +pmu_intel_read(unsigned int pmc_id) +{ + uint32_t high, low; + + cpu_get_msr(PMU_INTEL_MSR_PMC0 + pmc_id, &high, &low); + + return (((uint64_t)high << 32) | low); +} + +static int __init +pmu_intel_setup(void) +{ + const struct cpu *cpu; + struct pmu_intel *pmu; + struct perfmon_pmu_ops pmu_driver; + unsigned int eax, ebx, ecx, edx, ev_len; + + cpu = cpu_current(); + eax = 0xa; + + if (cpu->vendor_id != CPU_VENDOR_INTEL) { + return 0; + } + + if (cpu->cpuid_max_basic < eax) { + return ENODEV; + } + + pmu = pmu_intel_get(); + 
cpu_cpuid(&eax, &ebx, &ecx, &edx); + pmu->version = eax & PMU_INTEL_ID_VERSION_MASK; + + if ((pmu->version == 0) || (pmu->version > 3)) { + return ENODEV; + } + + pmu->nr_pmcs = (eax & PMU_INTEL_ID_NR_PMCS_MASK) + >> PMU_INTEL_ID_NR_PMCS_OFFSET; + pmu->pmc_bm = (1U << pmu->nr_pmcs ) - 1; + pmu->pmc_width = (eax & PMU_INTEL_ID_PMC_WIDTH_MASK) + >> PMU_INTEL_ID_PMC_WIDTH_OFFSET; + ev_len = (eax & PMU_INTEL_ID_EVLEN_MASK) >> PMU_INTEL_ID_EVLEN_OFFSET; + + assert(ev_len <= PMU_INTEL_ID_EVLEN_MAX); + + pmu->events = ~ebx & ((1U << ev_len) - 1); + + pmu_driver.info = pmu_intel_info; + pmu_driver.translate = pmu_intel_translate; + pmu_driver.alloc = pmu_intel_alloc; + pmu_driver.free = pmu_intel_free; + pmu_driver.start = pmu_intel_start; + pmu_driver.stop = pmu_intel_stop; + pmu_driver.read = pmu_intel_read; + + return perfmon_pmu_register(&pmu_driver); +} + +INIT_OP_DEFINE(pmu_intel_setup, + INIT_OP_DEP(perfmon_bootstrap, true), + INIT_OP_DEP(cpu_setup, true), + INIT_OP_DEP(log_setup, true)); |