-rw-r--r--  arch/x86/machine/cpu.h        2
-rw-r--r--  arch/x86/machine/pmu_amd.c   15
-rw-r--r--  arch/x86/machine/pmu_intel.c 24
-rw-r--r--  kern/perfmon.c               66
-rw-r--r--  test/test_perfmon_thread.c   18
-rw-r--r--  test/test_perfmon_torture.c  52
6 files changed, 107 insertions, 70 deletions
diff --git a/arch/x86/machine/cpu.h b/arch/x86/machine/cpu.h
index 719a6a5..3b7db61 100644
--- a/arch/x86/machine/cpu.h
+++ b/arch/x86/machine/cpu.h
@@ -218,7 +218,7 @@ struct cpu_tss {
uint16_t iobp_base;
} __packed;
-#define CPU_VENDOR_STR_SIZE 13
+#define CPU_VENDOR_STR_SIZE 13
#define CPU_MODEL_NAME_SIZE 49
#define CPU_VENDOR_UNKNOWN 0
diff --git a/arch/x86/machine/pmu_amd.c b/arch/x86/machine/pmu_amd.c
index b8b4c78..49b8d7e 100644
--- a/arch/x86/machine/pmu_amd.c
+++ b/arch/x86/machine/pmu_amd.c
@@ -109,14 +109,16 @@ static void
pmu_amd_info(void)
{
log_info("pmu: driver: amd, nr_pmcs: %u, pmc_width: %u\n",
- PMU_AMD_NR_PMCS, PMU_AMD_PMC_WIDTH);
+ PMU_AMD_NR_PMCS, PMU_AMD_PMC_WIDTH);
}
static int
pmu_amd_translate(unsigned int *raw_event_idp, unsigned int event_id)
{
assert(event_id < ARRAY_SIZE(pmu_amd_generic_events));
+
*raw_event_idp = pmu_amd_generic_events[event_id];
+
return 0;
}
@@ -131,12 +133,14 @@ pmu_amd_alloc(unsigned int *pmc_idp, unsigned int raw_event_id)
pmu = pmu_amd_get();
- if (pmu->pmc_bm == 0)
+ if (pmu->pmc_bm == 0) {
return EAGAIN;
+ }
pmc_id = __builtin_ffs(pmu->pmc_bm) - 1;
pmu->pmc_bm &= ~(1U << pmc_id);
*pmc_idp = pmc_id;
+
return 0;
}
@@ -150,7 +154,9 @@ pmu_amd_free(unsigned int pmc_id)
pmu = pmu_amd_get();
mask = (1U << pmc_id);
+
assert(!(pmu->pmc_bm & mask));
+
pmu->pmc_bm |= mask;
}
@@ -179,6 +185,7 @@ static void
pmu_amd_stop(unsigned int pmc_id)
{
assert(pmc_id < PMU_AMD_NR_PMCS);
+
cpu_set_msr(PMU_AMD_MSR_PERFEVTSEL0 + pmc_id, 0, 0);
}
@@ -190,6 +197,7 @@ pmu_amd_read(unsigned int pmc_id)
assert(pmc_id < PMU_AMD_NR_PMCS);
cpu_get_msr(PMU_AMD_MSR_PERCTR0 + pmc_id, &high, &low);
+
return (((uint64_t)high << 32) | low);
}
@@ -207,8 +215,9 @@ pmu_amd_setup(void)
}
/* Support AMD Family 10h processors and later */
- if (cpu->family < 16)
+ if (cpu->family < 16) {
return ENODEV;
+ }
pmu = pmu_amd_get();
pmu->pmc_bm = (1U << PMU_AMD_NR_PMCS) - 1;
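The allocation scheme reworked above treats pmc_bm as a bitmap of free counters: __builtin_ffs() finds the lowest set bit, which is then cleared to mark the counter busy, and the free path asserts the inverse before setting the bit again. A minimal standalone sketch of the same pattern, assuming a hypothetical pmc_alloc()/pmc_free() pair rather than the kernel's actual interfaces:

#include <assert.h>
#include <errno.h>
#include <stdio.h>

#define NR_PMCS 4

/* Bitmap of free counters: bit n set means counter n is available. */
static unsigned int pmc_bm = (1U << NR_PMCS) - 1;

static int
pmc_alloc(unsigned int *pmc_idp)
{
    unsigned int pmc_id;

    if (pmc_bm == 0) {
        return EAGAIN;                  /* all counters busy */
    }

    pmc_id = __builtin_ffs(pmc_bm) - 1; /* index of lowest free counter */
    pmc_bm &= ~(1U << pmc_id);          /* mark it busy */
    *pmc_idp = pmc_id;

    return 0;
}

static void
pmc_free(unsigned int pmc_id)
{
    unsigned int mask;

    mask = (1U << pmc_id);

    assert(!(pmc_bm & mask));           /* must currently be busy */

    pmc_bm |= mask;
}

int
main(void)
{
    unsigned int id;

    if (pmc_alloc(&id) == 0) {
        printf("allocated pmc %u\n", id);
        pmc_free(id);
    }

    return 0;
}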
diff --git a/arch/x86/machine/pmu_intel.c b/arch/x86/machine/pmu_intel.c
index 4671a9f..20d7aa2 100644
--- a/arch/x86/machine/pmu_intel.c
+++ b/arch/x86/machine/pmu_intel.c
@@ -129,8 +129,9 @@ pmu_intel_info(void)
static int
pmu_intel_translate(unsigned int *raw_event_idp, unsigned event_id)
{
- if (event_id >= ARRAY_SIZE(pmu_intel_raw_events))
+ if (event_id >= ARRAY_SIZE(pmu_intel_raw_events)) {
return EINVAL;
+ }
*raw_event_idp = pmu_intel_raw_events[event_id];
@@ -144,15 +145,18 @@ pmu_intel_alloc(unsigned int *pmc_idp, unsigned int raw_event_id)
unsigned int pmc_id;
unsigned int hw_event_id;
- pmu = pmu_intel_get();
assert(raw_event_id < ARRAY_SIZE(pmu_intel_event_codes));
+
+ pmu = pmu_intel_get();
hw_event_id = pmu_intel_event_codes[raw_event_id].hw_event_id;
- if (!(pmu->events & hw_event_id))
+ if (!(pmu->events & hw_event_id)) {
return EINVAL;
+ }
- if (pmu->pmc_bm == 0)
+ if (pmu->pmc_bm == 0) {
return EAGAIN;
+ }
pmc_id = __builtin_ffs(pmu->pmc_bm) - 1;
pmu->pmc_bm &= ~(1U << pmc_id);
@@ -168,7 +172,9 @@ pmu_intel_free(unsigned int pmc_id)
pmu = pmu_intel_get();
mask = (1U << pmc_id);
+
assert(!(pmu->pmc_bm & mask));
+
pmu->pmc_bm |= mask;
}
@@ -179,6 +185,7 @@ pmu_intel_start(unsigned int pmc_id, unsigned int raw_event_id)
uint32_t evtsel;
assert(raw_event_id < ARRAY_SIZE(pmu_intel_event_codes));
+
code = &pmu_intel_event_codes[raw_event_id];
/* TODO Handle PERFMON_EF_KERN/PERFMON_EF_USER */
@@ -202,6 +209,7 @@ pmu_intel_read(unsigned int pmc_id)
uint32_t high, low;
cpu_get_msr(PMU_INTEL_MSR_PMC0 + pmc_id, &high, &low);
+
return (((uint64_t)high << 32) | low);
}
@@ -220,15 +228,17 @@ pmu_intel_setup(void)
return 0;
}
- if (cpu->cpuid_max_basic < eax)
+ if (cpu->cpuid_max_basic < eax) {
return ENODEV;
+ }
pmu = pmu_intel_get();
cpu_cpuid(&eax, &ebx, &ecx, &edx);
pmu->version = eax & PMU_INTEL_ID_VERSION_MASK;
- if ((pmu->version == 0) || (pmu->version > 3))
+ if ((pmu->version == 0) || (pmu->version > 3)) {
return ENODEV;
+ }
pmu->nr_pmcs = (eax & PMU_INTEL_ID_NR_PMCS_MASK)
>> PMU_INTEL_ID_NR_PMCS_OFFSET;
@@ -236,7 +246,9 @@ pmu_intel_setup(void)
pmu->pmc_width = (eax & PMU_INTEL_ID_PMC_WIDTH_MASK)
>> PMU_INTEL_ID_PMC_WIDTH_OFFSET;
ev_len = (eax & PMU_INTEL_ID_EVLEN_MASK) >> PMU_INTEL_ID_EVLEN_OFFSET;
+
assert(ev_len <= PMU_INTEL_ID_EVLEN_MAX);
+
pmu->events = ~ebx & ((1U << ev_len) - 1);
pmu_driver.info = pmu_intel_info;
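Both pmu_amd_read() and pmu_intel_read() above reassemble a 64-bit counter from the two 32-bit halves produced by cpu_get_msr(). A standalone illustration of that combining step, where msr_read_halves() is a hypothetical stand-in for cpu_get_msr():

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for cpu_get_msr(): returns the two 32-bit
 * halves of a 64-bit model-specific register.
 */
static void
msr_read_halves(uint32_t *highp, uint32_t *lowp)
{
    *highp = 0x00000001;
    *lowp = 0x23456789;
}

static uint64_t
pmc_read(void)
{
    uint32_t high, low;

    msr_read_halves(&high, &low);

    /* Widen before shifting so the high half isn't truncated. */
    return (((uint64_t)high << 32) | low);
}

int
main(void)
{
    printf("counter: %#llx\n", (unsigned long long)pmc_read());
    return 0;
}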
diff --git a/kern/perfmon.c b/kern/perfmon.c
index 9c9c257..d42fd09 100644
--- a/kern/perfmon.c
+++ b/kern/perfmon.c
@@ -217,25 +217,29 @@ perfmon_pmc_alloc(struct perfmon_pmc **pmcp, unsigned int raw_event_id)
size_t i;
int error;
- if (perfmon_pmu.nr_pmcs == ARRAY_SIZE(perfmon_pmu.pmcs))
+ if (perfmon_pmu.nr_pmcs == ARRAY_SIZE(perfmon_pmu.pmcs)) {
return EAGAIN;
+ }
for (i = 0; i < ARRAY_SIZE(perfmon_pmu.pmcs); i++) {
pmc = &perfmon_pmu.pmcs[i];
- if (pmc->nr_refs == 0)
+ if (pmc->nr_refs == 0) {
break;
+ }
}
assert(i < ARRAY_SIZE(perfmon_pmu.pmcs));
error = pmu_driver.alloc(&pmc->id, raw_event_id);
- if (error)
+ if (error) {
return error;
+ }
pmc->raw_event_id = raw_event_id;
perfmon_pmu.nr_pmcs++;
*pmcp = pmc;
+
return 0;
}
@@ -245,14 +249,16 @@ perfmon_pmc_lookup(unsigned int raw_event_id)
struct perfmon_pmc *pmc;
size_t i;
- if (perfmon_pmu.nr_pmcs == 0)
+ if (perfmon_pmu.nr_pmcs == 0) {
return NULL;
+ }
for (i = 0; i < ARRAY_SIZE(perfmon_pmu.pmcs); i++) {
pmc = &perfmon_pmu.pmcs[i];
- if ((pmc->nr_refs != 0) && (pmc->raw_event_id == raw_event_id))
+ if ((pmc->nr_refs != 0) && (pmc->raw_event_id == raw_event_id)) {
return pmc;
+ }
}
return NULL;
@@ -272,8 +278,9 @@ perfmon_pmc_get(struct perfmon_pmc **pmcp, const struct perfmon_event *event)
error = perfmon_translate(&raw_event_id, event->type, event->id);
- if (error)
+ if (error) {
return error;
+ }
spinlock_lock(&perfmon_pmu.lock);
@@ -282,8 +289,9 @@ perfmon_pmc_get(struct perfmon_pmc **pmcp, const struct perfmon_event *event)
if (pmc == NULL) {
error = perfmon_pmc_alloc(&pmc, raw_event_id);
- if (error)
+ if (error) {
goto out;
+ }
}
pmc->nr_refs++;
@@ -291,10 +299,11 @@ perfmon_pmc_get(struct perfmon_pmc **pmcp, const struct perfmon_event *event)
out:
spinlock_unlock(&perfmon_pmu.lock);
- if (error)
+ if (error) {
return error;
-
+ }
*pmcp = pmc;
+
return 0;
}
@@ -361,8 +370,9 @@ perfmon_cpu_pmu_init(struct perfmon_cpu_pmu *cpu_pmu)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(cpu_pmu->pmcs); i++)
+ for (i = 0; i < ARRAY_SIZE(cpu_pmu->pmcs); i++) {
cpu_pmu->pmcs[i].nr_refs = 0;
+ }
}
static inline struct perfmon_cpu_pmc *
@@ -438,17 +448,20 @@ perfmon_setup(void)
spinlock_init(&perfmon_pmu.lock);
perfmon_pmu.nr_pmcs = 0;
- for (i = 0; i < ARRAY_SIZE(perfmon_pmu.pmcs); i++)
+ for (i = 0; i < ARRAY_SIZE(perfmon_pmu.pmcs); i++) {
perfmon_pmu.pmcs[i].nr_refs = 0;
+ }
- for (i = 0; i < cpu_count(); i++)
+ for (i = 0; i < cpu_count(); i++) {
perfmon_cpu_pmu_init(percpu_ptr(perfmon_cpu_pmu, i));
+ }
for (i = 0; i < cpu_count(); i++) {
grouplist = perfmon_grouplist_create();
- if (grouplist == NULL)
+ if (grouplist == NULL) {
panic("perfmon: unable to create cpu grouplists");
+ }
percpu_var(perfmon_cpu_grouplist, i) = grouplist;
}
@@ -495,8 +508,9 @@ perfmon_event_create(struct perfmon_event **eventp, unsigned int type,
event = kmem_alloc(sizeof(*event));
- if (event == NULL)
+ if (event == NULL) {
return ENOMEM;
+ }
event->count = 0;
list_node_init(&event->node);
@@ -569,8 +583,9 @@ perfmon_group_create(struct perfmon_group **groupp)
group = kmem_alloc(sizeof(*group));
- if (group == NULL)
+ if (group == NULL) {
return ENOMEM;
+ }
list_init(&group->events);
spinlock_init(&group->lock);
@@ -629,8 +644,9 @@ perfmon_group_attach_pmu(struct perfmon_group *group)
list_for_each_entry(&group->events, event, node) {
error = perfmon_pmc_get(&pmc, event);
- if (error)
+ if (error) {
goto error_pmc;
+ }
event->pmc_index = perfmon_pmc_index(pmc);
}
@@ -639,8 +655,9 @@ perfmon_group_attach_pmu(struct perfmon_group *group)
error_pmc:
list_for_each_entry(&group->events, tmp, node) {
- if (tmp == event)
+ if (tmp == event) {
break;
+ }
perfmon_pmc_put(perfmon_pmc_from_index(tmp->pmc_index));
}
@@ -655,8 +672,9 @@ perfmon_group_detach_pmu(struct perfmon_group *group)
assert(perfmon_group_attached(group));
- list_for_each_entry(&group->events, event, node)
+ list_for_each_entry(&group->events, event, node) {
perfmon_pmc_put(perfmon_pmc_from_index(event->pmc_index));
+ }
}
int
@@ -670,8 +688,9 @@ perfmon_group_attach(struct perfmon_group *group, struct thread *thread)
error = perfmon_group_attach_pmu(group);
- if (error)
+ if (error) {
return error;
+ }
thread_ref(thread);
group->thread = thread;
@@ -698,8 +717,9 @@ perfmon_group_attach_cpu(struct perfmon_group *group, unsigned int cpu)
error = perfmon_group_attach_pmu(group);
- if (error)
+ if (error) {
return error;
+ }
group->cpu = cpu;
group->type = PERFMON_GT_CPU;
@@ -750,8 +770,9 @@ perfmon_group_detach(struct perfmon_group *group)
goto out;
}
- if (!perfmon_group_attached(group))
+ if (!perfmon_group_attached(group)) {
goto out;
+ }
perfmon_group_detach_pmu(group);
list_remove(&group->node);
@@ -1092,8 +1113,9 @@ perfmon_thread_init(struct thread *thread)
grouplist = perfmon_grouplist_create();
- if (grouplist == NULL)
+ if (grouplist == NULL) {
return ENOMEM;
+ }
thread->perfmon_groups = grouplist;
return 0;
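The error_pmc label in perfmon_group_attach_pmu() above is a textbook partial-unwind path: when acquiring the n-th resource fails, the code walks the same sequence again and releases everything acquired before the failing element. A reduced sketch of the idiom over an array, where event_acquire() and event_release() are hypothetical stand-ins for perfmon_pmc_get() and perfmon_pmc_put():

#include <errno.h>
#include <stdio.h>

#define NR_EVENTS 4

/* Hypothetical acquire/release pair; the third acquisition fails. */
static int
event_acquire(int i)
{
    return (i == 2) ? EAGAIN : 0;
}

static void
event_release(int i)
{
    printf("released event %d\n", i);
}

static int
attach_all(void)
{
    int i, error;

    for (i = 0; i < NR_EVENTS; i++) {
        error = event_acquire(i);

        if (error) {
            goto error_unwind;
        }
    }

    return 0;

error_unwind:
    /* Release only what was acquired before the failure. */
    while (i-- > 0) {
        event_release(i);
    }

    return error;
}

int
main(void)
{
    return attach_all();
}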
diff --git a/test/test_perfmon_thread.c b/test/test_perfmon_thread.c
index 42ef617..e78fab9 100644
--- a/test/test_perfmon_thread.c
+++ b/test/test_perfmon_thread.c
@@ -118,13 +118,11 @@ x15_test_main_run(void *arg)
perfmon_group_update(cpu_group);
cpu_count2 = perfmon_event_read(cpu_ev_cycle);
- if (thread_count1 == thread_count2)
- {
+ if (thread_count1 == thread_count2) {
panic("not monitoring thread after monitoring start \n"
"stayed at %llu cycles\n", thread_count1);
}
- if (cpu_count1 == cpu_count2)
- {
+ if (cpu_count1 == cpu_count2) {
panic("not monitoring cpu after monitoring start \n"
"stayed at %llu cycles\n", cpu_count1);
}
@@ -154,13 +152,11 @@ x15_test_main_run(void *arg)
perfmon_group_update(cpu_group);
cpu_count2 = perfmon_event_read(cpu_ev_cycle);
- if (thread_count1 == thread_count2)
- {
+ if (thread_count1 == thread_count2) {
panic("not monitoring thread after thread re-schedueling\n"
"stayed at %llu cycles\n", thread_count1);
}
- if (cpu_count1 == cpu_count2)
- {
+ if (cpu_count1 == cpu_count2) {
panic("not monitoring cpu after thread got re-scheduled \n"
"stayed at %llu cycles\n", cpu_count1);
}
@@ -215,13 +211,11 @@ x15_test_control_run(void *arg)
perfmon_group_update(cpu_group);
cpu_count2 = perfmon_event_read(cpu_ev_cycle);
- if (thread_count1 != thread_count2)
- {
+ if (thread_count1 != thread_count2) {
panic("still monitoring while thread is unschedueled\n"
"gone from %llu to %llu cycles\n", thread_count1, thread_count2);
}
- if (cpu_count1 == cpu_count2)
- {
+ if (cpu_count1 == cpu_count2) {
panic("not monitoring cpu after thread got unscheduled \n"
"stayed at %llu cycles\n", cpu_count1);
}
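Each check in this test follows one pattern: sample an event counter, let the monitored code run, sample again, and treat an unchanged value as evidence that monitoring stopped. A condensed sketch of that check, with event_read() as a hypothetical stand-in for the perfmon_group_update()/perfmon_event_read() pair:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical counter source; pretends the hardware counter advances. */
static uint64_t
event_read(void)
{
    static uint64_t cycles;

    cycles += 1000;
    return cycles;
}

int
main(void)
{
    uint64_t count1, count2;

    count1 = event_read();
    /* ... monitored work would run here ... */
    count2 = event_read();

    if (count1 == count2) {
        fprintf(stderr, "not monitoring: stayed at %llu cycles\n",
                (unsigned long long)count1);
        exit(EXIT_FAILURE);
    }

    printf("monitoring ok: %llu -> %llu cycles\n",
           (unsigned long long)count1, (unsigned long long)count2);

    return 0;
}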
diff --git a/test/test_perfmon_torture.c b/test/test_perfmon_torture.c
index f7969fd..763a5b5 100644
--- a/test/test_perfmon_torture.c
+++ b/test/test_perfmon_torture.c
@@ -140,32 +140,32 @@ test_thread_toggle_state(struct test_thread *thread,
group = thread->group;
switch (thread->state) {
- case TEST_RUNNING:
- thread->state = TEST_STOPPING;
- stats->num_thread_started--;
- break;
- case TEST_STOPPED:
- /* restart thread and attach it to the group of the previous thread.
- */
- if (thread->monitored) {
- test_thread_toggle_monitor(thread, stats);
- }
- error = perfmon_group_detach(group);
- error_check(error, "perfmon_group_detach");
- thread_attr_init(&attr, thread->thread->name);
- thread_attr_set_detached(&attr);
- thread->state = TEST_LAUNCHED;
- error = thread_create(&thread->thread, &attr, test_thread_run,
- thread);
- error_check(error, "thread_recreate monitored");
- error = perfmon_group_attach(group, thread->thread);
- error_check(error, "perfmon_group_attach");
- stats->num_thread_start++;
- stats->num_thread_started++;
- break;
- default:
- /* Do nothing if the thread is not in a stable state */
- break;
+ case TEST_RUNNING:
+ thread->state = TEST_STOPPING;
+ stats->num_thread_started--;
+ break;
+ case TEST_STOPPED:
+ /* restart thread and attach it to the group of the previous thread.
+ */
+ if (thread->monitored) {
+ test_thread_toggle_monitor(thread, stats);
+ }
+ error = perfmon_group_detach(group);
+ error_check(error, "perfmon_group_detach");
+ thread_attr_init(&attr, thread->thread->name);
+ thread_attr_set_detached(&attr);
+ thread->state = TEST_LAUNCHED;
+ error = thread_create(&thread->thread, &attr, test_thread_run,
+ thread);
+ error_check(error, "thread_recreate monitored");
+ error = perfmon_group_attach(group, thread->thread);
+ error_check(error, "perfmon_group_attach");
+ stats->num_thread_start++;
+ stats->num_thread_started++;
+ break;
+ default:
+ /* Do nothing if the thread is not in a stable state */
+ break;
}
}
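The re-indented switch drives a small thread lifecycle state machine: a RUNNING thread is asked to stop, a STOPPED thread is detached from its group, recreated and re-attached, and a thread caught mid-transition is left alone. A reduced sketch of that dispatch; the state names mirror the test's, everything else is illustrative:

#include <stdio.h>

enum test_state {
    TEST_LAUNCHED,
    TEST_RUNNING,
    TEST_STOPPING,
    TEST_STOPPED,
};

static void
toggle_state(enum test_state *statep)
{
    switch (*statep) {
    case TEST_RUNNING:
        /* Ask the thread to stop; it reaches TEST_STOPPED on its own. */
        *statep = TEST_STOPPING;
        break;
    case TEST_STOPPED:
        /* Group detach, thread recreation and re-attach would go here. */
        *statep = TEST_LAUNCHED;
        break;
    default:
        /* Do nothing if the thread is not in a stable state. */
        break;
    }
}

int
main(void)
{
    enum test_state state = TEST_RUNNING;

    toggle_state(&state);
    printf("state: %d\n", state);

    return 0;
}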