author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-06-10 21:34:59 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-06-11 02:39:02 +0200
commit		9e350de37ac9607012fcf9c5314a28fbddf8f43c (patch)
tree		d0f311bcf49d887e6d488ea72b2913cb00eaf910 /arch/x86
parent		df1a132bf3d3508f863336c80a27806a2ac947e0 (diff)
perf_counter: Accurate period data
We currently log hw.sample_period for PERF_SAMPLE_PERIOD; however, this is incorrect. When we adjust the period, it only takes effect on the next cycle, yet we report it for the current cycle. So if we adjust the period on every cycle, we are always wrong.

Solve this by keeping track of the last_period.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
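To make the ordering concrete, the following is a minimal user-space sketch of the idea, not kernel code; struct sim_counter and overflow() are hypothetical names. A period adjusted at overflow time only governs the next interval, so the sample for the interval that just elapsed must carry the previously programmed period, which is what last_period records.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for the counter's period bookkeeping. */
struct sim_counter {
	uint64_t sample_period;	/* period to program for the next interval */
	uint64_t last_period;	/* period that covered the elapsed interval */
};

/* Called when the counter overflows: emit a sample, then re-arm. */
static void overflow(struct sim_counter *c, uint64_t next_period)
{
	/*
	 * Correct: report the period that actually covered the interval
	 * that just ended. The buggy variant printed c->sample_period,
	 * i.e. the value about to be programmed for the next interval.
	 */
	printf("PERF_SAMPLE_PERIOD = %llu\n",
	       (unsigned long long)c->last_period);

	/* Program the next interval and remember what was programmed. */
	c->sample_period = next_period;
	c->last_period = c->sample_period;
}

int main(void)
{
	struct sim_counter c = { .sample_period = 100000, .last_period = 100000 };

	overflow(&c, 200000);	/* frequency logic picks a new period */
	overflow(&c, 400000);	/* reports 200000, the interval that elapsed */
	return 0;
}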
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	15
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 82a23d487f9..57ae1bec81b 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -698,6 +698,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	if (!hwc->sample_period) {
 		hwc->sample_period = x86_pmu.max_period;
+		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
 	}
@@ -880,12 +881,14 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (unlikely(left <= -period)) {
 		left = period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 		ret = 1;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 		ret = 1;
 	}
 	/*
@@ -1257,9 +1260,12 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
 			continue;
 
-		/* counter overflow */
-		handled = 1;
-		inc_irq_stat(apic_perf_irqs);
+		/*
+		 * counter overflow
+		 */
+		handled		= 1;
+		data.period	= counter->hw.last_period;
+
 		if (!x86_perf_counter_set_period(counter, hwc, idx))
 			continue;
@@ -1267,6 +1273,9 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 		if (perf_counter_overflow(counter, 1, &data))
 			amd_pmu_disable_counter(hwc, idx);
 	}
 
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
 	return handled;
 }