author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-05-25 14:45:26 +0200
committer Ingo Molnar <mingo@elte.hu>              2009-05-25 14:55:00 +0200
commit    771d7cde144d87f2d1fbee4da3c6234d61f7e42a (patch)
tree      21d15c924924b5f6cb91dd2b103337253b46ed58 /kernel
parent    e527ea312f31e88a7fa5472b71db71c565b0d44f (diff)
perf_counter: Make prctl() affect inherited counters too
Paul noted that the new prctl() didn't work on child counters.

Reported-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090525124600.203151469@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_counter.c  48
1 file changed, 24 insertions, 24 deletions
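The hunks below move perf_counter_task_enable()/perf_counter_task_disable() and switch them from calling perf_counter_enable()/perf_counter_disable() directly to going through perf_counter_for_each_child(). That helper is not visible in this diff; the following is only a rough sketch of what such a helper could look like, assuming child_mutex and child_list fields in struct perf_counter (neither appears in the hunks here):

/*
 * Sketch only, not part of this commit's hunks: apply @func to a counter
 * and to every counter inherited from it by child tasks.
 */
static void perf_counter_for_each_child(struct perf_counter *counter,
					void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	mutex_lock(&counter->child_mutex);	/* assumed field name */
	func(counter);				/* the task's own counter */
	list_for_each_entry(child, &counter->child_list, child_list)
		func(child);			/* each inherited child */
	mutex_unlock(&counter->child_mutex);
}

With a helper of that shape, the prctl()-driven enable/disable requests act on the whole inheritance tree rather than only on the counters the calling task owns, which is the behaviour Paul reported as missing.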
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 6cdf8248eda..217dbcce2eb 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1067,30 +1067,6 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
__perf_counter_sched_in(ctx, cpuctx, cpu);
}
-int perf_counter_task_enable(void)
-{
- struct perf_counter *counter;
-
- mutex_lock(&current->perf_counter_mutex);
- list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
- perf_counter_enable(counter);
- mutex_unlock(&current->perf_counter_mutex);
-
- return 0;
-}
-
-int perf_counter_task_disable(void)
-{
- struct perf_counter *counter;
-
- mutex_lock(&current->perf_counter_mutex);
- list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
- perf_counter_disable(counter);
- mutex_unlock(&current->perf_counter_mutex);
-
- return 0;
-}
-
static void perf_log_period(struct perf_counter *counter, u64 period);
static void perf_adjust_freq(struct perf_counter_context *ctx)
@@ -1505,6 +1481,30 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
+int perf_counter_task_enable(void)
+{
+ struct perf_counter *counter;
+
+ mutex_lock(&current->perf_counter_mutex);
+ list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+ perf_counter_for_each_child(counter, perf_counter_enable);
+ mutex_unlock(&current->perf_counter_mutex);
+
+ return 0;
+}
+
+int perf_counter_task_disable(void)
+{
+ struct perf_counter *counter;
+
+ mutex_lock(&current->perf_counter_mutex);
+ list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+ perf_counter_for_each_child(counter, perf_counter_disable);
+ mutex_unlock(&current->perf_counter_mutex);
+
+ return 0;
+}
+
/*
* Callers need to ensure there can be no nesting of this function, otherwise
* the seqlock logic goes bad. We can not serialize this because the arch