path: root/kernel/perf_counter.c
author		Peter Zijlstra		2009-05-23 18:29:00 +0200
committer	Ingo Molnar		2009-05-24 08:24:08 +0200
commit		082ff5a2767a0679ee543f14883adbafb631ffbe (patch)
tree		5ddf792ed3f80b17bc427edea1dc1d4b4303b4f6 /kernel/perf_counter.c
parent		perf_counter: Simplify context cleanup (diff)
download	kernel-qcow2-linux-082ff5a2767a0679ee543f14883adbafb631ffbe.tar.gz
		kernel-qcow2-linux-082ff5a2767a0679ee543f14883adbafb631ffbe.tar.xz
		kernel-qcow2-linux-082ff5a2767a0679ee543f14883adbafb631ffbe.zip
perf_counter: Change prctl() behaviour
Instead of en/dis-abling all counters acting on a particular task,
en/dis-able all counters we created.

[ v2: fix crash on first counter enable ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090523163012.916937244@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
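
For context, perf_counter_task_enable()/perf_counter_task_disable() are reached from userspace via prctl(). Below is a minimal, hedged usage sketch, not part of this patch; the PR_TASK_PERF_COUNTERS_* values (31/32) are assumed from the 2009-era prctl.h and were later renamed to PR_TASK_PERF_EVENTS_*:

/*
 * Hedged userspace sketch, not part of this patch: after this change,
 * prctl() toggles only the counters this task itself created, not every
 * counter attached to it. Constants assumed from the 2009-era prctl.h.
 */
#include <sys/prctl.h>

#ifndef PR_TASK_PERF_COUNTERS_DISABLE
#define PR_TASK_PERF_COUNTERS_DISABLE	31
#define PR_TASK_PERF_COUNTERS_ENABLE	32
#endif

int main(void)
{
	/* Pause the counters we opened ourselves ... */
	prctl(PR_TASK_PERF_COUNTERS_DISABLE, 0, 0, 0, 0);

	/* ... run code that should not be counted ... */

	/* ... and resume them. */
	prctl(PR_TASK_PERF_COUNTERS_ENABLE, 0, 0, 0, 0);
	return 0;
}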
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	87
1 file changed, 24 insertions(+), 63 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 0e97f8961333..4c86a6369764 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1076,79 +1076,26 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
__perf_counter_sched_in(ctx, cpuctx, cpu);
}
-int perf_counter_task_disable(void)
+int perf_counter_task_enable(void)
{
- struct task_struct *curr = current;
- struct perf_counter_context *ctx = curr->perf_counter_ctxp;
struct perf_counter *counter;
- unsigned long flags;
-
- if (!ctx || !ctx->nr_counters)
- return 0;
-
- local_irq_save(flags);
- __perf_counter_task_sched_out(ctx);
-
- spin_lock(&ctx->lock);
-
- /*
- * Disable all the counters:
- */
- perf_disable();
-
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
- if (counter->state != PERF_COUNTER_STATE_ERROR) {
- update_group_times(counter);
- counter->state = PERF_COUNTER_STATE_OFF;
- }
- }
-
- perf_enable();
-
- spin_unlock_irqrestore(&ctx->lock, flags);
+ mutex_lock(&current->perf_counter_mutex);
+ list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+ perf_counter_enable(counter);
+ mutex_unlock(&current->perf_counter_mutex);
return 0;
}
-int perf_counter_task_enable(void)
+int perf_counter_task_disable(void)
{
- struct task_struct *curr = current;
- struct perf_counter_context *ctx = curr->perf_counter_ctxp;
struct perf_counter *counter;
- unsigned long flags;
- int cpu;
-
- if (!ctx || !ctx->nr_counters)
- return 0;
-
- local_irq_save(flags);
- cpu = smp_processor_id();
-
- __perf_counter_task_sched_out(ctx);
-
- spin_lock(&ctx->lock);
- /*
- * Disable all the counters:
- */
- perf_disable();
-
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
- if (counter->state > PERF_COUNTER_STATE_OFF)
- continue;
- counter->state = PERF_COUNTER_STATE_INACTIVE;
- counter->tstamp_enabled =
- ctx->time - counter->total_time_enabled;
- counter->hw_event.disabled = 0;
- }
- perf_enable();
-
- spin_unlock(&ctx->lock);
-
- perf_counter_task_sched_in(curr, cpu);
-
- local_irq_restore(flags);
+ mutex_lock(&current->perf_counter_mutex);
+ list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+ perf_counter_disable(counter);
+ mutex_unlock(&current->perf_counter_mutex);
return 0;
}
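
The rewritten enable/disable paths above walk a per-task owner list under a mutex instead of the task's scheduling context. A minimal sketch of the bookkeeping they rely on, with field names taken from the diff and all surrounding struct members elided (the real declarations live in the headers, not in this file):

/* Sketch only: field names match the diff, everything else is elided. */
struct perf_counter {
	/* ... */
	struct list_head	 owner_entry;	/* node in the owner's list      */
	struct task_struct	*owner;		/* task that created the counter */
};

struct task_struct {
	/* ... */
	struct mutex		perf_counter_mutex;	/* protects the list below        */
	struct list_head	perf_counter_list;	/* counters this task has created */
};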
@@ -1416,6 +1363,11 @@ static int perf_release(struct inode *inode, struct file *file)
perf_counter_remove_from_context(counter);
mutex_unlock(&ctx->mutex);
+ mutex_lock(&counter->owner->perf_counter_mutex);
+ list_del_init(&counter->owner_entry);
+ mutex_unlock(&counter->owner->perf_counter_mutex);
+ put_task_struct(counter->owner);
+
free_counter(counter);
put_context(ctx);
@@ -3272,6 +3224,12 @@ SYSCALL_DEFINE5(perf_counter_open,
perf_install_in_context(ctx, counter, cpu);
mutex_unlock(&ctx->mutex);
+ counter->owner = current;
+ get_task_struct(current);
+ mutex_lock(&current->perf_counter_mutex);
+ list_add_tail(&counter->owner_entry, &current->perf_counter_list);
+ mutex_unlock(&current->perf_counter_mutex);
+
fput_light(counter_file, fput_needed2);
out_fput:
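
Taken together, the perf_release() and perf_counter_open() hunks above pair a get_task_struct() with a put_task_struct() so that counter->owner stays valid for as long as the counter sits on the owner list. A condensed sketch of that lifecycle, using hypothetical helper names rather than anything introduced by the patch:

/* Hypothetical helpers condensing the two hunks above; not in the patch. */
static void counter_attach_owner(struct perf_counter *counter)
{
	counter->owner = current;
	get_task_struct(current);		/* pin the owner task */

	mutex_lock(&current->perf_counter_mutex);
	list_add_tail(&counter->owner_entry, &current->perf_counter_list);
	mutex_unlock(&current->perf_counter_mutex);
}

static void counter_detach_owner(struct perf_counter *counter)
{
	mutex_lock(&counter->owner->perf_counter_mutex);
	list_del_init(&counter->owner_entry);
	mutex_unlock(&counter->owner->perf_counter_mutex);

	put_task_struct(counter->owner);	/* drop the pin */
}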
@@ -3488,6 +3446,9 @@ void perf_counter_init_task(struct task_struct *child)
child->perf_counter_ctxp = NULL;
+ mutex_init(&child->perf_counter_mutex);
+ INIT_LIST_HEAD(&child->perf_counter_list);
+
/*
* This is executed from the parent task context, so inherit
* counters that have been marked for cloning.