author	Peter Zijlstra	2009-03-23 18:22:06 +0100
committer	Ingo Molnar	2009-04-06 09:30:25 +0200
commit	f4a2deb4860497f4332cf6a1acddab3dd628ddf0 (patch)
tree	1655c7c000edce20d2c5b54cf12f99c23340371e /kernel/perf_counter.c
parent	perf_counter tools: when no command is feed to perfstat, display help and exit (diff)
perf_counter: remove the event config bitfields
Since the bitfields turned into a bit of a mess, remove them and rely
on good old masks.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Orig-LKML-Reference: <20090323172417.059499915@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
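The patch replaces direct reads of the old bitfields (raw_type, type, event_id) with helper accessors that extract the same information from a single hw_event.config word using shift/mask constants. Those helpers live in include/linux/perf_counter.h, which is outside this diffstat, so the sketch below is only an approximation of how such mask-based accessors are typically laid out; the field widths and the trimmed struct are assumptions made for illustration, not the patch's exact definitions.

	#include <linux/types.h>

	/*
	 * Illustrative layout only: the real definitions are in
	 * include/linux/perf_counter.h.  Here the 64-bit config word is
	 * assumed to pack a 1-bit "raw" flag, a 7-bit type and a 56-bit
	 * event id.
	 */
	#define __PERF_COUNTER_MASK(name)				\
		(((1ULL << PERF_COUNTER_##name##_BITS) - 1) <<		\
		 PERF_COUNTER_##name##_SHIFT)

	#define PERF_COUNTER_RAW_BITS		1
	#define PERF_COUNTER_RAW_SHIFT		63
	#define PERF_COUNTER_RAW_MASK		__PERF_COUNTER_MASK(RAW)

	#define PERF_COUNTER_TYPE_BITS		7
	#define PERF_COUNTER_TYPE_SHIFT		56
	#define PERF_COUNTER_TYPE_MASK		__PERF_COUNTER_MASK(TYPE)

	#define PERF_COUNTER_EVENT_BITS		56
	#define PERF_COUNTER_EVENT_SHIFT	0
	#define PERF_COUNTER_EVENT_MASK		__PERF_COUNTER_MASK(EVENT)

	/* trimmed for illustration: only the config word matters here */
	struct perf_counter_hw_event {
		__u64	config;
		/* ... remaining fields unchanged ... */
	};

	/* non-zero when the raw bit is set in the config word */
	static inline u64 perf_event_raw(struct perf_counter_hw_event *hw_event)
	{
		return hw_event->config & PERF_COUNTER_RAW_MASK;
	}

	/* extract the event type field (e.g. PERF_TYPE_HARDWARE) */
	static inline u64 perf_event_type(struct perf_counter_hw_event *hw_event)
	{
		return (hw_event->config & PERF_COUNTER_TYPE_MASK) >>
			PERF_COUNTER_TYPE_SHIFT;
	}

	/* extract the event id field (e.g. PERF_COUNT_CPU_CLOCK) */
	static inline u64 perf_event_id(struct perf_counter_hw_event *hw_event)
	{
		return hw_event->config & PERF_COUNTER_EVENT_MASK;
	}

With accessors of this shape, the call sites in kernel/perf_counter.c below no longer depend on how the fields are packed; if the encoding ever changes, only the header has to be updated.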
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	22
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index f054b8c9bf96..ca14fc41ccdf 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1379,7 +1379,7 @@ static void perf_counter_handle_group(struct perf_counter *counter)
list_for_each_entry(sub, &leader->sibling_list, list_entry) {
if (sub != counter)
sub->hw_ops->read(sub);
- perf_counter_store_irq(counter, sub->hw_event.event_config);
+ perf_counter_store_irq(counter, sub->hw_event.config);
perf_counter_store_irq(counter, atomic64_read(&sub->count));
}
}
@@ -1489,13 +1489,13 @@ static int perf_swcounter_match(struct perf_counter *counter,
if (counter->state != PERF_COUNTER_STATE_ACTIVE)
return 0;
- if (counter->hw_event.raw_type)
+ if (perf_event_raw(&counter->hw_event))
return 0;
- if (counter->hw_event.type != type)
+ if (perf_event_type(&counter->hw_event) != type)
return 0;
- if (counter->hw_event.event_id != event)
+ if (perf_event_id(&counter->hw_event) != event)
return 0;
if (counter->hw_event.exclude_user && user_mode(regs))
@@ -1757,13 +1757,13 @@ extern void ftrace_profile_disable(int);
static void tp_perf_counter_destroy(struct perf_counter *counter)
{
- ftrace_profile_disable(counter->hw_event.event_id);
+ ftrace_profile_disable(perf_event_id(&counter->hw_event));
}
static const struct hw_perf_counter_ops *
tp_perf_counter_init(struct perf_counter *counter)
{
- int event_id = counter->hw_event.event_id;
+ int event_id = perf_event_id(&counter->hw_event);
int ret;
ret = ftrace_profile_enable(event_id);
@@ -1797,7 +1797,7 @@ sw_perf_counter_init(struct perf_counter *counter)
* to be kernel events, and page faults are never hypervisor
* events.
*/
- switch (counter->hw_event.event_id) {
+ switch (perf_event_id(&counter->hw_event)) {
case PERF_COUNT_CPU_CLOCK:
hw_ops = &perf_ops_cpu_clock;
@@ -1882,9 +1882,12 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
hw_ops = NULL;
- if (hw_event->raw_type)
+ if (perf_event_raw(hw_event)) {
hw_ops = hw_perf_counter_init(counter);
- else switch (hw_event->type) {
+ goto done;
+ }
+
+ switch (perf_event_type(hw_event)) {
case PERF_TYPE_HARDWARE:
hw_ops = hw_perf_counter_init(counter);
break;
@@ -1902,6 +1905,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
kfree(counter);
return NULL;
}
+done:
counter->hw_ops = hw_ops;
return counter;