path: root/arch/x86/kernel/cpu/perf_counter.c
author     Ingo Molnar  2008-12-11 08:38:42 +0100
committer  Ingo Molnar  2008-12-11 15:45:49 +0100
commit     04289bb9891882202d7e961c4c04d2376930e9f9 (patch)
tree       13340847915efc809a62bf91b3cd45e0e0416deb /arch/x86/kernel/cpu/perf_counter.c
parent     perf counters: restructure the API (diff)
perf counters: add support for group counters
Impact: add group counters

This patch adds the "counter groups" abstraction.

Groups of counters behave much like normal 'single' counters, with a few semantic and behavioral extensions on top of that.

A counter group is created by creating a new counter with the open() syscall's group-leader group_fd file descriptor parameter pointing to another, already existing counter.

Groups of counters are scheduled in and out in one atomic group, and they are also round-robin-scheduled atomically.

Counters that are members of a group can also record events with an (atomic) extended timestamp that extends to all members of the group, if the record type is set to PERF_RECORD_GROUP.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
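In user-space terms, the group_fd convention described above would be exercised along these lines. This is only a sketch: the wrapper function, the __NR_perf_counter_open constant, and the use of -1 to mean "no group" are assumptions made for illustration; only the grouping rule itself (open a counter with group_fd pointing at an already existing counter to join its group) comes from the changelog.

#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>

struct perf_counter_hw_event;		/* layout comes from the kernel headers */

/*
 * Hypothetical wrapper for the counter-open syscall; the name of the
 * __NR_ constant is an assumption for this sketch.
 */
static int
sys_perf_counter_open(struct perf_counter_hw_event *hw_event,
		      pid_t pid, int cpu, int group_fd)
{
	return syscall(__NR_perf_counter_open, hw_event, pid, cpu, group_fd);
}

/*
 * Open two counters as one group.  The first open passes group_fd == -1
 * and acts as the group leader; the second passes the leader's fd as
 * group_fd and joins the group, so both counters are scheduled onto and
 * off the PMU as one atomic unit and are round-robined together.
 */
static int
open_counter_group(struct perf_counter_hw_event *leader_event,
		   struct perf_counter_hw_event *sibling_event,
		   pid_t pid, int cpu)
{
	int leader_fd, sibling_fd;

	leader_fd = sys_perf_counter_open(leader_event, pid, cpu, -1);
	if (leader_fd < 0)
		return leader_fd;

	sibling_fd = sys_perf_counter_open(sibling_event, pid, cpu, leader_fd);
	if (sibling_fd < 0)
		return sibling_fd;

	return leader_fd;
}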
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index ef1936a871aa..54b4ad0cce68 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -346,18 +346,22 @@ static void perf_save_and_restart(struct perf_counter *counter)
}
static void
-perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
+perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
{
- struct perf_counter_context *ctx = leader->ctx;
- struct perf_counter *counter;
+ struct perf_counter *counter, *group_leader = sibling->group_leader;
int bit;
- list_for_each_entry(counter, &ctx->counters, list) {
- if (counter->hw_event.record_type != PERF_RECORD_SIMPLE ||
- counter == leader)
- continue;
+ /*
+ * Store the counter's own timestamp first:
+ */
+ perf_store_irq_data(sibling, sibling->hw_event.type);
+ perf_store_irq_data(sibling, atomic64_counter_read(sibling));
- if (counter->active) {
+ /*
+ * Then store sibling timestamps (if any):
+ */
+ list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
+ if (!counter->active) {
/*
* When counter was not in the overflow mask, we have to
* read it from hardware. We read it as well, when it
@@ -371,8 +375,8 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
perf_save_and_restart(counter);
}
}
- perf_store_irq_data(leader, counter->hw_event.type);
- perf_store_irq_data(leader, atomic64_counter_read(counter));
+ perf_store_irq_data(sibling, counter->hw_event.type);
+ perf_store_irq_data(sibling, atomic64_counter_read(counter));
}
}
@@ -416,10 +420,6 @@ again:
perf_store_irq_data(counter, instruction_pointer(regs));
break;
case PERF_RECORD_GROUP:
- perf_store_irq_data(counter,
- counter->hw_event.type);
- perf_store_irq_data(counter,
- atomic64_counter_read(counter));
perf_handle_group(counter, &status, &ack);
break;
}
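Taken together, the two hunks change the shape of a PERF_RECORD_GROUP interrupt record: perf_handle_group() now stores the overflowing counter's own (hw_event.type, value) pair first and then one such pair for every member of its leader's sibling_list, instead of scanning the whole context. A reader of that irq data could interpret it roughly as below; the function and the flat u64-buffer view are illustrative assumptions, not something this patch defines.

#include <inttypes.h>
#include <stdio.h>
#include <stddef.h>

/*
 * Assumed view of the data stored by perf_handle_group(): a run of
 * 64-bit words in (hw_event.type, counter value) pairs, the
 * overflowing counter's pair first, then one pair per group sibling.
 */
static void dump_group_record(const uint64_t *buf, size_t n_u64)
{
	size_t i;

	for (i = 0; i + 1 < n_u64; i += 2)
		printf("event type %" PRIu64 ": count %" PRIu64 "\n",
		       buf[i], buf[i + 1]);
}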