author     Paul Mackerras 2009-05-14 05:29:14 +0200
committer  Ingo Molnar 2009-05-15 16:38:55 +0200
commit     ef923214a4816c289e4af2d67a9ebb1a31e4ac61 (patch)
tree       bf850f4f53a4f8391b6b9c0335e58364668586d9 /arch/powerpc/include/asm
parent     perf_counter: frequency based adaptive irq_period, 32-bit fix (diff)
perf_counter: powerpc: use u64 for event codes internally
Although the perf_counter API allows 63-bit raw event codes, internally in the powerpc back-end we had been using 32-bit event codes. This expands them to 64 bits so that we can add bits for specifying threshold start/stop events and instruction sampling modes later.

This also corrects the return value of can_go_on_limited_pmc; we were returning an event code rather than just a 0/1 value in some circumstances. That didn't particularly matter while event codes were 32-bit, but now that event codes are 64-bit it might, so this fixes it.

[ Impact: extend PowerPC perfcounter interfaces from u32 to u64 ]

Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18955.36874.472452.353104@drongo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
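The return-value fix described above amounts to collapsing a (now possibly 64-bit) event code into a strict 0/1 before it leaves the function. The following is only a minimal sketch of that pattern; the helper name and mask parameter are invented for illustration and are not the actual can_go_on_limited_pmc code in arch/powerpc/kernel/perf_counter.c:

/*
 * Hypothetical illustration of the 0/1 return-value fix: with u64
 * event codes, returning the code itself through an int return type
 * could silently truncate the interesting bits away, so force a
 * boolean result instead.  Assumes the kernel's u64 from <linux/types.h>.
 */
static int event_uses_limited_pmc(u64 event, u64 limited_mask)
{
	return !!(event & limited_mask);	/* "!!" yields exactly 0 or 1 */
}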
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/perf_counter.h | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
index 56d66c38143b..ceea76a48e3d 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -23,13 +23,13 @@ struct power_pmu {
int max_alternatives;
u64 add_fields;
u64 test_adder;
- int (*compute_mmcr)(unsigned int events[], int n_ev,
+ int (*compute_mmcr)(u64 events[], int n_ev,
unsigned int hwc[], u64 mmcr[]);
- int (*get_constraint)(unsigned int event, u64 *mskp, u64 *valp);
- int (*get_alternatives)(unsigned int event, unsigned int flags,
- unsigned int alt[]);
+ int (*get_constraint)(u64 event, u64 *mskp, u64 *valp);
+ int (*get_alternatives)(u64 event, unsigned int flags,
+ u64 alt[]);
void (*disable_pmc)(unsigned int pmc, u64 mmcr[]);
- int (*limited_pmc_event)(unsigned int event);
+ int (*limited_pmc_event)(u64 event);
int limited_pmc5_6; /* PMC5 and PMC6 have limited function */
int n_generic;
int *generic_events;
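For context on how the widened prototypes are used, here is a minimal, hypothetical back-end sketch written against the new u64-based callback signatures from the hunk above; the hyp_* names and the 4-bit PMC-selector field are invented for illustration and do not correspond to any real POWER back-end:

#define HYP_PMC_SEL	0xfULL	/* invented low-bits PMC selector field */

/* Matches: int (*get_constraint)(u64 event, u64 *mskp, u64 *valp); */
static int hyp_get_constraint(u64 event, u64 *mskp, u64 *valp)
{
	/* Constrain scheduling on the invented PMC selector bits. */
	*mskp = HYP_PMC_SEL;
	*valp = event & HYP_PMC_SEL;
	return 0;
}

/* Matches: int (*limited_pmc_event)(u64 event); returns strictly 0 or 1. */
static int hyp_limited_pmc_event(u64 event)
{
	u64 sel = event & HYP_PMC_SEL;

	return sel == 5 || sel == 6;
}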