Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile             |    5
-rw-r--r--  arch/mips/kernel/asm-offsets.c        |    1
-rw-r--r--  arch/mips/kernel/branch.c             |    1
-rw-r--r--  arch/mips/kernel/cevt-bcm1480.c       |    1
-rw-r--r--  arch/mips/kernel/cevt-ds1287.c        |    1
-rw-r--r--  arch/mips/kernel/cevt-gt641xx.c       |    1
-rw-r--r--  arch/mips/kernel/cevt-r4k.c           |    6
-rw-r--r--  arch/mips/kernel/cevt-sb1250.c        |    1
-rw-r--r--  arch/mips/kernel/cevt-smtc.c          |    1
-rw-r--r--  arch/mips/kernel/cevt-txx9.c          |    1
-rw-r--r--  arch/mips/kernel/cpu-bugs64.c         |    2
-rw-r--r--  arch/mips/kernel/cpu-probe.c          |  105
-rw-r--r--  arch/mips/kernel/i8253.c              |    1
-rw-r--r--  arch/mips/kernel/i8259.c              |    1
-rw-r--r--  arch/mips/kernel/irq-gic.c            |    7
-rw-r--r--  arch/mips/kernel/irq-rm7000.c         |    1
-rw-r--r--  arch/mips/kernel/irq-rm9000.c         |    1
-rw-r--r--  arch/mips/kernel/irq.c                |   24
-rw-r--r--  arch/mips/kernel/irq_cpu.c            |    1
-rw-r--r--  arch/mips/kernel/irq_txx9.c           |    1
-rw-r--r--  arch/mips/kernel/kgdb.c               |  213
-rw-r--r--  arch/mips/kernel/kprobes.c            |  557
-rw-r--r--  arch/mips/kernel/kspd.c               |    2
-rw-r--r--  arch/mips/kernel/linux32.c            |    7
-rw-r--r--  arch/mips/kernel/mcount.S             |    6
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c      |    2
-rw-r--r--  arch/mips/kernel/perf_event.c         |  601
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c  | 1052
-rw-r--r--  arch/mips/kernel/prom.c               |  112
-rw-r--r--  arch/mips/kernel/ptrace.c             |   29
-rw-r--r--  arch/mips/kernel/rtlx.c               |    3
-rw-r--r--  arch/mips/kernel/scall32-o32.S        |   16
-rw-r--r--  arch/mips/kernel/scall64-64.S         |   14
-rw-r--r--  arch/mips/kernel/scall64-n32.S        |   25
-rw-r--r--  arch/mips/kernel/scall64-o32.S        |   20
-rw-r--r--  arch/mips/kernel/setup.c              |    3
-rw-r--r--  arch/mips/kernel/signal.c             |   45
-rw-r--r--  arch/mips/kernel/signal_n32.c         |    5
-rw-r--r--  arch/mips/kernel/smp.c                |    4
-rw-r--r--  arch/mips/kernel/smtc.c               |    7
-rw-r--r--  arch/mips/kernel/syscall.c            |   61
-rw-r--r--  arch/mips/kernel/traps.c              |   83
-rw-r--r--  arch/mips/kernel/unaligned.c          |    7
-rw-r--r--  arch/mips/kernel/vpe.c                |    3
44 files changed, 2825 insertions, 215 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 7a6ac501cbb5..22b2e0e38617 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
obj-$(CONFIG_IRQ_GIC) += irq-gic.o
+obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o
@@ -95,12 +96,14 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
+obj-$(CONFIG_OF) += prom.o
+
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
obj-$(CONFIG_MIPS_CPUFREQ) += cpufreq/
-EXTRA_CFLAGS += -Werror
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ca6c83218caa..6b30fb2caa67 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -126,7 +126,6 @@ void output_thread_defines(void)
thread.cp0_baduaddr);
OFFSET(THREAD_ECODE, task_struct, \
thread.error_code);
- OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no);
OFFSET(THREAD_TRAMP, task_struct, \
thread.irix_trampoline);
OFFSET(THREAD_OLDCTX, task_struct, \
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 0176ed015c89..32103cc2a257 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -40,7 +40,6 @@ int __compute_return_epc(struct pt_regs *regs)
return -EFAULT;
}
- regs->regs[0] = 0;
switch (insn.i_format.opcode) {
/*
* jr and jalr are in r_format format.
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index bfea327c636c..36c3898b76db 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
+#include <linux/irq.h>
#include <asm/addrspace.h>
#include <asm/io.h>
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index 00a4da277cbb..939157e397b9 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
+#include <linux/irq.h>
#include <asm/time.h>
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index 392ef3756c56..339f3639b90e 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/irq.h>
#include <asm/gt64120.h>
#include <asm/time.h>
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 0b2450ceb13f..2f4d7a99bcc2 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
+#include <linux/irq.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>
@@ -163,7 +164,6 @@ int c0_compare_int_usable(void)
int __cpuinit r4k_clockevent_init(void)
{
- uint64_t mips_freq = mips_hpt_frequency;
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
unsigned int irq;
@@ -188,9 +188,9 @@ int __cpuinit r4k_clockevent_init(void)
cd->name = "MIPS";
cd->features = CLOCK_EVT_FEAT_ONESHOT;
+ clockevent_set_clock(cd, mips_hpt_frequency);
+
/* Calculate the min / max delta */
- cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
- cd->shift = 32;
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index da78eeaea6e8..590c54f28a81 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -17,6 +17,7 @@
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/smp.h>
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
index b102e4f1630e..2e72d30b2f05 100644
--- a/arch/mips/kernel/cevt-smtc.c
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
+#include <linux/irq.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index 218ee6bda935..0b7377361e22 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -13,6 +13,7 @@
*/
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <asm/time.h>
#include <asm/txx9tmr.h>
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 408d0a07b3a3..b8bb8ba60869 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -239,7 +239,7 @@ static inline void check_daddi(void)
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
-int daddiu_bug __cpuinitdata = -1;
+int daddiu_bug = -1;
static inline void check_daddiu(void)
{
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 3562b854f2cd..71620e19827a 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -25,6 +25,8 @@
#include <asm/system.h>
#include <asm/watch.h>
#include <asm/spram.h>
+#include <asm/uaccess.h>
+
/*
* Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
* the implementation of the "wait" feature differs between CPU families. This
@@ -181,12 +183,14 @@ void __init check_wait(void)
case CPU_5KC:
case CPU_25KF:
case CPU_PR4450:
- case CPU_BCM3302:
- case CPU_BCM6338:
- case CPU_BCM6348:
- case CPU_BCM6358:
+ case CPU_BMIPS3300:
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ case CPU_BMIPS5000:
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ case CPU_JZRISC:
cpu_wait = r4k_wait;
break;
@@ -760,6 +764,9 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c)
ok = decode_config4(c);
mips_probe_watch_registers(c);
+
+ if (cpu_has_mips_r2)
+ c->core = read_c0_ebase() & 0x3ff;
}
static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
@@ -898,33 +905,37 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & 0xff00) {
- case PRID_IMP_BCM3302:
- /* same as PRID_IMP_BCM6338 */
- c->cputype = CPU_BCM3302;
- __cpu_name[cpu] = "Broadcom BCM3302";
- break;
- case PRID_IMP_BCM4710:
- c->cputype = CPU_BCM4710;
- __cpu_name[cpu] = "Broadcom BCM4710";
- break;
- case PRID_IMP_BCM6345:
- c->cputype = CPU_BCM6345;
- __cpu_name[cpu] = "Broadcom BCM6345";
+ case PRID_IMP_BMIPS32:
+ c->cputype = CPU_BMIPS32;
+ __cpu_name[cpu] = "Broadcom BMIPS32";
+ break;
+ case PRID_IMP_BMIPS3300:
+ case PRID_IMP_BMIPS3300_ALT:
+ case PRID_IMP_BMIPS3300_BUG:
+ c->cputype = CPU_BMIPS3300;
+ __cpu_name[cpu] = "Broadcom BMIPS3300";
+ break;
+ case PRID_IMP_BMIPS43XX: {
+ int rev = c->processor_id & 0xff;
+
+ if (rev >= PRID_REV_BMIPS4380_LO &&
+ rev <= PRID_REV_BMIPS4380_HI) {
+ c->cputype = CPU_BMIPS4380;
+ __cpu_name[cpu] = "Broadcom BMIPS4380";
+ } else {
+ c->cputype = CPU_BMIPS4350;
+ __cpu_name[cpu] = "Broadcom BMIPS4350";
+ }
break;
- case PRID_IMP_BCM6348:
- c->cputype = CPU_BCM6348;
- __cpu_name[cpu] = "Broadcom BCM6348";
+ }
+ case PRID_IMP_BMIPS5000:
+ c->cputype = CPU_BMIPS5000;
+ __cpu_name[cpu] = "Broadcom BMIPS5000";
+ c->options |= MIPS_CPU_ULRI;
break;
- case PRID_IMP_BCM4350:
- switch (c->processor_id & 0xf0) {
- case PRID_REV_BCM6358:
- c->cputype = CPU_BCM6358;
- __cpu_name[cpu] = "Broadcom BCM6358";
- break;
- default:
- c->cputype = CPU_UNKNOWN;
- break;
- }
+ case PRID_IMP_BMIPS4KC:
+ c->cputype = CPU_4KC;
+ __cpu_name[cpu] = "MIPS 4Kc";
break;
}
}
@@ -949,6 +960,12 @@ platform:
if (cpu == 0)
__elf_platform = "octeon";
break;
+ case PRID_IMP_CAVIUM_CN63XX:
+ c->cputype = CPU_CAVIUM_OCTEON2;
+ __cpu_name[cpu] = "Cavium Octeon II";
+ if (cpu == 0)
+ __elf_platform = "octeon2";
+ break;
default:
printk(KERN_INFO "Unknown Octeon chip!\n");
c->cputype = CPU_UNKNOWN;
@@ -956,6 +973,28 @@ platform:
}
}
+static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ /* JZRISC does not implement the CP0 counter. */
+ c->options &= ~MIPS_CPU_COUNTER;
+ switch (c->processor_id & 0xff00) {
+ case PRID_IMP_JZRISC:
+ c->cputype = CPU_JZRISC;
+ __cpu_name[cpu] = "Ingenic JZRISC";
+ break;
+ default:
+ panic("Unknown Ingenic Processor ID!");
+ break;
+ }
+}
+
+#ifdef CONFIG_64BIT
+/* For use by uaccess.h */
+u64 __ua_limit;
+EXPORT_SYMBOL(__ua_limit);
+#endif
+
const char *__cpu_name[NR_CPUS];
const char *__elf_platform;
@@ -994,6 +1033,9 @@ __cpuinit void cpu_probe(void)
case PRID_COMP_CAVIUM:
cpu_probe_cavium(c, cpu);
break;
+ case PRID_COMP_INGENIC:
+ cpu_probe_ingenic(c, cpu);
+ break;
}
BUG_ON(!__cpu_name[cpu]);
@@ -1030,6 +1072,11 @@ __cpuinit void cpu_probe(void)
c->srsets = 1;
cpu_probe_vmbits(c);
+
+#ifdef CONFIG_64BIT
+ if (cpu == 0)
+ __ua_limit = ~((1ull << cpu_vmbits) - 1);
+#endif
}
__cpuinit void cpu_report(void)
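Illustrative note, not part of the patch: __ua_limit is simply the complement of the mask covering the CPU's user virtual-address range. Assuming a hypothetical CPU reporting cpu_vmbits == 40, the computation above works out as shown below, giving the "For use by uaccess.h" export a constant that the user-access checks can compare pointers against.

	/* Sketch only: assumes cpu_vmbits == 40 on a 64-bit kernel. */
	u64 limit = ~((1ull << 40) - 1);	/* 0xffffff0000000000 */
	/* A pointer with any of these high bits set lies above the
	 * 40-bit user range, so the access checks can reject it. */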
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index 94794062a177..2392a7a296d4 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
+#include <linux/irq.h>
#include <asm/delay.h>
#include <asm/i8253.h>
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 27799113332c..c58176cc796b 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sysdev.h>
+#include <linux/irq.h>
#include <asm/i8259.h>
#include <asm/io.h>
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index b181f2f0ea8e..1774271af848 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -3,12 +3,11 @@
#include <linux/bitmap.h>
#include <linux/init.h>
#include <linux/smp.h>
+#include <linux/irq.h>
#include <asm/io.h>
#include <asm/gic.h>
#include <asm/gcmpregs.h>
-#include <asm/mips-boards/maltaint.h>
-#include <asm/irq.h>
#include <linux/hardirq.h>
#include <asm-generic/bitops/find.h>
@@ -131,7 +130,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
int i;
irq -= _irqbase;
- pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq);
+ pr_debug("%s(%d) called\n", __func__, irq);
cpumask_and(&tmp, cpumask, cpu_online_mask);
if (cpus_empty(tmp))
return -1;
@@ -222,7 +221,7 @@ static void __init gic_basic_init(int numintrs, int numvpes,
/* Setup specifics */
for (i = 0; i < mapsize; i++) {
cpu = intrmap[i].cpunum;
- if (cpu == X)
+ if (cpu == GIC_UNUSED)
continue;
if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
continue;
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index fb50cc78b28b..9731e8b47862 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -11,6 +11,7 @@
*/
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <asm/irq_cpu.h>
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index b47e4615ec12..b7e4025b58a8 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -11,6 +11,7 @@
*/
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index c6345f579a8a..4f93db58a79e 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -151,6 +151,29 @@ void __init init_IRQ(void)
#endif
}
+#ifdef DEBUG_STACKOVERFLOW
+static inline void check_stack_overflow(void)
+{
+ unsigned long sp;
+
+ __asm__ __volatile__("move %0, $sp" : "=r" (sp));
+ sp &= THREAD_MASK;
+
+ /*
+ * Check for stack overflow: is there less than STACK_WARN free?
+ * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
+ */
+ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+ printk("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
+ }
+}
+#else
+static inline void check_stack_overflow(void) {}
+#endif
+
+
/*
* do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
@@ -159,6 +182,7 @@ void __init init_IRQ(void)
void __irq_entry do_IRQ(unsigned int irq)
{
irq_enter();
+ check_stack_overflow();
__DO_IRQ_SMTC_HOOK(irq);
generic_handle_irq(irq);
irq_exit();
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 55c8a3ca507b..0262abe09121 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -30,6 +30,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/irq.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c
index 9b78029bea70..95a96f69172d 100644
--- a/arch/mips/kernel/irq_txx9.c
+++ b/arch/mips/kernel/irq_txx9.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
+#include <linux/irq.h>
#include <asm/txx9irq.h>
struct txx9_irc_reg {
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 9b78ff6e9b84..f4546e97c60d 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -50,6 +50,151 @@ static struct hard_trap_info {
{ 0, 0} /* Must be last */
};
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
+{
+ { "zero", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
+ { "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
+ { "v0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
+ { "v1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
+ { "a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
+ { "a1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
+ { "a2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
+ { "a3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
+ { "t0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
+ { "t1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
+ { "t2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
+ { "t3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
+ { "t4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
+ { "t5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
+ { "t6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
+ { "t7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
+ { "s0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
+ { "s1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
+ { "s2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
+ { "s3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
+ { "s4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
+ { "s5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
+ { "s6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
+ { "s7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
+ { "t8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
+ { "t9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
+ { "k0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
+ { "k1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
+ { "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
+ { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
+ { "s8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
+ { "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
+ { "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_status) },
+ { "lo", GDB_SIZEOF_REG, offsetof(struct pt_regs, lo) },
+ { "hi", GDB_SIZEOF_REG, offsetof(struct pt_regs, hi) },
+ { "bad", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_badvaddr) },
+ { "cause", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_cause) },
+ { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_epc) },
+ { "f0", GDB_SIZEOF_REG, 0 },
+ { "f1", GDB_SIZEOF_REG, 1 },
+ { "f2", GDB_SIZEOF_REG, 2 },
+ { "f3", GDB_SIZEOF_REG, 3 },
+ { "f4", GDB_SIZEOF_REG, 4 },
+ { "f5", GDB_SIZEOF_REG, 5 },
+ { "f6", GDB_SIZEOF_REG, 6 },
+ { "f7", GDB_SIZEOF_REG, 7 },
+ { "f8", GDB_SIZEOF_REG, 8 },
+ { "f9", GDB_SIZEOF_REG, 9 },
+ { "f10", GDB_SIZEOF_REG, 10 },
+ { "f11", GDB_SIZEOF_REG, 11 },
+ { "f12", GDB_SIZEOF_REG, 12 },
+ { "f13", GDB_SIZEOF_REG, 13 },
+ { "f14", GDB_SIZEOF_REG, 14 },
+ { "f15", GDB_SIZEOF_REG, 15 },
+ { "f16", GDB_SIZEOF_REG, 16 },
+ { "f17", GDB_SIZEOF_REG, 17 },
+ { "f18", GDB_SIZEOF_REG, 18 },
+ { "f19", GDB_SIZEOF_REG, 19 },
+ { "f20", GDB_SIZEOF_REG, 20 },
+ { "f21", GDB_SIZEOF_REG, 21 },
+ { "f22", GDB_SIZEOF_REG, 22 },
+ { "f23", GDB_SIZEOF_REG, 23 },
+ { "f24", GDB_SIZEOF_REG, 24 },
+ { "f25", GDB_SIZEOF_REG, 25 },
+ { "f26", GDB_SIZEOF_REG, 26 },
+ { "f27", GDB_SIZEOF_REG, 27 },
+ { "f28", GDB_SIZEOF_REG, 28 },
+ { "f29", GDB_SIZEOF_REG, 29 },
+ { "f30", GDB_SIZEOF_REG, 30 },
+ { "f31", GDB_SIZEOF_REG, 31 },
+ { "fsr", GDB_SIZEOF_REG, 0 },
+ { "fir", GDB_SIZEOF_REG, 0 },
+};
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int fp_reg;
+
+ if (regno < 0 || regno >= DBG_MAX_REG_NUM)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1 && regno < 38) {
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+ } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
+ /* FP registers 38 -> 69 */
+ if (!(regs->cp0_status & ST0_CU1))
+ return 0;
+ if (regno == 70) {
+ /* Process the fcr31/fsr (register 70) */
+ memcpy((void *)&current->thread.fpu.fcr31, mem,
+ dbg_reg_def[regno].size);
+ goto out_save;
+ } else if (regno == 71) {
+ /* Ignore the fir (register 71) */
+ goto out_save;
+ }
+ fp_reg = dbg_reg_def[regno].offset;
+ memcpy((void *)&current->thread.fpu.fpr[fp_reg], mem,
+ dbg_reg_def[regno].size);
+out_save:
+ restore_fp(current);
+ }
+
+ return 0;
+}
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int fp_reg;
+
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].offset != -1 && regno < 38) {
+ /* First 38 registers */
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+ } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
+ /* FP registers 38 -> 69 */
+ if (!(regs->cp0_status & ST0_CU1))
+ goto out;
+ save_fp(current);
+ if (regno == 70) {
+ /* Process the fcr31/fsr (register 70) */
+ memcpy(mem, (void *)&current->thread.fpu.fcr31,
+ dbg_reg_def[regno].size);
+ goto out;
+ } else if (regno == 71) {
+ /* Ignore the fir (register 71) */
+ memset(mem, 0, dbg_reg_def[regno].size);
+ goto out;
+ }
+ fp_reg = dbg_reg_def[regno].offset;
+ memcpy(mem, (void *)&current->thread.fpu.fpr[fp_reg],
+ dbg_reg_def[regno].size);
+ }
+
+out:
+ return dbg_reg_def[regno].name;
+
+}
+
void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__(
@@ -84,64 +229,6 @@ static int compute_signal(int tt)
return SIGHUP; /* default for things we don't know about */
}
-void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
-{
- int reg;
-
-#if (KGDB_GDB_REG_SIZE == 32)
- u32 *ptr = (u32 *)gdb_regs;
-#else
- u64 *ptr = (u64 *)gdb_regs;
-#endif
-
- for (reg = 0; reg < 32; reg++)
- *(ptr++) = regs->regs[reg];
-
- *(ptr++) = regs->cp0_status;
- *(ptr++) = regs->lo;
- *(ptr++) = regs->hi;
- *(ptr++) = regs->cp0_badvaddr;
- *(ptr++) = regs->cp0_cause;
- *(ptr++) = regs->cp0_epc;
-
- /* FP REGS */
- if (!(current && (regs->cp0_status & ST0_CU1)))
- return;
-
- save_fp(current);
- for (reg = 0; reg < 32; reg++)
- *(ptr++) = current->thread.fpu.fpr[reg];
-}
-
-void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
-{
- int reg;
-
-#if (KGDB_GDB_REG_SIZE == 32)
- const u32 *ptr = (u32 *)gdb_regs;
-#else
- const u64 *ptr = (u64 *)gdb_regs;
-#endif
-
- for (reg = 0; reg < 32; reg++)
- regs->regs[reg] = *(ptr++);
-
- regs->cp0_status = *(ptr++);
- regs->lo = *(ptr++);
- regs->hi = *(ptr++);
- regs->cp0_badvaddr = *(ptr++);
- regs->cp0_cause = *(ptr++);
- regs->cp0_epc = *(ptr++);
-
- /* FP REGS from current */
- if (!(current && (regs->cp0_status & ST0_CU1)))
- return;
-
- for (reg = 0; reg < 32; reg++)
- current->thread.fpu.fpr[reg] = *(ptr++);
- restore_fp(current);
-}
-
/*
* Similar to regs_to_gdb_regs() except that process is sleeping and so
* we may not be able to get all the info.
@@ -196,7 +283,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
struct pt_regs *regs = args->regs;
int trap = (regs->cp0_cause & 0x7c) >> 2;
- /* Userpace events, ignore. */
+ /* Userspace events, ignore. */
if (user_mode(regs))
return NOTIFY_DONE;
@@ -242,7 +329,7 @@ static struct notifier_block kgdb_notifier = {
};
/*
- * Handle the 's' and 'c' commands
+ * Handle the 'c' command
*/
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
@@ -250,20 +337,14 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
{
char *ptr;
unsigned long address;
- int cpu = smp_processor_id();
switch (remcom_in_buffer[0]) {
- case 's':
case 'c':
/* handle the optional parameter */
ptr = &remcom_in_buffer[1];
if (kgdb_hex2long(&ptr, &address))
regs->cp0_epc = address;
- atomic_set(&kgdb_cpu_doing_single_step, -1);
- if (remcom_in_buffer[0] == 's')
- atomic_set(&kgdb_cpu_doing_single_step, cpu);
-
return 0;
}
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
new file mode 100644
index 000000000000..ee28683fc2ac
--- /dev/null
+++ b/arch/mips/kernel/kprobes.c
@@ -0,0 +1,557 @@
+/*
+ * Kernel Probes (KProbes)
+ * arch/mips/kernel/kprobes.c
+ *
+ * Copyright 2006 Sony Corp.
+ * Copyright 2010 Cavium Networks
+ *
+ * Some portions copied from the powerpc version.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kprobes.h>
+#include <linux/preempt.h>
+#include <linux/kdebug.h>
+#include <linux/slab.h>
+
+#include <asm/ptrace.h>
+#include <asm/break.h>
+#include <asm/inst.h>
+
+static const union mips_instruction breakpoint_insn = {
+ .b_format = {
+ .opcode = spec_op,
+ .code = BRK_KPROBE_BP,
+ .func = break_op
+ }
+};
+
+static const union mips_instruction breakpoint2_insn = {
+ .b_format = {
+ .opcode = spec_op,
+ .code = BRK_KPROBE_SSTEPBP,
+ .func = break_op
+ }
+};
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe);
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static int __kprobes insn_has_delayslot(union mips_instruction insn)
+{
+ switch (insn.i_format.opcode) {
+
+ /*
+ * This group contains:
+ * jr and jalr are in r_format format.
+ */
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jr_op:
+ case jalr_op:
+ break;
+ default:
+ goto insn_ok;
+ }
+
+ /*
+ * This group contains:
+ * bltz_op, bgez_op, bltzl_op, bgezl_op,
+ * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+ */
+ case bcond_op:
+
+ /*
+ * These are unconditional and in j_format.
+ */
+ case jal_op:
+ case j_op:
+
+ /*
+ * These are conditional and in i_format.
+ */
+ case beq_op:
+ case beql_op:
+ case bne_op:
+ case bnel_op:
+ case blez_op:
+ case blezl_op:
+ case bgtz_op:
+ case bgtzl_op:
+
+ /*
+ * These are the FPA/cp1 branch instructions.
+ */
+ case cop1_op:
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ case ldc2_op: /* This is bbit032 on Octeon */
+ case swc2_op: /* This is bbit1 on Octeon */
+ case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+ return 1;
+ default:
+ break;
+ }
+insn_ok:
+ return 0;
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ union mips_instruction insn;
+ union mips_instruction prev_insn;
+ int ret = 0;
+
+ prev_insn = p->addr[-1];
+ insn = p->addr[0];
+
+ if (insn_has_delayslot(insn) || insn_has_delayslot(prev_insn)) {
+ pr_notice("Kprobes for branch and jump instructions are not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* insn: must be on special executable page on mips. */
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * In the kprobe->ainsn.insn[] array we store the original
+ * instruction at index zero and a break trap instruction at
+ * index one.
+ */
+
+ memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
+ p->ainsn.insn[1] = breakpoint2_insn;
+ p->opcode = *p->addr;
+
+out:
+ return ret;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = breakpoint_insn;
+ flush_insn_slot(p);
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+ flush_insn_slot(p);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ free_insn_slot(p->ainsn.insn, 0);
+}
+
+static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+ kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
+ kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
+ kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
+}
+
+static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
+ kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
+ kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
+}
+
+static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = p;
+ kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
+ kcb->kprobe_saved_epc = regs->cp0_epc;
+}
+
+static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ regs->cp0_status &= ~ST0_IE;
+
+ /* single step inline if the instruction is a break */
+ if (p->opcode.word == breakpoint_insn.word ||
+ p->opcode.word == breakpoint2_insn.word)
+ regs->cp0_epc = (unsigned long)p->addr;
+ else
+ regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p;
+ int ret = 0;
+ kprobe_opcode_t *addr;
+ struct kprobe_ctlblk *kcb;
+
+ addr = (kprobe_opcode_t *) regs->cp0_epc;
+
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
+ preempt_disable();
+ kcb = get_kprobe_ctlblk();
+
+ /* Check we're not actually recursing */
+ if (kprobe_running()) {
+ p = get_kprobe(addr);
+ if (p) {
+ if (kcb->kprobe_status == KPROBE_HIT_SS &&
+ p->ainsn.insn->word == breakpoint_insn.word) {
+ regs->cp0_status &= ~ST0_IE;
+ regs->cp0_status |= kcb->kprobe_saved_SR;
+ goto no_kprobe;
+ }
+ /*
+ * We have reentered the kprobe_handler(), since
+ * another probe was hit while within the handler.
+ * We here save the original kprobes variables and
+ * just single step on the instruction of the new probe
+ * without calling any user handlers.
+ */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+ } else {
+ if (addr->word != breakpoint_insn.word) {
+ /*
+ * The breakpoint instruction was removed by
+ * another cpu right after we hit, no further
+ * handling of this interrupt is appropriate
+ */
+ ret = 1;
+ goto no_kprobe;
+ }
+ p = __get_cpu_var(current_kprobe);
+ if (p->break_handler && p->break_handler(p, regs))
+ goto ss_probe;
+ }
+ goto no_kprobe;
+ }
+
+ p = get_kprobe(addr);
+ if (!p) {
+ if (addr->word != breakpoint_insn.word) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ */
+ ret = 1;
+ }
+ /* Not one of ours: let kernel handle it */
+ goto no_kprobe;
+ }
+
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ /* handler has already set things up, so skip ss setup */
+ return 1;
+ }
+
+ss_probe:
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ return 1;
+
+no_kprobe:
+ preempt_enable_no_resched();
+ return ret;
+
+}
+
+/*
+ * Called after single-stepping. p->addr is the address of the
+ * instruction whose first byte has been replaced by the "break 0"
+ * instruction. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction. The address of this
+ * copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * breakpoint trap.
+ */
+static void __kprobes resume_execution(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ unsigned long orig_epc = kcb->kprobe_saved_epc;
+ regs->cp0_epc = orig_epc + 4;
+}
+
+static inline int post_kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
+ return 0;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ resume_execution(cur, regs, kcb);
+
+ regs->cp0_status |= kcb->kprobe_saved_SR;
+
+ /* Restore back the original saved kprobes variables and continue. */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+ reset_current_kprobe();
+out:
+ preempt_enable_no_resched();
+
+ return 1;
+}
+
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ if (kcb->kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(cur, regs, kcb);
+ regs->cp0_status |= kcb->kprobe_old_SR;
+
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ }
+ return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+
+ struct die_args *args = (struct die_args *)data;
+ int ret = NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_BREAK:
+ if (kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+ case DIE_SSTEPBP:
+ if (post_kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+
+ case DIE_PAGE_FAULT:
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
+
+ if (kprobe_running()
+ && kprobe_fault_handler(args->regs, args->trapnr))
+ ret = NOTIFY_STOP;
+ preempt_enable();
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ kcb->jprobe_saved_regs = *regs;
+ kcb->jprobe_saved_sp = regs->regs[29];
+
+ memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
+ MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
+
+ regs->cp0_epc = (unsigned long)(jp->entry);
+
+ return 1;
+}
+
+/* Defined in the inline asm below. */
+void jprobe_return_end(void);
+
+void __kprobes jprobe_return(void)
+{
+ /* Assembler quirk necessitates this '0,code' business. */
+ asm volatile(
+ "break 0,%0\n\t"
+ ".globl jprobe_return_end\n"
+ "jprobe_return_end:\n"
+ : : "n" (BRK_KPROBE_BP) : "memory");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (regs->cp0_epc >= (unsigned long)jprobe_return &&
+ regs->cp0_epc <= (unsigned long)jprobe_return_end) {
+ *regs = kcb->jprobe_saved_regs;
+ memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
+ MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
+ preempt_enable_no_resched();
+
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Function return probe trampoline:
+ * - init_kprobes() establishes a probepoint here
+ * - When the probed function returns, this probe causes the
+ * handlers to fire
+ */
+static void __used kretprobe_trampoline_holder(void)
+{
+ asm volatile(
+ ".set push\n\t"
+ /* Keep the assembler from reordering and placing JR here. */
+ ".set noreorder\n\t"
+ "nop\n\t"
+ ".global kretprobe_trampoline\n"
+ "kretprobe_trampoline:\n\t"
+ "nop\n\t"
+ ".set pop"
+ : : : "memory");
+}
+
+void kretprobe_trampoline(void);
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
+
+ /* Replace the return addr with trampoline addr */
+ regs->regs[31] = (unsigned long)kretprobe_trampoline;
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *node, *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because multiple functions in the call path
+ * have a return probe installed on them, and/or more than one
+ * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ if (ri->rp && ri->rp->handler)
+ ri->rp->handler(ri, regs);
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ instruction_pointer(regs) = orig_ret_address;
+
+ reset_current_kprobe();
+ kretprobe_hash_unlock(current, &flags);
+ preempt_enable_no_resched();
+
+ hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+ /*
+ * By returning a non-zero value, we are telling
+ * kprobe_handler() that we don't want the post_handler
+ * to run (and have re-enabled preemption)
+ */
+ return 1;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
+ return 1;
+
+ return 0;
+}
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+ return register_kprobe(&trampoline_p);
+}
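A minimal usage sketch of the kprobes support added above, not part of this patch. The probed symbol ("do_fork") and the handler are illustrative assumptions; register_kprobe()/unregister_kprobe() and the pre_handler signature are the generic kprobes API that this new arch code plugs into.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Runs before the probed instruction is single-stepped. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit, epc = %lx\n", regs->cp0_epc);
	return 0;
}

static struct kprobe kp = {
	.symbol_name = "do_fork",	/* hypothetical probe target */
	.pre_handler = handler_pre,
};

static int __init kprobe_example_init(void)
{
	/* Arms the break instruction at the target; arch_prepare_kprobe()
	 * above refuses branches/jumps and instructions in a delay slot. */
	return register_kprobe(&kp);
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");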
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index 80e2ba694bab..29811f043399 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -251,7 +251,7 @@ void sp_work_handle_request(void)
memset(&tz, 0, sizeof(tz));
if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
(int)&tz, 0, 0)) == 0)
- ret.retval = tv.tv_sec;
+ ret.retval = tv.tv_sec;
break;
case MTSP_SYSCALL_EXIT:
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index c2dab140dc98..6343b4a5b835 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -341,3 +341,10 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
{
return sys_lookup_dcookie(merge_64(a0, a1), buf, len);
}
+
+SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
+ u64, a3, u64, a4, int, dfd, const char __user *, pathname)
+{
+ return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
+ dfd, pathname);
+}
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 6bfcb7a00ec6..4c968e7efb74 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -165,12 +165,12 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra)
/* arg3: Get frame pointer of current stack */
#ifdef CONFIG_FRAME_POINTER
- move a2, fp
+ move a2, fp
#else /* ! CONFIG_FRAME_POINTER */
#ifdef CONFIG_64BIT
- PTR_LA a2, PT_SIZE(sp)
+ PTR_LA a2, PT_SIZE(sp)
#else
- PTR_LA a2, (PT_SIZE+8)(sp)
+ PTR_LA a2, (PT_SIZE+8)(sp)
#endif
#endif
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 2340f11dc29c..802e6160f37e 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -103,7 +103,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
goto out_unlock;
- retval = security_task_setscheduler(p, 0, NULL);
+ retval = security_task_setscheduler(p);
if (retval)
goto out_unlock;
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
new file mode 100644
index 000000000000..2b7f3f703b83
--- /dev/null
+++ b/arch/mips/kernel/perf_event.c
@@ -0,0 +1,601 @@
+/*
+ * Linux performance counter support for MIPS.
+ *
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Author: Deng-Cheng Zhu
+ *
+ * This code is based on the implementation for ARM, which is in turn
+ * based on the sparc64 perf event code and the x86 code. Performance
+ * counter access is based on the MIPS Oprofile code. And the callchain
+ * support references the code of MIPS stacktrace.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/stacktrace.h>
+#include <asm/time.h> /* For perf_irq */
+
+/* These are for 32bit counters. For 64bit ones, define them accordingly. */
+#define MAX_PERIOD ((1ULL << 32) - 1)
+#define VALID_COUNT 0x7fffffff
+#define TOTAL_BITS 32
+#define HIGHEST_BIT 31
+
+#define MIPS_MAX_HWEVENTS 4
+
+struct cpu_hw_events {
+ /* Array of events on this cpu. */
+ struct perf_event *events[MIPS_MAX_HWEVENTS];
+
+ /*
+ * Set the bit (indexed by the counter number) when the counter
+ * is used for an event.
+ */
+ unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
+
+ /*
+ * The borrowed MSB for the performance counter. A MIPS performance
+ * counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit
+ * counters) as a factor of determining whether a counter overflow
+ * should be signaled. So here we use a separate MSB for each
+ * counter to make things easy.
+ */
+ unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
+
+ /*
+ * Software copy of the control register for each performance counter.
+ * MIPS CPUs vary in performance counters. They use this differently,
+ * and even may not use it.
+ */
+ unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
+};
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
+ .saved_ctrl = {0},
+};
+
+/* The description of MIPS performance events. */
+struct mips_perf_event {
+ unsigned int event_id;
+ /*
+ * MIPS performance counters are indexed starting from 0.
+ * CNTR_EVEN indicates the indexes of the counters to be used are
+ * even numbers.
+ */
+ unsigned int cntr_mask;
+ #define CNTR_EVEN 0x55555555
+ #define CNTR_ODD 0xaaaaaaaa
+#ifdef CONFIG_MIPS_MT_SMP
+ enum {
+ T = 0,
+ V = 1,
+ P = 2,
+ } range;
+#else
+ #define T
+ #define V
+ #define P
+#endif
+};
+
+static struct mips_perf_event raw_event;
+static DEFINE_MUTEX(raw_event_mutex);
+
+#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+struct mips_pmu {
+ const char *name;
+ int irq;
+ irqreturn_t (*handle_irq)(int irq, void *dev);
+ int (*handle_shared_irq)(void);
+ void (*start)(void);
+ void (*stop)(void);
+ int (*alloc_counter)(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *hwc);
+ u64 (*read_counter)(unsigned int idx);
+ void (*write_counter)(unsigned int idx, u64 val);
+ void (*enable_event)(struct hw_perf_event *evt, int idx);
+ void (*disable_event)(int idx);
+ const struct mips_perf_event *(*map_raw_event)(u64 config);
+ const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
+ const struct mips_perf_event (*cache_event_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+ unsigned int num_counters;
+};
+
+static const struct mips_pmu *mipspmu;
+
+static int
+mipspmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ s64 left = local64_read(&hwc->period_left);
+ s64 period = hwc->sample_period;
+ int ret = 0;
+ u64 uleft;
+ unsigned long flags;
+
+ if (unlikely(left <= -period)) {
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (unlikely(left <= 0)) {
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > (s64)MAX_PERIOD)
+ left = MAX_PERIOD;
+
+ local64_set(&hwc->prev_count, (u64)-left);
+
+ local_irq_save(flags);
+ uleft = (u64)(-left) & MAX_PERIOD;
+ uleft > VALID_COUNT ?
+ set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs);
+ mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
+ local_irq_restore(flags);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
+
+static int mipspmu_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ int err = 0;
+
+ /* To look for a free counter for this event. */
+ idx = mipspmu->alloc_counter(cpuc, hwc);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
+
+ /*
+ * If there is an event in the counter we are going to use then
+ * make sure it is disabled.
+ */
+ event->hw.idx = idx;
+ mipspmu->disable_event(idx);
+ cpuc->events[idx] = event;
+
+ /* Set the period for the event. */
+ mipspmu_event_set_period(event, hwc, idx);
+
+ /* Enable the event. */
+ mipspmu->enable_event(hwc, idx);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+out:
+ return err;
+}
+
+static void mipspmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long flags;
+ int shift = 64 - TOTAL_BITS;
+ s64 prev_raw_count, new_raw_count;
+ s64 delta;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ local_irq_save(flags);
+ /* Make the counter value be a "real" one. */
+ new_raw_count = mipspmu->read_counter(idx);
+ if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
+ new_raw_count &= VALID_COUNT;
+ clear_bit(idx, cpuc->msbs);
+ } else
+ new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
+ local_irq_restore(flags);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = (new_raw_count << shift) - (prev_raw_count << shift);
+ delta >>= shift;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+
+ return;
+}
+
+static void mipspmu_disable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+
+ WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+ /* We are working on a local event. */
+ mipspmu->disable_event(idx);
+
+ barrier();
+
+ mipspmu_event_update(event, hwc, idx);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static void mipspmu_unthrottle(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ mipspmu->enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Don't read disabled counters! */
+ if (hwc->idx < 0)
+ return;
+
+ mipspmu_event_update(event, hwc, hwc->idx);
+}
+
+static struct pmu pmu = {
+ .enable = mipspmu_enable,
+ .disable = mipspmu_disable,
+ .unthrottle = mipspmu_unthrottle,
+ .read = mipspmu_read,
+};
+
+static atomic_t active_events = ATOMIC_INIT(0);
+static DEFINE_MUTEX(pmu_reserve_mutex);
+static int (*save_perf_irq)(void);
+
+static int mipspmu_get_irq(void)
+{
+ int err;
+
+ if (mipspmu->irq >= 0) {
+ /* Request my own irq handler. */
+ err = request_irq(mipspmu->irq, mipspmu->handle_irq,
+ IRQF_DISABLED | IRQF_NOBALANCING,
+ "mips_perf_pmu", NULL);
+ if (err) {
+ pr_warning("Unable to request IRQ%d for MIPS "
+ "performance counters!\n", mipspmu->irq);
+ }
+ } else if (cp0_perfcount_irq < 0) {
+ /*
+ * We are sharing the irq number with the timer interrupt.
+ */
+ save_perf_irq = perf_irq;
+ perf_irq = mipspmu->handle_shared_irq;
+ err = 0;
+ } else {
+ pr_warning("The platform hasn't properly defined its "
+ "interrupt controller.\n");
+ err = -ENOENT;
+ }
+
+ return err;
+}
+
+static void mipspmu_free_irq(void)
+{
+ if (mipspmu->irq >= 0)
+ free_irq(mipspmu->irq, NULL);
+ else if (cp0_perfcount_irq < 0)
+ perf_irq = save_perf_irq;
+}
+
+static inline unsigned int
+mipspmu_perf_event_encode(const struct mips_perf_event *pev)
+{
+/*
+ * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
+ * event_id.
+ */
+#ifdef CONFIG_MIPS_MT_SMP
+ return ((unsigned int)pev->range << 24) |
+ (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+#else
+ return (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+#endif
+}
+
+static const struct mips_perf_event *
+mipspmu_map_general_event(int idx)
+{
+ const struct mips_perf_event *pev;
+
+ pev = ((*mipspmu->general_event_map)[idx].event_id ==
+ UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
+ &(*mipspmu->general_event_map)[idx]);
+
+ return pev;
+}
+
+static const struct mips_perf_event *
+mipspmu_map_cache_event(u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result;
+ const struct mips_perf_event *pev;
+
+ cache_type = (config >> 0) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ pev = &((*mipspmu->cache_event_map)
+ [cache_type]
+ [cache_op]
+ [cache_result]);
+
+ if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return pev;
+
+}
+
+static int validate_event(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct hw_perf_event fake_hwc = event->hw;
+
+ if (event->pmu && event->pmu != &pmu)
+ return 0;
+
+ return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
+}
+
+static int validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct cpu_hw_events fake_cpuc;
+
+ memset(&fake_cpuc, 0, sizeof(fake_cpuc));
+
+ if (!validate_event(&fake_cpuc, leader))
+ return -ENOSPC;
+
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (!validate_event(&fake_cpuc, sibling))
+ return -ENOSPC;
+ }
+
+ if (!validate_event(&fake_cpuc, event))
+ return -ENOSPC;
+
+ return 0;
+}
+
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters; they have
+ * specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_and_mutex_lock(&active_events,
+ &pmu_reserve_mutex)) {
+ /*
+ * We must not call the destroy function with interrupts
+ * disabled.
+ */
+ on_each_cpu(reset_counters,
+ (void *)(long)mipspmu->num_counters, 1);
+ mipspmu_free_irq();
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+}
+
+const struct pmu *hw_perf_event_init(struct perf_event *event)
+{
+ int err = 0;
+
+ if (!mipspmu || event->cpu >= nr_cpumask_bits ||
+ (event->cpu >= 0 && !cpu_online(event->cpu)))
+ return ERR_PTR(-ENODEV);
+
+ if (!atomic_inc_not_zero(&active_events)) {
+ if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
+ atomic_dec(&active_events);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ mutex_lock(&pmu_reserve_mutex);
+ if (atomic_read(&active_events) == 0)
+ err = mipspmu_get_irq();
+
+ if (!err)
+ atomic_inc(&active_events);
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+
+ if (err)
+ return ERR_PTR(err);
+
+ err = __hw_perf_event_init(event);
+ if (err)
+ hw_perf_event_destroy(event);
+
+ return err ? ERR_PTR(err) : &pmu;
+}
+
+void hw_perf_enable(void)
+{
+ if (mipspmu)
+ mipspmu->start();
+}
+
+void hw_perf_disable(void)
+{
+ if (mipspmu)
+ mipspmu->stop();
+}
+
+/* This is needed by specific irq handlers in perf_event_*.c */
+static void
+handle_associated_event(struct cpu_hw_events *cpuc,
+ int idx, struct perf_sample_data *data, struct pt_regs *regs)
+{
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc = &event->hw;
+
+ mipspmu_event_update(event, hwc, idx);
+ data->period = event->hw.last_period;
+ if (!mipspmu_event_set_period(event, hwc, idx))
+ return;
+
+ if (perf_event_overflow(event, 0, data, regs))
+ mipspmu->disable_event(idx);
+}
+
+#include "perf_event_mipsxx.c"
+
+/* Callchain handling code. */
+static inline void
+callchain_store(struct perf_callchain_entry *entry,
+ u64 ip)
+{
+ if (entry->nr < PERF_MAX_STACK_DEPTH)
+ entry->ip[entry->nr++] = ip;
+}
+
+/*
+ * Leave userspace callchain empty for now. When we find a way to trace
+ * the user stack callchains, we will add it here.
+ */
+static void
+perf_callchain_user(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+}
+
+static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
+ unsigned long reg29)
+{
+ unsigned long *sp = (unsigned long *)reg29;
+ unsigned long addr;
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr)) {
+ callchain_store(entry, addr);
+ if (entry->nr >= PERF_MAX_STACK_DEPTH)
+ break;
+ }
+ }
+}
+
+static void
+perf_callchain_kernel(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ if (raw_show_trace || !__kernel_text_address(pc)) {
+ unsigned long stack_page =
+ (unsigned long)task_stack_page(current);
+ if (stack_page && sp >= stack_page &&
+ sp <= stack_page + THREAD_SIZE - 32)
+ save_raw_perf_callchain(entry, sp);
+ return;
+ }
+ do {
+ callchain_store(entry, pc);
+ if (entry->nr >= PERF_MAX_STACK_DEPTH)
+ break;
+ pc = unwind_stack(current, &sp, pc, &ra);
+ } while (pc);
+#else
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ save_raw_perf_callchain(entry, sp);
+#endif
+}
+
+static void
+perf_do_callchain(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ int is_user;
+
+ if (!regs)
+ return;
+
+ is_user = user_mode(regs);
+
+ if (!current || !current->pid)
+ return;
+
+ if (is_user && current->state != TASK_RUNNING)
+ return;
+
+ if (!is_user) {
+ perf_callchain_kernel(regs, entry);
+ if (current->mm)
+ regs = task_pt_regs(current);
+ else
+ regs = NULL;
+ }
+ if (regs)
+ perf_callchain_user(regs, entry);
+}
+
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+
+struct perf_callchain_entry *
+perf_callchain(struct pt_regs *regs)
+{
+ struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
+
+ entry->nr = 0;
+ perf_do_callchain(regs, entry);
+ return entry;
+}
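For context only, not part of this patch: once this back-end is in place, events are requested through the regular perf_event_open() syscall. A minimal userspace sketch that counts CPU cycles for the current thread might look like the following (the workload in the middle is a placeholder).

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;

	/* pid = 0 (this thread), cpu = -1 (any), no group, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload being measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));
	printf("cycles: %lld\n", count);
	close(fd);
	return 0;
}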
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
new file mode 100644
index 000000000000..5c7c6fc07565
--- /dev/null
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -0,0 +1,1052 @@
+#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) || \
+ defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_SB1)
+
+#define M_CONFIG1_PC (1 << 4)
+
+#define M_PERFCTL_EXL (1UL << 0)
+#define M_PERFCTL_KERNEL (1UL << 1)
+#define M_PERFCTL_SUPERVISOR (1UL << 2)
+#define M_PERFCTL_USER (1UL << 3)
+#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)
+#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
+#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
+#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
+#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
+#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
+#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
+#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
+#define M_PERFCTL_WIDE (1UL << 30)
+#define M_PERFCTL_MORE (1UL << 31)
+
+#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
+ M_PERFCTL_KERNEL | \
+ M_PERFCTL_USER | \
+ M_PERFCTL_SUPERVISOR | \
+ M_PERFCTL_INTERRUPT_ENABLE)
+
+#ifdef CONFIG_MIPS_MT_SMP
+#define M_PERFCTL_CONFIG_MASK 0x3fff801f
+#else
+#define M_PERFCTL_CONFIG_MASK 0x1f
+#endif
+#define M_PERFCTL_EVENT_MASK 0xfe0
+
+#define M_COUNTER_OVERFLOW (1UL << 31)
+
+#ifdef CONFIG_MIPS_MT_SMP
+static int cpu_has_mipsmt_pertccounters;
+
+/*
+ * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
+ * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
+ */
+#if defined(CONFIG_HW_PERF_EVENTS)
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : smp_processor_id())
+#else
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : cpu_data[smp_processor_id()].vpe_id)
+#endif
+
+/* Copied from op_model_mipsxx.c */
+static inline unsigned int vpe_shift(void)
+{
+ if (num_possible_cpus() > 1)
+ return 1;
+
+ return 0;
+}
+#else /* !CONFIG_MIPS_MT_SMP */
+#define vpe_id() 0
+
+static inline unsigned int vpe_shift(void)
+{
+ return 0;
+}
+#endif /* CONFIG_MIPS_MT_SMP */
+
+static inline unsigned int
+counters_total_to_per_cpu(unsigned int counters)
+{
+ return counters >> vpe_shift();
+}
+
+static inline unsigned int
+counters_per_cpu_to_total(unsigned int counters)
+{
+ return counters << vpe_shift();
+}
+
+#define __define_perf_accessors(r, n, np) \
+ \
+static inline unsigned int r_c0_ ## r ## n(void) \
+{ \
+ unsigned int cpu = vpe_id(); \
+ \
+ switch (cpu) { \
+ case 0: \
+ return read_c0_ ## r ## n(); \
+ case 1: \
+ return read_c0_ ## r ## np(); \
+ default: \
+ BUG(); \
+ } \
+ return 0; \
+} \
+ \
+static inline void w_c0_ ## r ## n(unsigned int value) \
+{ \
+ unsigned int cpu = vpe_id(); \
+ \
+ switch (cpu) { \
+ case 0: \
+ write_c0_ ## r ## n(value); \
+ return; \
+ case 1: \
+ write_c0_ ## r ## np(value); \
+ return; \
+ default: \
+ BUG(); \
+ } \
+ return; \
+} \
+
+__define_perf_accessors(perfcntr, 0, 2)
+__define_perf_accessors(perfcntr, 1, 3)
+__define_perf_accessors(perfcntr, 2, 0)
+__define_perf_accessors(perfcntr, 3, 1)
+
+__define_perf_accessors(perfctrl, 0, 2)
+__define_perf_accessors(perfctrl, 1, 3)
+__define_perf_accessors(perfctrl, 2, 0)
+__define_perf_accessors(perfctrl, 3, 1)
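Each instantiation above maps a logical counter index onto a different physical register depending on which VPE is running. Roughly, __define_perf_accessors(perfcntr, 0, 2) expands to the following (a sketch, simplified from the macro above):

	static inline unsigned int r_c0_perfcntr0(void)
	{
		switch (vpe_id()) {
		case 0:
			return read_c0_perfcntr0();	/* counter 0 on this VPE */
		case 1:
			return read_c0_perfcntr2();	/* logical 0 on VPE 1 is physical counter 2 */
		default:
			BUG();
		}
		return 0;
	}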
+
+static inline int __n_counters(void)
+{
+ if (!(read_c0_config1() & M_CONFIG1_PC))
+ return 0;
+ if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
+ return 1;
+ if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
+ return 2;
+ if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
+ return 3;
+
+ return 4;
+}
+
+static inline int n_counters(void)
+{
+ int counters;
+
+ switch (current_cpu_type()) {
+ case CPU_R10000:
+ counters = 2;
+ break;
+
+ case CPU_R12000:
+ case CPU_R14000:
+ counters = 4;
+ break;
+
+ default:
+ counters = __n_counters();
+ }
+
+ return counters;
+}
+
+static void reset_counters(void *arg)
+{
+ int counters = (int)(long)arg;
+ switch (counters) {
+ case 4:
+ w_c0_perfctrl3(0);
+ w_c0_perfcntr3(0);
+ case 3:
+ w_c0_perfctrl2(0);
+ w_c0_perfcntr2(0);
+ case 2:
+ w_c0_perfctrl1(0);
+ w_c0_perfcntr1(0);
+ case 1:
+ w_c0_perfctrl0(0);
+ w_c0_perfcntr0(0);
+ }
+}
+
+static inline u64
+mipsxx_pmu_read_counter(unsigned int idx)
+{
+ switch (idx) {
+ case 0:
+ return r_c0_perfcntr0();
+ case 1:
+ return r_c0_perfcntr1();
+ case 2:
+ return r_c0_perfcntr2();
+ case 3:
+ return r_c0_perfcntr3();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static inline void
+mipsxx_pmu_write_counter(unsigned int idx, u64 val)
+{
+ switch (idx) {
+ case 0:
+ w_c0_perfcntr0(val);
+ return;
+ case 1:
+ w_c0_perfcntr1(val);
+ return;
+ case 2:
+ w_c0_perfcntr2(val);
+ return;
+ case 3:
+ w_c0_perfcntr3(val);
+ return;
+ }
+}
+
+static inline unsigned int
+mipsxx_pmu_read_control(unsigned int idx)
+{
+ switch (idx) {
+ case 0:
+ return r_c0_perfctrl0();
+ case 1:
+ return r_c0_perfctrl1();
+ case 2:
+ return r_c0_perfctrl2();
+ case 3:
+ return r_c0_perfctrl3();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static inline void
+mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
+{
+ switch (idx) {
+ case 0:
+ w_c0_perfctrl0(val);
+ return;
+ case 1:
+ w_c0_perfctrl1(val);
+ return;
+ case 2:
+ w_c0_perfctrl2(val);
+ return;
+ case 3:
+ w_c0_perfctrl3(val);
+ return;
+ }
+}
+
+#ifdef CONFIG_MIPS_MT_SMP
+static DEFINE_RWLOCK(pmuint_rwlock);
+#endif
+
+/* 24K/34K/1004K cores can share the same event map. */
+static const struct mips_perf_event mipsxxcore_event_map
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
+ [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
+ [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
+};
+
+/* 74K core has different branch event code. */
+static const struct mips_perf_event mipsxx74Kcore_event_map
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
+ [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
+ [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
+};
+
+/* 24K/34K/1004K cores can share the same cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+};
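As an example of how this table is consumed (the lookup helper mipspmu_map_cache_event(), called from __hw_perf_event_init() below, walks it), an L1 data-cache read miss resolves to event 0x0b, countable on either counter parity. A hedged sketch of the indexing:

	/* Illustrative indexing only. */
	const struct mips_perf_event *pev =
		&mipsxxcore_cache_map[C(L1D)][C(OP_READ)][C(RESULT_MISS)];
	/* pev->event_id == 0x0b, pev->cntr_mask == CNTR_EVEN | CNTR_ODD */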
+
+/* 74K core has completely different cache event map. */
+static const struct mips_perf_event mipsxx74Kcore_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
+ [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
+ [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(DTLB)] = {
+ /* 74K core does not have specific DTLB events. */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+};
+
+#ifdef CONFIG_MIPS_MT_SMP
+static void
+check_and_calc_range(struct perf_event *event,
+ const struct mips_perf_event *pev)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (event->cpu >= 0) {
+ if (pev->range > V) {
+ /*
+ * The user selected an event that is processor
+ * wide, while expecting it to be VPE wide.
+ */
+ hwc->config_base |= M_TC_EN_ALL;
+ } else {
+ /*
+ * FIXME: cpu_data[event->cpu].vpe_id reports 0
+ * for both CPUs.
+ */
+ hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
+ hwc->config_base |= M_TC_EN_VPE;
+ }
+ } else
+ hwc->config_base |= M_TC_EN_ALL;
+}
+#else
+static void
+check_and_calc_range(struct perf_event *event,
+ const struct mips_perf_event *pev)
+{
+}
+#endif
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ const struct mips_perf_event *pev;
+ int err;
+
+ /* Returning MIPS event descriptor for generic perf event. */
+ if (PERF_TYPE_HARDWARE == event->attr.type) {
+ if (event->attr.config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+ pev = mipspmu_map_general_event(event->attr.config);
+ } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
+ pev = mipspmu_map_cache_event(event->attr.config);
+ } else if (PERF_TYPE_RAW == event->attr.type) {
+ /* We are working on the global raw event. */
+ mutex_lock(&raw_event_mutex);
+ pev = mipspmu->map_raw_event(event->attr.config);
+ } else {
+ /* The event type is not (yet) supported. */
+ return -EOPNOTSUPP;
+ }
+
+ if (IS_ERR(pev)) {
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+ return PTR_ERR(pev);
+ }
+
+	/*
+	 * We allow maximum flexibility in how each individual counter
+	 * shared by a single CPU operates (the exclusion modes and the
+	 * counting range).
+	 */
+ hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
+
+ /* Calculate range bits and validate it. */
+ if (num_possible_cpus() > 1)
+ check_and_calc_range(event, pev);
+
+ hwc->event_base = mipspmu_perf_event_encode(pev);
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+
+ if (!attr->exclude_user)
+ hwc->config_base |= M_PERFCTL_USER;
+ if (!attr->exclude_kernel) {
+ hwc->config_base |= M_PERFCTL_KERNEL;
+ /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
+ hwc->config_base |= M_PERFCTL_EXL;
+ }
+ if (!attr->exclude_hv)
+ hwc->config_base |= M_PERFCTL_SUPERVISOR;
+
+ hwc->config_base &= M_PERFCTL_CONFIG_MASK;
+ /*
+	 * The event may belong to another CPU. We do not assign it a
+	 * local counter for now.
+ */
+ hwc->idx = -1;
+ hwc->config = 0;
+
+ if (!hwc->sample_period) {
+ hwc->sample_period = MAX_PERIOD;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ err = 0;
+ if (event->group_leader != event) {
+ err = validate_group(event);
+ if (err)
+ return -EINVAL;
+ }
+
+ event->destroy = hw_perf_event_destroy;
+
+ return err;
+}
+
+static void pause_local_counters(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int counters = mipspmu->num_counters;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch (counters) {
+ case 4:
+ cpuc->saved_ctrl[3] = r_c0_perfctrl3();
+ w_c0_perfctrl3(cpuc->saved_ctrl[3] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ case 3:
+ cpuc->saved_ctrl[2] = r_c0_perfctrl2();
+ w_c0_perfctrl2(cpuc->saved_ctrl[2] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ case 2:
+ cpuc->saved_ctrl[1] = r_c0_perfctrl1();
+ w_c0_perfctrl1(cpuc->saved_ctrl[1] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ case 1:
+ cpuc->saved_ctrl[0] = r_c0_perfctrl0();
+ w_c0_perfctrl0(cpuc->saved_ctrl[0] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ }
+ local_irq_restore(flags);
+}
+
+static void resume_local_counters(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int counters = mipspmu->num_counters;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch (counters) {
+ case 4:
+ w_c0_perfctrl3(cpuc->saved_ctrl[3]);
+ case 3:
+ w_c0_perfctrl2(cpuc->saved_ctrl[2]);
+ case 2:
+ w_c0_perfctrl1(cpuc->saved_ctrl[1]);
+ case 1:
+ w_c0_perfctrl0(cpuc->saved_ctrl[0]);
+ }
+ local_irq_restore(flags);
+}
+
+static int mipsxx_pmu_handle_shared_irq(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct perf_sample_data data;
+ unsigned int counters = mipspmu->num_counters;
+ unsigned int counter;
+ int handled = IRQ_NONE;
+ struct pt_regs *regs;
+
+ if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
+ return handled;
+
+ /*
+	 * Pause the local counters first, so that if we end up spinning
+	 * on the lock below, the counters are already paused. When the
+	 * lock is held because of perf_disable(), the timer interrupt
+	 * handler is simply delayed.
+ *
+ * See also mipsxx_pmu_start().
+ */
+ pause_local_counters();
+#ifdef CONFIG_MIPS_MT_SMP
+ read_lock(&pmuint_rwlock);
+#endif
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0);
+
+ switch (counters) {
+#define HANDLE_COUNTER(n) \
+ case n + 1: \
+ if (test_bit(n, cpuc->used_mask)) { \
+ counter = r_c0_perfcntr ## n(); \
+ if (counter & M_COUNTER_OVERFLOW) { \
+ w_c0_perfcntr ## n(counter & \
+ VALID_COUNT); \
+ if (test_and_change_bit(n, cpuc->msbs)) \
+ handle_associated_event(cpuc, \
+ n, &data, regs); \
+ handled = IRQ_HANDLED; \
+ } \
+ }
+ HANDLE_COUNTER(3)
+ HANDLE_COUNTER(2)
+ HANDLE_COUNTER(1)
+ HANDLE_COUNTER(0)
+ }
+
+ /*
+ * Do all the work for the pending perf events. We can do this
+ * in here because the performance counter interrupt is a regular
+ * interrupt, not NMI.
+ */
+ if (handled == IRQ_HANDLED)
+ perf_event_do_pending();
+
+#ifdef CONFIG_MIPS_MT_SMP
+ read_unlock(&pmuint_rwlock);
+#endif
+ resume_local_counters();
+ return handled;
+}
+
+static irqreturn_t
+mipsxx_pmu_handle_irq(int irq, void *dev)
+{
+ return mipsxx_pmu_handle_shared_irq();
+}
+
+static void mipsxx_pmu_start(void)
+{
+#ifdef CONFIG_MIPS_MT_SMP
+ write_unlock(&pmuint_rwlock);
+#endif
+ resume_local_counters();
+}
+
+/*
+ * MIPS performance counters can be per-TC. The control registers
+ * cannot be accessed directly across CPUs, so global control would
+ * need cross-CPU calls. on_each_cpu() could help, but we cannot
+ * guarantee that this function is called with interrupts enabled.
+ * So here we pause the local counters, then grab a rwlock and leave
+ * the counters on other CPUs alone. If a counter interrupt is raised
+ * while we own the write lock, the handler simply pauses the local
+ * counters on that CPU and spins. We also know we won't be migrated
+ * to another CPU between pausing the local counters and grabbing the
+ * lock.
+ */
+static void mipsxx_pmu_stop(void)
+{
+ pause_local_counters();
+#ifdef CONFIG_MIPS_MT_SMP
+ write_lock(&pmuint_rwlock);
+#endif
+}
+
+static int
+mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *hwc)
+{
+ int i;
+
+ /*
+	 * We only need to care about the counter mask. The range has
+	 * already been checked.
+ */
+ unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
+
+ for (i = mipspmu->num_counters - 1; i >= 0; i--) {
+		/*
+		 * Note that some MIPS perf events can be counted by both
+		 * even and odd counters, whereas many others can only be
+		 * counted by even _or_ odd counters. This introduces an
+		 * issue: when an event of the former kind takes the counter
+		 * that an event of the latter kind wants to use, counter
+		 * allocation for the latter event fails. If the two could
+		 * be swapped dynamically both would be satisfied, but we
+		 * leave this issue alone for now.
+		 */
+ if (test_bit(i, &cntr_mask) &&
+ !test_and_set_bit(i, cpuc->used_mask))
+ return i;
+ }
+
+ return -EAGAIN;
+}
+
+static void
+mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long flags;
+
+ WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+ local_irq_save(flags);
+ cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
+ (evt->config_base & M_PERFCTL_CONFIG_MASK) |
+		/* Make sure the interrupt is enabled. */
+ M_PERFCTL_INTERRUPT_ENABLE;
+ /*
+ * We do not actually let the counter run. Leave it until start().
+ */
+ local_irq_restore(flags);
+}
+
+static void
+mipsxx_pmu_disable_event(int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long flags;
+
+ WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+ local_irq_save(flags);
+ cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER;
+ mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
+ local_irq_restore(flags);
+}
+
+/* 24K */
+#define IS_UNSUPPORTED_24K_EVENT(r, b) \
+ ((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 || \
+ (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 || \
+ (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 || \
+ (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) || \
+ ((b) >= 68 && (b) <= 127))
+#define IS_BOTH_COUNTERS_24K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+
+/* 34K */
+#define IS_UNSUPPORTED_34K_EVENT(r, b) \
+ ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 || \
+ (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) || \
+ ((b) >= 68 && (b) <= 127))
+#define IS_BOTH_COUNTERS_34K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+#define IS_RANGE_P_34K_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
+ (r) == 176 || ((b) >= 50 && (b) <= 55) || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
+#endif
+
+/* 74K */
+#define IS_UNSUPPORTED_74K_EVENT(r, b) \
+ ((r) == 5 || ((r) >= 135 && (r) <= 137) || \
+ ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 || \
+ (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) || \
+ (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 || \
+ (b) == 61 || (r) == 62 || (r) == 191 || \
+ ((b) >= 64 && (b) <= 127))
+#define IS_BOTH_COUNTERS_74K_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+/* 1004K */
+#define IS_UNSUPPORTED_1004K_EVENT(r, b) \
+ ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 || \
+ (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
+#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+#define IS_RANGE_P_1004K_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
+ (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
+ (r) == 188 || (b) == 61 || (b) == 62 || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
+#endif
+
+/*
+ * Users can specify raw events 0-255, where 0-127 select events on the
+ * even counters and 128-255 select events on the odd counters; bit 7
+ * indicates the parity. For example, to request event number 15 (as
+ * listed in the CPU's user manual) on an odd counter, add 128 to 15
+ * and pass 143 (0x8F) as the event config.
+ */
+static const struct mips_perf_event *
+mipsxx_pmu_map_raw_event(u64 config)
+{
+ unsigned int raw_id = config & 0xff;
+ unsigned int base_id = raw_id & 0x7f;
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
+ return ERR_PTR(-EOPNOTSUPP);
+ raw_event.event_id = base_id;
+ if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ /*
+		 * This effectively does nothing: non-multithreading
+		 * CPUs will not check or calculate the range.
+ */
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_34K:
+ if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
+ return ERR_PTR(-EOPNOTSUPP);
+ raw_event.event_id = base_id;
+ if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_74K:
+ if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
+ return ERR_PTR(-EOPNOTSUPP);
+ raw_event.event_id = base_id;
+ if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_1004K:
+ if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
+ return ERR_PTR(-EOPNOTSUPP);
+ raw_event.event_id = base_id;
+ if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ }
+
+ return &raw_event;
+}
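Putting the encoding described above into practice, a user who wants hardware event 15 counted on an odd counter passes 0x8f as the raw config. A hedged userspace sketch (illustrative, not part of the patch):

	/* Raw event 15 on an odd counter: parity bit 0x80 | event 0x0f. */
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_RAW,
		.size   = sizeof(attr),
		.config = 0x8f,
	};

With the perf tool this corresponds roughly to "perf stat -e r8f".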
+
+static struct mips_pmu mipsxxcore_pmu = {
+ .handle_irq = mipsxx_pmu_handle_irq,
+ .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
+ .start = mipsxx_pmu_start,
+ .stop = mipsxx_pmu_stop,
+ .alloc_counter = mipsxx_pmu_alloc_counter,
+ .read_counter = mipsxx_pmu_read_counter,
+ .write_counter = mipsxx_pmu_write_counter,
+ .enable_event = mipsxx_pmu_enable_event,
+ .disable_event = mipsxx_pmu_disable_event,
+ .map_raw_event = mipsxx_pmu_map_raw_event,
+ .general_event_map = &mipsxxcore_event_map,
+ .cache_event_map = &mipsxxcore_cache_map,
+};
+
+static struct mips_pmu mipsxx74Kcore_pmu = {
+ .handle_irq = mipsxx_pmu_handle_irq,
+ .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
+ .start = mipsxx_pmu_start,
+ .stop = mipsxx_pmu_stop,
+ .alloc_counter = mipsxx_pmu_alloc_counter,
+ .read_counter = mipsxx_pmu_read_counter,
+ .write_counter = mipsxx_pmu_write_counter,
+ .enable_event = mipsxx_pmu_enable_event,
+ .disable_event = mipsxx_pmu_disable_event,
+ .map_raw_event = mipsxx_pmu_map_raw_event,
+ .general_event_map = &mipsxx74Kcore_event_map,
+ .cache_event_map = &mipsxx74Kcore_cache_map,
+};
+
+static int __init
+init_hw_perf_events(void)
+{
+ int counters, irq;
+
+ pr_info("Performance counters: ");
+
+ counters = n_counters();
+ if (counters == 0) {
+ pr_cont("No available PMU.\n");
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_MIPS_MT_SMP
+ cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
+ if (!cpu_has_mipsmt_pertccounters)
+ counters = counters_total_to_per_cpu(counters);
+#endif
+
+#ifdef MSC01E_INT_BASE
+ if (cpu_has_veic) {
+ /*
+ * Using platform specific interrupt controller defines.
+ */
+ irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
+ } else {
+#endif
+ if (cp0_perfcount_irq >= 0)
+ irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ else
+ irq = -1;
+#ifdef MSC01E_INT_BASE
+ }
+#endif
+
+ on_each_cpu(reset_counters, (void *)(long)counters, 1);
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ mipsxxcore_pmu.name = "mips/24K";
+ mipsxxcore_pmu.num_counters = counters;
+ mipsxxcore_pmu.irq = irq;
+ mipspmu = &mipsxxcore_pmu;
+ break;
+ case CPU_34K:
+ mipsxxcore_pmu.name = "mips/34K";
+ mipsxxcore_pmu.num_counters = counters;
+ mipsxxcore_pmu.irq = irq;
+ mipspmu = &mipsxxcore_pmu;
+ break;
+ case CPU_74K:
+ mipsxx74Kcore_pmu.name = "mips/74K";
+ mipsxx74Kcore_pmu.num_counters = counters;
+ mipsxx74Kcore_pmu.irq = irq;
+ mipspmu = &mipsxx74Kcore_pmu;
+ break;
+ case CPU_1004K:
+ mipsxxcore_pmu.name = "mips/1004K";
+ mipsxxcore_pmu.num_counters = counters;
+ mipsxxcore_pmu.irq = irq;
+ mipspmu = &mipsxxcore_pmu;
+ break;
+ default:
+ pr_cont("Either hardware does not support performance "
+ "counters, or not yet implemented.\n");
+ return -ENODEV;
+ }
+
+ if (mipspmu)
+ pr_cont("%s PMU enabled, %d counters available to each "
+ "CPU, irq %d%s\n", mipspmu->name, counters, irq,
+ irq < 0 ? " (share with timer interrupt)" : "");
+
+ return 0;
+}
+arch_initcall(init_hw_perf_events);
+
+#endif /* defined(CONFIG_CPU_MIPS32)... */
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
new file mode 100644
index 000000000000..e000b278f024
--- /dev/null
+++ b/arch/mips/kernel/prom.c
@@ -0,0 +1,112 @@
+/*
+ * MIPS support for CONFIG_OF-based device trees
+ *
+ * Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+#include <linux/initrd.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/page.h>
+#include <asm/prom.h>
+
+int __init early_init_dt_scan_memory_arch(unsigned long node,
+ const char *uname, int depth,
+ void *data)
+{
+ return early_init_dt_scan_memory(node, uname, depth, data);
+}
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ return add_memory_region(base, size, BOOT_MEM_RAM);
+}
+
+int __init reserve_mem_mach(unsigned long addr, unsigned long size)
+{
+ return reserve_bootmem(addr, size, BOOTMEM_DEFAULT);
+}
+
+void __init free_mem_mach(unsigned long addr, unsigned long size)
+{
+ return free_bootmem(addr, size);
+}
+
+u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return virt_to_phys(
+ __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS))
+ );
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+ unsigned long end)
+{
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
+}
+#endif
+
+/*
+ * irq_create_of_mapping - Hook to resolve OF irq specifier into a Linux irq#
+ *
+ * Currently the mapping mechanism is trivial; simple flat hwirq numbers are
+ * mapped 1:1 onto Linux irq numbers. Cascaded irq controllers are not
+ * supported.
+ */
+unsigned int irq_create_of_mapping(struct device_node *controller,
+ const u32 *intspec, unsigned int intsize)
+{
+ return intspec[0];
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
+
+void __init early_init_devtree(void *params)
+{
+ /* Setup flat device-tree pointer */
+ initial_boot_params = params;
+
+	/* Retrieve various pieces of information from the /chosen node
+	 * of the device tree, including the platform type, initrd
+	 * location and size, and more ...
+ */
+ of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
+
+ /* Scan memory nodes */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
+}
+
+void __init device_tree_init(void)
+{
+ unsigned long base, size;
+
+ if (!initial_boot_params)
+ return;
+
+ base = virt_to_phys((void *)initial_boot_params);
+ size = initial_boot_params->totalsize;
+
+	/* Before we do anything, let's reserve the DT blob */
+ reserve_mem_mach(base, size);
+
+ unflatten_device_tree();
+
+ /* free the space reserved for the dt blob */
+ free_mem_mach(base, size);
+}
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index c51b95ff8644..d21c388c0116 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -255,9 +255,13 @@ int ptrace_set_watch_regs(struct task_struct *child,
return 0;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ void __user *addrp = (void __user *) addr;
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = (void __user *) data;
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
@@ -386,7 +390,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = -EIO;
goto out;
}
- ret = put_user(tmp, (unsigned long __user *) data);
+ ret = put_user(tmp, datalp);
break;
}
@@ -478,34 +482,31 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (__s64 __user *) data);
+ ret = ptrace_getregs(child, datavp);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (__s64 __user *) data);
+ ret = ptrace_setregs(child, datavp);
break;
case PTRACE_GETFPREGS:
- ret = ptrace_getfpregs(child, (__u32 __user *) data);
+ ret = ptrace_getfpregs(child, datavp);
break;
case PTRACE_SETFPREGS:
- ret = ptrace_setfpregs(child, (__u32 __user *) data);
+ ret = ptrace_setfpregs(child, datavp);
break;
case PTRACE_GET_THREAD_AREA:
- ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *) data);
+ ret = put_user(task_thread_info(child)->tp_value, datalp);
break;
case PTRACE_GET_WATCH_REGS:
- ret = ptrace_get_watch_regs(child,
- (struct pt_watch_regs __user *) addr);
+ ret = ptrace_get_watch_regs(child, addrp);
break;
case PTRACE_SET_WATCH_REGS:
- ret = ptrace_set_watch_regs(child,
- (struct pt_watch_regs __user *) addr);
+ ret = ptrace_set_watch_regs(child, addrp);
break;
default:
@@ -536,7 +537,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
{
/* do the secure computing check first */
if (!entryexit)
- secure_computing(regs->regs[0]);
+ secure_computing(regs->regs[2]);
if (unlikely(current->audit_context) && entryexit)
audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
@@ -565,7 +566,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
out:
if (unlikely(current->audit_context) && !entryexit)
- audit_syscall_entry(audit_arch(), regs->regs[0],
+ audit_syscall_entry(audit_arch(), regs->regs[2],
regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
}
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 26f9b9ab19cc..557ef72472e0 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -468,7 +468,8 @@ static const struct file_operations rtlx_fops = {
.release = file_release,
.write = file_write,
.read = file_read,
- .poll = file_poll
+ .poll = file_poll,
+ .llseek = noop_llseek,
};
static struct irqaction rtlx_irq = {
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 17202bbe843f..fbaabad0e6e2 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -63,9 +63,9 @@ stack_done:
sw t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ lw t1, PT_R2(sp) # syscall number
negu v0 # error
- sw v0, PT_R0(sp) # set flag for syscall
- # restarting
+ sw t1, PT_R0(sp) # save it for syscall restarting
1: sw v0, PT_R2(sp) # result
o32_syscall_exit:
@@ -104,9 +104,9 @@ syscall_trace_entry:
sw t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ lw t1, PT_R2(sp) # syscall number
negu v0 # error
- sw v0, PT_R0(sp) # set flag for syscall
- # restarting
+ sw t1, PT_R0(sp) # save it for syscall restarting
1: sw v0, PT_R2(sp) # result
j syscall_exit
@@ -169,8 +169,7 @@ stackargs:
* We probably should handle this case a bit more drastic.
*/
bad_stack:
- negu v0 # error
- sw v0, PT_R0(sp)
+ li v0, EFAULT
sw v0, PT_R2(sp)
li t0, 1 # set error flag
sw t0, PT_R7(sp)
@@ -583,7 +582,10 @@ einval: li v0, -ENOSYS
sys sys_rt_tgsigqueueinfo 4
sys sys_perf_event_open 5
sys sys_accept4 4
- sys sys_recvmmsg 5
+ sys sys_recvmmsg 5 /* 4335 */
+ sys sys_fanotify_init 2
+ sys sys_fanotify_mark 6
+ sys sys_prlimit64 4
.endm
/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index a8a6c596eb04..3f4179283207 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -66,9 +66,9 @@ NESTED(handle_sys64, PT_SIZE, sp)
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall
- # restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
n64_syscall_exit:
@@ -109,8 +109,9 @@ syscall_trace_entry:
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
j syscall_exit
@@ -416,9 +417,12 @@ sys_call_table:
PTR sys_pipe2
PTR sys_inotify_init1
PTR sys_preadv
- PTR sys_pwritev /* 5390 */
+ PTR sys_pwritev /* 5290 */
PTR sys_rt_tgsigqueueinfo
PTR sys_perf_event_open
PTR sys_accept4
- PTR sys_recvmmsg
+ PTR sys_recvmmsg
+ PTR sys_fanotify_init /* 5295 */
+ PTR sys_fanotify_mark
+ PTR sys_prlimit64
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index a4faceea9d88..f08ece6d8acc 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -65,8 +65,9 @@ NESTED(handle_sysn32, PT_SIZE, sp)
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
local_irq_disable # make sure need_resched and
@@ -106,8 +107,9 @@ n32_syscall_trace_entry:
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
j syscall_exit
@@ -320,10 +322,10 @@ EXPORT(sysn32_call_table)
PTR sys_cacheflush
PTR sys_cachectl
PTR sys_sysmips
- PTR sys_io_setup /* 6200 */
+ PTR compat_sys_io_setup /* 6200 */
PTR sys_io_destroy
- PTR sys_io_getevents
- PTR sys_io_submit
+ PTR compat_sys_io_getevents
+ PTR compat_sys_io_submit
PTR sys_io_cancel
PTR sys_exit_group /* 6205 */
PTR sys_lookup_dcookie
@@ -400,24 +402,27 @@ EXPORT(sysn32_call_table)
PTR sys_ioprio_set
PTR sys_ioprio_get
PTR compat_sys_utimensat
- PTR compat_sys_signalfd /* 5280 */
+ PTR compat_sys_signalfd /* 6280 */
PTR sys_ni_syscall
PTR sys_eventfd
PTR sys_fallocate
PTR sys_timerfd_create
- PTR compat_sys_timerfd_gettime /* 5285 */
+ PTR compat_sys_timerfd_gettime /* 6285 */
PTR compat_sys_timerfd_settime
PTR sys_signalfd4
PTR sys_eventfd2
PTR sys_epoll_create1
- PTR sys_dup3 /* 5290 */
+ PTR sys_dup3 /* 6290 */
PTR sys_pipe2
PTR sys_inotify_init1
PTR sys_preadv
PTR sys_pwritev
- PTR compat_sys_rt_tgsigqueueinfo /* 5295 */
+ PTR compat_sys_rt_tgsigqueueinfo /* 6295 */
PTR sys_perf_event_open
PTR sys_accept4
PTR compat_sys_recvmmsg
- PTR sys_getdents
+ PTR sys_getdents64
+ PTR sys_fanotify_init /* 6300 */
+ PTR sys_fanotify_mark
+ PTR sys_prlimit64
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 813689ef2384..78d768a3e19d 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -93,8 +93,9 @@ NESTED(handle_sys, PT_SIZE, sp)
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
o32_syscall_exit:
@@ -142,8 +143,9 @@ trace_a_syscall:
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
j syscall_exit
@@ -154,8 +156,7 @@ trace_a_syscall:
* The stackpointer for a call with more than 4 arguments is bad.
*/
bad_stack:
- dnegu v0 # error
- sd v0, PT_R0(sp)
+ li v0, EFAULT
sd v0, PT_R2(sp)
li t0, 1 # set error flag
sd t0, PT_R7(sp)
@@ -444,10 +445,10 @@ sys_call_table:
PTR compat_sys_futex
PTR compat_sys_sched_setaffinity
PTR compat_sys_sched_getaffinity /* 4240 */
- PTR sys_io_setup
+ PTR compat_sys_io_setup
PTR sys_io_destroy
- PTR sys_io_getevents
- PTR sys_io_submit
+ PTR compat_sys_io_getevents
+ PTR compat_sys_io_submit
PTR sys_io_cancel /* 4245 */
PTR sys_exit_group
PTR sys32_lookup_dcookie
@@ -538,5 +539,8 @@ sys_call_table:
PTR compat_sys_rt_tgsigqueueinfo
PTR sys_perf_event_open
PTR sys_accept4
- PTR compat_sys_recvmmsg
+ PTR compat_sys_recvmmsg /* 4335 */
+ PTR sys_fanotify_init
+ PTR sys_32_fanotify_mark
+ PTR sys_prlimit64
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 85aef3fc6716..acd3f2c49c06 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -31,6 +31,7 @@
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/system.h>
+#include <asm/prom.h>
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
@@ -487,7 +488,9 @@ static void __init arch_mem_init(char **cmdline_p)
}
bootmem_init();
+ device_tree_init();
sparse_init();
+ plat_swiotlb_setup();
paging_init();
}
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 2099d5a4c4b7..5922342bca39 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -390,7 +390,6 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe __user *frame;
sigset_t set;
- stack_t st;
int sig;
frame = (struct rt_sigframe __user *) regs.regs[29];
@@ -411,11 +410,9 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
else if (sig)
force_sig(sig, current);
- if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
- goto badframe;
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
- do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
+ do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]);
/*
* Don't let your children do this ...
@@ -550,23 +547,26 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
struct mips_abi *abi = current->thread.abi;
void *vdso = current->mm->context.vdso;
- switch(regs->regs[0]) {
- case ERESTART_RESTARTBLOCK:
- case ERESTARTNOHAND:
- regs->regs[2] = EINTR;
- break;
- case ERESTARTSYS:
- if (!(ka->sa.sa_flags & SA_RESTART)) {
+ if (regs->regs[0]) {
+ switch(regs->regs[2]) {
+ case ERESTART_RESTARTBLOCK:
+ case ERESTARTNOHAND:
regs->regs[2] = EINTR;
break;
+ case ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->regs[2] = EINTR;
+ break;
+ }
+ /* fallthrough */
+ case ERESTARTNOINTR:
+ regs->regs[7] = regs->regs[26];
+ regs->regs[2] = regs->regs[0];
+ regs->cp0_epc -= 4;
}
- /* fallthrough */
- case ERESTARTNOINTR: /* Userland will reload $v0. */
- regs->regs[7] = regs->regs[26];
- regs->cp0_epc -= 8;
- }
- regs->regs[0] = 0; /* Don't deal with this again. */
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
if (sig_uses_siginfo(ka))
ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
@@ -575,6 +575,9 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
ret = abi->setup_frame(vdso + abi->signal_return_offset,
ka, regs, sig, oldset);
+ if (ret)
+ return ret;
+
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
@@ -622,17 +625,13 @@ static void do_signal(struct pt_regs *regs)
return;
}
- /*
- * Who's code doesn't conform to the restartable syscall convention
- * dies here!!! The li instruction, a single machine instruction,
- * must directly be followed by the syscall instruction.
- */
if (regs->regs[0]) {
if (regs->regs[2] == ERESTARTNOHAND ||
regs->regs[2] == ERESTARTSYS ||
regs->regs[2] == ERESTARTNOINTR) {
+ regs->regs[2] = regs->regs[0];
regs->regs[7] = regs->regs[26];
- regs->cp0_epc -= 8;
+ regs->cp0_epc -= 4;
}
if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
regs->regs[2] = current->thread.abi->restart;
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 2c5df818c65a..ee24d814d5b9 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -109,6 +109,7 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe_n32 __user *frame;
+ mm_segment_t old_fs;
sigset_t set;
stack_t st;
s32 sp;
@@ -143,7 +144,11 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
+ set_fs(old_fs);
+
/*
* Don't let your children do this ...
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 6cdca1956b77..383aeb95cb49 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -47,8 +47,12 @@
#endif /* CONFIG_MIPS_MT_SMTC */
volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
+
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
+EXPORT_SYMBOL(__cpu_number_map);
+
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
+EXPORT_SYMBOL(__cpu_logical_map);
/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index a95dea5459c4..39c08254b0f1 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -975,8 +975,7 @@ void ipi_decode(struct smtc_ipi *pipi)
ipi_call_interrupt();
break;
default:
- printk("Impossible SMTC IPI Argument 0x%x\n",
- (int)arg_copy);
+ printk("Impossible SMTC IPI Argument %p\n", arg_copy);
break;
}
break;
@@ -1039,7 +1038,7 @@ void deferred_smtc_ipi(void)
* but it's more efficient, given that we're already
* running down the IPI queue.
*/
- __raw_local_irq_restore(flags);
+ __arch_local_irq_restore(flags);
}
}
@@ -1191,7 +1190,7 @@ void smtc_ipi_replay(void)
/*
** But use a raw restore here to avoid recursion.
*/
- __raw_local_irq_restore(flags);
+ __arch_local_irq_restore(flags);
if (pipi) {
self_ipi(pipi);
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index dd81b0f87518..1dc6edff45e0 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -29,6 +29,8 @@
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/elf.h>
#include <asm/asm.h>
#include <asm/branch.h>
@@ -116,7 +118,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
(!vmm || addr + len <= vmm->vm_start))
return addr;
}
- addr = TASK_UNMAPPED_BASE;
+ addr = current->mm->mmap_base;
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
@@ -134,6 +136,51 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
}
}
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE) {
+ random_factor = get_random_int();
+ random_factor = random_factor << PAGE_SHIFT;
+ if (TASK_IS_32BIT_ADDR)
+ random_factor &= 0xfffffful;
+ else
+ random_factor &= 0xffffffful;
+ }
+
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+}
+
+static inline unsigned long brk_rnd(void)
+{
+ unsigned long rnd = get_random_int();
+
+ rnd = rnd << PAGE_SHIFT;
+ /* 8MB for 32bit, 256MB for 64bit */
+ if (TASK_IS_32BIT_ADDR)
+ rnd = rnd & 0x7ffffful;
+ else
+ rnd = rnd & 0xffffffful;
+
+ return rnd;
+}
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+ unsigned long base = mm->brk;
+ unsigned long ret;
+
+ ret = PAGE_ALIGN(base + brk_rnd());
+
+ if (ret < mm->brk)
+ return mm->brk;
+
+ return ret;
+}
+
SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long,
fd, off_t, offset)
@@ -207,12 +254,14 @@ asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
int error;
char * filename;
- filename = getname((char __user *) (long)regs.regs[4]);
+ filename = getname((const char __user *) (long)regs.regs[4]);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
- error = do_execve(filename, (char __user *__user *) (long)regs.regs[5],
- (char __user *__user *) (long)regs.regs[6], &regs);
+ error = do_execve(filename,
+ (const char __user *const __user *) (long)regs.regs[5],
+ (const char __user *const __user *) (long)regs.regs[6],
+ &regs);
putname(filename);
out:
@@ -389,7 +438,9 @@ asmlinkage void bad_stack(void)
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+ const char *const argv[],
+ const char *const envp[])
{
register unsigned long __a0 asm("$4") = (unsigned long) filename;
register unsigned long __a1 asm("$5") = (unsigned long) argv;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 852780868fb4..8e9fbe75894e 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -25,8 +25,11 @@
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
+#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
@@ -50,7 +53,6 @@
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
-#include <asm/irq.h>
#include <asm/uasm.h>
extern void check_wait(void);
@@ -334,7 +336,7 @@ void show_regs(struct pt_regs *regs)
__show_regs((struct pt_regs *)regs);
}
-void show_registers(const struct pt_regs *regs)
+void show_registers(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
@@ -356,9 +358,14 @@ void show_registers(const struct pt_regs *regs)
printk("\n");
}
+static int regs_to_trapnr(struct pt_regs *regs)
+{
+ return (regs->cp0_cause >> 2) & 0x1f;
+}
+
static DEFINE_SPINLOCK(die_lock);
-void __noreturn die(const char * str, struct pt_regs * regs)
+void __noreturn die(const char *str, struct pt_regs *regs)
{
static int die_counter;
int sig = SIGSEGV;
@@ -366,7 +373,7 @@ void __noreturn die(const char * str, struct pt_regs * regs)
unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */
- notify_die(DIE_OOPS, str, (struct pt_regs *)regs, SIGSEGV, 0, 0);
+ notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV);
console_verbose();
spin_lock_irq(&die_lock);
@@ -375,7 +382,7 @@ void __noreturn die(const char * str, struct pt_regs * regs)
mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
- if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
+ if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
sig = 0;
printk("%s[#%d]:\n", str, ++die_counter);
@@ -449,7 +456,7 @@ asmlinkage void do_be(struct pt_regs *regs)
printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
data ? "Data" : "Instruction",
field, regs->cp0_epc, field, regs->regs[31]);
- if (notify_die(DIE_OOPS, "bus error", regs, SIGBUS, 0, 0)
+ if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS)
== NOTIFY_STOP)
return;
@@ -570,10 +577,16 @@ static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
*/
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
- if ((opcode & OPCODE) == LL)
+ if ((opcode & OPCODE) == LL) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
return simulate_ll(regs, opcode);
- if ((opcode & OPCODE) == SC)
+ }
+ if ((opcode & OPCODE) == SC) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
return simulate_sc(regs, opcode);
+ }
return -1; /* Must be something else ... */
}
@@ -589,6 +602,8 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
int rd = (opcode & RD) >> 11;
int rt = (opcode & RT) >> 16;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
switch (rd) {
case 0: /* CPU number */
regs->regs[rt] = smp_processor_id();
@@ -624,8 +639,11 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
- if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC)
+ if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
return 0;
+ }
return -1; /* Must be something else ... */
}
@@ -650,7 +668,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
siginfo_t info;
- if (notify_die(DIE_FP, "FP exception", regs, SIGFPE, 0, 0)
+ if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
== NOTIFY_STOP)
return;
die_if_kernel("FP exception in kernel code", regs);
@@ -713,11 +731,11 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
char b[40];
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
- if (kgdb_ll_trap(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
+ if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
- if (notify_die(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
+ if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
/*
@@ -783,6 +801,25 @@ asmlinkage void do_bp(struct pt_regs *regs)
if (bcode >= (1 << 10))
bcode >>= 10;
+ /*
+	 * Notify the kprobe handlers if the instruction is likely to
+	 * pertain to them.
+ */
+ switch (bcode) {
+ case BRK_KPROBE_BP:
+ if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ return;
+ else
+ break;
+ case BRK_KPROBE_SSTEPBP:
+ if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ return;
+ else
+ break;
+ default:
+ break;
+ }
+
do_trap_or_bp(regs, bcode, "Break");
return;
@@ -815,7 +852,7 @@ asmlinkage void do_ri(struct pt_regs *regs)
unsigned int opcode = 0;
int status = -1;
- if (notify_die(DIE_RI, "RI Fault", regs, SIGSEGV, 0, 0)
+ if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL)
== NOTIFY_STOP)
return;
@@ -907,11 +944,6 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
return NOTIFY_OK;
}
-static struct notifier_block default_cu2_notifier = {
- .notifier_call = default_cu2_call,
- .priority = 0x80000000, /* Run last */
-};
-
asmlinkage void do_cpu(struct pt_regs *regs)
{
unsigned int __user *epc;
@@ -1449,6 +1481,7 @@ void __cpuinit per_cpu_trap_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
+ unsigned int hwrena = cpu_hwrena_impl_bits;
#ifdef CONFIG_MIPS_MT_SMTC
int secondaryTC = 0;
int bootTC = (cpu == 0);
@@ -1481,14 +1514,14 @@ void __cpuinit per_cpu_trap_init(void)
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
- if (cpu_has_mips_r2) {
- unsigned int enable = 0x0000000f | cpu_hwrena_impl_bits;
+ if (cpu_has_mips_r2)
+ hwrena |= 0x0000000f;
- if (!noulri && cpu_has_userlocal)
- enable |= (1 << 29);
+ if (!noulri && cpu_has_userlocal)
+ hwrena |= (1 << 29);
- write_c0_hwrena(enable);
- }
+ if (hwrena)
+ write_c0_hwrena(hwrena);
#ifdef CONFIG_MIPS_MT_SMTC
if (!secondaryTC) {
@@ -1734,5 +1767,5 @@ void __init trap_init(void)
sort_extable(__start___dbe_table, __stop___dbe_table);
- register_cu2_notifier(&default_cu2_notifier);
+ cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
}
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 69b039ca8d83..cfea1adfa153 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -78,6 +78,8 @@
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
+#include <linux/perf_event.h>
+
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
@@ -109,7 +111,8 @@ static void emulate_load_store_insn(struct pt_regs *regs,
unsigned long value;
unsigned int res;
- regs->regs[0] = 0;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
/*
* This load never faults.
@@ -513,6 +516,8 @@ asmlinkage void do_ade(struct pt_regs *regs)
unsigned int __user *pc;
mm_segment_t seg;
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
+ 1, 0, regs, regs->cp0_badvaddr);
/*
* Did we catch a fault trying to load an instruction?
* Or are we running in MIPS16 mode?
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 2bd2151c586a..3eb3cde2f661 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1192,7 +1192,8 @@ static const struct file_operations vpe_fops = {
.owner = THIS_MODULE,
.open = vpe_open,
.release = vpe_release,
- .write = vpe_write
+ .write = vpe_write,
+ .llseek = noop_llseek,
};
/* module wrapper entry points */