Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile              |   17
-rw-r--r--  arch/mips/kernel/branch.c              |   73
-rw-r--r--  arch/mips/kernel/cmpxchg.c             |  109
-rw-r--r--  arch/mips/kernel/cps-vec.S             |   11
-rw-r--r--  arch/mips/kernel/cpu-probe.c           |   83
-rw-r--r--  arch/mips/kernel/elf.c                 |   12
-rw-r--r--  arch/mips/kernel/genex.S               |   13
-rw-r--r--  arch/mips/kernel/idle.c                |    1
-rw-r--r--  arch/mips/kernel/jump_label.c          |    2
-rw-r--r--  arch/mips/kernel/mips-cm.c             |  130
-rw-r--r--  arch/mips/kernel/mips-cpc.c            |   17
-rw-r--r--  arch/mips/kernel/mips-r2-to-r6-emul.c  |   16
-rw-r--r--  arch/mips/kernel/module-rela.c         |  202
-rw-r--r--  arch/mips/kernel/module.c              |  224
-rw-r--r--  arch/mips/kernel/octeon_switch.S       |   11
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c   |   17
-rw-r--r--  arch/mips/kernel/pm-cps.c              |   17
-rw-r--r--  arch/mips/kernel/proc.c                |    9
-rw-r--r--  arch/mips/kernel/process.c             |  102
-rw-r--r--  arch/mips/kernel/ptrace.c              |   29
-rw-r--r--  arch/mips/kernel/r2300_fpu.S           |   80
-rw-r--r--  arch/mips/kernel/r2300_switch.S        |   81
-rw-r--r--  arch/mips/kernel/r4k_fpu.S             |  196
-rw-r--r--  arch/mips/kernel/r4k_switch.S          |  203
-rw-r--r--  arch/mips/kernel/r6000_fpu.S           |   99
-rw-r--r--  arch/mips/kernel/scall32-o32.S         |   13
-rw-r--r--  arch/mips/kernel/scall64-64.S          |    2
-rw-r--r--  arch/mips/kernel/scall64-n32.S         |    2
-rw-r--r--  arch/mips/kernel/scall64-o32.S         |    8
-rw-r--r--  arch/mips/kernel/setup.c               |   40
-rw-r--r--  arch/mips/kernel/signal32.c            |   19
-rw-r--r--  arch/mips/kernel/smp-bmips.c           |   10
-rw-r--r--  arch/mips/kernel/smp-cmp.c             |    6
-rw-r--r--  arch/mips/kernel/smp-cps.c             |  177
-rw-r--r--  arch/mips/kernel/smp-mt.c              |   14
-rw-r--r--  arch/mips/kernel/smp-up.c              |    5
-rw-r--r--  arch/mips/kernel/smp.c                 |   39
-rw-r--r--  arch/mips/kernel/syscall.c             |   19
-rw-r--r--  arch/mips/kernel/time.c                |   14
-rw-r--r--  arch/mips/kernel/traps.c               |   32
-rw-r--r--  arch/mips/kernel/unaligned.c           |  215
-rw-r--r--  arch/mips/kernel/vdso.c                |   15
42 files changed, 1256 insertions(+), 1128 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 9a0e37b92ce0..07f0f4a4b562 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -4,7 +4,7 @@
extra-y := head.o vmlinux.lds
-obj-y += cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \
+obj-y += cmpxchg.o cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \
process.o prom.o ptrace.o reset.o setup.o signal.o \
syscall.o time.o topology.o traps.o unaligned.o watch.o \
vdso.o cacheinfo.o
@@ -31,16 +31,19 @@ obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
obj-$(CONFIG_DEBUG_FS) += segment.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_MODULES_USE_ELF_RELA) += module-rela.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
-obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
-obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
-obj-$(CONFIG_CPU_CAVIUM_OCTEON) += r4k_fpu.o octeon_switch.o
+sw-y := r4k_switch.o
+sw-$(CONFIG_CPU_R3000) := r2300_switch.o
+sw-$(CONFIG_CPU_TX39XX) := r2300_switch.o
+sw-$(CONFIG_CPU_CAVIUM_OCTEON) := octeon_switch.o
+obj-y += $(sw-y)
+
+obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o
+obj-$(CONFIG_CPU_R3000) += r2300_fpu.o
+obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP_UP) += smp-up.o
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index f702a459a830..b79ed9af9886 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -399,7 +399,7 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
*
* @regs: Pointer to pt_regs
* @insn: branch instruction to decode
- * @returns: -EFAULT on error and forces SIGBUS, and on success
+ * @returns: -EFAULT on error and forces SIGILL, and on success
* returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
* evaluating the branch.
*
@@ -431,7 +431,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
/* Fall through */
case jr_op:
if (NO_R6EMU && insn.r_format.func == jr_op)
- goto sigill_r6;
+ goto sigill_r2r6;
regs->cp0_epc = regs->regs[insn.r_format.rs];
break;
}
@@ -446,7 +446,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
switch (insn.i_format.rt) {
case bltzl_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bltz_op:
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -459,7 +459,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bgezl_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bgez_op:
if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -473,10 +473,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bltzal_op:
case bltzall_op:
if (NO_R6EMU && (insn.i_format.rs ||
- insn.i_format.rt == bltzall_op)) {
- ret = -SIGILL;
- break;
- }
+ insn.i_format.rt == bltzall_op))
+ goto sigill_r2r6;
regs->regs[31] = epc + 8;
/*
* OK we are here either because we hit a NAL
@@ -507,10 +505,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bgezal_op:
case bgezall_op:
if (NO_R6EMU && (insn.i_format.rs ||
- insn.i_format.rt == bgezall_op)) {
- ret = -SIGILL;
- break;
- }
+ insn.i_format.rt == bgezall_op))
+ goto sigill_r2r6;
regs->regs[31] = epc + 8;
/*
* OK we are here either because we hit a BAL
@@ -556,6 +552,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
/*
* These are unconditional and in j_format.
*/
+ case jalx_op:
case jal_op:
regs->regs[31] = regs->cp0_epc + 8;
case j_op:
@@ -573,7 +570,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
*/
case beql_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case beq_op:
if (regs->regs[insn.i_format.rs] ==
regs->regs[insn.i_format.rt]) {
@@ -587,7 +584,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bnel_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bne_op:
if (regs->regs[insn.i_format.rs] !=
regs->regs[insn.i_format.rt]) {
@@ -601,7 +598,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case blezl_op: /* not really i_format */
if (!insn.i_format.rt && NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case blez_op:
/*
* Compact branches for R6 for the
@@ -636,7 +633,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bgtzl_op:
if (!insn.i_format.rt && NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bgtz_op:
/*
* Compact branches for R6 for the
@@ -774,35 +771,27 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
#else
case bc6_op:
/* Only valid for MIPS R6 */
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
regs->cp0_epc += 8;
break;
case balc6_op:
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
/* Compact branch: BALC */
regs->regs[31] = epc + 4;
epc += 4 + (insn.i_format.simmediate << 2);
regs->cp0_epc = epc;
break;
case pop66_op:
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
/* Compact branch: BEQZC || JIC */
regs->cp0_epc += 8;
break;
case pop76_op:
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
/* Compact branch: BNEZC || JIALC */
if (!insn.i_format.rs) {
/* JIALC: set $31/ra */
@@ -814,10 +803,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case pop10_op:
case pop30_op:
/* Only valid for MIPS R6 */
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
/*
* Compact branches:
* bovc, beqc, beqzalc, bnvc, bnec, bnezlac
@@ -831,12 +818,18 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
return ret;
sigill_dsp:
- printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
- force_sig(SIGBUS, current);
+ pr_debug("%s: DSP branch but not DSP ASE - sending SIGILL.\n",
+ current->comm);
+ force_sig(SIGILL, current);
+ return -EFAULT;
+sigill_r2r6:
+ pr_debug("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n",
+ current->comm);
+ force_sig(SIGILL, current);
return -EFAULT;
sigill_r6:
- pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n",
- current->comm);
+ pr_debug("%s: R6 branch but no MIPSr6 ISA support - sending SIGILL.\n",
+ current->comm);
force_sig(SIGILL, current);
return -EFAULT;
}
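
All of the taken-branch cases above compute the target the same way: the 16-bit immediate is a signed word offset relative to the delay slot, i.e. EPC + 4. A minimal userspace C sketch of that arithmetic (illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Target of a taken MIPS I-format branch: the immediate is a signed
 * word offset from the delay slot, hence the repeated
 * "epc + 4 + (simmediate << 2)" in the hunks above.
 */
static uint32_t branch_target(uint32_t epc, int16_t simmediate)
{
	return epc + 4 + (int32_t)simmediate * 4;
}

int main(void)
{
	/* An offset of -1 targets the branch's own delay slot. */
	printf("0x%08x\n", branch_target(0x1000, -1));	/* 0x00001000 */
	return 0;
}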
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
new file mode 100644
index 000000000000..7730f1d3434f
--- /dev/null
+++ b/arch/mips/kernel/cmpxchg.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2017 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <asm/cmpxchg.h>
+
+unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
+{
+ u32 old32, new32, load32, mask;
+ volatile u32 *ptr32;
+ unsigned int shift;
+
+ /* Check that ptr is naturally aligned */
+ WARN_ON((unsigned long)ptr & (size - 1));
+
+ /* Mask value to the correct size. */
+ mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+ val &= mask;
+
+ /*
+ * Calculate a shift & mask that correspond to the value we wish to
+ * exchange within the naturally aligned 4 byte integer that includes
+ * it.
+ */
+ shift = (unsigned long)ptr & 0x3;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ shift ^= sizeof(u32) - size;
+ shift *= BITS_PER_BYTE;
+ mask <<= shift;
+
+ /*
+ * Calculate a pointer to the naturally aligned 4 byte integer that
+ * includes our byte of interest, and load its value.
+ */
+ ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+ load32 = *ptr32;
+
+ do {
+ old32 = load32;
+ new32 = (load32 & ~mask) | (val << shift);
+ load32 = cmpxchg(ptr32, old32, new32);
+ } while (load32 != old32);
+
+ return (load32 & mask) >> shift;
+}
+
+unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size)
+{
+ u32 mask, old32, new32, load32;
+ volatile u32 *ptr32;
+ unsigned int shift;
+ u8 load;
+
+ /* Check that ptr is naturally aligned */
+ WARN_ON((unsigned long)ptr & (size - 1));
+
+ /* Mask inputs to the correct size. */
+ mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+ old &= mask;
+ new &= mask;
+
+ /*
+ * Calculate a shift & mask that correspond to the value we wish to
+ * compare & exchange within the naturally aligned 4 byte integer
+ * that includes it.
+ */
+ shift = (unsigned long)ptr & 0x3;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ shift ^= sizeof(u32) - size;
+ shift *= BITS_PER_BYTE;
+ mask <<= shift;
+
+ /*
+ * Calculate a pointer to the naturally aligned 4 byte integer that
+ * includes our byte of interest, and load its value.
+ */
+ ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+ load32 = *ptr32;
+
+ while (true) {
+ /*
+ * Ensure the byte we want to exchange matches the expected
+ * old value, and if not then bail.
+ */
+ load = (load32 & mask) >> shift;
+ if (load != old)
+ return load;
+
+ /*
+ * Calculate the old & new values of the naturally aligned
+ * 4 byte integer that include the byte we want to exchange.
+ * Attempt to exchange the old value for the new value, and
+ * return if we succeed.
+ */
+ old32 = (load32 & ~mask) | (old << shift);
+ new32 = (load32 & ~mask) | (new << shift);
+ load32 = cmpxchg(ptr32, old32, new32);
+ if (load32 == old32)
+ return old;
+ }
+}
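
The shift & mask derivation above is the heart of both helpers. A standalone userspace sketch of the same arithmetic (BIG_ENDIAN_CPU stands in for CONFIG_CPU_BIG_ENDIAN; illustrative only):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE	8
#define BIG_ENDIAN_CPU	0	/* stands in for CONFIG_CPU_BIG_ENDIAN */

/* Reproduce the shift & mask that __xchg_small()/__cmpxchg_small()
 * derive for a 1- or 2-byte value inside its aligned 4-byte container.
 */
static void small_op_fields(uintptr_t ptr, unsigned int size,
			    unsigned int *shift, uint32_t *mask)
{
	*mask = (uint32_t)((1ULL << (size * BITS_PER_BYTE)) - 1);

	*shift = ptr & 0x3;
	if (BIG_ENDIAN_CPU)
		*shift ^= sizeof(uint32_t) - size;
	*shift *= BITS_PER_BYTE;
	*mask <<= *shift;
}

int main(void)
{
	unsigned int shift;
	uint32_t mask;

	/* A byte at ...3 occupies bits 31:24 of the little-endian u32. */
	small_op_fields(0x1003, 1, &shift, &mask);
	printf("shift=%u mask=0x%08x\n", shift, mask);
	return 0;
}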
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index a00e87b0256d..d173b49f212d 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -22,6 +22,7 @@
#define GCR_CL_COHERENCE_OFS 0x2008
#define GCR_CL_ID_OFS 0x2028
+#define CPC_CL_VC_STOP_OFS 0x2020
#define CPC_CL_VC_RUN_OFS 0x2028
.extern mips_cm_base
@@ -326,8 +327,8 @@ LEAF(mips_cps_get_bootcfg)
* to handle contiguous VP numbering, but no such systems yet
* exist.
*/
- mfc0 t9, $3, 1
- andi t9, t9, 0xff
+ mfc0 t9, CP0_GLOBALNUMBER
+ andi t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
has_mt ta2, 1f
@@ -376,8 +377,12 @@ LEAF(mips_cps_boot_vpes)
PTR_LI t2, UNCAC_BASE
PTR_ADD t1, t1, t2
- /* Set VC_RUN to the VPE mask */
+ /* Start any other VPs that ought to be running */
PTR_S ta2, CPC_CL_VC_RUN_OFS(t1)
+
+ /* Ensure this VP stops running if it shouldn't be */
+ not ta2
+ PTR_S ta2, CPC_CL_VC_STOP_OFS(t1)
ehb
#elif defined(CONFIG_MIPS_MT)
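
The VC_RUN/VC_STOP writes above use complementary masks: run the VPs in ta2, stop every other VP. A trivial C illustration of that relationship (not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* VC_RUN takes the mask of VPs that should run; VC_STOP takes its
 * complement (the "not ta2" above), stopping everything else.
 */
int main(void)
{
	uint32_t run = (1 << 0) | (1 << 1);	/* run VP0 and VP1 */
	uint32_t stop = ~run;

	printf("VC_RUN=0x%08x VC_STOP=0x%08x\n", run, stop);
	return 0;
}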
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 1aba27786bd5..cf3fd549e16d 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -326,7 +326,7 @@ static int __init fpu_disable(char *s)
__setup("nofpu", fpu_disable);
-int mips_dsp_disabled;
+static int mips_dsp_disabled;
static int __init dsp_disable(char *s)
{
@@ -564,6 +564,7 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags)
back_to_back_c0_hazard();
break;
case CPU_I6400:
+ case CPU_I6500:
/* There's no way to disable the FTLB */
if (!(flags & FTLB_EN))
return 1;
@@ -844,6 +845,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
c->options |= MIPS_CPU_MVH;
if (cpu_has_mips_r6 && (config5 & MIPS_CONF5_VP))
c->options |= MIPS_CPU_VP;
+ if (config5 & MIPS_CONF5_CA2)
+ c->ases |= MIPS_ASE_MIPS16E2;
return config5 & MIPS_CONF_M;
}
@@ -916,9 +919,12 @@ static void decode_configs(struct cpuinfo_mips *c)
#ifndef CONFIG_MIPS_CPS
if (cpu_has_mips_r2_r6) {
- c->core = get_ebase_cpunum();
+ unsigned int core;
+
+ core = get_ebase_cpunum();
if (cpu_has_mipsmt)
- c->core >>= fls(core_nvpes()) - 1;
+ core >>= fls(core_nvpes()) - 1;
+ cpu_set_core(c, core);
}
#endif
}
@@ -1391,24 +1397,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
- case PRID_IMP_R6000:
- c->cputype = CPU_R6000;
- __cpu_name[cpu] = "R6000";
- set_isa(c, MIPS_CPU_ISA_II);
- c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS;
- c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
- MIPS_CPU_LLSC;
- c->tlbsize = 32;
- break;
- case PRID_IMP_R6000A:
- c->cputype = CPU_R6000A;
- __cpu_name[cpu] = "R6000A";
- set_isa(c, MIPS_CPU_ISA_II);
- c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS;
- c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
- MIPS_CPU_LLSC;
- c->tlbsize = 32;
- break;
case PRID_IMP_RM7000:
c->cputype = CPU_RM7000;
__cpu_name[cpu] = "RM7000";
@@ -1635,6 +1623,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_I6400;
__cpu_name[cpu] = "MIPS I6400";
break;
+ case PRID_IMP_I6500:
+ c->cputype = CPU_I6500;
+ __cpu_name[cpu] = "MIPS I6500";
+ break;
case PRID_IMP_M5150:
c->cputype = CPU_M5150;
__cpu_name[cpu] = "MIPS M5150";
@@ -1648,6 +1640,17 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
decode_configs(c);
spram_config();
+
+ switch (__get_cpu_type(c->cputype)) {
+ case CPU_I6500:
+ c->options |= MIPS_CPU_SHARED_FTLB_ENTRIES;
+ /* fall-through */
+ case CPU_I6400:
+ c->options |= MIPS_CPU_SHARED_FTLB_RAM;
+ /* fall-through */
+ default:
+ break;
+ }
}
static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
@@ -1831,6 +1834,12 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
+ case PRID_REV_LOONGSON3A_R3:
+ c->cputype = CPU_LOONGSON3;
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ break;
}
decode_configs(c);
@@ -2089,3 +2098,35 @@ void cpu_report(void)
if (cpu_has_msa)
pr_info("MSA revision is: %08x\n", c->msa_id);
}
+
+void cpu_set_cluster(struct cpuinfo_mips *cpuinfo, unsigned int cluster)
+{
+ /* Ensure the cluster number fits in the field */
+ WARN_ON(cluster > (MIPS_GLOBALNUMBER_CLUSTER >>
+ MIPS_GLOBALNUMBER_CLUSTER_SHF));
+
+ cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CLUSTER;
+ cpuinfo->globalnumber |= cluster << MIPS_GLOBALNUMBER_CLUSTER_SHF;
+}
+
+void cpu_set_core(struct cpuinfo_mips *cpuinfo, unsigned int core)
+{
+ /* Ensure the core number fits in the field */
+ WARN_ON(core > (MIPS_GLOBALNUMBER_CORE >> MIPS_GLOBALNUMBER_CORE_SHF));
+
+ cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CORE;
+ cpuinfo->globalnumber |= core << MIPS_GLOBALNUMBER_CORE_SHF;
+}
+
+void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe)
+{
+ /* Ensure the VP(E) ID fits in the field */
+ WARN_ON(vpe > (MIPS_GLOBALNUMBER_VP >> MIPS_GLOBALNUMBER_VP_SHF));
+
+ /* Ensure we're not using VP(E)s without support */
+ WARN_ON(vpe && !IS_ENABLED(CONFIG_MIPS_MT_SMP) &&
+ !IS_ENABLED(CONFIG_CPU_MIPSR6));
+
+ cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_VP;
+ cpuinfo->globalnumber |= vpe << MIPS_GLOBALNUMBER_VP_SHF;
+}
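
The three setters above pack cluster, core and VP(E) IDs into a single globalnumber word with mask/shift pairs. A userspace sketch of the encoding (the field positions mirror the MIPSr6 GlobalNumber register, but the exact widths here are assumptions of the illustration):

#include <stdint.h>
#include <stdio.h>

#define GN_CLUSTER_SHF	16
#define GN_CLUSTER	(0x3fu << GN_CLUSTER_SHF)	/* assumed width */
#define GN_CORE_SHF	8
#define GN_CORE		(0xffu << GN_CORE_SHF)		/* assumed width */
#define GN_VP_SHF	0
#define GN_VP		(0xffu << GN_VP_SHF)		/* assumed width */

/* Clear a field and insert a new value, as the setters above do. */
static uint32_t set_field(uint32_t gn, uint32_t mask,
			  unsigned int shf, unsigned int val)
{
	return (gn & ~mask) | ((uint32_t)val << shf);
}

int main(void)
{
	uint32_t gn = 0;

	gn = set_field(gn, GN_CLUSTER, GN_CLUSTER_SHF, 1);
	gn = set_field(gn, GN_CORE, GN_CORE_SHF, 2);
	gn = set_field(gn, GN_VP, GN_VP_SHF, 3);
	printf("globalnumber=0x%06x\n", gn);	/* 0x010203 */
	return 0;
}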
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index 5c429d70e17f..0828d6d963b7 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -87,6 +87,7 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
bool elf32;
u32 flags;
int ret;
+ loff_t pos;
elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
@@ -108,21 +109,16 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
if (phdr32->p_filesz < sizeof(abiflags))
return -EINVAL;
-
- ret = kernel_read(elf, phdr32->p_offset,
- (char *)&abiflags,
- sizeof(abiflags));
+ pos = phdr32->p_offset;
} else {
if (phdr64->p_type != PT_MIPS_ABIFLAGS)
return 0;
if (phdr64->p_filesz < sizeof(abiflags))
return -EINVAL;
-
- ret = kernel_read(elf, phdr64->p_offset,
- (char *)&abiflags,
- sizeof(abiflags));
+ pos = phdr64->p_offset;
}
+ ret = kernel_read(elf, &abiflags, sizeof(abiflags), &pos);
if (ret < 0)
return ret;
if (ret != sizeof(abiflags))
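
The elf.c change above migrates to the newer kernel_read() convention, which takes the file position by reference and advances it rather than accepting a one-shot offset. A minimal sketch of the migrated pattern (the helper name is hypothetical):

#include <linux/fs.h>

/* Read a blob at a given offset using the position-by-reference
 * kernel_read(); pos is advanced by the number of bytes read.
 */
static int read_blob_at(struct file *elf, void *buf, size_t len, loff_t off)
{
	loff_t pos = off;
	ssize_t ret = kernel_read(elf, buf, len, &pos);

	if (ret < 0)
		return ret;
	if (ret != (ssize_t)len)
		return -EIO;
	return 0;
}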
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index ae810da4d499..37b9383eacd3 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -150,6 +150,7 @@ LEAF(__r4k_wait)
.align 5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
+ .cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
/*
* Check to see if the interrupted code has just disabled
@@ -181,7 +182,7 @@ NESTED(handle_int, PT_SIZE, sp)
1:
.set pop
#endif
- SAVE_ALL
+ SAVE_ALL docfi=1
CLI
TRACE_IRQS_OFF
@@ -269,8 +270,8 @@ NESTED(except_vec_ejtag_debug, 0, sp)
*/
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
- SAVE_SOME
- SAVE_AT
+ SAVE_SOME docfi=1
+ SAVE_AT docfi=1
.set push
.set noreorder
PTR_LA v1, except_vec_vi_handler
@@ -396,6 +397,7 @@ NESTED(except_vec_nmi, 0, sp)
__FINIT
NESTED(nmi_handler, PT_SIZE, sp)
+ .cfi_signal_frame
.set push
.set noat
/*
@@ -478,6 +480,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.macro __BUILD_HANDLER exception handler clear verbose ext
.align 5
NESTED(handle_\exception, PT_SIZE, sp)
+ .cfi_signal_frame
.set noat
SAVE_ALL
FEXPORT(handle_\exception\ext)
@@ -485,8 +488,8 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set at
__BUILD_\verbose \exception
move a0, sp
- PTR_LA ra, ret_from_exception
- j do_\handler
+ jal do_\handler
+ j ret_from_exception
END(handle_\exception)
.endm
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 60ab4c44d305..7c246b69c545 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -11,6 +11,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index 3e586daa3a32..32e3168316cd 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -58,7 +58,6 @@ void arch_jump_label_transform(struct jump_entry *e,
insn.word = 0; /* nop */
}
- get_online_cpus();
mutex_lock(&text_mutex);
if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
insn_p->halfword[0] = insn.word >> 16;
@@ -70,7 +69,6 @@ void arch_jump_label_transform(struct jump_entry *e,
(unsigned long)insn_p + sizeof(*insn_p));
mutex_unlock(&text_mutex);
- put_online_cpus();
}
#endif /* HAVE_JUMP_LABEL */
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 659e6d3ae335..e91c8c4e2eb5 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -12,10 +12,10 @@
#include <linux/percpu.h>
#include <linux/spinlock.h>
-#include <asm/mips-cm.h>
+#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
-void __iomem *mips_cm_base;
+void __iomem *mips_gcr_base;
void __iomem *mips_cm_l2sync_base;
int mips_cm_is64;
@@ -167,8 +167,8 @@ phys_addr_t __mips_cm_l2sync_phys_base(void)
* current location.
*/
base_reg = read_gcr_l2_only_sync_base();
- if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK)
- return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK;
+ if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN)
+ return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE;
/* Default to following the CM */
return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
@@ -183,19 +183,19 @@ static void mips_cm_probe_l2sync(void)
phys_addr_t addr;
/* L2-only sync was introduced with CM major revision 6 */
- major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >>
- CM_GCR_REV_MAJOR_SHF;
+ major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR) >>
+ __ffs(CM_GCR_REV_MAJOR);
if (major_rev < 6)
return;
/* Find a location for the L2 sync region */
addr = mips_cm_l2sync_phys_base();
- BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK) != addr);
+ BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE) != addr);
if (!addr)
return;
/* Set the region base address & enable it */
- write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK);
+ write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN);
/* Map the region */
mips_cm_l2sync_base = ioremap_nocache(addr, MIPS_CM_L2SYNC_SIZE);
@@ -211,41 +211,39 @@ int mips_cm_probe(void)
* No need to probe again if we have already been
* here before.
*/
- if (mips_cm_base)
+ if (mips_gcr_base)
return 0;
addr = mips_cm_phys_base();
- BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr);
+ BUG_ON((addr & CM_GCR_BASE_GCRBASE) != addr);
if (!addr)
return -ENODEV;
- mips_cm_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE);
- if (!mips_cm_base)
+ mips_gcr_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE);
+ if (!mips_gcr_base)
return -ENXIO;
/* sanity check that we're looking at a CM */
base_reg = read_gcr_base();
- if ((base_reg & CM_GCR_BASE_GCRBASE_MSK) != addr) {
+ if ((base_reg & CM_GCR_BASE_GCRBASE) != addr) {
pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n",
(unsigned long)addr);
- mips_cm_base = NULL;
+ mips_gcr_base = NULL;
return -ENODEV;
}
/* set default target to memory */
- base_reg &= ~CM_GCR_BASE_CMDEFTGT_MSK;
- base_reg |= CM_GCR_BASE_CMDEFTGT_MEM;
- write_gcr_base(base_reg);
+ change_gcr_base(CM_GCR_BASE_CMDEFTGT, CM_GCR_BASE_CMDEFTGT_MEM);
/* disable CM regions */
- write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
- write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
- write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
- write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
- write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
- write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
- write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
- write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+ write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR);
+ write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK);
+ write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR);
+ write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK);
+ write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR);
+ write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK);
+ write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR);
+ write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK);
/* probe for an L2-only sync region */
mips_cm_probe_l2sync();
@@ -259,22 +257,54 @@ int mips_cm_probe(void)
return 0;
}
-void mips_cm_lock_other(unsigned int core, unsigned int vp)
+void mips_cm_lock_other(unsigned int cluster, unsigned int core,
+ unsigned int vp, unsigned int block)
{
- unsigned curr_core;
+ unsigned int curr_core, cm_rev;
u32 val;
+ cm_rev = mips_cm_revision();
preempt_disable();
- curr_core = current_cpu_data.core;
- spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
- per_cpu(cm_core_lock_flags, curr_core));
- if (mips_cm_revision() >= CM_REV_CM3) {
- val = core << CM3_GCR_Cx_OTHER_CORE_SHF;
- val |= vp << CM3_GCR_Cx_OTHER_VP_SHF;
+ if (cm_rev >= CM_REV_CM3) {
+ val = core << __ffs(CM3_GCR_Cx_OTHER_CORE);
+ val |= vp << __ffs(CM3_GCR_Cx_OTHER_VP);
+
+ if (cm_rev >= CM_REV_CM3_5) {
+ val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
+ val |= cluster << __ffs(CM_GCR_Cx_OTHER_CLUSTER);
+ val |= block << __ffs(CM_GCR_Cx_OTHER_BLOCK);
+ } else {
+ WARN_ON(cluster != 0);
+ WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ }
+
+ /*
+ * We need to disable interrupts in SMP systems in order to
+ * ensure that we don't interrupt the caller with code which
+ * may modify the redirect register. We do so here in a
+ * slightly obscure way by using a spin lock, since this has
+ * the neat property of also catching any nested uses of
+ * mips_cm_lock_other() leading to a deadlock or a nice warning
+ * with lockdep enabled.
+ */
+ spin_lock_irqsave(this_cpu_ptr(&cm_core_lock),
+ *this_cpu_ptr(&cm_core_lock_flags));
} else {
- BUG_ON(vp != 0);
- val = core << CM_GCR_Cx_OTHER_CORENUM_SHF;
+ WARN_ON(cluster != 0);
+ WARN_ON(vp != 0);
+ WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+
+ /*
+ * We only have a GCR_CL_OTHER per core in systems with
+ * CM 2.5 & older, so have to ensure other VP(E)s don't
+ * race with us.
+ */
+ curr_core = cpu_core(&current_cpu_data);
+ spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
+ per_cpu(cm_core_lock_flags, curr_core));
+
+ val = core << __ffs(CM_GCR_Cx_OTHER_CORENUM);
}
write_gcr_cl_other(val);
@@ -288,10 +318,17 @@ void mips_cm_lock_other(unsigned int core, unsigned int vp)
void mips_cm_unlock_other(void)
{
- unsigned curr_core = current_cpu_data.core;
+ unsigned int curr_core;
+
+ if (mips_cm_revision() < CM_REV_CM3) {
+ curr_core = cpu_core(&current_cpu_data);
+ spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
+ per_cpu(cm_core_lock_flags, curr_core));
+ } else {
+ spin_unlock_irqrestore(this_cpu_ptr(&cm_core_lock),
+ *this_cpu_ptr(&cm_core_lock_flags));
+ }
- spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
- per_cpu(cm_core_lock_flags, curr_core));
preempt_enable();
}
@@ -306,13 +343,13 @@ void mips_cm_error_report(void)
return;
revision = mips_cm_revision();
+ cm_error = read_gcr_error_cause();
+ cm_addr = read_gcr_error_addr();
+ cm_other = read_gcr_error_mult();
if (revision < CM_REV_CM3) { /* CM2 */
- cm_error = read_gcr_error_cause();
- cm_addr = read_gcr_error_addr();
- cm_other = read_gcr_error_mult();
- cause = cm_error >> CM_GCR_ERROR_CAUSE_ERRTYPE_SHF;
- ocause = cm_other >> CM_GCR_ERROR_MULT_ERR2ND_SHF;
+ cause = cm_error >> __ffs(CM_GCR_ERROR_CAUSE_ERRTYPE);
+ ocause = cm_other >> __ffs(CM_GCR_ERROR_MULT_ERR2ND);
if (!cause)
return;
@@ -354,11 +391,8 @@ void mips_cm_error_report(void)
ulong core_id_bits, vp_id_bits, cmd_bits, cmd_group_bits;
ulong cm3_cca_bits, mcp_bits, cm3_tr_bits, sched_bit;
- cm_error = read64_gcr_error_cause();
- cm_addr = read64_gcr_error_addr();
- cm_other = read64_gcr_error_mult();
- cause = cm_error >> CM3_GCR_ERROR_CAUSE_ERRTYPE_SHF;
- ocause = cm_other >> CM_GCR_ERROR_MULT_ERR2ND_SHF;
+ cause = cm_error >> __ffs64(CM3_GCR_ERROR_CAUSE_ERRTYPE);
+ ocause = cm_other >> __ffs(CM_GCR_ERROR_MULT_ERR2ND);
if (!cause)
return;
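
Throughout this file the paired _MSK/_SHF macros are replaced by single-mask definitions, with the shift recovered at the use site via __ffs(). A userspace stand-in for the idiom, using __builtin_ctz in place of the kernel's __ffs (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define CM_GCR_REV_MAJOR	0x0000ff00u	/* one mask, no _SHF twin */

/* Extract a register field given only its mask: the shift is the
 * position of the mask's lowest set bit.
 */
static unsigned int field_get(uint32_t reg, uint32_t mask)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	/* A GCR_REV value of 0x0800 decodes as major revision 8. */
	printf("major=%u\n", field_get(0x0800, CM_GCR_REV_MAJOR));
	return 0;
}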
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index a4964c334cab..f66b05ebf637 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -12,8 +12,7 @@
#include <linux/percpu.h>
#include <linux/spinlock.h>
-#include <asm/mips-cm.h>
-#include <asm/mips-cpc.h>
+#include <asm/mips-cps.h>
void __iomem *mips_cpc_base;
@@ -40,13 +39,13 @@ static phys_addr_t mips_cpc_phys_base(void)
if (!mips_cm_present())
return 0;
- if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX_MSK))
+ if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX))
return 0;
/* If the CPC is already enabled, leave it so */
cpc_base = read_gcr_cpc_base();
- if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
- return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
+ if (cpc_base & CM_GCR_CPC_BASE_CPCEN)
+ return cpc_base & CM_GCR_CPC_BASE_CPCBASE;
/* Otherwise, use the default address */
cpc_base = mips_cpc_default_phys_base();
@@ -54,7 +53,7 @@ static phys_addr_t mips_cpc_phys_base(void)
return cpc_base;
/* Enable the CPC, mapped at the default address */
- write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
+ write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN);
return cpc_base;
}
@@ -86,10 +85,10 @@ void mips_cpc_lock_other(unsigned int core)
return;
preempt_disable();
- curr_core = current_cpu_data.core;
+ curr_core = cpu_core(&current_cpu_data);
spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
per_cpu(cpc_core_lock_flags, curr_core));
- write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
+ write_cpc_cl_other(core << __ffs(CPC_Cx_OTHER_CORENUM));
/*
* Ensure the core-other region reflects the appropriate core &
@@ -106,7 +105,7 @@ void mips_cpc_unlock_other(void)
/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
return;
- curr_core = current_cpu_data.core;
+ curr_core = cpu_core(&current_cpu_data);
spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
per_cpu(cpc_core_lock_flags, curr_core));
preempt_enable();
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index ae64c8f56a8c..eb18b186e858 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -46,9 +46,11 @@
#define LL "ll "
#define SC "sc "
-DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
-DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
-DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
+#ifdef CONFIG_DEBUG_FS
+static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
+static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
+static DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
+#endif
extern const unsigned int fpucondbit[8];
@@ -600,7 +602,7 @@ static int ddivu_func(struct pt_regs *regs, u32 ir)
}
/* R6 removed instructions for the SPECIAL opcode */
-static struct r2_decoder_table spec_op_table[] = {
+static const struct r2_decoder_table spec_op_table[] = {
{ 0xfc1ff83f, 0x00000008, jr_func },
{ 0xfc00ffff, 0x00000018, mult_func },
{ 0xfc00ffff, 0x00000019, multu_func },
@@ -867,7 +869,7 @@ static int dclo_func(struct pt_regs *regs, u32 ir)
}
/* R6 removed instructions for the SPECIAL2 opcode */
-static struct r2_decoder_table spec2_op_table[] = {
+static const struct r2_decoder_table spec2_op_table[] = {
{ 0xfc00ffff, 0x70000000, madd_func },
{ 0xfc00ffff, 0x70000001, maddu_func },
{ 0xfc0007ff, 0x70000002, mul_func },
@@ -881,9 +883,9 @@ static struct r2_decoder_table spec2_op_table[] = {
};
static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
- struct r2_decoder_table *table)
+ const struct r2_decoder_table *table)
{
- struct r2_decoder_table *p;
+ const struct r2_decoder_table *p;
int err;
for (p = table; p->func; p++) {
diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c
deleted file mode 100644
index 781168834456..000000000000
--- a/arch/mips/kernel/module-rela.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) 2001 Rusty Russell.
- * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 2005 Thiemo Seufer
- * Copyright (C) 2015 Imagination Technologies Ltd.
- */
-
-#include <linux/elf.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/moduleloader.h>
-
-extern int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v);
-
-static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- *location = v;
-
- return 0;
-}
-
-static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- if (v % 4) {
- pr_err("module %s: dangerous R_MIPS_26 RELA relocation\n",
- me->name);
- return -ENOEXEC;
- }
-
- if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
- pr_err("module %s: relocation overflow\n", me->name);
- return -ENOEXEC;
- }
-
- *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
-
- return 0;
-}
-
-static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- *location = (*location & 0xffff0000) |
- ((((long long) v + 0x8000LL) >> 16) & 0xffff);
-
- return 0;
-}
-
-static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- *location = (*location & 0xffff0000) | (v & 0xffff);
-
- return 0;
-}
-
-static int apply_r_mips_pc_rela(struct module *me, u32 *location, Elf_Addr v,
- unsigned bits)
-{
- unsigned long mask = GENMASK(bits - 1, 0);
- unsigned long se_bits;
- long offset;
-
- if (v % 4) {
- pr_err("module %s: dangerous R_MIPS_PC%u RELA relocation\n",
- me->name, bits);
- return -ENOEXEC;
- }
-
- offset = ((long)v - (long)location) >> 2;
-
- /* check the sign bit onwards are identical - ie. we didn't overflow */
- se_bits = (offset & BIT(bits - 1)) ? ~0ul : 0;
- if ((offset & ~mask) != (se_bits & ~mask)) {
- pr_err("module %s: relocation overflow\n", me->name);
- return -ENOEXEC;
- }
-
- *location = (*location & ~mask) | (offset & mask);
-
- return 0;
-}
-
-static int apply_r_mips_pc16_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- return apply_r_mips_pc_rela(me, location, v, 16);
-}
-
-static int apply_r_mips_pc21_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- return apply_r_mips_pc_rela(me, location, v, 21);
-}
-
-static int apply_r_mips_pc26_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- return apply_r_mips_pc_rela(me, location, v, 26);
-}
-
-static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- *(Elf_Addr *)location = v;
-
- return 0;
-}
-
-static int apply_r_mips_higher_rela(struct module *me, u32 *location,
- Elf_Addr v)
-{
- *location = (*location & 0xffff0000) |
- ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
-
- return 0;
-}
-
-static int apply_r_mips_highest_rela(struct module *me, u32 *location,
- Elf_Addr v)
-{
- *location = (*location & 0xffff0000) |
- ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
-
- return 0;
-}
-
-static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
- Elf_Addr v) = {
- [R_MIPS_NONE] = apply_r_mips_none,
- [R_MIPS_32] = apply_r_mips_32_rela,
- [R_MIPS_26] = apply_r_mips_26_rela,
- [R_MIPS_HI16] = apply_r_mips_hi16_rela,
- [R_MIPS_LO16] = apply_r_mips_lo16_rela,
- [R_MIPS_PC16] = apply_r_mips_pc16_rela,
- [R_MIPS_64] = apply_r_mips_64_rela,
- [R_MIPS_HIGHER] = apply_r_mips_higher_rela,
- [R_MIPS_HIGHEST] = apply_r_mips_highest_rela,
- [R_MIPS_PC21_S2] = apply_r_mips_pc21_rela,
- [R_MIPS_PC26_S2] = apply_r_mips_pc26_rela,
-};
-
-int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec,
- struct module *me)
-{
- Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
- int (*handler)(struct module *me, u32 *location, Elf_Addr v);
- Elf_Sym *sym;
- u32 *location;
- unsigned int i, type;
- Elf_Addr v;
- int res;
-
- pr_debug("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
-
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
- /* This is where to make the change */
- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
- + rel[i].r_offset;
- /* This is the symbol it is referring to */
- sym = (Elf_Sym *)sechdrs[symindex].sh_addr
- + ELF_MIPS_R_SYM(rel[i]);
- if (sym->st_value >= -MAX_ERRNO) {
- /* Ignore unresolved weak symbol */
- if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
- continue;
- pr_warn("%s: Unknown symbol %s\n",
- me->name, strtab + sym->st_name);
- return -ENOENT;
- }
-
- type = ELF_MIPS_R_TYPE(rel[i]);
-
- if (type < ARRAY_SIZE(reloc_handlers_rela))
- handler = reloc_handlers_rela[type];
- else
- handler = NULL;
-
- if (!handler) {
- pr_err("%s: Unknown relocation type %u\n",
- me->name, type);
- return -EINVAL;
- }
-
- v = sym->st_value + rel[i].r_addend;
- res = handler(me, location, v);
- if (res)
- return res;
- }
-
- return 0;
-}
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 94627a3a6a0d..491605137b03 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -53,22 +53,25 @@ void *module_alloc(unsigned long size)
}
#endif
-int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_none(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
return 0;
}
-static int apply_r_mips_32_rel(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_32(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
- *location += v;
+ *location = base + v;
return 0;
}
-static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_26(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
if (v % 4) {
- pr_err("module %s: dangerous R_MIPS_26 REL relocation\n",
+ pr_err("module %s: dangerous R_MIPS_26 relocation\n",
me->name);
return -ENOEXEC;
}
@@ -80,15 +83,22 @@ static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
}
*location = (*location & ~0x03ffffff) |
- ((*location + (v >> 2)) & 0x03ffffff);
+ ((base + (v >> 2)) & 0x03ffffff);
return 0;
}
-static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_hi16(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
struct mips_hi16 *n;
+ if (rela) {
+ *location = (*location & 0xffff0000) |
+ ((((long long) v + 0x8000LL) >> 16) & 0xffff);
+ return 0;
+ }
+
/*
* We cannot relocate this one now because we don't know the value of
* the carry we need to add. Save the information, and let LO16 do the
@@ -117,12 +127,18 @@ static void free_relocation_chain(struct mips_hi16 *l)
}
}
-static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_lo16(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
- unsigned long insnlo = *location;
+ unsigned long insnlo = base;
struct mips_hi16 *l;
Elf_Addr val, vallo;
+ if (rela) {
+ *location = (*location & 0xffff0000) | (v & 0xffff);
+ return 0;
+ }
+
/* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
@@ -178,26 +194,26 @@ out_danger:
free_relocation_chain(l);
me->arch.r_mips_hi16_list = NULL;
- pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name);
+ pr_err("module %s: dangerous R_MIPS_LO16 relocation\n", me->name);
return -ENOEXEC;
}
-static int apply_r_mips_pc_rel(struct module *me, u32 *location, Elf_Addr v,
- unsigned bits)
+static int apply_r_mips_pc(struct module *me, u32 *location, u32 base,
+ Elf_Addr v, unsigned int bits)
{
unsigned long mask = GENMASK(bits - 1, 0);
unsigned long se_bits;
long offset;
if (v % 4) {
- pr_err("module %s: dangerous R_MIPS_PC%u REL relocation\n",
+ pr_err("module %s: dangerous R_MIPS_PC%u relocation\n",
me->name, bits);
return -ENOEXEC;
}
- /* retrieve & sign extend implicit addend */
- offset = *location & mask;
+ /* retrieve & sign extend implicit addend if any */
+ offset = base & mask;
offset |= (offset & BIT(bits - 1)) ? ~mask : 0;
offset += ((long)v - (long)location) >> 2;
@@ -214,99 +230,192 @@ static int apply_r_mips_pc_rel(struct module *me, u32 *location, Elf_Addr v,
return 0;
}
-static int apply_r_mips_pc16_rel(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_pc16(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ return apply_r_mips_pc(me, location, base, v, 16);
+}
+
+static int apply_r_mips_pc21(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ return apply_r_mips_pc(me, location, base, v, 21);
+}
+
+static int apply_r_mips_pc26(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ return apply_r_mips_pc(me, location, base, v, 26);
+}
+
+static int apply_r_mips_64(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
- return apply_r_mips_pc_rel(me, location, v, 16);
+ if (WARN_ON(!rela))
+ return -EINVAL;
+
+ *(Elf_Addr *)location = v;
+
+ return 0;
}
-static int apply_r_mips_pc21_rel(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_higher(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
- return apply_r_mips_pc_rel(me, location, v, 21);
+ if (WARN_ON(!rela))
+ return -EINVAL;
+
+ *location = (*location & 0xffff0000) |
+ ((((long long)v + 0x80008000LL) >> 32) & 0xffff);
+
+ return 0;
}
-static int apply_r_mips_pc26_rel(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_mips_highest(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
{
- return apply_r_mips_pc_rel(me, location, v, 26);
+ if (WARN_ON(!rela))
+ return -EINVAL;
+
+ *location = (*location & 0xffff0000) |
+ ((((long long)v + 0x800080008000LL) >> 48) & 0xffff);
+
+ return 0;
}
-static int (*reloc_handlers_rel[]) (struct module *me, u32 *location,
- Elf_Addr v) = {
+/**
+ * reloc_handler() - Apply a particular relocation to a module
+ * @me: the module to apply the reloc to
+ * @location: the address at which the reloc is to be applied
+ * @base: the existing value at location for REL-style; 0 for RELA-style
+ * @v: the value of the reloc, with addend for RELA-style
+ *
+ * Each implemented reloc_handler function applies a particular type of
+ * relocation to the module @me. Relocs that may be found in either REL or RELA
+ * variants can be handled by making use of the @base & @v parameters which are
+ * set to values which abstract the difference away from the particular reloc
+ * implementations.
+ *
+ * Return: 0 upon success, else -ERRNO
+ */
+typedef int (*reloc_handler)(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela);
+
+/* The handlers for known reloc types */
+static reloc_handler reloc_handlers[] = {
[R_MIPS_NONE] = apply_r_mips_none,
- [R_MIPS_32] = apply_r_mips_32_rel,
- [R_MIPS_26] = apply_r_mips_26_rel,
- [R_MIPS_HI16] = apply_r_mips_hi16_rel,
- [R_MIPS_LO16] = apply_r_mips_lo16_rel,
- [R_MIPS_PC16] = apply_r_mips_pc16_rel,
- [R_MIPS_PC21_S2] = apply_r_mips_pc21_rel,
- [R_MIPS_PC26_S2] = apply_r_mips_pc26_rel,
+ [R_MIPS_32] = apply_r_mips_32,
+ [R_MIPS_26] = apply_r_mips_26,
+ [R_MIPS_HI16] = apply_r_mips_hi16,
+ [R_MIPS_LO16] = apply_r_mips_lo16,
+ [R_MIPS_PC16] = apply_r_mips_pc16,
+ [R_MIPS_64] = apply_r_mips_64,
+ [R_MIPS_HIGHER] = apply_r_mips_higher,
+ [R_MIPS_HIGHEST] = apply_r_mips_highest,
+ [R_MIPS_PC21_S2] = apply_r_mips_pc21,
+ [R_MIPS_PC26_S2] = apply_r_mips_pc26,
};
-int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec,
- struct module *me)
+static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me, bool rela)
{
- Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr;
- int (*handler)(struct module *me, u32 *location, Elf_Addr v);
+ union {
+ Elf_Mips_Rel *rel;
+ Elf_Mips_Rela *rela;
+ } r;
+ reloc_handler handler;
Elf_Sym *sym;
- u32 *location;
+ u32 *location, base;
unsigned int i, type;
Elf_Addr v;
- int res;
+ int err = 0;
+ size_t reloc_sz;
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
+ r.rel = (void *)sechdrs[relsec].sh_addr;
+ reloc_sz = rela ? sizeof(*r.rela) : sizeof(*r.rel);
me->arch.r_mips_hi16_list = NULL;
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ for (i = 0; i < sechdrs[relsec].sh_size / reloc_sz; i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
- + rel[i].r_offset;
+ + r.rel->r_offset;
/* This is the symbol it is referring to */
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
- + ELF_MIPS_R_SYM(rel[i]);
+ + ELF_MIPS_R_SYM(*r.rel);
if (sym->st_value >= -MAX_ERRNO) {
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
pr_warn("%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
- return -ENOENT;
+ err = -ENOENT;
+ goto out;
}
- type = ELF_MIPS_R_TYPE(rel[i]);
-
- if (type < ARRAY_SIZE(reloc_handlers_rel))
- handler = reloc_handlers_rel[type];
+ type = ELF_MIPS_R_TYPE(*r.rel);
+ if (type < ARRAY_SIZE(reloc_handlers))
+ handler = reloc_handlers[type];
else
handler = NULL;
if (!handler) {
pr_err("%s: Unknown relocation type %u\n",
me->name, type);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
- v = sym->st_value;
- res = handler(me, location, v);
- if (res)
- return res;
+ if (rela) {
+ v = sym->st_value + r.rela->r_addend;
+ base = 0;
+ r.rela = &r.rela[1];
+ } else {
+ v = sym->st_value;
+ base = *location;
+ r.rel = &r.rel[1];
+ }
+
+ err = handler(me, location, base, v, rela);
+ if (err)
+ goto out;
}
+out:
/*
- * Normally the hi16 list should be deallocated at this point. A
+ * Normally the hi16 list should be deallocated at this point. A
* malformed binary however could contain a series of R_MIPS_HI16
- * relocations not followed by a R_MIPS_LO16 relocation. In that
- * case, free up the list and return an error.
+ * relocations not followed by a R_MIPS_LO16 relocation, or if we hit
+ * an error processing a reloc we might have gotten here before
+ * reaching the R_MIPS_LO16. In either case, free up the list and
+ * return an error.
*/
if (me->arch.r_mips_hi16_list) {
free_relocation_chain(me->arch.r_mips_hi16_list);
me->arch.r_mips_hi16_list = NULL;
-
- return -ENOEXEC;
+ err = err ?: -ENOEXEC;
}
- return 0;
+ return err;
+}
+
+int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ return __apply_relocate(sechdrs, strtab, symindex, relsec, me, false);
+}
+
+#ifdef CONFIG_MODULES_USE_ELF_RELA
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ return __apply_relocate(sechdrs, strtab, symindex, relsec, me, true);
}
+#endif /* CONFIG_MODULES_USE_ELF_RELA */
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_dbetables(unsigned long addr)
@@ -317,7 +426,8 @@ const struct exception_table_entry *search_module_dbetables(unsigned long addr)
spin_lock_irqsave(&dbe_lock, flags);
list_for_each_entry(dbe, &dbe_list, dbe_list) {
- e = search_extable(dbe->dbe_start, dbe->dbe_end - 1, addr);
+ e = search_extable(dbe->dbe_start,
+ dbe->dbe_end - dbe->dbe_start, addr);
if (e)
break;
}
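
The RELA-style HI16/LO16 handlers folded into module.c above (and deleted from module-rela.c) both rely on pre-biasing: HI16 adds 0x8000 before shifting so that sign-extension of the LO16 half cancels out when the pair recombines. A standalone round-trip check (not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t v = 0x12348abc;	/* address with bit 15 set */
	uint16_t hi = (uint16_t)(((uint64_t)v + 0x8000) >> 16);
	uint16_t lo = (uint16_t)v;
	/* The hardware combines the pair as (hi << 16) + sext(lo). */
	uint32_t back = ((uint32_t)hi << 16) + (int16_t)lo;

	printf("hi=0x%04x lo=0x%04x back=0x%08x\n", hi, lo, back);
	return back == v ? 0 : 1;	/* reconstructs v exactly */
}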
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index 3375745b9198..e42113fe2762 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -10,12 +10,13 @@
* Copyright (C) 2000 MIPS Technologies, Inc.
* written by Carsten Langgaard, carstenl@mips.com
*/
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <asm/asm-offsets.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
-#define USE_ALTERNATE_RESUME_IMPL 1
- .set push
- .set arch=mips64r2
-#include "r4k_switch.S"
- .set pop
/*
* task_struct *resume(task_struct *prev, task_struct *next,
* struct thread_info *next_ti)
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index f3e301f95aef..6668f67a61c3 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -618,8 +618,7 @@ static int mipspmu_event_init(struct perf_event *event)
return -ENOENT;
}
- if ((unsigned int)event->cpu >= nr_cpumask_bits ||
- (event->cpu >= 0 && !cpu_online(event->cpu)))
+ if (event->cpu >= 0 && !cpu_online(event->cpu))
return -ENODEV;
if (!atomic_inc_not_zero(&active_events)) {
@@ -814,7 +813,7 @@ static const struct mips_perf_event mipsxxcore_event_map2
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};
-static const struct mips_perf_event i6400_event_map[PERF_COUNT_HW_MAX] = {
+static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD },
/* These only count dcache, not icache */
@@ -1014,7 +1013,7 @@ static const struct mips_perf_event mipsxxcore_cache_map2
},
};
-static const struct mips_perf_event i6400_cache_map
+static const struct mips_perf_event i6x00_cache_map
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
@@ -1610,6 +1609,7 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
#endif
break;
case CPU_I6400:
+ case CPU_I6500:
/* 8-bit event numbers */
base_id = config & 0xff;
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
@@ -1770,8 +1770,13 @@ init_hw_perf_events(void)
break;
case CPU_I6400:
mipspmu.name = "mips/I6400";
- mipspmu.general_event_map = &i6400_event_map;
- mipspmu.cache_event_map = &i6400_cache_map;
+ mipspmu.general_event_map = &i6x00_event_map;
+ mipspmu.cache_event_map = &i6x00_cache_map;
+ break;
+ case CPU_I6500:
+ mipspmu.name = "mips/I6500";
+ mipspmu.general_event_map = &i6x00_event_map;
+ mipspmu.cache_event_map = &i6x00_cache_map;
break;
case CPU_1004K:
mipspmu.name = "mips/1004K";
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index d99416094ba9..4655017f2377 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -17,8 +17,7 @@
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
-#include <asm/mips-cm.h>
-#include <asm/mips-cpc.h>
+#include <asm/mips-cps.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
@@ -49,7 +48,7 @@ static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
nc_asm_enter);
/* Bitmap indicating which states are supported by the system */
-DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
+static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
/*
* Indicates the number of coupled VPEs ready to operate in a non-coherent
@@ -114,7 +113,7 @@ static void coupled_barrier(atomic_t *a, unsigned online)
int cps_pm_enter_state(enum cps_pm_state state)
{
unsigned cpu = smp_processor_id();
- unsigned core = current_cpu_data.core;
+ unsigned core = cpu_core(&current_cpu_data);
unsigned online, left;
cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
u32 *core_ready_count, *nc_core_ready_count;
@@ -486,7 +485,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
* defined by the interAptiv & proAptiv SUMs as ensuring that the
* operation resulting from the preceding store is complete.
*/
- uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
+ uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu]));
uasm_i_sw(&p, t0, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
@@ -569,8 +568,8 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
* rest will just be performing a rather unusual nop.
*/
uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
- ? CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK
- : CM3_GCR_Cx_COHERENCE_COHEN_MSK);
+ ? CM_GCR_Cx_COHERENCE_COHDOMAINEN
+ : CM3_GCR_Cx_COHERENCE_COHEN);
uasm_i_sw(&p, t0, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
@@ -640,7 +639,7 @@ out_err:
static int cps_pm_online_cpu(unsigned int cpu)
{
enum cps_pm_state state;
- unsigned core = cpu_data[cpu].core;
+ unsigned core = cpu_core(&cpu_data[cpu]);
void *entry_fn, *core_rc;
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -692,7 +691,7 @@ static int __init cps_pm_init(void)
/* Detect whether a CPC is present */
if (mips_cpc_present()) {
/* Detect whether clock gating is implemented */
- if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK)
+ if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL)
set_bit(CPS_PM_CLOCK_GATED, state_support);
else
pr_warn("pm-cps: CPC does not support clock gating\n");
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 4eff2aed7360..bd9bf528f19b 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -83,7 +83,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}
seq_printf(m, "isa\t\t\t:");
- if (cpu_has_mips_r1)
+ if (cpu_has_mips_1)
seq_printf(m, " mips1");
if (cpu_has_mips_2)
seq_printf(m, "%s", " mips2");
@@ -109,6 +109,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "ASEs implemented\t:");
if (cpu_has_mips16) seq_printf(m, "%s", " mips16");
+ if (cpu_has_mips16e2) seq_printf(m, "%s", " mips16e2");
if (cpu_has_mdmx) seq_printf(m, "%s", " mdmx");
if (cpu_has_mips3d) seq_printf(m, "%s", " mips3d");
if (cpu_has_smartmips) seq_printf(m, "%s", " smartmips");
@@ -133,13 +134,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "kscratch registers\t: %d\n",
hweight8(cpu_data[n].kscratch_mask));
seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
- seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
+ seq_printf(m, "core\t\t\t: %d\n", cpu_core(&cpu_data[n]));
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
if (cpu_has_mipsmt)
- seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
+ seq_printf(m, "VPE\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n]));
else if (cpu_has_vp)
- seq_printf(m, "VP\t\t\t: %d\n", cpu_data[n].vpe_id);
+ seq_printf(m, "VP\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n]));
#endif
sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 5351e1f3950d..c5ff6bfe2825 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -208,13 +208,13 @@ static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
*
* microMIPS is way more fun...
*/
- if (mm_insn_16bit(ip->halfword[1])) {
+ if (mm_insn_16bit(ip->word >> 16)) {
switch (ip->mm16_r5_format.opcode) {
case mm_swsp16_op:
if (ip->mm16_r5_format.rt != 31)
return 0;
- *poff = ip->mm16_r5_format.simmediate;
+ *poff = ip->mm16_r5_format.imm;
*poff = (*poff << 2) / sizeof(ulong);
return 1;
@@ -287,7 +287,7 @@ static inline int is_jump_ins(union mips_instruction *ip)
*
* microMIPS is kind of more fun...
*/
- if (mm_insn_16bit(ip->halfword[1])) {
+ if (mm_insn_16bit(ip->word >> 16)) {
if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
(ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
return 1;
@@ -313,9 +313,11 @@ static inline int is_jump_ins(union mips_instruction *ip)
#endif
}
-static inline int is_sp_move_ins(union mips_instruction *ip)
+static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
+ unsigned short tmp;
+
/*
* addiusp -imm
* addius5 sp,-imm
@@ -324,21 +326,40 @@ static inline int is_sp_move_ins(union mips_instruction *ip)
*
* microMIPS is not more fun...
*/
- if (mm_insn_16bit(ip->halfword[1])) {
- return (ip->mm16_r3_format.opcode == mm_pool16d_op &&
- ip->mm16_r3_format.simmediate && mm_addiusp_func) ||
- (ip->mm16_r5_format.opcode == mm_pool16d_op &&
- ip->mm16_r5_format.rt == 29);
+ if (mm_insn_16bit(ip->word >> 16)) {
+ if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
+ ip->mm16_r3_format.simmediate & mm_addiusp_func) {
+ tmp = ip->mm_b0_format.simmediate >> 1;
+ tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
+ if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
+ tmp ^= 0x100;
+ *frame_size = -(signed short)(tmp << 2);
+ return 1;
+ }
+ if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
+ ip->mm16_r5_format.rt == 29) {
+ tmp = ip->mm16_r5_format.imm >> 1;
+ *frame_size = -(signed short)(tmp & 0xf);
+ return 1;
+ }
+ return 0;
}
- return ip->mm_i_format.opcode == mm_addiu32_op &&
- ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29;
+ if (ip->mm_i_format.opcode == mm_addiu32_op &&
+ ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
+ *frame_size = -ip->i_format.simmediate;
+ return 1;
+ }
#else
/* addiu/daddiu sp,sp,-imm */
if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
return 0;
- if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
+
+ if (ip->i_format.opcode == addiu_op ||
+ ip->i_format.opcode == daddiu_op) {
+ *frame_size = -ip->i_format.simmediate;
return 1;
+ }
#endif
return 0;
}
@@ -348,7 +369,9 @@ static int get_frame_info(struct mips_frame_info *info)
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
union mips_instruction insn, *ip, *ip_end;
const unsigned int max_insns = 128;
+ unsigned int last_insn_size = 0;
unsigned int i;
+ bool saw_jump = false;
info->pc_offset = -1;
info->frame_size = 0;
@@ -359,47 +382,44 @@ static int get_frame_info(struct mips_frame_info *info)
ip_end = (void *)ip + info->func_size;
- for (i = 0; i < max_insns && ip < ip_end; i++, ip++) {
+ for (i = 0; i < max_insns && ip < ip_end; i++) {
+ ip = (void *)ip + last_insn_size;
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
- insn.halfword[0] = 0;
- insn.halfword[1] = ip->halfword[0];
+ insn.word = ip->halfword[0] << 16;
+ last_insn_size = 2;
} else if (is_mmips) {
- insn.halfword[0] = ip->halfword[1];
- insn.halfword[1] = ip->halfword[0];
+ insn.word = ip->halfword[0] << 16 | ip->halfword[1];
+ last_insn_size = 4;
} else {
insn.word = ip->word;
+ last_insn_size = 4;
}
- if (is_jump_ins(&insn))
- break;
-
if (!info->frame_size) {
- if (is_sp_move_ins(&insn))
- {
-#ifdef CONFIG_CPU_MICROMIPS
- if (mm_insn_16bit(ip->halfword[0]))
- {
- unsigned short tmp;
-
- if (ip->halfword[0] & mm_addiusp_func)
- {
- tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
- info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
- } else {
- tmp = (ip->halfword[0] >> 1);
- info->frame_size = -(signed short)(tmp & 0xf);
- }
- ip = (void *) &ip->halfword[1];
- ip--;
- } else
-#endif
- info->frame_size = - ip->i_format.simmediate;
- }
+ is_sp_move_ins(&insn, &info->frame_size);
+ continue;
+ } else if (!saw_jump && is_jump_ins(ip)) {
+ /*
+ * If we see a jump instruction, we are finished
+ * with the frame save.
+ *
+ * Some functions can have a shortcut return at
+ * the beginning of the function, so don't start
+ * looking for jump instruction until we see the
+ * frame setup.
+ *
+ * The RA save instruction can get put into the
+ * delay slot of the jump instruction, so look
+ * at the next instruction, too.
+ */
+ saw_jump = true;
continue;
}
if (info->pc_offset == -1 &&
is_ra_save_ins(&insn, &info->pc_offset))
break;
+ if (saw_jump)
+ break;
}
if (info->frame_size && info->pc_offset >= 0) /* nested */
return 0;
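
A note on the addiusp decode above: ((x & 0x1ff) ^ 0x100) - 0x100 is the
usual xor-and-subtract idiom for sign-extending a 9-bit field, and the
tmp ^= 0x100 fixup then remaps the four encodings the comment calls out as
special. A minimal sketch of just the sign-extension step, in plain
userspace C (not kernel code):

    #include <stdio.h>

    /* Sign-extend the low 9 bits of x, as the addiusp decode does. */
    static int sign_extend_9(unsigned int x)
    {
        return ((x & 0x1ff) ^ 0x100) - 0x100;
    }

    int main(void)
    {
        /* 0x1ff -> -1, 0x100 -> -256, 0x0ff -> 255 */
        printf("%d %d %d\n", sign_extend_9(0x1ff),
               sign_extend_9(0x100), sign_extend_9(0x0ff));
        return 0;
    }
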
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 6931fe722a0b..1395654cfc8d 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -868,14 +868,37 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
tracehook_report_syscall_entry(regs))
return -1;
- if (secure_computing(NULL) == -1)
- return -1;
+#ifdef CONFIG_SECCOMP
+ if (unlikely(test_thread_flag(TIF_SECCOMP))) {
+ int ret, i;
+ struct seccomp_data sd;
+ unsigned long args[6];
+
+ sd.nr = syscall;
+ sd.arch = syscall_get_arch();
+ syscall_get_arguments(current, regs, 0, 6, args);
+ for (i = 0; i < 6; i++)
+ sd.args[i] = args[i];
+ sd.instruction_pointer = KSTK_EIP(current);
+
+ ret = __secure_computing(&sd);
+ if (ret == -1)
+ return ret;
+ }
+#endif
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);
audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
+
+ /*
+ * Negative syscall numbers are mistaken for rejected syscalls, but
+ * won't have had the return value set appropriately, so we do so now.
+ */
+ if (syscall < 0)
+ syscall_set_return_value(current, regs, -ENOSYS, 0);
return syscall;
}
@@ -895,7 +918,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_exit(regs, regs->regs[2]);
+ trace_sys_exit(regs, regs_return_value(regs));
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
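
The seccomp path above now builds a struct seccomp_data by hand and passes
it to __secure_computing(). For context, here is a minimal userspace
program that installs a filter this hook would evaluate; it is a sketch
using the generic seccomp BPF API, with a filter that simply allows every
syscall:

    #include <stddef.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>
    #include <sys/prctl.h>

    int install_allow_all_filter(void)
    {
        struct sock_filter filter[] = {
            /* load seccomp_data.nr, then unconditionally allow */
            BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                     offsetof(struct seccomp_data, nr)),
            BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
            .len = sizeof(filter) / sizeof(filter[0]),
            .filter = filter,
        };

        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
            return -1;
        return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
    }
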
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index 918f2f6d3861..3062ba66c563 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -12,7 +12,9 @@
* Copyright (c) 1998 Harald Koerfgen
*/
#include <asm/asm.h>
+#include <asm/asmmacro.h>
#include <asm/errno.h>
+#include <asm/export.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
@@ -31,9 +33,85 @@
PTR 9b+4,bad_stack; \
.previous
- .set noreorder
.set mips1
+/*
+ * Save a thread's fp context.
+ */
+LEAF(_save_fp)
+EXPORT_SYMBOL(_save_fp)
+ fpu_save_single a0, t1 # clobbers t1
+ jr ra
+ END(_save_fp)
+
+/*
+ * Restore a thread's fp context.
+ */
+LEAF(_restore_fp)
+ fpu_restore_single a0, t1 # clobbers t1
+ jr ra
+ END(_restore_fp)
+
+/*
+ * Load the FPU with signalling NaNs.  The bit pattern we use represents
+ * a signalling NaN no matter whether it is considered as single or as
+ * double precision.
+ *
+ * The value to initialize fcr31 to comes in $a0.
+ */
+
+ .set push
+ SET_HARDFLOAT
+
+LEAF(_init_fpu)
+ mfc0 t0, CP0_STATUS
+ li t1, ST0_CU1
+ or t0, t1
+ mtc0 t0, CP0_STATUS
+
+ ctc1 a0, fcr31
+
+ li t0, -1
+
+ mtc1 t0, $f0
+ mtc1 t0, $f1
+ mtc1 t0, $f2
+ mtc1 t0, $f3
+ mtc1 t0, $f4
+ mtc1 t0, $f5
+ mtc1 t0, $f6
+ mtc1 t0, $f7
+ mtc1 t0, $f8
+ mtc1 t0, $f9
+ mtc1 t0, $f10
+ mtc1 t0, $f11
+ mtc1 t0, $f12
+ mtc1 t0, $f13
+ mtc1 t0, $f14
+ mtc1 t0, $f15
+ mtc1 t0, $f16
+ mtc1 t0, $f17
+ mtc1 t0, $f18
+ mtc1 t0, $f19
+ mtc1 t0, $f20
+ mtc1 t0, $f21
+ mtc1 t0, $f22
+ mtc1 t0, $f23
+ mtc1 t0, $f24
+ mtc1 t0, $f25
+ mtc1 t0, $f26
+ mtc1 t0, $f27
+ mtc1 t0, $f28
+ mtc1 t0, $f29
+ mtc1 t0, $f30
+ mtc1 t0, $f31
+ jr ra
+ END(_init_fpu)
+
+ .set pop
+
+ .set noreorder
+
/**
* _save_fp_context() - save FP context from the FPU
* @a0 - pointer to fpregs field of sigcontext
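
Regarding the signalling-NaN comment above: an all-ones word has an
all-ones exponent field and a non-zero fraction, which is a NaN at any
IEEE-754 width, and under the legacy (pre-NAN2008) MIPS convention the set
fraction MSB marks it as signalling. A quick standalone check of the
single-precision case:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t bits = 0xffffffff;     /* the pattern _init_fpu writes */
        uint32_t exp  = (bits >> 23) & 0xff;
        uint32_t frac = bits & 0x7fffff;

        /* all-ones exponent + non-zero fraction == NaN */
        printf("NaN: %s\n", (exp == 0xff && frac) ? "yes" : "no");
        return 0;
    }
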
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 1049eeafd97d..e57703b1de50 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -26,12 +26,6 @@
.align 5
/*
- * Offset to the current process status flags, the first 32 bytes of the
- * stack are not used.
- */
-#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
-
-/*
* task_struct *resume(task_struct *prev, task_struct *next,
* struct thread_info *next_ti)
*/
@@ -68,78 +62,3 @@ LEAF(resume)
move v0, a0
jr ra
END(resume)
-
-/*
- * Save a thread's fp context.
- */
-LEAF(_save_fp)
-EXPORT_SYMBOL(_save_fp)
- fpu_save_single a0, t1 # clobbers t1
- jr ra
- END(_save_fp)
-
-/*
- * Restore a thread's fp context.
- */
-LEAF(_restore_fp)
- fpu_restore_single a0, t1 # clobbers t1
- jr ra
- END(_restore_fp)
-
-/*
- * Load the FPU with signalling NANS. This bit pattern we're using has
- * the property that no matter whether considered as single or as double
- * precision represents signaling NANS.
- *
- * The value to initialize fcr31 to comes in $a0.
- */
-
- .set push
- SET_HARDFLOAT
-
-LEAF(_init_fpu)
- mfc0 t0, CP0_STATUS
- li t1, ST0_CU1
- or t0, t1
- mtc0 t0, CP0_STATUS
-
- ctc1 a0, fcr31
-
- li t0, -1
-
- mtc1 t0, $f0
- mtc1 t0, $f1
- mtc1 t0, $f2
- mtc1 t0, $f3
- mtc1 t0, $f4
- mtc1 t0, $f5
- mtc1 t0, $f6
- mtc1 t0, $f7
- mtc1 t0, $f8
- mtc1 t0, $f9
- mtc1 t0, $f10
- mtc1 t0, $f11
- mtc1 t0, $f12
- mtc1 t0, $f13
- mtc1 t0, $f14
- mtc1 t0, $f15
- mtc1 t0, $f16
- mtc1 t0, $f17
- mtc1 t0, $f18
- mtc1 t0, $f19
- mtc1 t0, $f20
- mtc1 t0, $f21
- mtc1 t0, $f22
- mtc1 t0, $f23
- mtc1 t0, $f24
- mtc1 t0, $f25
- mtc1 t0, $f26
- mtc1 t0, $f27
- mtc1 t0, $f28
- mtc1 t0, $f29
- mtc1 t0, $f30
- mtc1 t0, $f31
- jr ra
- END(_init_fpu)
-
- .set pop
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 56d86b09c917..0a83b1708b3c 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -15,6 +15,7 @@
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/errno.h>
+#include <asm/export.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
@@ -34,6 +35,201 @@
.previous
.endm
+/*
+ * Save a thread's fp context.
+ */
+LEAF(_save_fp)
+EXPORT_SYMBOL(_save_fp)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+ defined(CONFIG_CPU_MIPS32_R6)
+ mfc0 t0, CP0_STATUS
+#endif
+ fpu_save_double a0 t0 t1 # clobbers t1
+ jr ra
+ END(_save_fp)
+
+/*
+ * Restore a thread's fp context.
+ */
+LEAF(_restore_fp)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+ defined(CONFIG_CPU_MIPS32_R6)
+ mfc0 t0, CP0_STATUS
+#endif
+ fpu_restore_double a0 t0 t1 # clobbers t1
+ jr ra
+ END(_restore_fp)
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+/*
+ * Save a thread's MSA vector context.
+ */
+LEAF(_save_msa)
+EXPORT_SYMBOL(_save_msa)
+ msa_save_all a0
+ jr ra
+ END(_save_msa)
+
+/*
+ * Restore a thread's MSA vector context.
+ */
+LEAF(_restore_msa)
+ msa_restore_all a0
+ jr ra
+ END(_restore_msa)
+
+LEAF(_init_msa_upper)
+ msa_init_all_upper
+ jr ra
+ END(_init_msa_upper)
+
+#endif
+
+/*
+ * Load the FPU with signalling NaNs.  The bit pattern we use represents
+ * a signalling NaN no matter whether it is considered as single or as
+ * double precision.
+ *
+ * The value to initialize fcr31 to comes in $a0.
+ */
+
+ .set push
+ SET_HARDFLOAT
+
+LEAF(_init_fpu)
+ mfc0 t0, CP0_STATUS
+ li t1, ST0_CU1
+ or t0, t1
+ mtc0 t0, CP0_STATUS
+ enable_fpu_hazard
+
+ ctc1 a0, fcr31
+
+ li t1, -1 # SNaN
+
+#ifdef CONFIG_64BIT
+ sll t0, t0, 5
+ bgez t0, 1f # 16 / 32 register mode?
+
+ dmtc1 t1, $f1
+ dmtc1 t1, $f3
+ dmtc1 t1, $f5
+ dmtc1 t1, $f7
+ dmtc1 t1, $f9
+ dmtc1 t1, $f11
+ dmtc1 t1, $f13
+ dmtc1 t1, $f15
+ dmtc1 t1, $f17
+ dmtc1 t1, $f19
+ dmtc1 t1, $f21
+ dmtc1 t1, $f23
+ dmtc1 t1, $f25
+ dmtc1 t1, $f27
+ dmtc1 t1, $f29
+ dmtc1 t1, $f31
+1:
+#endif
+
+#ifdef CONFIG_CPU_MIPS32
+ mtc1 t1, $f0
+ mtc1 t1, $f1
+ mtc1 t1, $f2
+ mtc1 t1, $f3
+ mtc1 t1, $f4
+ mtc1 t1, $f5
+ mtc1 t1, $f6
+ mtc1 t1, $f7
+ mtc1 t1, $f8
+ mtc1 t1, $f9
+ mtc1 t1, $f10
+ mtc1 t1, $f11
+ mtc1 t1, $f12
+ mtc1 t1, $f13
+ mtc1 t1, $f14
+ mtc1 t1, $f15
+ mtc1 t1, $f16
+ mtc1 t1, $f17
+ mtc1 t1, $f18
+ mtc1 t1, $f19
+ mtc1 t1, $f20
+ mtc1 t1, $f21
+ mtc1 t1, $f22
+ mtc1 t1, $f23
+ mtc1 t1, $f24
+ mtc1 t1, $f25
+ mtc1 t1, $f26
+ mtc1 t1, $f27
+ mtc1 t1, $f28
+ mtc1 t1, $f29
+ mtc1 t1, $f30
+ mtc1 t1, $f31
+
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
+ .set push
+ .set MIPS_ISA_LEVEL_RAW
+ .set fp=64
+ sll t0, t0, 5 # is Status.FR set?
+ bgez t0, 1f # no: skip setting upper 32b
+
+ mthc1 t1, $f0
+ mthc1 t1, $f1
+ mthc1 t1, $f2
+ mthc1 t1, $f3
+ mthc1 t1, $f4
+ mthc1 t1, $f5
+ mthc1 t1, $f6
+ mthc1 t1, $f7
+ mthc1 t1, $f8
+ mthc1 t1, $f9
+ mthc1 t1, $f10
+ mthc1 t1, $f11
+ mthc1 t1, $f12
+ mthc1 t1, $f13
+ mthc1 t1, $f14
+ mthc1 t1, $f15
+ mthc1 t1, $f16
+ mthc1 t1, $f17
+ mthc1 t1, $f18
+ mthc1 t1, $f19
+ mthc1 t1, $f20
+ mthc1 t1, $f21
+ mthc1 t1, $f22
+ mthc1 t1, $f23
+ mthc1 t1, $f24
+ mthc1 t1, $f25
+ mthc1 t1, $f26
+ mthc1 t1, $f27
+ mthc1 t1, $f28
+ mthc1 t1, $f29
+ mthc1 t1, $f30
+ mthc1 t1, $f31
+1: .set pop
+#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
+#else
+ .set MIPS_ISA_ARCH_LEVEL_RAW
+ dmtc1 t1, $f0
+ dmtc1 t1, $f2
+ dmtc1 t1, $f4
+ dmtc1 t1, $f6
+ dmtc1 t1, $f8
+ dmtc1 t1, $f10
+ dmtc1 t1, $f12
+ dmtc1 t1, $f14
+ dmtc1 t1, $f16
+ dmtc1 t1, $f18
+ dmtc1 t1, $f20
+ dmtc1 t1, $f22
+ dmtc1 t1, $f24
+ dmtc1 t1, $f26
+ dmtc1 t1, $f28
+ dmtc1 t1, $f30
+#endif
+ jr ra
+ END(_init_fpu)
+
+ .set pop /* SET_HARDFLOAT */
+
.set noreorder
/**
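
The "sll t0, t0, 5" / "bgez t0, 1f" pairs in _init_fpu above test
Status.FR (bit 26) by shifting it into bit 31 and branching on the sign.
A rough C equivalent of that test, with ST0_FR as defined in
asm/mipsregs.h:

    #include <stdint.h>
    #include <stdio.h>

    #define ST0_FR (1u << 26)

    int main(void)
    {
        uint32_t status = ST0_FR;       /* pretend CP0 Status, FR set */

        /* shifting left by 5 moves bit 26 into the sign bit */
        if ((int32_t)(status << 5) < 0)
            printf("FR=1: full 32-register FP mode\n");
        else
            printf("FR=0: 16 paired FP registers\n");
        return 0;
    }
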
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 7b386d54fd65..17cf9341c1cf 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -12,8 +12,6 @@
*/
#include <asm/asm.h>
#include <asm/cachectl.h>
-#include <asm/export.h>
-#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
@@ -22,10 +20,6 @@
#include <asm/asmmacro.h>
-/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
-#undef fp
-
-#ifndef USE_ALTERNATE_RESUME_IMPL
/*
* task_struct *resume(task_struct *prev, task_struct *next,
* struct thread_info *next_ti)
@@ -63,200 +57,3 @@
move v0, a0
jr ra
END(resume)
-
-#endif /* USE_ALTERNATE_RESUME_IMPL */
-
-/*
- * Save a thread's fp context.
- */
-LEAF(_save_fp)
-EXPORT_SYMBOL(_save_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
- defined(CONFIG_CPU_MIPS32_R6)
- mfc0 t0, CP0_STATUS
-#endif
- fpu_save_double a0 t0 t1 # clobbers t1
- jr ra
- END(_save_fp)
-
-/*
- * Restore a thread's fp context.
- */
-LEAF(_restore_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
- defined(CONFIG_CPU_MIPS32_R6)
- mfc0 t0, CP0_STATUS
-#endif
- fpu_restore_double a0 t0 t1 # clobbers t1
- jr ra
- END(_restore_fp)
-
-#ifdef CONFIG_CPU_HAS_MSA
-
-/*
- * Save a thread's MSA vector context.
- */
-LEAF(_save_msa)
-EXPORT_SYMBOL(_save_msa)
- msa_save_all a0
- jr ra
- END(_save_msa)
-
-/*
- * Restore a thread's MSA vector context.
- */
-LEAF(_restore_msa)
- msa_restore_all a0
- jr ra
- END(_restore_msa)
-
-LEAF(_init_msa_upper)
- msa_init_all_upper
- jr ra
- END(_init_msa_upper)
-
-#endif
-
-/*
- * Load the FPU with signalling NANS. This bit pattern we're using has
- * the property that no matter whether considered as single or as double
- * precision represents signaling NANS.
- *
- * The value to initialize fcr31 to comes in $a0.
- */
-
- .set push
- SET_HARDFLOAT
-
-LEAF(_init_fpu)
- mfc0 t0, CP0_STATUS
- li t1, ST0_CU1
- or t0, t1
- mtc0 t0, CP0_STATUS
- enable_fpu_hazard
-
- ctc1 a0, fcr31
-
- li t1, -1 # SNaN
-
-#ifdef CONFIG_64BIT
- sll t0, t0, 5
- bgez t0, 1f # 16 / 32 register mode?
-
- dmtc1 t1, $f1
- dmtc1 t1, $f3
- dmtc1 t1, $f5
- dmtc1 t1, $f7
- dmtc1 t1, $f9
- dmtc1 t1, $f11
- dmtc1 t1, $f13
- dmtc1 t1, $f15
- dmtc1 t1, $f17
- dmtc1 t1, $f19
- dmtc1 t1, $f21
- dmtc1 t1, $f23
- dmtc1 t1, $f25
- dmtc1 t1, $f27
- dmtc1 t1, $f29
- dmtc1 t1, $f31
-1:
-#endif
-
-#ifdef CONFIG_CPU_MIPS32
- mtc1 t1, $f0
- mtc1 t1, $f1
- mtc1 t1, $f2
- mtc1 t1, $f3
- mtc1 t1, $f4
- mtc1 t1, $f5
- mtc1 t1, $f6
- mtc1 t1, $f7
- mtc1 t1, $f8
- mtc1 t1, $f9
- mtc1 t1, $f10
- mtc1 t1, $f11
- mtc1 t1, $f12
- mtc1 t1, $f13
- mtc1 t1, $f14
- mtc1 t1, $f15
- mtc1 t1, $f16
- mtc1 t1, $f17
- mtc1 t1, $f18
- mtc1 t1, $f19
- mtc1 t1, $f20
- mtc1 t1, $f21
- mtc1 t1, $f22
- mtc1 t1, $f23
- mtc1 t1, $f24
- mtc1 t1, $f25
- mtc1 t1, $f26
- mtc1 t1, $f27
- mtc1 t1, $f28
- mtc1 t1, $f29
- mtc1 t1, $f30
- mtc1 t1, $f31
-
-#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
- .set push
- .set MIPS_ISA_LEVEL_RAW
- .set fp=64
- sll t0, t0, 5 # is Status.FR set?
- bgez t0, 1f # no: skip setting upper 32b
-
- mthc1 t1, $f0
- mthc1 t1, $f1
- mthc1 t1, $f2
- mthc1 t1, $f3
- mthc1 t1, $f4
- mthc1 t1, $f5
- mthc1 t1, $f6
- mthc1 t1, $f7
- mthc1 t1, $f8
- mthc1 t1, $f9
- mthc1 t1, $f10
- mthc1 t1, $f11
- mthc1 t1, $f12
- mthc1 t1, $f13
- mthc1 t1, $f14
- mthc1 t1, $f15
- mthc1 t1, $f16
- mthc1 t1, $f17
- mthc1 t1, $f18
- mthc1 t1, $f19
- mthc1 t1, $f20
- mthc1 t1, $f21
- mthc1 t1, $f22
- mthc1 t1, $f23
- mthc1 t1, $f24
- mthc1 t1, $f25
- mthc1 t1, $f26
- mthc1 t1, $f27
- mthc1 t1, $f28
- mthc1 t1, $f29
- mthc1 t1, $f30
- mthc1 t1, $f31
-1: .set pop
-#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
-#else
- .set MIPS_ISA_ARCH_LEVEL_RAW
- dmtc1 t1, $f0
- dmtc1 t1, $f2
- dmtc1 t1, $f4
- dmtc1 t1, $f6
- dmtc1 t1, $f8
- dmtc1 t1, $f10
- dmtc1 t1, $f12
- dmtc1 t1, $f14
- dmtc1 t1, $f16
- dmtc1 t1, $f18
- dmtc1 t1, $f20
- dmtc1 t1, $f22
- dmtc1 t1, $f24
- dmtc1 t1, $f26
- dmtc1 t1, $f28
- dmtc1 t1, $f30
-#endif
- jr ra
- END(_init_fpu)
-
- .set pop /* SET_HARDFLOAT */
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
deleted file mode 100644
index 9cc7bfab3419..000000000000
--- a/arch/mips/kernel/r6000_fpu.S
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * r6000_fpu.S: Save/restore floating point context for signal handlers.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1996 by Ralf Baechle
- *
- * Multi-arch abstraction and asm macros for easier reading:
- * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- */
-#include <asm/asm.h>
-#include <asm/fpregdef.h>
-#include <asm/mipsregs.h>
-#include <asm/asm-offsets.h>
-#include <asm/regdef.h>
-
- .set noreorder
- .set mips2
- .set push
- SET_HARDFLOAT
-
-/**
- * _save_fp_context() - save FP context from the FPU
- * @a0 - pointer to fpregs field of sigcontext
- * @a1 - pointer to fpc_csr field of sigcontext
- *
- * Save FP context, including the 32 FP data registers and the FP
- * control & status register, from the FPU to signal context.
- */
- LEAF(_save_fp_context)
- mfc0 t0,CP0_STATUS
- sll t0,t0,2
- bgez t0,1f
- nop
-
- cfc1 t1,fcr31
- /* Store the 16 double precision registers */
- sdc1 $f0,0(a0)
- sdc1 $f2,16(a0)
- sdc1 $f4,32(a0)
- sdc1 $f6,48(a0)
- sdc1 $f8,64(a0)
- sdc1 $f10,80(a0)
- sdc1 $f12,96(a0)
- sdc1 $f14,112(a0)
- sdc1 $f16,128(a0)
- sdc1 $f18,144(a0)
- sdc1 $f20,160(a0)
- sdc1 $f22,176(a0)
- sdc1 $f24,192(a0)
- sdc1 $f26,208(a0)
- sdc1 $f28,224(a0)
- sdc1 $f30,240(a0)
- jr ra
- sw t0,(a1)
-1: jr ra
- nop
- END(_save_fp_context)
-
-/**
- * _restore_fp_context() - restore FP context to the FPU
- * @a0 - pointer to fpregs field of sigcontext
- * @a1 - pointer to fpc_csr field of sigcontext
- *
- * Restore FP context, including the 32 FP data registers and the FP
- * control & status register, from signal context to the FPU.
- */
- LEAF(_restore_fp_context)
- mfc0 t0,CP0_STATUS
- sll t0,t0,2
-
- bgez t0,1f
- lw t0,(a1)
- /* Restore the 16 double precision registers */
- ldc1 $f0,0(a0)
- ldc1 $f2,16(a0)
- ldc1 $f4,32(a0)
- ldc1 $f6,48(a0)
- ldc1 $f8,64(a0)
- ldc1 $f10,80(a0)
- ldc1 $f12,96(a0)
- ldc1 $f14,112(a0)
- ldc1 $f16,128(a0)
- ldc1 $f18,144(a0)
- ldc1 $f20,160(a0)
- ldc1 $f22,176(a0)
- ldc1 $f24,192(a0)
- ldc1 $f26,208(a0)
- ldc1 $f28,224(a0)
- ldc1 $f30,240(a0)
- jr ra
- ctc1 t0,fcr31
-1: jr ra
- nop
- END(_restore_fp_context)
-
- .set pop /* SET_HARDFLOAT */
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 80ed68b2c95e..a9a7d78803cd 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -190,12 +190,6 @@ illegal_syscall:
sll t1, t0, 2
beqz v0, einval
lw t2, sys_call_table(t1) # syscall routine
- sw a0, PT_R2(sp) # call routine directly on restart
-
- /* Some syscalls like execve get their arguments from struct pt_regs
- and claim zero arguments in the syscall table. Thus we have to
- assume the worst case and shuffle around all potential arguments.
- If you want performance, don't use indirect syscalls. */
move a0, a1 # shift argument registers
move a1, a2
@@ -207,11 +201,6 @@ illegal_syscall:
sw t4, 16(sp)
sw t5, 20(sp)
sw t6, 24(sp)
- sw a0, PT_R4(sp) # .. and push back a0 - a3, some
- sw a1, PT_R5(sp) # syscalls expect them there
- sw a2, PT_R6(sp)
- sw a3, PT_R7(sp)
- sw a3, PT_R26(sp) # update a3 for syscall restarting
jr t2
/* Unreached */
@@ -371,7 +360,7 @@ EXPORT(sys_call_table)
PTR sys_writev
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_ni_syscall /* 4150 */
PTR sys_getsid
PTR sys_fdatasync
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 49765b44aa9b..65d5aeeb9bdb 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -311,7 +311,7 @@ EXPORT(sys_call_table)
PTR sys_sched_getaffinity
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_io_setup /* 5200 */
PTR sys_io_destroy
PTR sys_io_getevents
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 90bad2d1b2d3..cbf190ef9e8a 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -302,7 +302,7 @@ EXPORT(sysn32_call_table)
PTR compat_sys_sched_getaffinity
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR compat_sys_io_setup /* 6200 */
PTR sys_io_destroy
PTR compat_sys_io_getevents
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 2dd70bd104e1..9ebe3e2403b1 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -198,7 +198,6 @@ LEAF(sys32_syscall)
dsll t1, t0, 3
beqz v0, einval
ld t2, sys32_call_table(t1) # syscall routine
- sd a0, PT_R2(sp) # call routine directly on restart
move a0, a1 # shift argument registers
move a1, a2
@@ -207,11 +206,6 @@ LEAF(sys32_syscall)
move a4, a5
move a5, a6
move a6, a7
- sd a0, PT_R4(sp) # ... and push back a0 - a3, some
- sd a1, PT_R5(sp) # syscalls expect them there
- sd a2, PT_R6(sp)
- sd a3, PT_R7(sp)
- sd a3, PT_R26(sp) # update a3 for syscall restarting
jr t2
/* Unreached */
@@ -371,7 +365,7 @@ EXPORT(sys32_call_table)
PTR compat_sys_writev
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_ni_syscall /* 4150 */
PTR sys_getsid
PTR sys_fdatasync
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 01d1dbde5fbf..fe3939726765 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -670,6 +670,46 @@ static int __init early_parse_mem(char *p)
}
early_param("mem", early_parse_mem);
+static int __init early_parse_memmap(char *p)
+{
+ char *oldp;
+ u64 start_at, mem_size;
+
+ if (!p)
+ return -EINVAL;
+
+ if (!strncmp(p, "exactmap", 8)) {
+ pr_err("\"memmap=exactmap\" invalid on MIPS\n");
+ return 0;
+ }
+
+ oldp = p;
+ mem_size = memparse(p, &p);
+ if (p == oldp)
+ return -EINVAL;
+
+ if (*p == '@') {
+ start_at = memparse(p+1, &p);
+ add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
+ } else if (*p == '#') {
+ pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
+ return -EINVAL;
+ } else if (*p == '$') {
+ start_at = memparse(p+1, &p);
+ add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
+ } else {
+ pr_err("\"memmap\" invalid format!\n");
+ return -EINVAL;
+ }
+
+ if (*p == '\0') {
+ usermem = 1;
+ return 0;
+ } else
+ return -EINVAL;
+}
+early_param("memmap", early_parse_memmap);
+
#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
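
Accepted forms for the new parameter, as implemented by the parser above
(memparse() understands the usual K/M/G suffixes; the values are
illustrative):

    memmap=64M@0x20000000    # add 64MB of RAM at 512MB (BOOT_MEM_RAM)
    memmap=16M$0x10000000    # reserve 16MB at 256MB (BOOT_MEM_RESERVED)

The exactmap and nn#ss variants known from x86 are rejected with an error.
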
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 84165f2b31ff..cf5c7c05e5a3 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -93,38 +93,37 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
at the same time. */
err = __put_user(from->si_signo, &to->si_signo);
err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user((short)from->si_code, &to->si_code);
+ err |= __put_user(from->si_code, &to->si_code);
if (from->si_code < 0)
err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
else {
- switch (from->si_code >> 16) {
- case __SI_TIMER >> 16:
+ switch (siginfo_layout(from->si_signo, from->si_code)) {
+ case SIL_TIMER:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user(from->si_int, &to->si_int);
break;
- case __SI_CHLD >> 16:
+ case SIL_CHLD:
err |= __put_user(from->si_utime, &to->si_utime);
err |= __put_user(from->si_stime, &to->si_stime);
err |= __put_user(from->si_status, &to->si_status);
- default:
+ case SIL_KILL:
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
break;
- case __SI_FAULT >> 16:
+ case SIL_FAULT:
err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
break;
- case __SI_POLL >> 16:
+ case SIL_POLL:
err |= __put_user(from->si_band, &to->si_band);
err |= __put_user(from->si_fd, &to->si_fd);
break;
- case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
- case __SI_MESGQ >> 16:
+ case SIL_RT:
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_int, &to->si_int);
break;
- case __SI_SYS >> 16:
+ case SIL_SYS:
err |= __copy_to_user(&to->si_call_addr, &from->si_call_addr,
sizeof(compat_uptr_t));
err |= __put_user(from->si_syscall, &to->si_syscall);
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 1b070a76fcdd..406072e26752 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -179,7 +179,7 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
/*
* Tell the hardware to boot CPUx - runs on CPU0
*/
-static void bmips_boot_secondary(int cpu, struct task_struct *idle)
+static int bmips_boot_secondary(int cpu, struct task_struct *idle)
{
bmips_smp_boot_sp = __KSTK_TOS(idle);
bmips_smp_boot_gp = (unsigned long)task_thread_info(idle);
@@ -231,6 +231,8 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
}
cpumask_set_cpu(cpu, &bmips_booted_mask);
}
+
+ return 0;
}
/*
@@ -245,7 +247,7 @@ static void bmips_init_secondary(void)
break;
case CPU_BMIPS5000:
write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
- current_cpu_data.core = (read_c0_brcm_config() >> 25) & 3;
+ cpu_set_core(&current_cpu_data, (read_c0_brcm_config() >> 25) & 3);
break;
}
}
@@ -409,7 +411,7 @@ void __ref play_dead(void)
#endif /* CONFIG_HOTPLUG_CPU */
-struct plat_smp_ops bmips43xx_smp_ops = {
+const struct plat_smp_ops bmips43xx_smp_ops = {
.smp_setup = bmips_smp_setup,
.prepare_cpus = bmips_prepare_cpus,
.boot_secondary = bmips_boot_secondary,
@@ -423,7 +425,7 @@ struct plat_smp_ops bmips43xx_smp_ops = {
#endif
};
-struct plat_smp_ops bmips5000_smp_ops = {
+const struct plat_smp_ops bmips5000_smp_ops = {
.smp_setup = bmips_smp_setup,
.prepare_cpus = bmips_prepare_cpus,
.boot_secondary = bmips_boot_secondary,
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 76923349b4fe..05295a4909f1 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -24,7 +24,6 @@
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
-#include <linux/irqchip/mips-gic.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
@@ -78,7 +77,7 @@ static void cmp_smp_finish(void)
* __KSTK_TOS(idle) is apparently the stack pointer
* (unsigned long)idle->thread_info the gp
*/
-static void cmp_boot_secondary(int cpu, struct task_struct *idle)
+static int cmp_boot_secondary(int cpu, struct task_struct *idle)
{
struct thread_info *gp = task_thread_info(idle);
unsigned long sp = __KSTK_TOS(idle);
@@ -95,6 +94,7 @@ static void cmp_boot_secondary(int cpu, struct task_struct *idle)
#endif
amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0);
+ return 0;
}
/*
@@ -148,7 +148,7 @@ void __init cmp_prepare_cpus(unsigned int max_cpus)
}
-struct plat_smp_ops cmp_smp_ops = {
+const struct plat_smp_ops cmp_smp_ops = {
.send_ipi_single = mips_smp_send_ipi_single,
.send_ipi_mask = mips_smp_send_ipi_mask,
.init_secondary = cmp_init_secondary,
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 36954ddd0b9f..0063122c85da 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -11,7 +11,6 @@
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/irqchip/mips-gic.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
@@ -19,8 +18,7 @@
#include <linux/types.h>
#include <asm/bcache.h>
-#include <asm/mips-cm.h>
-#include <asm/mips-cpc.h>
+#include <asm/mips-cps.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
@@ -41,55 +39,58 @@ static int __init setup_nothreads(char *s)
}
early_param("nothreads", setup_nothreads);
-static unsigned core_vpe_count(unsigned core)
+static unsigned core_vpe_count(unsigned int cluster, unsigned core)
{
- unsigned cfg;
-
if (threads_disabled)
return 1;
- if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
- && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
- return 1;
-
- mips_cm_lock_other(core, 0);
- cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
- mips_cm_unlock_other();
- return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
+ return mips_cps_numvps(cluster, core);
}
static void __init cps_smp_setup(void)
{
- unsigned int ncores, nvpes, core_vpes;
+ unsigned int nclusters, ncores, nvpes, core_vpes;
unsigned long core_entry;
- int c, v;
+ int cl, c, v;
/* Detect & record VPE topology */
- ncores = mips_cm_numcores();
+ nvpes = 0;
+ nclusters = mips_cps_numclusters();
pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
- for (c = nvpes = 0; c < ncores; c++) {
- core_vpes = core_vpe_count(c);
- pr_cont("%c%u", c ? ',' : '{', core_vpes);
-
- /* Use the number of VPEs in core 0 for smp_num_siblings */
- if (!c)
- smp_num_siblings = core_vpes;
-
- for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
- cpu_data[nvpes + v].core = c;
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
- cpu_data[nvpes + v].vpe_id = v;
-#endif
+ for (cl = 0; cl < nclusters; cl++) {
+ if (cl > 0)
+ pr_cont(",");
+ pr_cont("{");
+
+ ncores = mips_cps_numcores(cl);
+ for (c = 0; c < ncores; c++) {
+ core_vpes = core_vpe_count(cl, c);
+
+ if (c > 0)
+ pr_cont(",");
+ pr_cont("%u", core_vpes);
+
+ /* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
+ if (!cl && !c)
+ smp_num_siblings = core_vpes;
+
+ for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
+ cpu_set_cluster(&cpu_data[nvpes + v], cl);
+ cpu_set_core(&cpu_data[nvpes + v], c);
+ cpu_set_vpe_id(&cpu_data[nvpes + v], v);
+ }
+
+ nvpes += core_vpes;
}
- nvpes += core_vpes;
+ pr_cont("}");
}
- pr_cont("} total %u\n", nvpes);
+ pr_cont(" total %u\n", nvpes);
/* Indicate present CPUs (CPU being synonymous with VPE) */
for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
- set_cpu_possible(v, true);
- set_cpu_present(v, true);
+ set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
+ set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
__cpu_number_map[v] = v;
__cpu_logical_map[v] = v;
}
@@ -121,7 +122,7 @@ static void __init cps_smp_setup(void)
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
unsigned ncores, core_vpes, c, cca;
- bool cca_unsuitable;
+ bool cca_unsuitable, cores_limited;
u32 *entry_code;
mips_mt_set_cpuoptions();
@@ -141,16 +142,21 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
}
/* Warn the user if the CCA prevents multi-core */
- ncores = mips_cm_numcores();
- if (cca_unsuitable && ncores > 1) {
- pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
- cca);
-
+ cores_limited = false;
+ if (cca_unsuitable || cpu_has_dc_aliases) {
for_each_present_cpu(c) {
- if (cpu_data[c].core)
- set_cpu_present(c, false);
+ if (cpus_are_siblings(smp_processor_id(), c))
+ continue;
+
+ set_cpu_present(c, false);
+ cores_limited = true;
}
}
+ if (cores_limited)
+ pr_warn("Using only one core due to %s%s%s\n",
+ cca_unsuitable ? "unsuitable CCA" : "",
+ (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
+ cpu_has_dc_aliases ? "dcache aliasing" : "");
/*
* Patch the start of mips_cps_core_entry to provide:
@@ -166,6 +172,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
__sync();
/* Allocate core boot configuration structs */
+ ncores = mips_cps_numcores(0);
mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
GFP_KERNEL);
if (!mips_cps_core_bootcfg) {
@@ -175,7 +182,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
/* Allocate VPE boot configuration structs */
for (c = 0; c < ncores; c++) {
- core_vpes = core_vpe_count(c);
+ core_vpes = core_vpe_count(0, c);
mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
sizeof(*mips_cps_core_bootcfg[c].vpe_config),
GFP_KERNEL);
@@ -187,7 +194,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
}
/* Mark this CPU as booted */
- atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
+ atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
1 << cpu_vpe_id(&current_cpu_data));
return;
@@ -210,11 +217,11 @@ err_out:
static void boot_core(unsigned int core, unsigned int vpe_id)
{
- u32 access, stat, seq_state;
+ u32 stat, seq_state;
unsigned timeout;
/* Select the appropriate core */
- mips_cm_lock_other(core, 0);
+ mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
/* Set its reset vector */
write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
@@ -223,12 +230,10 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
write_gcr_co_coherence(0);
/* Start it with the legacy memory map and exception base */
- write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB);
+ write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);
/* Ensure the core can access the GCRs */
- access = read_gcr_access();
- access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
- write_gcr_access(access);
+ set_gcr_access(1 << core);
if (mips_cpc_present()) {
/* Reset the core */
@@ -251,7 +256,8 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
timeout = 100;
while (true) {
stat = read_cpc_co_stat_conf();
- seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK;
+ seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+ seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
/* U6 == coherent execution, ie. the core is up */
if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
@@ -283,15 +289,15 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
static void remote_vpe_boot(void *dummy)
{
- unsigned core = current_cpu_data.core;
+ unsigned core = cpu_core(&current_cpu_data);
struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}
-static void cps_boot_secondary(int cpu, struct task_struct *idle)
+static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
- unsigned core = cpu_data[cpu].core;
+ unsigned core = cpu_core(&cpu_data[cpu]);
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
@@ -299,6 +305,10 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
unsigned int remote;
int err;
+ /* We don't yet support booting CPUs in other clusters */
+ if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&current_cpu_data))
+ return -ENOSYS;
+
vpe_cfg->pc = (unsigned long)&smp_bootstrap;
vpe_cfg->sp = __KSTK_TOS(idle);
vpe_cfg->gp = (unsigned long)task_thread_info(idle);
@@ -314,16 +324,16 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
}
if (cpu_has_vp) {
- mips_cm_lock_other(core, vpe_id);
+ mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
write_gcr_co_reset_base(core_entry);
mips_cm_unlock_other();
}
- if (core != current_cpu_data.core) {
+ if (!cpus_are_siblings(cpu, smp_processor_id())) {
/* Boot a VPE on another powered up core */
for (remote = 0; remote < NR_CPUS; remote++) {
- if (cpu_data[remote].core != core)
+ if (!cpus_are_siblings(cpu, remote))
continue;
if (cpu_online(remote))
break;
@@ -347,6 +357,7 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
mips_cps_boot_vpes(core_cfg, vpe_id);
out:
preempt_enable();
+ return 0;
}
static void cps_init_secondary(void)
@@ -356,7 +367,7 @@ static void cps_init_secondary(void)
dmt();
if (mips_cm_revision() >= CM_REV_CM3) {
- unsigned ident = gic_read_local_vp_id();
+ unsigned int ident = read_gic_vl_ident();
/*
* Ensure that our calculation of the VP ID matches up with
@@ -400,7 +411,7 @@ static int cps_cpu_disable(void)
if (!cps_pm_support_state(CPS_PM_POWER_GATED))
return -EINVAL;
- core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
+ core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
smp_mb__after_atomic();
set_cpu_online(cpu, false);
@@ -422,15 +433,17 @@ void play_dead(void)
local_irq_disable();
idle_task_exit();
cpu = smp_processor_id();
- core = cpu_data[cpu].core;
+ core = cpu_core(&cpu_data[cpu]);
cpu_death = CPU_DEATH_POWER;
pr_debug("CPU%d going offline\n", cpu);
if (cpu_has_mipsmt || cpu_has_vp) {
+ core = cpu_core(&cpu_data[cpu]);
+
/* Look for another online VPE within the core */
for_each_online_cpu(cpu_death_sibling) {
- if (cpu_data[cpu_death_sibling].core != core)
+ if (!cpus_are_siblings(cpu, cpu_death_sibling))
continue;
/*
@@ -486,8 +499,9 @@ static void wait_for_sibling_halt(void *ptr_cpu)
static void cps_cpu_die(unsigned int cpu)
{
- unsigned core = cpu_data[cpu].core;
+ unsigned core = cpu_core(&cpu_data[cpu]);
unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ ktime_t fail_time;
unsigned stat;
int err;
@@ -514,16 +528,37 @@ static void cps_cpu_die(unsigned int cpu)
* state, the latter happening when a JTAG probe is connected
* in which case the CPC will refuse to power down the core.
*/
+ fail_time = ktime_add_ms(ktime_get(), 2000);
do {
- mips_cm_lock_other(core, 0);
+ mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
mips_cpc_lock_other(core);
stat = read_cpc_co_stat_conf();
- stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
+ stat &= CPC_Cx_STAT_CONF_SEQSTATE;
+ stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
mips_cpc_unlock_other();
mips_cm_unlock_other();
- } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
- stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
- stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);
+
+ if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
+ stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
+ stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
+ break;
+
+ /*
+ * The core ought to have powered down, but didn't &
+ * now we don't really know what state it's in. It's
+ * likely that its _pwr_up pin has been wired to logic
+ * 1 & it powered back up as soon as we powered it
+ * down...
+ *
+ * The best we can do is warn the user & continue in
+ * the hope that the core is doing nothing harmful &
+ * might behave properly if we online it later.
+ */
+ if (WARN(ktime_after(ktime_get(), fail_time),
+ "CPU%u hasn't powered down, seq. state %u\n",
+ cpu, stat))
+ break;
+ } while (1);
/* Indicate the core is powered off */
bitmap_clear(core_power, core, 1);
@@ -539,7 +574,7 @@ static void cps_cpu_die(unsigned int cpu)
panic("Failed to call remote sibling CPU\n");
} else if (cpu_has_vp) {
do {
- mips_cm_lock_other(core, vpe_id);
+ mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
stat = read_cpc_co_vp_running();
mips_cm_unlock_other();
} while (stat & (1 << vpe_id));
@@ -548,7 +583,7 @@ static void cps_cpu_die(unsigned int cpu)
#endif /* CONFIG_HOTPLUG_CPU */
-static struct plat_smp_ops cps_smp_ops = {
+static const struct plat_smp_ops cps_smp_ops = {
.smp_setup = cps_smp_setup,
.prepare_cpus = cps_prepare_cpus,
.boot_secondary = cps_boot_secondary,
@@ -564,7 +599,7 @@ static struct plat_smp_ops cps_smp_ops = {
bool mips_cps_smp_in_use(void)
{
- extern struct plat_smp_ops *mp_ops;
+ extern const struct plat_smp_ops *mp_ops;
return mp_ops == &cps_smp_ops;
}
@@ -576,7 +611,7 @@ int register_cps_smp_ops(void)
}
/* check we have a GIC - we need one for IPIs */
- if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
+ if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
return -ENODEV;
}
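
The sequencer-state reads above move from pre-shifted _MSK/_SHF constants
to the "mask, then shift right by __ffs(mask)" idiom, so the shift amount
is derived from the mask itself. A standalone sketch; the mask value is
assumed here for illustration, and the kernel's __ffs() is modelled with
__builtin_ctz():

    #include <stdint.h>
    #include <stdio.h>

    #define CPC_Cx_STAT_CONF_SEQSTATE (0xfu << 19)  /* assumed layout */

    static unsigned int ffs_sketch(uint32_t x)
    {
        return __builtin_ctz(x);        /* index of lowest set bit */
    }

    int main(void)
    {
        uint32_t stat = 0x00300000;     /* assumed raw register value */
        uint32_t seq = (stat & CPC_Cx_STAT_CONF_SEQSTATE)
                        >> ffs_sketch(CPC_Cx_STAT_CONF_SEQSTATE);

        printf("seq_state = %u\n", (unsigned)seq);      /* prints 6 */
        return 0;
    }
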
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index ed6b4df583ea..94ab3276b48c 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -21,7 +21,6 @@
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
-#include <linux/irqchip/mips-gic.h>
#include <linux/compiler.h>
#include <linux/sched/task_stack.h>
#include <linux/smp.h>
@@ -36,6 +35,7 @@
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
+#include <asm/mips-cps.h>
static void __init smvp_copy_vpe_config(void)
{
@@ -83,7 +83,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
if (tc != 0)
smvp_copy_vpe_config();
- cpu_data[ncpu].vpe_id = tc;
+ cpu_set_vpe_id(&cpu_data[ncpu], tc);
return ncpu;
}
@@ -118,14 +118,12 @@ static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
static void vsmp_init_secondary(void)
{
-#ifdef CONFIG_MIPS_GIC
/* This is Malta specific: IPI,performance and timer interrupts */
- if (gic_present)
+ if (mips_gic_present())
change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
STATUSF_IP4 | STATUSF_IP5 |
STATUSF_IP6 | STATUSF_IP7);
else
-#endif
change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
STATUSF_IP6 | STATUSF_IP7);
}
@@ -152,7 +150,7 @@ static void vsmp_smp_finish(void)
* (unsigned long)idle->thread_info the gp
* assumes a 1:1 mapping of TC => VPE
*/
-static void vsmp_boot_secondary(int cpu, struct task_struct *idle)
+static int vsmp_boot_secondary(int cpu, struct task_struct *idle)
{
struct thread_info *gp = task_thread_info(idle);
dvpe();
@@ -184,6 +182,8 @@ static void vsmp_boot_secondary(int cpu, struct task_struct *idle)
clear_c0_mvpcontrol(MVPCONTROL_VPC);
evpe(EVPE_ENABLE);
+
+ return 0;
}
/*
@@ -239,7 +239,7 @@ static void __init vsmp_prepare_cpus(unsigned int max_cpus)
mips_mt_set_cpuoptions();
}
-struct plat_smp_ops vsmp_smp_ops = {
+const struct plat_smp_ops vsmp_smp_ops = {
.send_ipi_single = mips_smp_send_ipi_single,
.send_ipi_mask = mips_smp_send_ipi_mask,
.init_secondary = vsmp_init_secondary,
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
index 17878d71ef2b..525d3196f793 100644
--- a/arch/mips/kernel/smp-up.c
+++ b/arch/mips/kernel/smp-up.c
@@ -39,8 +39,9 @@ static void up_smp_finish(void)
/*
* Firmware CPU startup hook
*/
-static void up_boot_secondary(int cpu, struct task_struct *idle)
+static int up_boot_secondary(int cpu, struct task_struct *idle)
{
+ return 0;
}
static void __init up_smp_setup(void)
@@ -63,7 +64,7 @@ static void up_cpu_die(unsigned int cpu)
}
#endif
-struct plat_smp_ops up_smp_ops = {
+const struct plat_smp_ops up_smp_ops = {
.send_ipi_single = up_send_ipi_single,
.send_ipi_mask = up_send_ipi_mask,
.init_secondary = up_init_secondary,
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index aba1afb64b62..bbe19b64def5 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -96,8 +96,7 @@ static inline void set_cpu_sibling_map(int cpu)
if (smp_num_siblings > 1) {
for_each_cpu(i, &cpu_sibling_setup_map) {
- if (cpu_data[cpu].package == cpu_data[i].package &&
- cpu_data[cpu].core == cpu_data[i].core) {
+ if (cpus_are_siblings(cpu, i)) {
cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
}
@@ -134,8 +133,7 @@ void calculate_cpu_foreign_map(void)
for_each_online_cpu(i) {
core_present = 0;
for_each_cpu(k, &temp_foreign_map)
- if (cpu_data[i].package == cpu_data[k].package &&
- cpu_data[i].core == cpu_data[k].core)
+ if (cpus_are_siblings(i, k))
core_present = 1;
if (!core_present)
cpumask_set_cpu(i, &temp_foreign_map);
@@ -146,10 +144,10 @@ void calculate_cpu_foreign_map(void)
&temp_foreign_map, &cpu_sibling_map[i]);
}
-struct plat_smp_ops *mp_ops;
+const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);
-void register_smp_ops(struct plat_smp_ops *ops)
+void register_smp_ops(const struct plat_smp_ops *ops)
{
if (mp_ops)
printk(KERN_WARNING "Overriding previously set SMP ops\n");
@@ -186,13 +184,13 @@ void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
if (mips_cpc_present()) {
for_each_cpu(cpu, mask) {
- core = cpu_data[cpu].core;
-
- if (core == current_cpu_data.core)
+ if (cpus_are_siblings(cpu, smp_processor_id()))
continue;
+ core = cpu_core(&cpu_data[cpu]);
+
while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
- mips_cm_lock_other(core, 0);
+ mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
mips_cpc_lock_other(core);
write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
mips_cpc_unlock_other();
@@ -335,6 +333,9 @@ int mips_smp_ipi_free(const struct cpumask *mask)
static int __init mips_smp_ipi_init(void)
{
+ if (num_possible_cpus() == 1)
+ return 0;
+
mips_smp_ipi_allocate(cpu_possible_mask);
call_desc = irq_to_desc(call_virq);
@@ -373,9 +374,6 @@ asmlinkage void start_secondary(void)
cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
- complete(&cpu_running);
- synchronise_count_slave(cpu);
-
set_cpu_online(cpu, true);
set_cpu_sibling_map(cpu);
@@ -383,6 +381,9 @@ asmlinkage void start_secondary(void)
calculate_cpu_foreign_map();
+ complete(&cpu_running);
+ synchronise_count_slave(cpu);
+
/*
* irq will be enabled in ->smp_finish(), enabling it too early
* is dangerous.
@@ -438,7 +439,11 @@ void smp_prepare_boot_cpu(void)
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- mp_ops->boot_secondary(cpu, tidle);
+ int err;
+
+ err = mp_ops->boot_secondary(cpu, tidle);
+ if (err)
+ return err;
/*
* We must check for timeout here, as the CPU will not be marked
@@ -645,12 +650,12 @@ EXPORT_SYMBOL(flush_tlb_one);
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
-static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
+static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);
void tick_broadcast(const struct cpumask *mask)
{
atomic_t *count;
- struct call_single_data *csd;
+ call_single_data_t *csd;
int cpu;
for_each_cpu(cpu, mask) {
@@ -671,7 +676,7 @@ static void tick_broadcast_callee(void *info)
static int __init tick_broadcast_init(void)
{
- struct call_single_data *csd;
+ call_single_data_t *csd;
int cpu;
for (cpu = 0; cpu < NR_CPUS; cpu++) {
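
The repeated package/core comparisons in the sibling-map code are folded
into cpus_are_siblings(). A self-contained sketch of the predicate being
replaced; the real helper lives in asm/cpu-info.h and additionally folds
in the cluster ID that this series introduces:

    #include <stdio.h>

    struct cpuinfo_sketch { int package, core; };  /* mock of cpuinfo_mips */

    static struct cpuinfo_sketch cpu_data[4] = {
        { 0, 0 }, { 0, 0 }, { 0, 1 }, { 1, 0 },
    };

    /* sketch: two CPUs are siblings if they share a package and a core */
    static int cpus_are_siblings_sketch(int a, int b)
    {
        return cpu_data[a].package == cpu_data[b].package &&
               cpu_data[a].core == cpu_data[b].core;
    }

    int main(void)
    {
        printf("%d %d\n", cpus_are_siblings_sketch(0, 1),
               cpus_are_siblings_sketch(0, 2));    /* prints 1 0 */
        return 0;
    }
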
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 1dfa7f5796c7..58c6f634b550 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -29,6 +29,7 @@
#include <linux/sched/task_stack.h>
#include <asm/asm.h>
+#include <asm/asm-eva.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
@@ -131,16 +132,14 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
__asm__ __volatile__ (
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" li %[err], 0 \n"
- "1: ll %[old], (%[addr]) \n"
+ "1: \n"
+ user_ll("%[old]", "(%[addr])")
" move %[tmp], %[new] \n"
- "2: sc %[tmp], (%[addr]) \n"
- " beqz %[tmp], 4f \n"
+ "2: \n"
+ user_sc("%[tmp]", "(%[addr])")
+ " beqz %[tmp], 1b \n"
"3: \n"
" .insn \n"
- " .subsection 2 \n"
- "4: b 1b \n"
- " .previous \n"
- " \n"
" .section .fixup,\"ax\" \n"
"5: li %[err], %[efault] \n"
" j 3b \n"
@@ -192,6 +191,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
unreachable();
}
+/*
+ * mips_atomic_set() normally returns directly via syscall_exit potentially
+ * clobbering static registers, so be sure to preserve them.
+ */
+save_static_function(sys_sysmips);
+
SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
{
switch (cmd) {
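
In the rewritten ll/sc sequence above, sc writes 1 to %[tmp] on success
and 0 on failure, so "beqz %[tmp], 1b" retries inline until the store
sticks, replacing the old out-of-line .subsection branch. At the C level
the loop performs an unconditional atomic exchange; a sketch with C11
atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    /* what the ll/sc loop computes: swap in new, return the old value */
    static long atomic_set_sketch(_Atomic long *addr, long new)
    {
        return atomic_exchange(addr, new);      /* ll/sc retry inside */
    }

    int main(void)
    {
        _Atomic long word = 1;
        long old = atomic_set_sketch(&word, 42);

        printf("old=%ld new=%ld\n", old, (long)word);
        return 0;
    }
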
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index c036157fb891..a6ebc8135112 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -72,20 +72,6 @@ EXPORT_SYMBOL(perf_irq);
unsigned int mips_hpt_frequency;
EXPORT_SYMBOL_GPL(mips_hpt_frequency);
-/*
- * This function exists in order to cause an error due to a duplicate
- * definition if platform code should have its own implementation. The hook
- * to use instead is plat_time_init. plat_time_init does not receive the
- * irqaction pointer argument anymore. This is because any function which
- * initializes an interrupt timer now takes care of its own request_irq rsp.
- * setup_irq calls and each clock_event_device should use its own
- * struct irqrequest.
- */
-void __init plat_timer_setup(void)
-{
- BUG();
-}
-
static __init int cpu_has_mfc0_count_bug(void)
{
switch (current_cpu_type()) {
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 38dfa27730ff..5669d3b8bd38 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -50,9 +50,8 @@
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
-#include <asm/mips-cm.h>
+#include <asm/mips-cps.h>
#include <asm/mips-r2-to-r6-emul.h>
-#include <asm/mips-cm.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
@@ -429,7 +428,8 @@ static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
const struct exception_table_entry *e;
- e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
+ e = search_extable(__start___dbe_table,
+ __stop___dbe_table - __start___dbe_table, addr);
if (!e)
e = search_module_dbetables(addr);
return e;
@@ -733,8 +733,7 @@ void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
si.si_code = FPE_FLTUND;
else if (fcr31 & FPU_CSR_INE_X)
si.si_code = FPE_FLTRES;
- else
- si.si_code = __SI_FAULT;
+
force_sig_info(SIGFPE, &si, tsk);
}
@@ -1672,7 +1671,7 @@ static inline void parity_protection_init(void)
/* Probe L2 ECC support */
gcr_ectl = read_gcr_err_control();
- if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT_MSK) ||
+ if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
!(cp0_ectl & ERRCTL_PE)) {
/*
* One of L1 or L2 ECC checking isn't supported,
@@ -1692,12 +1691,12 @@ static inline void parity_protection_init(void)
/* Configure L2 ECC checking */
if (l2parity)
- gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK;
+ gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
else
- gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK;
+ gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
write_gcr_err_control(gcr_ectl);
gcr_ectl = read_gcr_err_control();
- gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK;
+ gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
WARN_ON(!!gcr_ectl != l2parity);
pr_info("Cache parity protection %sabled\n",
@@ -2427,21 +2426,6 @@ void __init trap_init(void)
set_except_vector(EXCCODE_TR, handle_tr);
set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
- if (current_cpu_type() == CPU_R6000 ||
- current_cpu_type() == CPU_R6000A) {
- /*
- * The R6000 is the only R-series CPU that features a machine
- * check exception (similar to the R4000 cache error) and
- * unaligned ldc1/sdc1 exception. The handlers have not been
- * written yet. Well, anyway there is no R6000 machine on the
- * current list of targets for Linux/MIPS.
- * (Duh, crap, there is someone with a triple R6k machine)
- */
- //set_except_vector(14, handle_mc);
- //set_except_vector(15, handle_ndc);
- }
-
-
if (board_nmi_handler_setup)
board_nmi_handler_setup();
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index f806ee56e639..2d0b912f9e3e 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -939,88 +939,114 @@ static void emulate_load_store_insn(struct pt_regs *regs,
* The remaining opcodes are the ones that are really of
* interest.
*/
-#ifdef CONFIG_EVA
case spec3_op:
- /*
- * we can land here only from kernel accessing user memory,
- * so we need to "switch" the address limit to user space, so
- * address check can work properly.
- */
- seg = get_fs();
- set_fs(USER_DS);
- switch (insn.spec3_format.func) {
- case lhe_op:
- if (!access_ok(VERIFY_READ, addr, 2)) {
- set_fs(seg);
- goto sigbus;
- }
- LoadHWE(addr, value, res);
- if (res) {
- set_fs(seg);
- goto fault;
- }
- compute_return_epc(regs);
- regs->regs[insn.spec3_format.rt] = value;
- break;
- case lwe_op:
- if (!access_ok(VERIFY_READ, addr, 4)) {
- set_fs(seg);
- goto sigbus;
+ if (insn.dsp_format.func == lx_op) {
+ switch (insn.dsp_format.op) {
+ case lwx_op:
+ if (!access_ok(VERIFY_READ, addr, 4))
+ goto sigbus;
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.dsp_format.rd] = value;
+ break;
+ case lhx_op:
+ if (!access_ok(VERIFY_READ, addr, 2))
+ goto sigbus;
+ LoadHW(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.dsp_format.rd] = value;
+ break;
+ default:
+ goto sigill;
}
+ }
+#ifdef CONFIG_EVA
+ else {
+ /*
+ * We can land here only from the kernel accessing user
+ * memory, so we need to "switch" the address limit to
+ * user space so that the address check works properly.
+ */
+ seg = get_fs();
+ set_fs(USER_DS);
+ switch (insn.spec3_format.func) {
+ case lhe_op:
+ if (!access_ok(VERIFY_READ, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ LoadHWE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case lwe_op:
+ if (!access_ok(VERIFY_READ, addr, 4)) {
+ set_fs(seg);
+ goto sigbus;
+ }
LoadWE(addr, value, res);
- if (res) {
- set_fs(seg);
- goto fault;
- }
- compute_return_epc(regs);
- regs->regs[insn.spec3_format.rt] = value;
- break;
- case lhue_op:
- if (!access_ok(VERIFY_READ, addr, 2)) {
- set_fs(seg);
- goto sigbus;
- }
- LoadHWUE(addr, value, res);
- if (res) {
- set_fs(seg);
- goto fault;
- }
- compute_return_epc(regs);
- regs->regs[insn.spec3_format.rt] = value;
- break;
- case she_op:
- if (!access_ok(VERIFY_WRITE, addr, 2)) {
- set_fs(seg);
- goto sigbus;
- }
- compute_return_epc(regs);
- value = regs->regs[insn.spec3_format.rt];
- StoreHWE(addr, value, res);
- if (res) {
- set_fs(seg);
- goto fault;
- }
- break;
- case swe_op:
- if (!access_ok(VERIFY_WRITE, addr, 4)) {
- set_fs(seg);
- goto sigbus;
- }
- compute_return_epc(regs);
- value = regs->regs[insn.spec3_format.rt];
- StoreWE(addr, value, res);
- if (res) {
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case lhue_op:
+ if (!access_ok(VERIFY_READ, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ LoadHWUE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case she_op:
+ if (!access_ok(VERIFY_WRITE, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+ StoreHWE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ break;
+ case swe_op:
+ if (!access_ok(VERIFY_WRITE, addr, 4)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+ StoreWE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ break;
+ default:
set_fs(seg);
- goto fault;
+ goto sigill;
}
- break;
- default:
set_fs(seg);
- goto sigill;
}
- set_fs(seg);
- break;
#endif
+ break;
case lh_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
@@ -1352,7 +1378,7 @@ sigill:
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
-const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
+static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
static void emulate_load_store_microMIPS(struct pt_regs *regs,
void __user *addr)
@@ -1984,6 +2010,8 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
u16 __user *pc16;
unsigned long origpc;
union mips16e_instruction mips16inst, oldinst;
+ unsigned int opcode;
+ int extended = 0;
origpc = regs->cp0_epc;
orig31 = regs->regs[31];
@@ -1996,6 +2024,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
/* skip EXTEND instruction */
if (mips16inst.ri.opcode == MIPS16e_extend_op) {
+ extended = 1;
pc16++;
__get_user(mips16inst.full, pc16);
} else if (delay_slot(regs)) {
@@ -2008,7 +2037,8 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
goto sigbus;
}
- switch (mips16inst.ri.opcode) {
+ opcode = mips16inst.ri.opcode;
+ switch (opcode) {
case MIPS16e_i64_op: /* I64 or RI64 instruction */
switch (mips16inst.i64.func) { /* I64/RI64 func field check */
case MIPS16e_ldpc_func:
@@ -2028,9 +2058,40 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
goto sigbus;
case MIPS16e_swsp_op:
+ reg = reg16to32[mips16inst.ri.rx];
+ if (extended && cpu_has_mips16e2)
+ switch (mips16inst.ri.imm >> 5) {
+ case 0: /* SWSP */
+ case 1: /* SWGP */
+ break;
+ case 2: /* SHGP */
+ opcode = MIPS16e_sh_op;
+ break;
+ default:
+ goto sigbus;
+ }
+ break;
+
case MIPS16e_lwpc_op:
+ reg = reg16to32[mips16inst.ri.rx];
+ break;
+
case MIPS16e_lwsp_op:
reg = reg16to32[mips16inst.ri.rx];
+ if (extended && cpu_has_mips16e2)
+ switch (mips16inst.ri.imm >> 5) {
+ case 0: /* LWSP */
+ case 1: /* LWGP */
+ break;
+ case 2: /* LHGP */
+ opcode = MIPS16e_lh_op;
+ break;
+ case 4: /* LHUGP */
+ opcode = MIPS16e_lhu_op;
+ break;
+ default:
+ goto sigbus;
+ }
break;
case MIPS16e_i8_op:
@@ -2044,7 +2105,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
break;
}
- switch (mips16inst.ri.opcode) {
+ switch (opcode) {
case MIPS16e_lb_op:
case MIPS16e_lbu_op:
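
The LoadW()/LoadHW() helpers used throughout this file assemble an
unaligned datum from narrower accesses, with exception fixups attached.
Minus the fault handling, the portable C equivalent is a memcpy into a
local, which compilers lower to safe byte or partial-word loads; a small
sketch:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* sketch of LoadW()'s job: fetch a 32-bit word at any alignment */
    static uint32_t load_w_unaligned(const void *addr)
    {
        uint32_t v;

        memcpy(&v, addr, sizeof(v));
        return v;
    }

    int main(void)
    {
        uint8_t buf[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

        /* misaligned by one byte; the result depends on endianness */
        printf("0x%08" PRIx32 "\n", load_w_unaligned(buf + 1));
        return 0;
    }
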
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 093517e85a6c..019035d7225c 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -13,13 +13,13 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/irqchip/mips-gic.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <asm/abi.h>
+#include <asm/mips-cps.h>
#include <asm/vdso.h>
/* Kernel-provided data used by the VDSO. */
@@ -99,9 +99,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mips_vdso_image *image = current->thread.abi->vdso;
struct mm_struct *mm = current->mm;
- unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr;
+ unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn;
struct vm_area_struct *vma;
- struct resource gic_res;
int ret;
if (down_write_killable(&mm->mmap_sem))
@@ -125,7 +124,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* only map a page even though the total area is 64K, as we only need
* the counter registers at the start.
*/
- gic_size = gic_present ? PAGE_SIZE : 0;
+ gic_size = mips_gic_present() ? PAGE_SIZE : 0;
vvar_size = gic_size + PAGE_SIZE;
size = vvar_size + image->size;
@@ -148,13 +147,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
/* Map GIC user page. */
if (gic_size) {
- ret = gic_get_usm_range(&gic_res);
- if (ret)
- goto out;
+ gic_pfn = virt_to_phys(mips_gic_base + MIPS_GIC_USER_OFS) >> PAGE_SHIFT;
- ret = io_remap_pfn_range(vma, base,
- gic_res.start >> PAGE_SHIFT,
- gic_size,
+ ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
pgprot_noncached(PAGE_READONLY));
if (ret)
goto out;