Diffstat (limited to 'arch/mips/include/asm'):

 arch/mips/include/asm/bitops.h     |  4 ++--
 arch/mips/include/asm/bootinfo.h   |  1 +
 arch/mips/include/asm/io.h         |  3 ---
 arch/mips/include/asm/jump_label.h | 17 +++++++++++------
 arch/mips/include/asm/mmiowb.h     | 11 +++++++++++
 arch/mips/include/asm/spinlock.h   | 15 +++++++++++++++
 arch/mips/include/asm/syscall.h    |  9 +++++----
 arch/mips/include/asm/tlb.h        | 17 -----------------
 arch/mips/include/asm/uasm.h       |  8 ++++++++
 9 files changed, 53 insertions(+), 32 deletions(-)
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 830c93a010c3..9a466dde9b96 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -482,7 +482,7 @@ static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *
* Return the bit position (0..63) of the most significant 1 bit in a word
* Returns -1 if no 1 bit exists
*/
-static inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long __fls(unsigned long word)
{
int num;
@@ -548,7 +548,7 @@ static inline unsigned long __fls(unsigned long word)
* Returns 0..SZLONG-1
* Undefined if no bit exists, so code should check against 0 first.
*/
-static inline unsigned long __ffs(unsigned long word)
+static __always_inline unsigned long __ffs(unsigned long word)
{
return __fls(word & -word);
}
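
Marking these __always_inline guarantees they are folded into every caller even when the compiler's inlining heuristics would otherwise spill them out of line. The identity the one-line __ffs() relies on is easy to check in user space; my_fls()/my_ffs() below are stand-ins for the kernel helpers, not kernel symbols:

	#include <stdio.h>

	/* Stand-in for __fls(): index of the most significant set bit. */
	static unsigned long my_fls(unsigned long word)
	{
		return sizeof(word) * 8 - 1 - __builtin_clzl(word);
	}

	/* Stand-in for __ffs(): word & -word isolates the lowest set
	 * bit, so the MSB of that value is the LSB of the original
	 * word -- the trick the kernel's __ffs() uses above. */
	static unsigned long my_ffs(unsigned long word)
	{
		return my_fls(word & -word);
	}

	int main(void)
	{
		unsigned long w = 0x50;	/* bits 4 and 6 set */

		printf("fls=%lu ffs=%lu\n", my_fls(w), my_ffs(w));	/* 6, 4 */
		return 0;
	}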
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index a301a8f4bc66..235bc2f52113 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -92,6 +92,7 @@ extern unsigned long mips_machtype;
#define BOOT_MEM_ROM_DATA 2
#define BOOT_MEM_RESERVED 3
#define BOOT_MEM_INIT_RAM 4
+#define BOOT_MEM_NOMAP 5
/*
* A memory map that's built upon what was determined
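
BOOT_MEM_NOMAP marks firmware-owned ranges that must be reserved without ever being mapped. A hedged sketch of how platform setup code might use it, assuming the long-standing MIPS add_memory_region() helper; the addresses are made up:

	#include <asm/bootinfo.h>

	static void __init plat_mem_init(void)
	{
		/* Usable RAM below the firmware area (illustrative). */
		add_memory_region(0x00000000, 0x10000000, BOOT_MEM_RAM);
		/* Firmware-owned range: reserve it and never map it. */
		add_memory_region(0x10000000, 0x00100000, BOOT_MEM_NOMAP);
	}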
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 845fbbc7a2e3..29997e42480e 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -102,9 +102,6 @@ static inline void set_io_port_base(unsigned long base)
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()
-/* Some callers use this older API instead. */
-#define mmiowb() iobarrier_w()
-
/*
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index e77672539e8e..3185fd3220ec 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -11,6 +11,7 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <asm/isa-rev.h>
#define JUMP_LABEL_NOP_SIZE 4
@@ -21,15 +22,20 @@
#endif
#ifdef CONFIG_CPU_MICROMIPS
-#define NOP_INSN "nop32"
+# define B_INSN "b32"
+# define J_INSN "j32"
+#elif MIPS_ISA_REV >= 6
+# define B_INSN "bc"
+# define J_INSN "bc"
#else
-#define NOP_INSN "nop"
+# define B_INSN "b"
+# define J_INSN "j"
#endif
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\t" NOP_INSN "\n\t"
- "nop\n\t"
+ asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
+ "2:\t.insn\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
".popsection\n\t"
@@ -42,8 +48,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\tj %l[l_yes]\n\t"
- "nop\n\t"
+ asm_volatile_goto("1:\t" J_INSN " %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
".popsection\n\t"
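
Functionally the change drops the delay-slot nop: the disabled case is now a single compact branch to the .insn at 2: (a fall-through), and enabling the key patches in J_INSN to the out-of-line label. The consumer-facing API is unchanged; a minimal sketch, with mips_feature_key and do_feature() hypothetical:

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(mips_feature_key);	/* hypothetical */

	static void do_feature(void)
	{
		/* hypothetical out-of-line slow path */
	}

	void hot_path(void)
	{
		/* Disabled: one B_INSN to the next instruction, no
		 * delay-slot nop. Enabled: patched to J_INSN l_yes. */
		if (static_branch_unlikely(&mips_feature_key))
			do_feature();
	}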
diff --git a/arch/mips/include/asm/mmiowb.h b/arch/mips/include/asm/mmiowb.h
new file mode 100644
index 000000000000..a40824e3ef8e
--- /dev/null
+++ b/arch/mips/include/asm/mmiowb.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MMIOWB_H
+#define _ASM_MMIOWB_H
+
+#include <asm/io.h>
+
+#define mmiowb() iobarrier_w()
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_MMIOWB_H */
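
The define itself only moves here from io.h, but pulling in asm-generic/mmiowb.h opts MIPS into generic mmiowb tracking, so spin_unlock() can issue the barrier on behalf of code that omits it. The legacy pattern being preserved, sketched with a hypothetical device (hw_lock, hw_regs, REG_KICK are made up):

	#include <linux/io.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(hw_lock);	/* hypothetical */
	static void __iomem *hw_regs;		/* hypothetical mapping */
	#define REG_KICK 0x10			/* hypothetical register */

	static void hw_kick(u32 val)
	{
		spin_lock(&hw_lock);
		writel(val, hw_regs + REG_KICK);	/* posted MMIO write */
		mmiowb();	/* keep it ordered ahead of the unlock */
		spin_unlock(&hw_lock);
	}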
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index ee81297d9117..8a88eb265516 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -11,6 +11,21 @@
#include <asm/processor.h>
#include <asm/qrwlock.h>
+
+#include <asm-generic/qspinlock_types.h>
+
+#define queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ /* This could be optimised with ARCH_HAS_MMIOWB */
+ mmiowb();
+ smp_store_release(&lock->locked, 0);
+}
+
#include <asm/qspinlock.h>
#endif /* _ASM_SPINLOCK_H */
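
queued_spin_unlock() above is the generic release-store unlock plus the mmiowb() needed until MIPS selects ARCH_HAS_MMIOWB. The release-store half maps directly onto C11 atomics; a runnable user-space sketch (qspinlock_sketch is not the kernel type):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	struct qspinlock_sketch { _Atomic uint8_t locked; };

	static int data;	/* protected by the lock */

	static void unlock_sketch(struct qspinlock_sketch *lock)
	{
		/* Mirrors smp_store_release(&lock->locked, 0): release
		 * ordering publishes every write made while the lock
		 * was held before the zeroed byte becomes visible. */
		atomic_store_explicit(&lock->locked, 0, memory_order_release);
	}

	int main(void)
	{
		struct qspinlock_sketch lock = { .locked = 1 };

		data = 42;		/* inside the critical section */
		unlock_sketch(&lock);
		printf("locked=%u data=%d\n", (unsigned)lock.locked, data);
		return 0;
	}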
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 6cf8ffb5367e..acf80ae0a430 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -116,9 +116,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
+ unsigned int i = 0;
+ unsigned int n = 6;
int ret;
/* O32 ABI syscall() */
@@ -140,14 +141,14 @@ extern const unsigned long sys_call_table[];
extern const unsigned long sys32_call_table[];
extern const unsigned long sysn32_call_table[];
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
int arch = AUDIT_ARCH_MIPS;
#ifdef CONFIG_64BIT
- if (!test_thread_flag(TIF_32BIT_REGS)) {
+ if (!test_tsk_thread_flag(task, TIF_32BIT_REGS)) {
arch |= __AUDIT_ARCH_64BIT;
/* N32 sets only TIF_32BIT_ADDR */
- if (test_thread_flag(TIF_32BIT_ADDR))
+ if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
}
#endif
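
Both helpers track generic-API changes: syscall_get_arguments() now always copies all six arguments (the i/n parameters are gone from the generic signature), and syscall_get_arch() takes the task explicitly so tracers can query a tracee rather than current. A hedged caller sketch; trace_entry() is a made-up hook:

	#include <linux/audit.h>
	#include <linux/printk.h>
	#include <linux/sched.h>
	#include <asm/syscall.h>

	static void trace_entry(struct task_struct *task, struct pt_regs *regs)
	{
		unsigned long args[6];
		int arch = syscall_get_arch(task);	/* tracee's ABI, not ours */

		syscall_get_arguments(task, regs, args);	/* all six args */
		pr_info("abi=%#x a0=%#lx\n", arch, args[0]);
	}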
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h
index b6823b9e94da..90f3ad76d9e0 100644
--- a/arch/mips/include/asm/tlb.h
+++ b/arch/mips/include/asm/tlb.h
@@ -5,23 +5,6 @@
#include <asm/cpu-features.h>
#include <asm/mipsregs.h>
-/*
- * MIPS doesn't need any special per-pte or per-vma handling, except
- * we need to flush cache for area to be unmapped.
- */
-#define tlb_start_vma(tlb, vma) \
- do { \
- if (!tlb->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
- } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-
-/*
- * .. because we flush the whole mm when it fills up.
- */
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#define _UNIQUE_ENTRYHI(base, idx) \
(((base) + ((idx) << (PAGE_SHIFT + 1))) | \
(cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
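
Deleting these definitions switches MIPS to the asm-generic mmu_gather implementation, which already flushes the cache range in tlb_start_vma() and tracks the range actually touched instead of unconditionally flushing the whole mm. Roughly what an unmap path looks like on top of it (signatures as of kernels of this era; the core PTE teardown is elided):

	#include <asm/tlb.h>

	static void teardown_sketch(struct mm_struct *mm,
				    unsigned long start, unsigned long end)
	{
		struct mmu_gather tlb;

		tlb_gather_mmu(&tlb, mm, start, end);
		/* ... core mm code unmaps PTEs, batching pages into 'tlb' ... */
		tlb_finish_mmu(&tlb, start, end);	/* TLB flush + page freeing */
	}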
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index b1990dd75f27..f7effca791a5 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -86,14 +86,18 @@ Ip_u2u1(_ctcmsa);
Ip_u2u1s3(_daddiu);
Ip_u3u1u2(_daddu);
Ip_u1u2(_ddivu);
+Ip_u3u1u2(_ddivu_r6);
Ip_u1(_di);
Ip_u2u1msbu3(_dins);
Ip_u2u1msbu3(_dinsm);
Ip_u2u1msbu3(_dinsu);
Ip_u1u2(_divu);
+Ip_u3u1u2(_divu_r6);
Ip_u1u2u3(_dmfc0);
+Ip_u3u1u2(_dmodu);
Ip_u1u2u3(_dmtc0);
Ip_u1u2(_dmultu);
+Ip_u3u1u2(_dmulu);
Ip_u2u1u3(_drotr);
Ip_u2u1u3(_drotr32);
Ip_u2u1(_dsbh);
@@ -131,6 +135,7 @@ Ip_u1u2u3(_mfc0);
Ip_u1u2u3(_mfhc0);
Ip_u1(_mfhi);
Ip_u1(_mflo);
+Ip_u3u1u2(_modu);
Ip_u3u1u2(_movn);
Ip_u3u1u2(_movz);
Ip_u1u2u3(_mtc0);
@@ -139,6 +144,7 @@ Ip_u1(_mthi);
Ip_u1(_mtlo);
Ip_u3u1u2(_mul);
Ip_u1u2(_multu);
+Ip_u3u1u2(_mulu);
Ip_u3u1u2(_nor);
Ip_u3u1u2(_or);
Ip_u2u1u3(_ori);
@@ -149,6 +155,8 @@ Ip_u2s3u1(_sb);
Ip_u2s3u1(_sc);
Ip_u2s3u1(_scd);
Ip_u2s3u1(_sd);
+Ip_u3u1u2(_seleqz);
+Ip_u3u1u2(_selnez);
Ip_u2s3u1(_sh);
Ip_u2u1u3(_sll);
Ip_u3u2u1(_sllv);
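
The new declarations cover the MIPS r6 three-operand divide/modulo/multiply forms plus seleqz/selnez, which replace the removed HI/LO and conditional-move encodings. Assuming the uasm_i_*() wrappers these Ip_*() declarations expand to, with a (dest, src, src) operand order, emitting an unsigned divide on both ISA levels might look like:

	#include <asm/cpu-features.h>
	#include <asm/uasm.h>

	static void emit_udiv(u32 **p, unsigned int rd,
			      unsigned int rs, unsigned int rt)
	{
		if (cpu_has_mips_r6) {
			uasm_i_divu_r6(p, rd, rs, rt);	/* r6: quotient straight to rd */
		} else {
			uasm_i_divu(p, rs, rt);		/* pre-r6: quotient lands in LO */
			uasm_i_mflo(p, rd);
		}
	}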