Diffstat (limited to 'arch/mips/include/asm')
-rw-r--r--  arch/mips/include/asm/bitops.h       | 128
-rw-r--r--  arch/mips/include/asm/compat.h       |   2
-rw-r--r--  arch/mips/include/asm/dma-mapping.h  |   2
-rw-r--r--  arch/mips/include/asm/hugetlb.h      |  12
-rw-r--r--  arch/mips/include/asm/io.h           |   1
-rw-r--r--  arch/mips/include/asm/irqflags.h     | 207
-rw-r--r--  arch/mips/include/asm/thread_info.h  |   6
7 files changed, 129 insertions(+), 229 deletions(-)
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 82ad35ce2b45..46ac73abd5ee 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -14,7 +14,6 @@
 #endif
 
 #include <linux/compiler.h>
-#include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/byteorder.h>		/* sigh ... */
@@ -44,6 +43,24 @@
 #define smp_mb__before_clear_bit()	smp_mb__before_llsc()
 #define smp_mb__after_clear_bit()	smp_llsc_mb()
 
+
+/*
+ * These are the "slower" versions of the functions and are in bitops.c.
+ * These functions call raw_local_irq_{save,restore}().
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
+int __mips_test_and_set_bit(unsigned long nr,
+			    volatile unsigned long *addr);
+int __mips_test_and_set_bit_lock(unsigned long nr,
+				 volatile unsigned long *addr);
+int __mips_test_and_clear_bit(unsigned long nr,
+			      volatile unsigned long *addr);
+int __mips_test_and_change_bit(unsigned long nr,
+			       volatile unsigned long *addr);
+
+
 /*
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -57,7 +74,7 @@
 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long temp;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_set_bit(nr, addr);
 }
 
 /*
@@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long temp;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (~(1UL << bit)));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_clear_bit(nr, addr);
 }
 
 /*
@@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
  */
 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_change_bit(nr, addr);
 }
 
 /*
@@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 static inline int test_and_set_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_set_bit(nr, addr);
 
 	smp_llsc_mb();
 
@@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr,
 static inline int test_and_set_bit_lock(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_set_bit_lock(nr, addr);
 
 	smp_llsc_mb();
 
@@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 static inline int test_and_clear_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_clear_bit(nr, addr);
 
 	smp_llsc_mb();
 
@@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 static inline int test_and_change_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_change_bit(nr, addr);
 
 	smp_llsc_mb();
 
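Note on the bitops.h hunks: the seven __mips_*_bit() fallbacks are only prototyped in the header; their bodies live in a new bitops.c. A rough sketch of two of them, assuming they simply carry over the interrupt-masking code removed from the inline functions (the file layout is inferred from the hunks above, not quoted from the commit):

/* Sketch only: the "slow" out-of-line fallback, mirroring the code
 * removed from set_bit()/test_and_set_bit().  Moving it out of line
 * is what lets bitops.h drop <linux/irqflags.h>. */
#include <linux/bitops.h>	/* pulls in asm/bitops.h for SZLONG_LOG/SZLONG_MASK */
#include <linux/irqflags.h>
#include <linux/types.h>

void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = &addr[nr >> SZLONG_LOG];
	unsigned long mask = 1UL << (nr & SZLONG_MASK);
	unsigned long flags;

	raw_local_irq_save(flags);
	*a |= mask;
	raw_local_irq_restore(flags);
}

int __mips_test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = &addr[nr >> SZLONG_LOG];
	unsigned long mask = 1UL << (nr & SZLONG_MASK);
	unsigned long flags;
	int res;

	raw_local_irq_save(flags);
	res = (*a & mask) != 0;
	*a |= mask;
	raw_local_irq_restore(flags);
	return res;
}

The other five fallbacks follow the same pattern with &= ~mask, ^= mask and the corresponding return value.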
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 58277e0e9cd4..3c5d1464b7bd 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -290,7 +290,7 @@ struct compat_shmid64_ds {
 
 static inline int is_compat_task(void)
 {
-	return test_thread_flag(TIF_32BIT);
+	return test_thread_flag(TIF_32BIT_ADDR);
 }
 
 #endif /* _ASM_COMPAT_H */
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index be39a12901c6..006b43e38a9c 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -40,6 +40,8 @@ static inline int dma_supported(struct device *dev, u64 mask)
 static inline int dma_mapping_error(struct device *dev, u64 mask)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	debug_dma_mapping_error(dev, mask);
 	return ops->mapping_error(dev, mask);
 }
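For context on the dma-mapping.h hunk: dma_mapping_error() is the check drivers are expected to make after every streaming mapping, and the added debug_dma_mapping_error() call is the CONFIG_DMA_API_DEBUG hook that records whether that check actually happened. A hedged example of the usual caller pattern (the foo_* names are made up):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical helper: map a CPU buffer for device reads and fail
 * cleanly if no mapping could be created. */
static int foo_map_tx_buffer(struct device *dev, void *buf, size_t len,
			     dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;	/* never hand an invalid handle to hardware */
	return 0;
}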
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index bd94946a18f3..ef99db994c2f 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -95,7 +95,17 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 					     pte_t *ptep, pte_t pte,
 					     int dirty)
 {
-	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+	int changed = !pte_same(*ptep, pte);
+
+	if (changed) {
+		set_pte_at(vma->vm_mm, addr, ptep, pte);
+		/*
+		 * There could be some standard sized pages in there,
+		 * get them all.
+		 */
+		flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
+	}
+	return changed;
 }
 
 static inline pte_t huge_ptep_get(pte_t *ptep)
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 29d9c23c20c7..ff2e0345e013 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/irqflags.h>
 
 #include <asm/addrspace.h>
 #include <asm/bug.h>
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 309cbcd6909c..9f3384c789d7 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -16,83 +16,13 @@
 #include <linux/compiler.h>
 #include <asm/hazards.h>
 
-__asm__(
-	"	.macro	arch_local_irq_enable				\n"
-	"	.set	push						\n"
-	"	.set	reorder						\n"
-	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
-	"	ori	$1, 0x400					\n"
-	"	xori	$1, 0x400					\n"
-	"	mtc0	$1, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2)
-	"	ei							\n"
-#else
-	"	mfc0	$1,$12						\n"
-	"	ori	$1,0x1f						\n"
-	"	xori	$1,0x1e						\n"
-	"	mtc0	$1,$12						\n"
-#endif
-	"	irq_enable_hazard					\n"
-	"	.set	pop						\n"
-	"	.endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-		"arch_local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
 __asm__(
 	"	.macro	arch_local_irq_disable\n"
 	"	.set	push						\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1					\n"
-	"	ori	$1, 0x400					\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di							\n"
-#else
-	"	mfc0	$1,$12						\n"
-	"	ori	$1,0x1f						\n"
-	"	xori	$1,0x1f						\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1,$12						\n"
-#endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm							\n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
 		: "memory");
 }
 
-__asm__(
-	"	.macro	arch_local_save_flags flags			\n"
-	"	.set	push						\n"
-	"	.set	reorder						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\flags, $2, 1					\n"
-#else
-	"	mfc0	\\flags, $12					\n"
-#endif
-	"	.set	pop						\n"
-	"	.endm							\n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_save_flags %0" : "=r" (flags));
-	return flags;
-}
 
 __asm__(
 	"	.macro	arch_local_irq_save result			\n"
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\result, $2, 1					\n"
-	"	ori	$1, \\result, 0x400				\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $2, 1					\n"
-	"	andi	\\result, \\result, 0x400			\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di	\\result					\n"
 	"	andi	\\result, 1					\n"
-#else
-	"	mfc0	\\result, $12					\n"
-	"	ori	$1, \\result, 0x1f				\n"
-	"	xori	$1, 0x1f					\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $12						\n"
-#endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm							\n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
 	return flags;
 }
 
+
 __asm__(
 	"	.macro	arch_local_irq_restore flags			\n"
 	"	.set	push						\n"
 	"	.set	noreorder					\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"mfc0	$1, $2, 1						\n"
-	"andi	\\flags, 0x400						\n"
-	"ori	$1, 0x400						\n"
-	"xori	$1, 0x400						\n"
-	"or	\\flags, $1						\n"
-	"mtc0	\\flags, $2, 1						\n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
 	/*
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
 	 */
 	"	beqz	\\flags, 1f					\n"
-	"	di							\n"
+	"	di							\n"
 	"	ei							\n"
 	"1:								\n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
 	/*
 	 * Fast, dangerous.  Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12						\n"
 	"	ins	$1, \\flags, 0, 1				\n"
 	"	mtc0	$1, $12						\n"
-#else
-	"	mfc0	$1, $12						\n"
-	"	andi	\\flags, 1					\n"
-	"	ori	$1, 0x1f					\n"
-	"	xori	$1, 0x1f					\n"
-	"	or	\\flags, $1					\n"
-	"	mtc0	\\flags, $12					\n"
 #endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm							\n");
 
-
 static inline void arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of branch and call overhead on each
-	 * local_irq_restore()
-	 */
-	if (unlikely(!(flags & 0x0400)))
-		smtc_ipi_replay();
-#endif
-
 	__asm__ __volatile__(
 		"arch_local_irq_restore\t%0"
 		: "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
 	: "0" (flags)
 	: "memory");
 }
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+void __arch_local_irq_restore(unsigned long flags);
+#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+
+
+__asm__(
+	"	.macro	arch_local_irq_enable				\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
+	"	ori	$1, 0x400					\n"
+	"	xori	$1, 0x400					\n"
+	"	mtc0	$1, $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	"	ei							\n"
+#else
+	"	mfc0	$1,$12						\n"
+	"	ori	$1,0x1f						\n"
+	"	xori	$1,0x1e						\n"
+	"	mtc0	$1,$12						\n"
+#endif
+	"	irq_enable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm");
+
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
+	__asm__ __volatile__(
+		"arch_local_irq_enable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+}
+
+
+__asm__(
+	"	.macro	arch_local_save_flags flags			\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	\\flags, $2, 1					\n"
+#else
+	"	mfc0	\\flags, $12					\n"
+#endif
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	unsigned long flags;
+	asm volatile("arch_local_save_flags %0" : "=r" (flags));
+	return flags;
+}
+
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 #endif
 }
 
-#endif
+#endif /* #ifndef __ASSEMBLY__ */
 
 /*
  * Do the CPU's IRQ-state tracing from assembly code.
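On configurations other than MIPSR2-without-SMTC, the IRQ-flag helpers are no longer inlined: the declarations added above say they move to mips-atomic.c, where the multi-instruction read-modify-write of the CP0 Status register can be wrapped in preempt_disable()/preempt_enable() so a task preempted between the mfc0 and the mtc0 cannot write back a stale Status value. A rough sketch of one out-of-line variant, reconstructed from the inline asm removed above (function layout and file contents are assumptions, not quoted from the commit):

/* Sketch only: pre-MIPSR2 arch_local_irq_disable() without the "di"
 * instruction.  $1 ($at) is usable as scratch under ".set noat",
 * matching the removed asm macro. */
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <asm/hazards.h>

void arch_local_irq_disable(void)
{
	preempt_disable();

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	noat						\n"
	"	mfc0	$1,$12						\n"
	"	ori	$1,0x1f						\n"
	"	xori	$1,0x1f						\n"
	"	.set	noreorder					\n"
	"	mtc0	$1,$12						\n"
	"	irq_disable_hazard					\n"
	"	.set	pop						\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");

	preempt_enable();
}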
*/ " mfc0 $1, $12 \n" " ins $1, \\flags, 0, 1 \n" " mtc0 $1, $12 \n" -#else - " mfc0 $1, $12 \n" - " andi \\flags, 1 \n" - " ori $1, 0x1f \n" - " xori $1, 0x1f \n" - " or \\flags, $1 \n" - " mtc0 \\flags, $12 \n" #endif " irq_disable_hazard \n" " .set pop \n" " .endm \n"); - static inline void arch_local_irq_restore(unsigned long flags) { unsigned long __tmp1; -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC kernel needs to do a software replay of queued - * IPIs, at the cost of branch and call overhead on each - * local_irq_restore() - */ - if (unlikely(!(flags & 0x0400))) - smtc_ipi_replay(); -#endif - __asm__ __volatile__( "arch_local_irq_restore\t%0" : "=r" (__tmp1) @@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags) : "0" (flags) : "memory"); } +#else +/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */ +void arch_local_irq_disable(void); +unsigned long arch_local_irq_save(void); +void arch_local_irq_restore(unsigned long flags); +void __arch_local_irq_restore(unsigned long flags); +#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ + + +__asm__( + " .macro arch_local_irq_enable \n" + " .set push \n" + " .set reorder \n" + " .set noat \n" +#ifdef CONFIG_MIPS_MT_SMTC + " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n" + " ori $1, 0x400 \n" + " xori $1, 0x400 \n" + " mtc0 $1, $2, 1 \n" +#elif defined(CONFIG_CPU_MIPSR2) + " ei \n" +#else + " mfc0 $1,$12 \n" + " ori $1,0x1f \n" + " xori $1,0x1e \n" + " mtc0 $1,$12 \n" +#endif + " irq_enable_hazard \n" + " .set pop \n" + " .endm"); + +extern void smtc_ipi_replay(void); + +static inline void arch_local_irq_enable(void) +{ +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC kernel needs to do a software replay of queued + * IPIs, at the cost of call overhead on each local_irq_enable() + */ + smtc_ipi_replay(); +#endif + __asm__ __volatile__( + "arch_local_irq_enable" + : /* no outputs */ + : /* no inputs */ + : "memory"); +} + + +__asm__( + " .macro arch_local_save_flags flags \n" + " .set push \n" + " .set reorder \n" +#ifdef CONFIG_MIPS_MT_SMTC + " mfc0 \\flags, $2, 1 \n" +#else + " mfc0 \\flags, $12 \n" +#endif + " .set pop \n" + " .endm \n"); + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile("arch_local_save_flags %0" : "=r" (flags)); + return flags; +} + static inline int arch_irqs_disabled_flags(unsigned long flags) { @@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags) #endif } -#endif +#endif /* #ifndef __ASSEMBLY__ */ /* * Do the CPU's IRQ-state tracing from assembly code. diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 8debe9e91754..18806a52061c 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28"); #define TIF_LOAD_WATCH 25 /* If set, load watch registers */ #define TIF_SYSCALL_TRACE 31 /* syscall trace active */ -#ifdef CONFIG_MIPS32_O32 -#define TIF_32BIT TIF_32BIT_REGS -#elif defined(CONFIG_MIPS32_N32) -#define TIF_32BIT _TIF_32BIT_ADDR -#endif /* CONFIG_MIPS32_O32 */ - #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) |