author     Linus Torvalds   2017-07-08 21:14:14 +0200
committer  Linus Torvalds   2017-07-08 21:14:14 +0200
commit     fe1b518075d86976db3a93e7e8b640d24d477519 (patch)
tree       e4d98d18c4637d85d279d0600afb7cac6a1759f0 /arch/sparc/include/asm/spinlock_64.h
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net (diff)
parent     sparc: kernel: pmc: make of_device_ids const. (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next
Pull sparc updates from David Miller:
1) Queued spinlocks and rwlocks for sparc64, from Babu Moger.
2) Some const'ification from Arvind Yadav.
3) LDC/VIO driver infrastructure changes to facilitate future upcoming
drivers, from Jag Raman.
4) Initialize sched_clock() et al. early so that the initial printk
timestamps are all done while the implementation is available and
functioning. From Pavel Tatashin.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next: (38 commits)
sparc: kernel: pmc: make of_device_ids const.
sparc64: fix typo in property
sparc64: add port_id to VIO device metadata
sparc64: Enhance search for VIO device in MDESC
sparc64: enhance VIO device probing
sparc64: check if a client is allowed to register for MDESC notifications
sparc64: remove restriction on VIO device name size
sparc64: refactor code to obtain cfg_handle property from MDESC
sparc64: add MDESC node name property to VIO device metadata
sparc64: mdesc: use __GFP_REPEAT action modifier for VM allocation
sparc64: expand MDESC interface
sparc64: skip handshake for LDC channels in RAW mode
sparc64: specify the device class in VIO version info. packet
sparc64: ensure VIO operations are defined while being used
sparc: kernel: apc: make of_device_ids const
sparc/time: make of_device_ids const
sparc64: broken %tick frequency on spitfire cpus
sparc64: use prom interface to get %stick frequency
sparc64: optimize functions that access tick
sparc64: add hot-patched and inlined get_tick()
...
Diffstat (limited to 'arch/sparc/include/asm/spinlock_64.h')
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h | 208
1 file changed, 2 insertions(+), 206 deletions(-)
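The removal below corresponds to item 1 of the pull summary: the hand-coded ldstub/cas spinlock and rwlock routines in spinlock_64.h are dropped, and the header instead pulls in the queued-lock headers. As a rough illustration of why two includes can replace roughly 200 lines, here is a sketch of the wiring those headers provide once they reach the asm-generic implementations; the macro list is illustrative and simplified, not lines taken from this diff:

  /* Sketch: what <asm/qspinlock.h> / <asm/qrwlock.h> end up supplying
   * via the asm-generic queued-lock headers (simplified, illustrative). */
  #define arch_spin_lock(l)	queued_spin_lock(l)
  #define arch_spin_trylock(l)	queued_spin_trylock(l)
  #define arch_spin_unlock(l)	queued_spin_unlock(l)
  #define arch_read_lock(l)	queued_read_lock(l)
  #define arch_write_lock(l)	queued_write_lock(l)
  #define arch_write_unlock(l)	queued_write_unlock(l)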
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 07c9f2e9bf57..f7028f5e1a5a 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -10,216 +10,12 @@
 #include <asm/processor.h>
 #include <asm/barrier.h>
-
-/* To get debugging spinlocks which detect and catch
- * deadlock situations, set CONFIG_DEBUG_SPINLOCK
- * and rebuild your kernel.
- */
-
-/* Because we play games to save cycles in the non-contention case, we
- * need to be extra careful about branch targets into the "spinning"
- * code. They live in their own section, but the newer V9 branches
- * have a shorter range than the traditional 32-bit sparc branch
- * variants. The rule is that the branches that go into and out of
- * the spinner sections must be pre-V9 branches.
- */
-
-#define arch_spin_is_locked(lp) ((lp)->lock != 0)
-
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-"1: ldstub [%1], %0\n"
-" brnz,pn %0, 2f\n"
-" nop\n"
-" .subsection 2\n"
-"2: ldub [%1], %0\n"
-" brnz,pt %0, 2b\n"
-" nop\n"
-" ba,a,pt %%xcc, 1b\n"
-" .previous"
-	: "=&r" (tmp)
-	: "r" (lock)
-	: "memory");
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned long result;
-
-	__asm__ __volatile__(
-" ldstub [%1], %0\n"
-	: "=r" (result)
-	: "r" (lock)
-	: "memory");
-
-	return (result == 0UL);
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	__asm__ __volatile__(
-" stb %%g0, [%0]"
-	: /* No outputs */
-	: "r" (lock)
-	: "memory");
-}
-
-static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-	unsigned long tmp1, tmp2;
-
-	__asm__ __volatile__(
-"1: ldstub [%2], %0\n"
-" brnz,pn %0, 2f\n"
-" nop\n"
-" .subsection 2\n"
-"2: rdpr %%pil, %1\n"
-" wrpr %3, %%pil\n"
-"3: ldub [%2], %0\n"
-" brnz,pt %0, 3b\n"
-" nop\n"
-" ba,pt %%xcc, 1b\n"
-" wrpr %1, %%pil\n"
-" .previous"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r"(lock), "r"(flags)
-	: "memory");
-}
-
-/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-
-static inline void arch_read_lock(arch_rwlock_t *lock)
-{
-	unsigned long tmp1, tmp2;
-
-	__asm__ __volatile__ (
-"1: ldsw [%2], %0\n"
-" brlz,pn %0, 2f\n"
-"4: add %0, 1, %1\n"
-" cas [%2], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%icc, 1b\n"
-" nop\n"
-" .subsection 2\n"
-"2: ldsw [%2], %0\n"
-" brlz,pt %0, 2b\n"
-" nop\n"
-" ba,a,pt %%xcc, 4b\n"
-" .previous"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r" (lock)
-	: "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *lock)
-{
-	int tmp1, tmp2;
-
-	__asm__ __volatile__ (
-"1: ldsw [%2], %0\n"
-" brlz,a,pn %0, 2f\n"
-" mov 0, %0\n"
-" add %0, 1, %1\n"
-" cas [%2], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%icc, 1b\n"
-" mov 1, %0\n"
-"2:"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r" (lock)
-	: "memory");
-
-	return tmp1;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *lock)
-{
-	unsigned long tmp1, tmp2;
-
-	__asm__ __volatile__(
-"1: lduw [%2], %0\n"
-" sub %0, 1, %1\n"
-" cas [%2], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%xcc, 1b\n"
-" nop"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r" (lock)
-	: "memory");
-}
-
-static inline void arch_write_lock(arch_rwlock_t *lock)
-{
-	unsigned long mask, tmp1, tmp2;
-
-	mask = 0x80000000UL;
-
-	__asm__ __volatile__(
-"1: lduw [%2], %0\n"
-" brnz,pn %0, 2f\n"
-"4: or %0, %3, %1\n"
-" cas [%2], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%icc, 1b\n"
-" nop\n"
-" .subsection 2\n"
-"2: lduw [%2], %0\n"
-" brnz,pt %0, 2b\n"
-" nop\n"
-" ba,a,pt %%xcc, 4b\n"
-" .previous"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r" (lock), "r" (mask)
-	: "memory");
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *lock)
-{
-	__asm__ __volatile__(
-" stw %%g0, [%0]"
-	: /* no outputs */
-	: "r" (lock)
-	: "memory");
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *lock)
-{
-	unsigned long mask, tmp1, tmp2, result;
-
-	mask = 0x80000000UL;
-
-	__asm__ __volatile__(
-" mov 0, %2\n"
-"1: lduw [%3], %0\n"
-" brnz,pn %0, 2f\n"
-" or %0, %4, %1\n"
-" cas [%3], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%icc, 1b\n"
-" nop\n"
-" mov 1, %2\n"
-"2:"
-	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
-	: "r" (lock), "r" (mask)
-	: "memory");
-
-	return result;
-}
+#include <asm/qrwlock.h>
+#include <asm/qspinlock.h>
 
 #define arch_read_lock_flags(p, f) arch_read_lock(p)
 #define arch_write_lock_flags(p, f) arch_write_lock(p)
 
-#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
-#define arch_write_can_lock(rw) (!(rw)->lock)
-
 #define arch_spin_relax(lock) cpu_relax()
 #define arch_read_relax(lock) cpu_relax()
 #define arch_write_relax(lock) cpu_relax()
 
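For consumers of the locking API nothing changes: generic kernel code keeps using the wrappers from <linux/spinlock.h>, which on sparc64 now resolve to the queued implementations. A minimal usage sketch (the lock and counter names here are hypothetical, chosen only for illustration):

  #include <linux/spinlock.h>

  /* Hypothetical lock; after this merge it is backed by a qspinlock on sparc64. */
  static DEFINE_SPINLOCK(demo_lock);
  static unsigned long demo_count;

  static void demo_bump(void)
  {
  	unsigned long flags;

  	spin_lock_irqsave(&demo_lock, flags);	/* same API as before the conversion */
  	demo_count++;
  	spin_unlock_irqrestore(&demo_lock, flags);
  }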