From 3d94ae0c70a71a9824479366775e2c7679a57d94 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Wed, 28 Sep 2011 11:49:28 -0700
Subject: x86/cmpxchg: add a locked add() helper

Mostly to remove some conditional code in spinlock.h.

Signed-off-by: Jeremy Fitzhardinge
---
 arch/x86/include/asm/cmpxchg.h  | 42 +++++++++++++++++++++++++++++++++++++++++
 arch/x86/include/asm/spinlock.h | 15 +--------------
 2 files changed, 43 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 5d3acdf5a7a6..49eade13161c 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
 	__compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
 	__compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+	__compiletime_error("Bad argument size for add");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size it set to
@@ -207,4 +209,44 @@ extern void __xadd_wrong_size(void)
 #define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
 #define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
 
+#define __add(ptr, inc, lock)						\
+	({								\
+		__typeof__ (*(ptr)) __ret = (inc);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock "addb %b1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock "addw %w1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock "addl %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock "addq %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		default:						\
+			__add_wrong_size();				\
+		}							\
+		__ret;							\
+	})
+
+/*
+ * add_*() adds "inc" to "*ptr"
+ *
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
+ */
+#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
+
 #endif	/* ASM_X86_CMPXCHG_H */
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 972c260919a3..a82c2bf504b6 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -79,23 +79,10 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
-	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-		     : "+m" (lock->head_tail)
-		     :
-		     : "memory", "cc");
+	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
 }
-#else
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
-{
-	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-		     : "+m" (lock->head_tail)
-		     :
-		     : "memory", "cc");
-}
-#endif
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
--
cgit v1.2.3-55-g7522
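For illustration only, here is a standalone user-space sketch of the idea behind
__add()/add_smp(): one size-dispatched locked add covers both a byte-sized and a
word-sized ticket head, which is what lets the patch drop the NR_CPUS-dependent
#if/#else around __ticket_spin_unlock(). The macro name locked_add() and the small
test harness are invented for this sketch (they are not part of the patch), and it
assumes GCC or Clang on x86.

#include <stdint.h>
#include <stdio.h>

/* Pick the operand width from sizeof(*(ptr)), in the spirit of the kernel's __add(). */
#define locked_add(ptr, inc)                                            \
        ({                                                              \
                switch (sizeof(*(ptr))) {                               \
                case 1:                                                 \
                        asm volatile("lock; addb %b1, %0"               \
                                     : "+m" (*(ptr)) : "ri" (inc)       \
                                     : "memory", "cc");                 \
                        break;                                          \
                case 2:                                                 \
                        asm volatile("lock; addw %w1, %0"               \
                                     : "+m" (*(ptr)) : "ri" (inc)       \
                                     : "memory", "cc");                 \
                        break;                                          \
                }                                                       \
        })

int main(void)
{
        uint8_t  head8  = 0;    /* byte-sized ticket head (NR_CPUS < 256)  */
        uint16_t head16 = 0;    /* word-sized ticket head (NR_CPUS >= 256) */

        locked_add(&head8, 1);  /* emits lock addb */
        locked_add(&head16, 1); /* emits lock addw */

        printf("head8=%u head16=%u\n", head8, head16);
        return 0;
}

Because the dispatch happens on sizeof() at compile time, the call site never spells
out the operand width: the same __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX)
works whether the ticket head is a u8 or a u16.
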
From 31a8394e069e47dc47f4c29e4213aa943342f19f Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Fri, 30 Sep 2011 12:14:10 -0700
Subject: x86: consolidate xchg and xadd macros

They both have a basic "put new value in location, return old value"
pattern, so they can use the same macro easily.

Signed-off-by: Jeremy Fitzhardinge
---
 arch/x86/include/asm/cmpxchg.h | 114 +++++++++++++----------------------------
 1 file changed, 36 insertions(+), 78 deletions(-)

diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 49eade13161c..5488e10b9dba 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -33,60 +33,47 @@ extern void __add_wrong_size(void)
 #define __X86_CASE_Q	-1		/* sizeof will never return -1 */
 #endif
 
+/*
+ * An exchange-type operation, which takes a value and a pointer, and
+ * returns the old value.
+ */
+#define __xchg_op(ptr, arg, op, lock)					\
+	({								\
+		__typeof__ (*(ptr)) __ret = (arg);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock #op "b %b0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock #op "w %w0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock #op "l %0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock #op "q %q0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		default:						\
+			__ ## op ## _wrong_size();			\
+		}							\
+		__ret;							\
+	})
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
  * Since this is generally used to protect other memory information, we
  * use "asm volatile" and "memory" clobbers to prevent gcc from moving
  * information around.
  */
-#define __xchg(x, ptr, size)						\
-({									\
-	__typeof(*(ptr)) __x = (x);					\
-	switch (size) {							\
-	case __X86_CASE_B:						\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile("xchgb %0,%1"				\
-			     : "=q" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_W:						\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile("xchgw %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_L:						\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile("xchgl %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_Q:						\
-	{								\
-		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
-		asm volatile("xchgq %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__xchg_wrong_size();					\
-	}								\
-	__x;								\
-})
-
-#define xchg(ptr, v)							\
-	__xchg((v), (ptr), sizeof(*ptr))
+#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
 
 /*
  * Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -167,36 +154,6 @@ extern void __add_wrong_size(void)
 	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
-#define __xadd(ptr, inc, lock)						\
-	({								\
-		__typeof__ (*(ptr)) __ret = (inc);			\
-		switch (sizeof(*(ptr))) {				\
-		case __X86_CASE_B:					\
-			asm volatile (lock "xaddb %b0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_W:					\
-			asm volatile (lock "xaddw %w0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_L:					\
-			asm volatile (lock "xaddl %0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_Q:					\
-			asm volatile (lock "xaddq %q0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		default:						\
-			__xadd_wrong_size();				\
-		}							\
-		__ret;							\
-	})
-
 /*
  * xadd() adds "inc" to "*ptr" and atomically returns the previous
  * value of "*ptr".
@@ -205,6 +162,7 @@ extern void __add_wrong_size(void)
  * xadd_sync() is always locked
  * xadd_local() is never locked
  */
+#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
 #define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
 #define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
 #define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
--
cgit v1.2.3-55-g7522
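For illustration only, a minimal user-space sketch of the consolidation: xchg and
xadd share the same shape (the new value goes in through a register, the old value
comes back in that register), so one macro parameterized by the instruction mnemonic
covers both. The names xchg_op(), my_xchg() and my_xadd() are invented here and only
the 32-bit case is handled; the kernel's __xchg_op() above dispatches on all four
operand sizes. Assumes GCC or Clang on x86.

#include <stdint.h>
#include <stdio.h>

/* One body, parameterized by mnemonic and lock prefix (32-bit operands only). */
#define xchg_op(ptr, arg, op, lock)                             \
        ({                                                      \
                __typeof__ (*(ptr)) __ret = (arg);              \
                asm volatile(lock #op "l %0, %1"                \
                             : "+r" (__ret), "+m" (*(ptr))      \
                             : : "memory", "cc");               \
                __ret;                                          \
        })

/* xchg with a memory operand is implicitly locked, so it needs no prefix. */
#define my_xchg(ptr, v)         xchg_op((ptr), (v), xchg, "")
#define my_xadd(ptr, inc)       xchg_op((ptr), (inc), xadd, "lock; ")

int main(void)
{
        uint32_t val  = 10;
        uint32_t old1 = my_xchg(&val, 42);      /* old1 = 10, val = 42 */
        uint32_t old2 = my_xadd(&val, 3);       /* old2 = 42, val = 45 */

        printf("old1=%u old2=%u val=%u\n", old1, old2, val);
        return 0;
}

The empty prefix for my_xchg() mirrors the header's "xchg always implies lock anyway"
note, while my_xadd() passes an explicit lock prefix, just as the kernel's xadd()
passes LOCK_PREFIX into __xchg_op().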