path: root/arch/arc/include/asm/atomic.h
author	Noam Camus	2015-05-16 16:49:35 +0200
committer	Vineet Gupta	2016-05-09 06:02:33 +0200
commit	a5a10d99a946602cf4ae50eadc65c2480dbd2e56 (patch)
tree	613088aec1988af74826a53277e0dde3812bccc2 /arch/arc/include/asm/atomic.h
parent	ARC: [plat-eznps] Use dedicated user stack top (diff)
ARC: [plat-eznps] Use dedicated atomic/bitops/cmpxchg
We need our own implementations since we lack LLSC support. Our extended ISA provides an optimized solution for all the 32-bit operations found in these three headers.

Signed-off-by: Noam Camus <noamc@ezchip.com>
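
A minimal usage sketch (not part of this patch): callers see the same generic atomic_t API whether the LLSC path or the new EZNPS path below provides the implementation. grab_slot(), nr_users and the limit of 8 are hypothetical names used only for illustration.

#include <linux/atomic.h>
#include <linux/errno.h>

static atomic_t nr_users = ATOMIC_INIT(0);

static int grab_slot(void)
{
	/*
	 * atomic_add_return() is fully ordered; on EZNPS the smp_mb()
	 * calls in ATOMIC_OP_RETURN() in the diff provide that ordering.
	 */
	int now = atomic_add_return(1, &nr_users);

	if (now > 8) {
		atomic_sub(1, &nr_users);	/* back off, no slot free */
		return -EBUSY;
	}
	return 0;
}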
Diffstat (limited to 'arch/arc/include/asm/atomic.h')
-rw-r--r--	arch/arc/include/asm/atomic.h	83
1 file changed, 80 insertions(+), 3 deletions(-)
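
For orientation before the diff: roughly what the EZNPS ATOMIC_OP() macro added below expands to for the add case. The operands are staged into the fixed registers r2/r3 and the custom atomic opcode (CTOP_INST_AADD_DI_R2_R2_R3) is emitted via .word, presumably because the stock assembler does not know the EZchip instruction encodings. This expansion is a sketch derived from the patch itself, not additional code.

static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
	"	mov r2, %0\n"	/* value to add */
	"	mov r3, %1\n"	/* address of v->counter */
	"	.word %2\n"	/* raw encoding of the DI atomic-add instruction */
	:
	: "r"(i), "r"(&v->counter), "i"(CTOP_INST_AADD_DI_R2_R2_R3)
	: "r2", "r3", "memory");
}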
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 7730d302cadb..5f3dcbbc0cc9 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -17,6 +17,8 @@
#include <asm/barrier.h>
#include <asm/smp.h>
+#ifndef CONFIG_ARC_PLAT_EZNPS
+
#define atomic_read(v) READ_ONCE((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC
@@ -180,13 +182,88 @@ ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+static inline int atomic_read(const atomic_t *v)
+{
+ int temp;
+
+ __asm__ __volatile__(
+ " ld.di %0, [%1]"
+ : "=r"(temp)
+ : "r"(&v->counter)
+ : "memory");
+ return temp;
+}
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+ __asm__ __volatile__(
+ " st.di %0,[%1]"
+ :
+ : "r"(i), "r"(&v->counter)
+ : "memory");
+}
+
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ __asm__ __volatile__( \
+ " mov r2, %0\n" \
+ " mov r3, %1\n" \
+ " .word %2\n" \
+ : \
+ : "r"(i), "r"(&v->counter), "i"(asm_op) \
+ : "r2", "r3", "memory"); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned int temp = i; \
+ \
+ /* Explicit full memory barrier needed before/after */ \
+ smp_mb(); \
+ \
+ __asm__ __volatile__( \
+ " mov r2, %0\n" \
+ " mov r3, %1\n" \
+ " .word %2\n" \
+ " mov %0, r2" \
+ : "+r"(temp) \
+ : "r"(&v->counter), "i"(asm_op) \
+ : "r2", "r3", "memory"); \
+ \
+ smp_mb(); \
+ \
+ temp c_op i; \
+ \
+ return temp; \
+}
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
+#define atomic_sub(i, v) atomic_add(-(i), (v))
+#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
+
+ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
+#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
+ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
+ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
+
+#endif /* CONFIG_ARC_PLAT_EZNPS */
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
/**
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t