Diffstat (limited to 'include/qemu/atomic.h')
-rw-r--r--  include/qemu/atomic.h  40
1 file changed, 19 insertions(+), 21 deletions(-)
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 43b06458f1..c4f6950fcb 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -72,17 +72,17 @@
* Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
*/
-#define smp_mb() ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); barrier(); })
-#define smp_wmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
-#define smp_rmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })
+#define smp_mb() ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); })
+#define smp_wmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); })
+#define smp_rmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); })
/* Most compilers currently treat consume and acquire the same, but really
* no processors except Alpha need a barrier here. Leave it in if
* using Thread Sanitizer to avoid warnings, otherwise optimize it away.
*/
#if defined(__SANITIZE_THREAD__)
-#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
-#elsif defined(__alpha__)
+#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); })
+#elif defined(__alpha__)
#define smp_read_barrier_depends() asm volatile("mb":::"memory")
#else
#define smp_read_barrier_depends() barrier()
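For illustration only, not part of the patch: a minimal sketch of how smp_wmb() in a producer pairs with smp_rmb() in a consumer for message passing. The payload/ready variables and the producer/consumer helpers are invented for this example, which assumes a translation unit that includes qemu/atomic.h.

    #include <assert.h>
    #include "qemu/atomic.h"

    static int payload;
    static int ready;

    static void producer(void)
    {
        payload = 42;               /* plain store of the data            */
        smp_wmb();                  /* order the data before the flag     */
        atomic_set(&ready, 1);      /* relaxed store publishing the flag  */
    }

    static void consumer(void)
    {
        if (atomic_read(&ready)) {  /* relaxed load of the flag           */
            smp_rmb();              /* order the flag before the data     */
            assert(payload == 42);  /* the payload is guaranteed visible  */
        }
    }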
@@ -92,19 +92,22 @@
/* Weak atomic operations prevent the compiler moving other
* loads/stores past the atomic operation load/store. However there is
* no explicit memory barrier for the processor.
+ *
+ * The C11 memory model says that accesses to variables shared between
+ * threads must use at least __ATOMIC_RELAXED primitives, otherwise the
+ * behaviour is undefined. Generally this has little to no effect on the
+ * generated code, but accesses that bypass the atomic primitives will be
+ * flagged as violations by sanitizers.
*/
#define atomic_read(ptr) \
({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
- typeof_strip_qual(*ptr) _val; \
- __atomic_load(ptr, &_val, __ATOMIC_RELAXED); \
- _val; \
+ __atomic_load_n(ptr, __ATOMIC_RELAXED); \
})
#define atomic_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
- typeof(*ptr) _val = (i); \
- __atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
+ __atomic_store_n(ptr, i, __ATOMIC_RELAXED); \
} while(0)
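For illustration only, not part of the patch: a sketch of the intended use of atomic_read()/atomic_set() for a flag shared between threads. Wrapping every access keeps the loads and stores __ATOMIC_RELAXED, which is sufficient for a standalone flag and keeps sanitizers quiet; the names below are invented.

    #include <stdbool.h>
    #include "qemu/atomic.h"

    static bool exit_request;

    static void request_exit(void)        /* e.g. called from another thread */
    {
        atomic_set(&exit_request, true);  /* relaxed store, sanitizer-clean  */
    }

    static bool exit_requested(void)      /* polled from a worker loop       */
    {
        return atomic_read(&exit_request);
    }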
/* See above: most compilers currently treat consume and acquire the
@@ -129,8 +132,7 @@
#define atomic_rcu_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
- typeof(*ptr) _val = (i); \
- __atomic_store(ptr, &_val, __ATOMIC_RELEASE); \
+ __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
} while(0)
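For illustration only, not part of the patch: a sketch of RCU-style publication, with atomic_rcu_set() on the writer side and atomic_rcu_read() (the matching load defined elsewhere in this header) on the reader side. The struct and variable names are invented.

    #include "qemu/atomic.h"

    struct config {
        int mtu;
    };

    static struct config *cur_config;

    static void publish_config(struct config *newcfg)
    {
        newcfg->mtu = 9000;                  /* initialise before publishing */
        atomic_rcu_set(&cur_config, newcfg); /* release store: readers that  */
                                             /* see the pointer see the init */
    }

    static int reader_mtu(void)
    {
        struct config *c = atomic_rcu_read(&cur_config); /* consume/acquire  */
        return c ? c->mtu : -1;
    }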
/* atomic_mb_read/set semantics map Java volatile variables. They are
@@ -153,9 +155,8 @@
#define atomic_mb_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
- typeof(*ptr) _val = (i); \
smp_wmb(); \
- __atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
+ __atomic_store_n(ptr, i, __ATOMIC_RELAXED); \
smp_mb(); \
} while(0)
#else
@@ -169,8 +170,7 @@
#define atomic_mb_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
- typeof(*ptr) _val = (i); \
- __atomic_store(ptr, &_val, __ATOMIC_SEQ_CST); \
+ __atomic_store_n(ptr, i, __ATOMIC_SEQ_CST); \
} while(0)
#endif
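For illustration only, not part of the patch: atomic_mb_read() and atomic_mb_set() behave like Java volatile accesses, so a value can be handed between threads with no explicit barriers at the call sites. The names below are invented.

    #include <stdbool.h>
    #include "qemu/atomic.h"

    static int door;

    static void open_door(void)
    {
        atomic_mb_set(&door, 1);            /* sequentially consistent store */
    }

    static bool door_is_open(void)
    {
        return atomic_mb_read(&door) != 0;  /* sequentially consistent load  */
    }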
@@ -179,17 +179,15 @@
#define atomic_xchg(ptr, i) ({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
- typeof_strip_qual(*ptr) _new = (i), _old; \
- __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
- _old; \
+ __atomic_exchange_n(ptr, i, __ATOMIC_SEQ_CST); \
})
/* Returns the eventual value, failed or not */
#define atomic_cmpxchg(ptr, old, new) \
({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
- typeof_strip_qual(*ptr) _old = (old), _new = (new); \
- __atomic_compare_exchange(ptr, &_old, &_new, false, \
+ typeof_strip_qual(*ptr) _old = (old); \
+ __atomic_compare_exchange_n(ptr, &_old, new, false, \
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
_old; \
})
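For illustration only, not part of the patch: a typical compare-and-swap retry loop. Because atomic_cmpxchg() returns the value that was actually found in memory, the loop only retries when another thread won the race. The refcount variable and the helper are invented.

    #include <stdbool.h>
    #include "qemu/atomic.h"

    static int refcount;

    /* Increment refcount unless it has already dropped to zero. */
    static bool ref_inc_unless_zero(void)
    {
        int old = atomic_read(&refcount);

        while (old != 0) {
            int seen = atomic_cmpxchg(&refcount, old, old + 1);
            if (seen == old) {
                return true;   /* we installed old + 1                     */
            }
            old = seen;        /* lost the race; retry with the fresh value */
        }
        return false;          /* already zero, do not resurrect            */
    }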