Diffstat (limited to 'tcg')
-rw-r--r--  tcg/tcg-op.c       328
-rw-r--r--  tcg/tcg-op.h        44
-rw-r--r--  tcg/tcg-runtime.h   75
-rw-r--r--  tcg/tcg.h           53
4 files changed, 500 insertions, 0 deletions
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 291d50bb7d..65e36637d0 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -1975,3 +1975,331 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
                               addr, trace_mem_get_info(memop, 1));
    gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
}
+
+static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc)
+{
+ switch (opc & MO_SSIZE) {
+ case MO_SB:
+ tcg_gen_ext8s_i32(ret, val);
+ break;
+ case MO_UB:
+ tcg_gen_ext8u_i32(ret, val);
+ break;
+ case MO_SW:
+ tcg_gen_ext16s_i32(ret, val);
+ break;
+ case MO_UW:
+ tcg_gen_ext16u_i32(ret, val);
+ break;
+ default:
+ tcg_gen_mov_i32(ret, val);
+ break;
+ }
+}
+
+static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, TCGMemOp opc)
+{
+ switch (opc & MO_SSIZE) {
+ case MO_SB:
+ tcg_gen_ext8s_i64(ret, val);
+ break;
+ case MO_UB:
+ tcg_gen_ext8u_i64(ret, val);
+ break;
+ case MO_SW:
+ tcg_gen_ext16s_i64(ret, val);
+ break;
+ case MO_UW:
+ tcg_gen_ext16u_i64(ret, val);
+ break;
+ case MO_SL:
+ tcg_gen_ext32s_i64(ret, val);
+ break;
+ case MO_UL:
+ tcg_gen_ext32u_i64(ret, val);
+ break;
+ default:
+ tcg_gen_mov_i64(ret, val);
+ break;
+ }
+}
+
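(Aside, not part of the patch: a plain-C sketch of the extension these two helpers emit, shown for the 32-bit case. MO_SB/MO_UB/MO_SW/MO_UW are the TCGMemOp size-and-sign codes used above.)

    static uint32_t ext32_sketch(uint32_t val, TCGMemOp ssize)
    {
        switch (ssize) {
        case MO_SB: return (uint32_t)(int8_t)val;    /* sign-extend byte */
        case MO_UB: return (uint8_t)val;             /* zero-extend byte */
        case MO_SW: return (uint32_t)(int16_t)val;   /* sign-extend word */
        case MO_UW: return (uint16_t)val;            /* zero-extend word */
        default:    return val;                      /* full width: move */
        }
    }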
+#ifdef CONFIG_SOFTMMU
+typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv,
+ TCGv_i32, TCGv_i32, TCGv_i32);
+typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv,
+ TCGv_i64, TCGv_i64, TCGv_i32);
+typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv,
+ TCGv_i32, TCGv_i32);
+typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv,
+ TCGv_i64, TCGv_i32);
+#else
+typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32, TCGv_i32);
+typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64, TCGv_i64);
+typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32);
+typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64);
+#endif
+
+static void * const table_cmpxchg[16] = {
+ [MO_8] = gen_helper_atomic_cmpxchgb,
+ [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
+ [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
+ [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
+ [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
+ [MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le,
+ [MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be,
+};
+
+void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
+ TCGv_i32 newv, TCGArg idx, TCGMemOp memop)
+{
+ memop = tcg_canonicalize_memop(memop, 0, 0);
+
+ if (!parallel_cpus) {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+
+ tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);
+
+ tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
+ tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
+ tcg_gen_qemu_st_i32(t2, addr, idx, memop);
+ tcg_temp_free_i32(t2);
+
+ if (memop & MO_SIGN) {
+ tcg_gen_ext_i32(retv, t1, memop);
+ } else {
+ tcg_gen_mov_i32(retv, t1);
+ }
+ tcg_temp_free_i32(t1);
+ } else {
+ gen_atomic_cx_i32 gen;
+
+ gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
+ tcg_debug_assert(gen != NULL);
+
+#ifdef CONFIG_SOFTMMU
+ {
+ TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
+ gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi);
+ tcg_temp_free_i32(oi);
+ }
+#else
+ gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
+#endif
+
+ if (memop & MO_SIGN) {
+ tcg_gen_ext_i32(retv, retv, memop);
+ }
+ }
+}
+
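(Usage sketch, hypothetical translator code: retv, addr, cmpv, newv and mmu_idx are assumed to come from the front end; MO_TEUL selects a target-endian 32-bit access.)

    /* retv receives the old value at addr; newv is stored only if
       that old value equalled cmpv. */
    tcg_gen_atomic_cmpxchg_i32(retv, addr, cmpv, newv, mmu_idx, MO_TEUL);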
+void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
+ TCGv_i64 newv, TCGArg idx, TCGMemOp memop)
+{
+ memop = tcg_canonicalize_memop(memop, 1, 0);
+
+ if (!parallel_cpus) {
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);
+
+ tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
+ tcg_gen_qemu_st_i64(t2, addr, idx, memop);
+ tcg_temp_free_i64(t2);
+
+ if (memop & MO_SIGN) {
+ tcg_gen_ext_i64(retv, t1, memop);
+ } else {
+ tcg_gen_mov_i64(retv, t1);
+ }
+ tcg_temp_free_i64(t1);
+ } else if ((memop & MO_SIZE) == MO_64) {
+ gen_atomic_cx_i64 gen;
+
+ gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
+ tcg_debug_assert(gen != NULL);
+
+#ifdef CONFIG_SOFTMMU
+ {
+ TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop, idx));
+ gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi);
+ tcg_temp_free_i32(oi);
+ }
+#else
+ gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
+#endif
+ } else {
+ TCGv_i32 c32 = tcg_temp_new_i32();
+ TCGv_i32 n32 = tcg_temp_new_i32();
+ TCGv_i32 r32 = tcg_temp_new_i32();
+
+ tcg_gen_extrl_i64_i32(c32, cmpv);
+ tcg_gen_extrl_i64_i32(n32, newv);
+ tcg_gen_atomic_cmpxchg_i32(r32, addr, c32, n32, idx, memop & ~MO_SIGN);
+ tcg_temp_free_i32(c32);
+ tcg_temp_free_i32(n32);
+
+ tcg_gen_extu_i32_i64(retv, r32);
+ tcg_temp_free_i32(r32);
+
+ if (memop & MO_SIGN) {
+ tcg_gen_ext_i64(retv, retv, memop);
+ }
+ }
+}
+
+static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
+ TCGArg idx, TCGMemOp memop, bool new_val,
+ void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+
+ memop = tcg_canonicalize_memop(memop, 0, 0);
+
+ tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
+ gen(t2, t1, val);
+ tcg_gen_qemu_st_i32(t2, addr, idx, memop);
+
+ tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+}
+
+static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
+ TCGArg idx, TCGMemOp memop, void * const table[])
+{
+ gen_atomic_op_i32 gen;
+
+ memop = tcg_canonicalize_memop(memop, 0, 0);
+
+ gen = table[memop & (MO_SIZE | MO_BSWAP)];
+ tcg_debug_assert(gen != NULL);
+
+#ifdef CONFIG_SOFTMMU
+ {
+ TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
+ gen(ret, tcg_ctx.tcg_env, addr, val, oi);
+ tcg_temp_free_i32(oi);
+ }
+#else
+ gen(ret, tcg_ctx.tcg_env, addr, val);
+#endif
+
+ if (memop & MO_SIGN) {
+ tcg_gen_ext_i32(ret, ret, memop);
+ }
+}
+
+static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
+ TCGArg idx, TCGMemOp memop, bool new_val,
+ void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ memop = tcg_canonicalize_memop(memop, 1, 0);
+
+ tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
+ gen(t2, t1, val);
+ tcg_gen_qemu_st_i64(t2, addr, idx, memop);
+
+ tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+
+static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
+ TCGArg idx, TCGMemOp memop, void * const table[])
+{
+ memop = tcg_canonicalize_memop(memop, 1, 0);
+
+ if ((memop & MO_SIZE) == MO_64) {
+ gen_atomic_op_i64 gen;
+
+ gen = table[memop & (MO_SIZE | MO_BSWAP)];
+ tcg_debug_assert(gen != NULL);
+
+#ifdef CONFIG_SOFTMMU
+ {
+ TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
+ gen(ret, tcg_ctx.tcg_env, addr, val, oi);
+ tcg_temp_free_i32(oi);
+ }
+#else
+ gen(ret, tcg_ctx.tcg_env, addr, val);
+#endif
+ } else {
+ TCGv_i32 v32 = tcg_temp_new_i32();
+ TCGv_i32 r32 = tcg_temp_new_i32();
+
+ tcg_gen_extrl_i64_i32(v32, val);
+ do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
+ tcg_temp_free_i32(v32);
+
+ tcg_gen_extu_i32_i64(ret, r32);
+ tcg_temp_free_i32(r32);
+
+ if (memop & MO_SIGN) {
+ tcg_gen_ext_i64(ret, ret, memop);
+ }
+ }
+}
+
+#define GEN_ATOMIC_HELPER(NAME, OP, NEW) \
+static void * const table_##NAME[16] = { \
+ [MO_8] = gen_helper_atomic_##NAME##b, \
+ [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \
+ [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \
+ [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le, \
+ [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be, \
+ [MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le, \
+ [MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be, \
+}; \
+void tcg_gen_atomic_##NAME##_i32 \
+ (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
+{ \
+ if (parallel_cpus) { \
+ do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \
+ } else { \
+ do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \
+ tcg_gen_##OP##_i32); \
+ } \
+} \
+void tcg_gen_atomic_##NAME##_i64 \
+ (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \
+{ \
+ if (parallel_cpus) { \
+ do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \
+ } else { \
+ do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \
+ tcg_gen_##OP##_i64); \
+ } \
+}
+
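(For reference, the approximate preprocessor output of the first instantiation below, GEN_ATOMIC_HELPER(fetch_add, add, 0), shown for the 32-bit entry point and reformatted:)

    void tcg_gen_atomic_fetch_add_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
                                      TCGArg idx, TCGMemOp memop)
    {
        if (parallel_cpus) {
            do_atomic_op_i32(ret, addr, val, idx, memop, table_fetch_add);
        } else {
            /* NEW == 0: return the value fetched before the add. */
            do_nonatomic_op_i32(ret, addr, val, idx, memop, 0,
                                tcg_gen_add_i32);
        }
    }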
+GEN_ATOMIC_HELPER(fetch_add, add, 0)
+GEN_ATOMIC_HELPER(fetch_and, and, 0)
+GEN_ATOMIC_HELPER(fetch_or, or, 0)
+GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
+
+GEN_ATOMIC_HELPER(add_fetch, add, 1)
+GEN_ATOMIC_HELPER(and_fetch, and, 1)
+GEN_ATOMIC_HELPER(or_fetch, or, 1)
+GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
+
+static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_mov_i32(r, b);
+}
+
+static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_mov_i64(r, b);
+}
+
+GEN_ATOMIC_HELPER(xchg, mov2, 0)
+
+#undef GEN_ATOMIC_HELPER
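(Aside: xchg reuses the same plumbing by passing mov2, which discards the loaded value, together with NEW == 0, so the non-parallel path degenerates to an exchange. A sketch of its effective behaviour, not part of the patch:)

    /* tcg_gen_atomic_xchg_i32, serial path:
       t1 = load(addr);        -- old value
       t2 = val;               -- mov2 ignores t1
       store(addr, t2);
       ret = ext(t1, memop);   -- the old value is returned */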
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index 02cb376681..89b59e867a 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -854,6 +854,30 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
    tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEQ);
}

+void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32,
+ TCGArg, TCGMemOp);
+void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64,
+ TCGArg, TCGMemOp);
+
+void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
+void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
+void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
+void tcg_gen_atomic_fetch_and_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
+void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
+void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
+void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
+void tcg_gen_atomic_and_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
+void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
+void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+
#if TARGET_LONG_BITS == 64
#define tcg_gen_movi_tl tcg_gen_movi_i64
#define tcg_gen_mov_tl tcg_gen_mov_i64
@@ -932,6 +956,16 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
#define tcg_gen_sub2_tl tcg_gen_sub2_i64
#define tcg_gen_mulu2_tl tcg_gen_mulu2_i64
#define tcg_gen_muls2_tl tcg_gen_muls2_i64
+#define tcg_gen_atomic_cmpxchg_tl tcg_gen_atomic_cmpxchg_i64
+#define tcg_gen_atomic_xchg_tl tcg_gen_atomic_xchg_i64
+#define tcg_gen_atomic_fetch_add_tl tcg_gen_atomic_fetch_add_i64
+#define tcg_gen_atomic_fetch_and_tl tcg_gen_atomic_fetch_and_i64
+#define tcg_gen_atomic_fetch_or_tl tcg_gen_atomic_fetch_or_i64
+#define tcg_gen_atomic_fetch_xor_tl tcg_gen_atomic_fetch_xor_i64
+#define tcg_gen_atomic_add_fetch_tl tcg_gen_atomic_add_fetch_i64
+#define tcg_gen_atomic_and_fetch_tl tcg_gen_atomic_and_fetch_i64
+#define tcg_gen_atomic_or_fetch_tl tcg_gen_atomic_or_fetch_i64
+#define tcg_gen_atomic_xor_fetch_tl tcg_gen_atomic_xor_fetch_i64
#else
#define tcg_gen_movi_tl tcg_gen_movi_i32
#define tcg_gen_mov_tl tcg_gen_mov_i32
@@ -1009,6 +1043,16 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
#define tcg_gen_sub2_tl tcg_gen_sub2_i32
#define tcg_gen_mulu2_tl tcg_gen_mulu2_i32
#define tcg_gen_muls2_tl tcg_gen_muls2_i32
+#define tcg_gen_atomic_cmpxchg_tl tcg_gen_atomic_cmpxchg_i32
+#define tcg_gen_atomic_xchg_tl tcg_gen_atomic_xchg_i32
+#define tcg_gen_atomic_fetch_add_tl tcg_gen_atomic_fetch_add_i32
+#define tcg_gen_atomic_fetch_and_tl tcg_gen_atomic_fetch_and_i32
+#define tcg_gen_atomic_fetch_or_tl tcg_gen_atomic_fetch_or_i32
+#define tcg_gen_atomic_fetch_xor_tl tcg_gen_atomic_fetch_xor_i32
+#define tcg_gen_atomic_add_fetch_tl tcg_gen_atomic_add_fetch_i32
+#define tcg_gen_atomic_and_fetch_tl tcg_gen_atomic_and_fetch_i32
+#define tcg_gen_atomic_or_fetch_tl tcg_gen_atomic_or_fetch_i32
+#define tcg_gen_atomic_xor_fetch_tl tcg_gen_atomic_xor_fetch_i32
#endif
#if UINTPTR_MAX == UINT32_MAX
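(Usage sketch of the new target-long aliases; oldv, incr and mmu_idx are hypothetical front-end temporaries:)

    /* oldv = *addr, then *addr = oldv + incr, performed atomically when
       parallel_cpus is set; MO_TEUL is a target-endian 32-bit access. */
    tcg_gen_atomic_fetch_add_tl(oldv, addr, incr, mmu_idx, MO_TEUL);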
diff --git a/tcg/tcg-runtime.h b/tcg/tcg-runtime.h
index 23a0c37711..22367aaf97 100644
--- a/tcg/tcg-runtime.h
+++ b/tcg/tcg-runtime.h
@@ -14,3 +14,78 @@ DEF_HELPER_FLAGS_2(sar_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64)
DEF_HELPER_FLAGS_2(mulsh_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64)
DEF_HELPER_FLAGS_2(muluh_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+#ifdef CONFIG_SOFTMMU
+
+DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
+ i32, env, tl, i32, i32, i32)
+DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
+ i32, env, tl, i32, i32, i32)
+DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG,
+ i32, env, tl, i32, i32, i32)
+DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
+ i64, env, tl, i64, i64, i32)
+DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG,
+ i32, env, tl, i32, i32, i32)
+DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG,
+ i32, env, tl, i32, i32, i32)
+DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
+ i64, env, tl, i64, i64, i32)
+
+#define GEN_ATOMIC_HELPERS(NAME) \
+ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \
+ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \
+ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \
+ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \
+ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \
+ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \
+ TCG_CALL_NO_WG, i64, env, tl, i64, i32) \
+ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \
+ TCG_CALL_NO_WG, i64, env, tl, i64, i32)
+
+#else
+
+DEF_HELPER_FLAGS_4(atomic_cmpxchgb, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
+DEF_HELPER_FLAGS_4(atomic_cmpxchgw_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
+DEF_HELPER_FLAGS_4(atomic_cmpxchgl_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
+DEF_HELPER_FLAGS_4(atomic_cmpxchgq_be, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
+DEF_HELPER_FLAGS_4(atomic_cmpxchgw_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
+DEF_HELPER_FLAGS_4(atomic_cmpxchgl_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
+DEF_HELPER_FLAGS_4(atomic_cmpxchgq_le, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
+
+#define GEN_ATOMIC_HELPERS(NAME) \
+ DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b), \
+ TCG_CALL_NO_WG, i32, env, tl, i32) \
+ DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le), \
+ TCG_CALL_NO_WG, i32, env, tl, i32) \
+ DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be), \
+ TCG_CALL_NO_WG, i32, env, tl, i32) \
+ DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le), \
+ TCG_CALL_NO_WG, i32, env, tl, i32) \
+ DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be), \
+ TCG_CALL_NO_WG, i32, env, tl, i32) \
+ DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_le), \
+ TCG_CALL_NO_WG, i64, env, tl, i64) \
+ DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_be), \
+ TCG_CALL_NO_WG, i64, env, tl, i64)
+
+#endif /* CONFIG_SOFTMMU */
+
+GEN_ATOMIC_HELPERS(fetch_add)
+GEN_ATOMIC_HELPERS(fetch_and)
+GEN_ATOMIC_HELPERS(fetch_or)
+GEN_ATOMIC_HELPERS(fetch_xor)
+
+GEN_ATOMIC_HELPERS(add_fetch)
+GEN_ATOMIC_HELPERS(and_fetch)
+GEN_ATOMIC_HELPERS(or_fetch)
+GEN_ATOMIC_HELPERS(xor_fetch)
+
+GEN_ATOMIC_HELPERS(xchg)
+
+#undef GEN_ATOMIC_HELPERS
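(For reference, each GEN_ATOMIC_HELPERS(NAME) invocation above expands to seven helper declarations; e.g. the softmmu variant of GEN_ATOMIC_HELPERS(fetch_add) yields, among others:)

    DEF_HELPER_FLAGS_4(atomic_fetch_addw_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)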
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 3b211560c8..593196571e 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -1177,6 +1177,59 @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
# define helper_ret_ldq_cmmu helper_le_ldq_cmmu
#endif
+uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t cmpv, uint32_t newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t cmpv, uint32_t newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t cmpv, uint32_t newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
+ uint64_t cmpv, uint64_t newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t cmpv, uint32_t newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t cmpv, uint32_t newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
+ uint64_t cmpv, uint64_t newv,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+
+#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
+TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu \
+ (CPUArchState *env, target_ulong addr, TYPE val, \
+ TCGMemOpIdx oi, uintptr_t retaddr);
+
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
+
+GEN_ATOMIC_HELPER_ALL(fetch_add)
+GEN_ATOMIC_HELPER_ALL(fetch_sub)
+GEN_ATOMIC_HELPER_ALL(fetch_and)
+GEN_ATOMIC_HELPER_ALL(fetch_or)
+GEN_ATOMIC_HELPER_ALL(fetch_xor)
+
+GEN_ATOMIC_HELPER_ALL(add_fetch)
+GEN_ATOMIC_HELPER_ALL(sub_fetch)
+GEN_ATOMIC_HELPER_ALL(and_fetch)
+GEN_ATOMIC_HELPER_ALL(or_fetch)
+GEN_ATOMIC_HELPER_ALL(xor_fetch)
+
+GEN_ATOMIC_HELPER_ALL(xchg)
+
+#undef GEN_ATOMIC_HELPER_ALL
+#undef GEN_ATOMIC_HELPER
+
#endif /* CONFIG_SOFTMMU */
#endif /* TCG_H */
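(Likewise, each GEN_ATOMIC_HELPER_ALL(NAME) above declares seven prototypes; for instance GEN_ATOMIC_HELPER(fetch_add, uint32_t, w_le) expands to:)

    uint32_t helper_atomic_fetch_addw_le_mmu(CPUArchState *env,
                                             target_ulong addr, uint32_t val,
                                             TCGMemOpIdx oi, uintptr_t retaddr);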