author     Peter Maydell  2021-03-18 18:11:17 +0100
committer  Peter Maydell  2021-03-18 18:11:17 +0100
commit     4083904bc9fe5da580f7ca397b1e828fbc322732 (patch)
tree       5d90857c77c41f085285bb04bde2b50d65165990
parent     Merge remote-tracking branch 'remotes/stsquad/tags/pull-misc-6.0-updates-1703... (diff)
parent     tcg: Fix prototypes for tcg_out_vec_op and tcg_out_op (diff)
Merge remote-tracking branch 'remotes/rth-gitlab/tags/pull-tcg-20210317' into staging

TCI argument extraction helpers and disassembler
TCG build fix for gcc 11

# gpg: Signature made Wed 17 Mar 2021 15:29:47 GMT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth-gitlab/tags/pull-tcg-20210317: (38 commits)
  tcg: Fix prototypes for tcg_out_vec_op and tcg_out_op
  tcg/tci: Split out tcg_out_op_r[iI]
  tcg/tci: Split out tcg_out_op_v
  tcg/tci: Split out tcg_out_op_{rrm,rrrm,rrrrm}
  tcg/tci: Split out tcg_out_op_rrrrcl
  tcg/tci: Split out tcg_out_op_rrrr
  tcg/tci: Split out tcg_out_op_rrrrrr
  tcg/tci: Split out tcg_out_op_rrcl
  tcg/tci: Split out tcg_out_op_rrrbb
  tcg/tci: Split out tcg_out_op_rrrrrc
  tcg/tci: Split out tcg_out_op_rrrc
  tcg/tci: Split out tcg_out_op_rrr
  tcg/tci: Split out tcg_out_op_rr
  tcg/tci: Split out tcg_out_op_p
  tcg/tci: Split out tcg_out_op_l
  tcg/tci: Split out tcg_out_op_rrs
  tcg/tci: Push opcode emit into each case
  tcg/tci: Implement the disassembler properly
  tcg/tci: Remove tci_disas
  tcg/tci: Hoist op_size checking into tci_args_*
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  disas/tci.c                    61
-rw-r--r--  include/tcg/tcg-opc.h           2
-rw-r--r--  meson.build                     2
-rw-r--r--  tcg/aarch64/tcg-target.c.inc    3
-rw-r--r--  tcg/i386/tcg-target.c.inc       6
-rw-r--r--  tcg/mips/tcg-target.c.inc       3
-rw-r--r--  tcg/ppc/tcg-target.c.inc        8
-rw-r--r--  tcg/riscv/tcg-target.c.inc      3
-rw-r--r--  tcg/s390/tcg-target.c.inc       3
-rw-r--r--  tcg/tcg.c                      19
-rw-r--r--  tcg/tci.c                    1099
-rw-r--r--  tcg/tci/tcg-target-con-set.h    1
-rw-r--r--  tcg/tci/tcg-target.c.inc      480
-rw-r--r--  tcg/tci/tcg-target.h           13
14 files changed, 1099 insertions, 604 deletions
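
For orientation before reading the diff: the core of this series replaces ad-hoc,
one-byte-at-a-time operand reads in the TCI interpreter with per-format tci_args_*
helpers that decode a whole operand tuple at once and verify the instruction length.
The following is a minimal, self-contained sketch of that pattern, not QEMU code:
the opcode value, register numbers, and the read_b/args_rrr names are invented for
illustration. Only the two-byte instruction header (opcode in byte 0, total length
in byte 1) and the end-of-decode length check mirror check_size() in the tcg/tci.c
hunks that follow.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Read one operand byte, advancing the cursor. */
static uint8_t read_b(const uint8_t **p)
{
    return *(*p)++;
}

/*
 * Decode the "rrr" operand format: three register-number bytes.
 * On entry *p points just past the two-byte header; on exit it must
 * sit exactly at header + length, which is what check_size() asserts
 * in the patch.
 */
static void args_rrr(const uint8_t **p, uint8_t *r0, uint8_t *r1, uint8_t *r2)
{
    const uint8_t *start = *p;

    *r0 = read_b(p);
    *r1 = read_b(p);
    *r2 = read_b(p);

    assert(*p == (start - 2) + start[-1]);  /* start[-1] is the size byte */
}

int main(void)
{
    /* Hypothetical instruction: opcode 7, length 5, registers 2, 0, 1. */
    static const uint8_t insn[] = { 7, 5, 2, 0, 1 };
    const uint8_t *p = insn + 2;            /* skip opcode and size entry */
    uint8_t r0, r1, r2;

    args_rrr(&p, &r0, &r1, &r2);
    printf("op=%u dst=r%u srcs=r%u,r%u\n", insn[0], r0, r1, r2);
    return 0;
}

Decoding whole tuples this way lets the interpreter loop and the new disassembler
at the end of tcg/tci.c share exactly the same operand parsing, which is what makes
"Implement the disassembler properly" possible in this series.
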
diff --git a/disas/tci.c b/disas/tci.c
deleted file mode 100644
index f1d6c6b469..0000000000
--- a/disas/tci.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Tiny Code Interpreter for QEMU - disassembler
- *
- * Copyright (c) 2011 Stefan Weil
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "disas/dis-asm.h"
-#include "tcg/tcg.h"
-
-/* Disassemble TCI bytecode. */
-int print_insn_tci(bfd_vma addr, disassemble_info *info)
-{
- int length;
- uint8_t byte;
- int status;
- TCGOpcode op;
-
- status = info->read_memory_func(addr, &byte, 1, info);
- if (status != 0) {
- info->memory_error_func(status, addr, info);
- return -1;
- }
- op = byte;
-
- addr++;
- status = info->read_memory_func(addr, &byte, 1, info);
- if (status != 0) {
- info->memory_error_func(status, addr, info);
- return -1;
- }
- length = byte;
-
- if (op >= tcg_op_defs_max) {
- info->fprintf_func(info->stream, "illegal opcode %d", op);
- } else {
- const TCGOpDef *def = &tcg_op_defs[op];
- int nb_oargs = def->nb_oargs;
- int nb_iargs = def->nb_iargs;
- int nb_cargs = def->nb_cargs;
- /* TODO: Improve disassembler output. */
- info->fprintf_func(info->stream, "%s\to=%d i=%d c=%d",
- def->name, nb_oargs, nb_iargs, nb_cargs);
- }
-
- return length;
-}
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index 900984c005..bbb0884af8 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -278,10 +278,8 @@ DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT)
#ifdef TCG_TARGET_INTERPRETER
/* These opcodes are only for use between the tci generator and interpreter. */
DEF(tci_movi_i32, 1, 0, 1, TCG_OPF_NOT_PRESENT)
-#if TCG_TARGET_REG_BITS == 64
DEF(tci_movi_i64, 1, 0, 1, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
#endif
-#endif
#undef TLADDR_ARGS
#undef DATA64_ARGS
diff --git a/meson.build b/meson.build
index a7d2dd429d..5c85a15364 100644
--- a/meson.build
+++ b/meson.build
@@ -1943,7 +1943,7 @@ specific_ss.add(when: 'CONFIG_TCG', if_true: files(
'tcg/tcg-op.c',
'tcg/tcg.c',
))
-specific_ss.add(when: 'CONFIG_TCG_INTERPRETER', if_true: files('disas/tci.c', 'tcg/tci.c'))
+specific_ss.add(when: 'CONFIG_TCG_INTERPRETER', if_true: files('tcg/tci.c'))
subdir('backends')
subdir('disas')
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index fcaa5aface..f07ba98aa4 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -2286,7 +2286,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
unsigned vecl, unsigned vece,
- const TCGArg *args, const int *const_args)
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
static const AArch64Insn cmp_vec_insn[16] = {
[TCG_COND_EQ] = I3616_CMEQ,
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 40326c2806..415c5c0796 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -2177,7 +2177,8 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
}
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg *args, const int *const_args)
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
TCGArg a0, a1, a2;
int c, const_a2, vexop, rexw = 0;
@@ -2613,7 +2614,8 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
unsigned vecl, unsigned vece,
- const TCGArg *args, const int *const_args)
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
static int const add_insn[4] = {
OPC_PADDB, OPC_PADDW, OPC_PADDD, OPC_PADDQ
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index ab55f3109b..8738a3a581 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -1651,7 +1651,8 @@ static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6,
}
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg *args, const int *const_args)
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
MIPSInsn i1, i2;
TCGArg a0, a1, a2;
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index 4377d15d62..838ccfa42d 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -2319,8 +2319,9 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out32(s, BCLR | BO_ALWAYS);
}
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
- const int *const_args)
+static void tcg_out_op(TCGContext *s, TCGOpcode opc,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
TCGArg a0, a1, a2;
int c;
@@ -3115,7 +3116,8 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
unsigned vecl, unsigned vece,
- const TCGArg *args, const int *const_args)
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
static const uint32_t
add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index e700c52067..ef43147040 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -1212,7 +1212,8 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
static const tcg_insn_unit *tb_ret_addr;
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg *args, const int *const_args)
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
TCGArg a0 = args[0];
TCGArg a1 = args[1];
diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
index 695d7ee652..af8dfe81ac 100644
--- a/tcg/s390/tcg-target.c.inc
+++ b/tcg/s390/tcg-target.c.inc
@@ -1705,7 +1705,8 @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
case glue(glue(INDEX_op_,x),_i64)
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg *args, const int *const_args)
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
S390Opcode op, op2;
TCGArg a0, a1, a2;
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 2991112829..de91bb6e9e 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -107,8 +107,9 @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
TCGReg ret, tcg_target_long arg);
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
- const int *const_args);
+static void tcg_out_op(TCGContext *s, TCGOpcode opc,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg dst, TCGReg src);
@@ -116,9 +117,10 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg dst, int64_t arg);
-static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
- unsigned vece, const TCGArg *args,
- const int *const_args);
+static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
+ unsigned vecl, unsigned vece,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg dst, TCGReg src)
@@ -135,9 +137,10 @@ static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
{
g_assert_not_reached();
}
-static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
- unsigned vece, const TCGArg *args,
- const int *const_args)
+static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
+ unsigned vecl, unsigned vece,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
g_assert_not_reached();
}
diff --git a/tcg/tci.c b/tcg/tci.c
index 3ccd30c39c..d68c5a4e55 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -24,7 +24,7 @@
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
-# define tci_assert(cond) ((void)0)
+# define tci_assert(cond) ((void)(cond))
#endif
#include "qemu-common.h"
@@ -66,22 +66,32 @@ tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
regs[index] = value;
}
-#if TCG_TARGET_REG_BITS == 32
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
uint32_t low_index, uint64_t value)
{
tci_write_reg(regs, low_index, value);
tci_write_reg(regs, high_index, value >> 32);
}
-#endif
-#if TCG_TARGET_REG_BITS == 32
/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
return ((uint64_t)high << 32) + low;
}
-#endif
+
+/* Read constant byte from bytecode. */
+static uint8_t tci_read_b(const uint8_t **tb_ptr)
+{
+ return *(tb_ptr[0]++);
+}
+
+/* Read register number from bytecode. */
+static TCGReg tci_read_r(const uint8_t **tb_ptr)
+{
+ uint8_t regno = tci_read_b(tb_ptr);
+ tci_assert(regno < TCG_TARGET_NB_REGS);
+ return regno;
+}
/* Read constant (native size) from bytecode. */
static tcg_target_ulong tci_read_i(const uint8_t **tb_ptr)
@@ -107,59 +117,239 @@ static int32_t tci_read_s32(const uint8_t **tb_ptr)
return value;
}
+static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr)
+{
+ return tci_read_i(tb_ptr);
+}
+
+/*
+ * Load sets of arguments all at once. The naming convention is:
+ * tci_args_<arguments>
+ * where arguments is a sequence of
+ *
+ * b = immediate (bit position)
+ * c = condition (TCGCond)
+ * i = immediate (uint32_t)
+ * I = immediate (tcg_target_ulong)
+ * l = label or pointer
+ * m = immediate (TCGMemOpIdx)
+ * r = register
+ * s = signed ldst offset
+ */
+
+static void check_size(const uint8_t *start, const uint8_t **tb_ptr)
+{
+ const uint8_t *old_code_ptr = start - 2;
+ uint8_t op_size = old_code_ptr[1];
+ tci_assert(*tb_ptr == old_code_ptr + op_size);
+}
+
+static void tci_args_l(const uint8_t **tb_ptr, void **l0)
+{
+ const uint8_t *start = *tb_ptr;
+
+ *l0 = (void *)tci_read_label(tb_ptr);
+
+ check_size(start, tb_ptr);
+}
+
+static void tci_args_rr(const uint8_t **tb_ptr,
+ TCGReg *r0, TCGReg *r1)
+{
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *r1 = tci_read_r(tb_ptr);
+
+ check_size(start, tb_ptr);
+}
+
+static void tci_args_ri(const uint8_t **tb_ptr,
+ TCGReg *r0, tcg_target_ulong *i1)
+{
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *i1 = tci_read_i32(tb_ptr);
+
+ check_size(start, tb_ptr);
+}
+
#if TCG_TARGET_REG_BITS == 64
-/* Read constant (64 bit) from bytecode. */
-static uint64_t tci_read_i64(const uint8_t **tb_ptr)
+static void tci_args_rI(const uint8_t **tb_ptr,
+ TCGReg *r0, tcg_target_ulong *i1)
{
- uint64_t value = *(const uint64_t *)(*tb_ptr);
- *tb_ptr += sizeof(value);
- return value;
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *i1 = tci_read_i(tb_ptr);
+
+ check_size(start, tb_ptr);
}
#endif
-/* Read indexed register (native size) from bytecode. */
-static tcg_target_ulong
-tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
+static void tci_args_rrm(const uint8_t **tb_ptr,
+ TCGReg *r0, TCGReg *r1, TCGMemOpIdx *m2)
{
- tcg_target_ulong value = tci_read_reg(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *r1 = tci_read_r(tb_ptr);
+ *m2 = tci_read_i32(tb_ptr);
+
+ check_size(start, tb_ptr);
+}
+
+static void tci_args_rrr(const uint8_t **tb_ptr,
+ TCGReg *r0, TCGReg *r1, TCGReg *r2)
+{
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *r1 = tci_read_r(tb_ptr);
+ *r2 = tci_read_r(tb_ptr);
+
+ check_size(start, tb_ptr);
+}
+
+static void tci_args_rrs(const uint8_t **tb_ptr,
+ TCGReg *r0, TCGReg *r1, int32_t *i2)
+{
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *r1 = tci_read_r(tb_ptr);
+ *i2 = tci_read_s32(tb_ptr);
+
+ check_size(start, tb_ptr);
+}
+
+static void tci_args_rrcl(const uint8_t **tb_ptr,
+ TCGReg *r0, TCGReg *r1, TCGCond *c2, void **l3)
+{
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *r1 = tci_read_r(tb_ptr);
+ *c2 = tci_read_b(tb_ptr);
+ *l3 = (void *)tci_read_label(tb_ptr);
+
+ check_size(start, tb_ptr);
+}
+
+static void tci_args_rrrc(const uint8_t **tb_ptr,
+ TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
+{
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *r1 = tci_read_r(tb_ptr);
+ *r2 = tci_read_r(tb_ptr);
+ *c3 = tci_read_b(tb_ptr);
+
+ check_size(start, tb_ptr);
+}
+
+static void tci_args_rrrm(const uint8_t **tb_ptr,
+ TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGMemOpIdx *m3)
+{
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *r1 = tci_read_r(tb_ptr);
+ *r2 = tci_read_r(tb_ptr);
+ *m3 = tci_read_i32(tb_ptr);
+
+ check_size(start, tb_ptr);
+}
+
+static void tci_args_rrrbb(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
+ TCGReg *r2, uint8_t *i3, uint8_t *i4)
+{
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *r1 = tci_read_r(tb_ptr);
+ *r2 = tci_read_r(tb_ptr);
+ *i3 = tci_read_b(tb_ptr);
+ *i4 = tci_read_b(tb_ptr);
+
+ check_size(start, tb_ptr);
+}
+
+static void tci_args_rrrrm(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
+ TCGReg *r2, TCGReg *r3, TCGMemOpIdx *m4)
+{
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *r1 = tci_read_r(tb_ptr);
+ *r2 = tci_read_r(tb_ptr);
+ *r3 = tci_read_r(tb_ptr);
+ *m4 = tci_read_i32(tb_ptr);
+
+ check_size(start, tb_ptr);
}
#if TCG_TARGET_REG_BITS == 32
-/* Read two indexed registers (2 * 32 bit) from bytecode. */
-static uint64_t tci_read_r64(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
+static void tci_args_rrrr(const uint8_t **tb_ptr,
+ TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
- uint32_t low = tci_read_r(regs, tb_ptr);
- return tci_uint64(tci_read_r(regs, tb_ptr), low);
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *r1 = tci_read_r(tb_ptr);
+ *r2 = tci_read_r(tb_ptr);
+ *r3 = tci_read_r(tb_ptr);
+
+ check_size(start, tb_ptr);
}
-#elif TCG_TARGET_REG_BITS == 64
-/* Read indexed register (64 bit) from bytecode. */
-static uint64_t tci_read_r64(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
+
+static void tci_args_rrrrcl(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
+ TCGReg *r2, TCGReg *r3, TCGCond *c4, void **l5)
{
- return tci_read_r(regs, tb_ptr);
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *r1 = tci_read_r(tb_ptr);
+ *r2 = tci_read_r(tb_ptr);
+ *r3 = tci_read_r(tb_ptr);
+ *c4 = tci_read_b(tb_ptr);
+ *l5 = (void *)tci_read_label(tb_ptr);
+
+ check_size(start, tb_ptr);
}
-#endif
-/* Read indexed register(s) with target address from bytecode. */
-static target_ulong
-tci_read_ulong(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
+static void tci_args_rrrrrc(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
+ TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
- target_ulong taddr = tci_read_r(regs, tb_ptr);
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
- taddr += (uint64_t)tci_read_r(regs, tb_ptr) << 32;
-#endif
- return taddr;
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *r1 = tci_read_r(tb_ptr);
+ *r2 = tci_read_r(tb_ptr);
+ *r3 = tci_read_r(tb_ptr);
+ *r4 = tci_read_r(tb_ptr);
+ *c5 = tci_read_b(tb_ptr);
+
+ check_size(start, tb_ptr);
}
-static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr)
+static void tci_args_rrrrrr(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
+ TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
- tcg_target_ulong label = tci_read_i(tb_ptr);
- tci_assert(label != 0);
- return label;
+ const uint8_t *start = *tb_ptr;
+
+ *r0 = tci_read_r(tb_ptr);
+ *r1 = tci_read_r(tb_ptr);
+ *r2 = tci_read_r(tb_ptr);
+ *r3 = tci_read_r(tb_ptr);
+ *r4 = tci_read_r(tb_ptr);
+ *r5 = tci_read_r(tb_ptr);
+
+ check_size(start, tb_ptr);
}
+#endif
static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
@@ -299,7 +489,6 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tcg_target_ulong regs[TCG_TARGET_NB_REGS];
long tcg_temps[CPU_TEMP_BUF_NLONGS];
uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
- uintptr_t ret = 0;
regs[TCG_AREG0] = (tcg_target_ulong)env;
regs[TCG_REG_CALL_STACK] = sp_value;
@@ -307,494 +496,378 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
for (;;) {
TCGOpcode opc = tb_ptr[0];
-#if defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
- uint8_t op_size = tb_ptr[1];
- const uint8_t *old_code_ptr = tb_ptr;
-#endif
- tcg_target_ulong t0;
+ TCGReg r0, r1, r2, r3;
tcg_target_ulong t1;
- tcg_target_ulong t2;
- tcg_target_ulong label;
TCGCond condition;
target_ulong taddr;
- uint8_t tmp8;
- uint16_t tmp16;
+ uint8_t pos, len;
uint32_t tmp32;
uint64_t tmp64;
#if TCG_TARGET_REG_BITS == 32
- uint64_t v64;
+ TCGReg r4, r5;
+ uint64_t T1, T2;
#endif
TCGMemOpIdx oi;
+ int32_t ofs;
+ void *ptr;
/* Skip opcode and size entry. */
tb_ptr += 2;
switch (opc) {
case INDEX_op_call:
- t0 = tci_read_i(&tb_ptr);
+ tci_args_l(&tb_ptr, &ptr);
tci_tb_ptr = (uintptr_t)tb_ptr;
#if TCG_TARGET_REG_BITS == 32
- tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
- tci_read_reg(regs, TCG_REG_R1),
- tci_read_reg(regs, TCG_REG_R2),
- tci_read_reg(regs, TCG_REG_R3),
- tci_read_reg(regs, TCG_REG_R4),
- tci_read_reg(regs, TCG_REG_R5),
- tci_read_reg(regs, TCG_REG_R6),
- tci_read_reg(regs, TCG_REG_R7),
- tci_read_reg(regs, TCG_REG_R8),
- tci_read_reg(regs, TCG_REG_R9),
- tci_read_reg(regs, TCG_REG_R10),
- tci_read_reg(regs, TCG_REG_R11));
+ tmp64 = ((helper_function)ptr)(tci_read_reg(regs, TCG_REG_R0),
+ tci_read_reg(regs, TCG_REG_R1),
+ tci_read_reg(regs, TCG_REG_R2),
+ tci_read_reg(regs, TCG_REG_R3),
+ tci_read_reg(regs, TCG_REG_R4),
+ tci_read_reg(regs, TCG_REG_R5),
+ tci_read_reg(regs, TCG_REG_R6),
+ tci_read_reg(regs, TCG_REG_R7),
+ tci_read_reg(regs, TCG_REG_R8),
+ tci_read_reg(regs, TCG_REG_R9),
+ tci_read_reg(regs, TCG_REG_R10),
+ tci_read_reg(regs, TCG_REG_R11));
tci_write_reg(regs, TCG_REG_R0, tmp64);
tci_write_reg(regs, TCG_REG_R1, tmp64 >> 32);
#else
- tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
- tci_read_reg(regs, TCG_REG_R1),
- tci_read_reg(regs, TCG_REG_R2),
- tci_read_reg(regs, TCG_REG_R3),
- tci_read_reg(regs, TCG_REG_R4),
- tci_read_reg(regs, TCG_REG_R5));
+ tmp64 = ((helper_function)ptr)(tci_read_reg(regs, TCG_REG_R0),
+ tci_read_reg(regs, TCG_REG_R1),
+ tci_read_reg(regs, TCG_REG_R2),
+ tci_read_reg(regs, TCG_REG_R3),
+ tci_read_reg(regs, TCG_REG_R4),
+ tci_read_reg(regs, TCG_REG_R5));
tci_write_reg(regs, TCG_REG_R0, tmp64);
#endif
break;
case INDEX_op_br:
- label = tci_read_label(&tb_ptr);
- tci_assert(tb_ptr == old_code_ptr + op_size);
- tb_ptr = (uint8_t *)label;
+ tci_args_l(&tb_ptr, &ptr);
+ tb_ptr = ptr;
continue;
case INDEX_op_setcond_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- condition = *tb_ptr++;
- tci_write_reg(regs, t0, tci_compare32(t1, t2, condition));
+ tci_args_rrrc(&tb_ptr, &r0, &r1, &r2, &condition);
+ regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
break;
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_setcond2_i32:
- t0 = *tb_ptr++;
- tmp64 = tci_read_r64(regs, &tb_ptr);
- v64 = tci_read_r64(regs, &tb_ptr);
- condition = *tb_ptr++;
- tci_write_reg(regs, t0, tci_compare64(tmp64, v64, condition));
+ tci_args_rrrrrc(&tb_ptr, &r0, &r1, &r2, &r3, &r4, &condition);
+ T1 = tci_uint64(regs[r2], regs[r1]);
+ T2 = tci_uint64(regs[r4], regs[r3]);
+ regs[r0] = tci_compare64(T1, T2, condition);
break;
#elif TCG_TARGET_REG_BITS == 64
case INDEX_op_setcond_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- condition = *tb_ptr++;
- tci_write_reg(regs, t0, tci_compare64(t1, t2, condition));
+ tci_args_rrrc(&tb_ptr, &r0, &r1, &r2, &condition);
+ regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
break;
#endif
CASE_32_64(mov)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
+ tci_args_rr(&tb_ptr, &r0, &r1);
+ regs[r0] = regs[r1];
break;
case INDEX_op_tci_movi_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_i32(&tb_ptr);
- tci_write_reg(regs, t0, t1);
+ tci_args_ri(&tb_ptr, &r0, &t1);
+ regs[r0] = t1;
break;
/* Load/store operations (32 bit). */
CASE_32_64(ld8u)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_s32(&tb_ptr);
- tci_write_reg(regs, t0, *(uint8_t *)(t1 + t2));
+ tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ regs[r0] = *(uint8_t *)ptr;
break;
CASE_32_64(ld8s)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_s32(&tb_ptr);
- tci_write_reg(regs, t0, *(int8_t *)(t1 + t2));
+ tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ regs[r0] = *(int8_t *)ptr;
break;
CASE_32_64(ld16u)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_s32(&tb_ptr);
- tci_write_reg(regs, t0, *(uint16_t *)(t1 + t2));
+ tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ regs[r0] = *(uint16_t *)ptr;
break;
CASE_32_64(ld16s)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_s32(&tb_ptr);
- tci_write_reg(regs, t0, *(int16_t *)(t1 + t2));
+ tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ regs[r0] = *(int16_t *)ptr;
break;
case INDEX_op_ld_i32:
CASE_64(ld32u)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_s32(&tb_ptr);
- tci_write_reg(regs, t0, *(uint32_t *)(t1 + t2));
+ tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ regs[r0] = *(uint32_t *)ptr;
break;
CASE_32_64(st8)
- t0 = tci_read_r(regs, &tb_ptr);
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_s32(&tb_ptr);
- *(uint8_t *)(t1 + t2) = t0;
+ tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ *(uint8_t *)ptr = regs[r0];
break;
CASE_32_64(st16)
- t0 = tci_read_r(regs, &tb_ptr);
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_s32(&tb_ptr);
- *(uint16_t *)(t1 + t2) = t0;
+ tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ *(uint16_t *)ptr = regs[r0];
break;
case INDEX_op_st_i32:
CASE_64(st32)
- t0 = tci_read_r(regs, &tb_ptr);
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_s32(&tb_ptr);
- *(uint32_t *)(t1 + t2) = t0;
+ tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ *(uint32_t *)ptr = regs[r0];
break;
/* Arithmetic operations (mixed 32/64 bit). */
CASE_32_64(add)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 + t2);
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = regs[r1] + regs[r2];
break;
CASE_32_64(sub)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 - t2);
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = regs[r1] - regs[r2];
break;
CASE_32_64(mul)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 * t2);
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = regs[r1] * regs[r2];
break;
CASE_32_64(and)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 & t2);
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = regs[r1] & regs[r2];
break;
CASE_32_64(or)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 | t2);
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = regs[r1] | regs[r2];
break;
CASE_32_64(xor)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 ^ t2);
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = regs[r1] ^ regs[r2];
break;
/* Arithmetic operations (32 bit). */
case INDEX_op_div_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (int32_t)t1 / (int32_t)t2);
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
break;
case INDEX_op_divu_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (uint32_t)t1 / (uint32_t)t2);
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
break;
case INDEX_op_rem_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (int32_t)t1 % (int32_t)t2);
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
break;
case INDEX_op_remu_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (uint32_t)t1 % (uint32_t)t2);
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
break;
/* Shift/rotate operations (32 bit). */
case INDEX_op_shl_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (uint32_t)t1 << (t2 & 31));
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
break;
case INDEX_op_shr_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (uint32_t)t1 >> (t2 & 31));
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
break;
case INDEX_op_sar_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (int32_t)t1 >> (t2 & 31));
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
break;
#if TCG_TARGET_HAS_rot_i32
case INDEX_op_rotl_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, rol32(t1, t2 & 31));
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = rol32(regs[r1], regs[r2] & 31);
break;
case INDEX_op_rotr_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, ror32(t1, t2 & 31));
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = ror32(regs[r1], regs[r2] & 31);
break;
#endif
#if TCG_TARGET_HAS_deposit_i32
case INDEX_op_deposit_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tmp16 = *tb_ptr++;
- tmp8 = *tb_ptr++;
- tmp32 = (((1 << tmp8) - 1) << tmp16);
- tci_write_reg(regs, t0, (t1 & ~tmp32) | ((t2 << tmp16) & tmp32));
+ tci_args_rrrbb(&tb_ptr, &r0, &r1, &r2, &pos, &len);
+ regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
break;
#endif
case INDEX_op_brcond_i32:
- t0 = tci_read_r(regs, &tb_ptr);
- t1 = tci_read_r(regs, &tb_ptr);
- condition = *tb_ptr++;
- label = tci_read_label(&tb_ptr);
- if (tci_compare32(t0, t1, condition)) {
- tci_assert(tb_ptr == old_code_ptr + op_size);
- tb_ptr = (uint8_t *)label;
- continue;
+ tci_args_rrcl(&tb_ptr, &r0, &r1, &condition, &ptr);
+ if (tci_compare32(regs[r0], regs[r1], condition)) {
+ tb_ptr = ptr;
}
break;
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_add2_i32:
- t0 = *tb_ptr++;
- t1 = *tb_ptr++;
- tmp64 = tci_read_r64(regs, &tb_ptr);
- tmp64 += tci_read_r64(regs, &tb_ptr);
- tci_write_reg64(regs, t1, t0, tmp64);
+ tci_args_rrrrrr(&tb_ptr, &r0, &r1, &r2, &r3, &r4, &r5);
+ T1 = tci_uint64(regs[r3], regs[r2]);
+ T2 = tci_uint64(regs[r5], regs[r4]);
+ tci_write_reg64(regs, r1, r0, T1 + T2);
break;
case INDEX_op_sub2_i32:
- t0 = *tb_ptr++;
- t1 = *tb_ptr++;
- tmp64 = tci_read_r64(regs, &tb_ptr);
- tmp64 -= tci_read_r64(regs, &tb_ptr);
- tci_write_reg64(regs, t1, t0, tmp64);
+ tci_args_rrrrrr(&tb_ptr, &r0, &r1, &r2, &r3, &r4, &r5);
+ T1 = tci_uint64(regs[r3], regs[r2]);
+ T2 = tci_uint64(regs[r5], regs[r4]);
+ tci_write_reg64(regs, r1, r0, T1 - T2);
break;
case INDEX_op_brcond2_i32:
- tmp64 = tci_read_r64(regs, &tb_ptr);
- v64 = tci_read_r64(regs, &tb_ptr);
- condition = *tb_ptr++;
- label = tci_read_label(&tb_ptr);
- if (tci_compare64(tmp64, v64, condition)) {
- tci_assert(tb_ptr == old_code_ptr + op_size);
- tb_ptr = (uint8_t *)label;
+ tci_args_rrrrcl(&tb_ptr, &r0, &r1, &r2, &r3, &condition, &ptr);
+ T1 = tci_uint64(regs[r1], regs[r0]);
+ T2 = tci_uint64(regs[r3], regs[r2]);
+ if (tci_compare64(T1, T2, condition)) {
+ tb_ptr = ptr;
continue;
}
break;
case INDEX_op_mulu2_i32:
- t0 = *tb_ptr++;
- t1 = *tb_ptr++;
- t2 = tci_read_r(regs, &tb_ptr);
- tmp64 = (uint32_t)tci_read_r(regs, &tb_ptr);
- tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64);
+ tci_args_rrrr(&tb_ptr, &r0, &r1, &r2, &r3);
+ tci_write_reg64(regs, r1, r0, (uint64_t)regs[r2] * regs[r3]);
break;
#endif /* TCG_TARGET_REG_BITS == 32 */
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
CASE_32_64(ext8s)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (int8_t)t1);
+ tci_args_rr(&tb_ptr, &r0, &r1);
+ regs[r0] = (int8_t)regs[r1];
break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
CASE_32_64(ext16s)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (int16_t)t1);
+ tci_args_rr(&tb_ptr, &r0, &r1);
+ regs[r0] = (int16_t)regs[r1];
break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
CASE_32_64(ext8u)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (uint8_t)t1);
+ tci_args_rr(&tb_ptr, &r0, &r1);
+ regs[r0] = (uint8_t)regs[r1];
break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
CASE_32_64(ext16u)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (uint16_t)t1);
+ tci_args_rr(&tb_ptr, &r0, &r1);
+ regs[r0] = (uint16_t)regs[r1];
break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
CASE_32_64(bswap16)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, bswap16(t1));
+ tci_args_rr(&tb_ptr, &r0, &r1);
+ regs[r0] = bswap16(regs[r1]);
break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
CASE_32_64(bswap32)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, bswap32(t1));
+ tci_args_rr(&tb_ptr, &r0, &r1);
+ regs[r0] = bswap32(regs[r1]);
break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
CASE_32_64(not)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, ~t1);
+ tci_args_rr(&tb_ptr, &r0, &r1);
+ regs[r0] = ~regs[r1];
break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
CASE_32_64(neg)
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, -t1);
+ tci_args_rr(&tb_ptr, &r0, &r1);
+ regs[r0] = -regs[r1];
break;
#endif
#if TCG_TARGET_REG_BITS == 64
case INDEX_op_tci_movi_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_i64(&tb_ptr);
- tci_write_reg(regs, t0, t1);
+ tci_args_rI(&tb_ptr, &r0, &t1);
+ regs[r0] = t1;
break;
/* Load/store operations (64 bit). */
case INDEX_op_ld32s_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_s32(&tb_ptr);
- tci_write_reg(regs, t0, *(int32_t *)(t1 + t2));
+ tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ regs[r0] = *(int32_t *)ptr;
break;
case INDEX_op_ld_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_s32(&tb_ptr);
- tci_write_reg(regs, t0, *(uint64_t *)(t1 + t2));
+ tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ regs[r0] = *(uint64_t *)ptr;
break;
case INDEX_op_st_i64:
- t0 = tci_read_r(regs, &tb_ptr);
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_s32(&tb_ptr);
- *(uint64_t *)(t1 + t2) = t0;
+ tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ *(uint64_t *)ptr = regs[r0];
break;
/* Arithmetic operations (64 bit). */
case INDEX_op_div_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (int64_t)t1 / (int64_t)t2);
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
break;
case INDEX_op_divu_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (uint64_t)t1 / (uint64_t)t2);
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
break;
case INDEX_op_rem_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (int64_t)t1 % (int64_t)t2);
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
break;
case INDEX_op_remu_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (uint64_t)t1 % (uint64_t)t2);
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
break;
/* Shift/rotate operations (64 bit). */
case INDEX_op_shl_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 << (t2 & 63));
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = regs[r1] << (regs[r2] & 63);
break;
case INDEX_op_shr_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 >> (t2 & 63));
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = regs[r1] >> (regs[r2] & 63);
break;
case INDEX_op_sar_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, ((int64_t)t1 >> (t2 & 63)));
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
break;
#if TCG_TARGET_HAS_rot_i64
case INDEX_op_rotl_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, rol64(t1, t2 & 63));
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = rol64(regs[r1], regs[r2] & 63);
break;
case INDEX_op_rotr_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, ror64(t1, t2 & 63));
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ regs[r0] = ror64(regs[r1], regs[r2] & 63);
break;
#endif
#if TCG_TARGET_HAS_deposit_i64
case INDEX_op_deposit_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tmp16 = *tb_ptr++;
- tmp8 = *tb_ptr++;
- tmp64 = (((1ULL << tmp8) - 1) << tmp16);
- tci_write_reg(regs, t0, (t1 & ~tmp64) | ((t2 << tmp16) & tmp64));
+ tci_args_rrrbb(&tb_ptr, &r0, &r1, &r2, &pos, &len);
+ regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
break;
#endif
case INDEX_op_brcond_i64:
- t0 = tci_read_r(regs, &tb_ptr);
- t1 = tci_read_r(regs, &tb_ptr);
- condition = *tb_ptr++;
- label = tci_read_label(&tb_ptr);
- if (tci_compare64(t0, t1, condition)) {
- tci_assert(tb_ptr == old_code_ptr + op_size);
- tb_ptr = (uint8_t *)label;
- continue;
+ tci_args_rrcl(&tb_ptr, &r0, &r1, &condition, &ptr);
+ if (tci_compare64(regs[r0], regs[r1], condition)) {
+ tb_ptr = ptr;
}
break;
-#if TCG_TARGET_HAS_ext32s_i64
case INDEX_op_ext32s_i64:
-#endif
case INDEX_op_ext_i32_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (int32_t)t1);
+ tci_args_rr(&tb_ptr, &r0, &r1);
+ regs[r0] = (int32_t)regs[r1];
break;
-#if TCG_TARGET_HAS_ext32u_i64
case INDEX_op_ext32u_i64:
-#endif
case INDEX_op_extu_i32_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, (uint32_t)t1);
+ tci_args_rr(&tb_ptr, &r0, &r1);
+ regs[r0] = (uint32_t)regs[r1];
break;
#if TCG_TARGET_HAS_bswap64_i64
case INDEX_op_bswap64_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, bswap64(t1));
+ tci_args_rr(&tb_ptr, &r0, &r1);
+ regs[r0] = bswap64(regs[r1]);
break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */
@@ -802,21 +875,22 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
/* QEMU specific operations. */
case INDEX_op_exit_tb:
- ret = *(uint64_t *)tb_ptr;
- goto exit;
- break;
+ tci_args_l(&tb_ptr, &ptr);
+ return (uintptr_t)ptr;
+
case INDEX_op_goto_tb:
- /* Jump address is aligned */
- tb_ptr = QEMU_ALIGN_PTR_UP(tb_ptr, 4);
- t0 = qatomic_read((int32_t *)tb_ptr);
- tb_ptr += sizeof(int32_t);
- tci_assert(tb_ptr == old_code_ptr + op_size);
- tb_ptr += (int32_t)t0;
- continue;
+ tci_args_l(&tb_ptr, &ptr);
+ tb_ptr = *(void **)ptr;
+ break;
+
case INDEX_op_qemu_ld_i32:
- t0 = *tb_ptr++;
- taddr = tci_read_ulong(regs, &tb_ptr);
- oi = tci_read_i(&tb_ptr);
+ if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+ tci_args_rrm(&tb_ptr, &r0, &r1, &oi);
+ taddr = regs[r1];
+ } else {
+ tci_args_rrrm(&tb_ptr, &r0, &r1, &r2, &oi);
+ taddr = tci_uint64(regs[r2], regs[r1]);
+ }
switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
case MO_UB:
tmp32 = qemu_ld_ub;
@@ -845,15 +919,20 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
default:
g_assert_not_reached();
}
- tci_write_reg(regs, t0, tmp32);
+ regs[r0] = tmp32;
break;
+
case INDEX_op_qemu_ld_i64:
- t0 = *tb_ptr++;
- if (TCG_TARGET_REG_BITS == 32) {
- t1 = *tb_ptr++;
+ if (TCG_TARGET_REG_BITS == 64) {
+ tci_args_rrm(&tb_ptr, &r0, &r1, &oi);
+ taddr = regs[r1];
+ } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+ tci_args_rrrm(&tb_ptr, &r0, &r1, &r2, &oi);
+ taddr = regs[r2];
+ } else {
+ tci_args_rrrrm(&tb_ptr, &r0, &r1, &r2, &r3, &oi);
+ taddr = tci_uint64(regs[r3], regs[r2]);
}
- taddr = tci_read_ulong(regs, &tb_ptr);
- oi = tci_read_i(&tb_ptr);
switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
case MO_UB:
tmp64 = qemu_ld_ub;
@@ -894,39 +973,58 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
default:
g_assert_not_reached();
}
- tci_write_reg(regs, t0, tmp64);
if (TCG_TARGET_REG_BITS == 32) {
- tci_write_reg(regs, t1, tmp64 >> 32);
+ tci_write_reg64(regs, r1, r0, tmp64);
+ } else {
+ regs[r0] = tmp64;
}
break;
+
case INDEX_op_qemu_st_i32:
- t0 = tci_read_r(regs, &tb_ptr);
- taddr = tci_read_ulong(regs, &tb_ptr);
- oi = tci_read_i(&tb_ptr);
+ if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+ tci_args_rrm(&tb_ptr, &r0, &r1, &oi);
+ taddr = regs[r1];
+ } else {
+ tci_args_rrrm(&tb_ptr, &r0, &r1, &r2, &oi);
+ taddr = tci_uint64(regs[r2], regs[r1]);
+ }
+ tmp32 = regs[r0];
switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
case MO_UB:
- qemu_st_b(t0);
+ qemu_st_b(tmp32);
break;
case MO_LEUW:
- qemu_st_lew(t0);
+ qemu_st_lew(tmp32);
break;
case MO_LEUL:
- qemu_st_lel(t0);
+ qemu_st_lel(tmp32);
break;
case MO_BEUW:
- qemu_st_bew(t0);
+ qemu_st_bew(tmp32);
break;
case MO_BEUL:
- qemu_st_bel(t0);
+ qemu_st_bel(tmp32);
break;
default:
g_assert_not_reached();
}
break;
+
case INDEX_op_qemu_st_i64:
- tmp64 = tci_read_r64(regs, &tb_ptr);
- taddr = tci_read_ulong(regs, &tb_ptr);
- oi = tci_read_i(&tb_ptr);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tci_args_rrm(&tb_ptr, &r0, &r1, &oi);
+ taddr = regs[r1];
+ tmp64 = regs[r0];
+ } else {
+ if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+ tci_args_rrrm(&tb_ptr, &r0, &r1, &r2, &oi);
+ taddr = regs[r2];
+ } else {
+ tci_args_rrrrm(&tb_ptr, &r0, &r1, &r2, &r3, &oi);
+ taddr = tci_uint64(regs[r3], regs[r2]);
+ }
+ tmp64 = tci_uint64(regs[r1], regs[r0]);
+ }
switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
case MO_UB:
qemu_st_b(tmp64);
@@ -953,6 +1051,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
g_assert_not_reached();
}
break;
+
case INDEX_op_mb:
/* Ensure ordering for all kinds */
smp_mb();
@@ -960,8 +1059,288 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
default:
g_assert_not_reached();
}
- tci_assert(tb_ptr == old_code_ptr + op_size);
}
-exit:
- return ret;
+}
+
+/*
+ * Disassembler that matches the interpreter
+ */
+
+static const char *str_r(TCGReg r)
+{
+ static const char regs[TCG_TARGET_NB_REGS][4] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
+ };
+
+ QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
+ QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);
+
+ assert((unsigned)r < TCG_TARGET_NB_REGS);
+ return regs[r];
+}
+
+static const char *str_c(TCGCond c)
+{
+ static const char cond[16][8] = {
+ [TCG_COND_NEVER] = "never",
+ [TCG_COND_ALWAYS] = "always",
+ [TCG_COND_EQ] = "eq",
+ [TCG_COND_NE] = "ne",
+ [TCG_COND_LT] = "lt",
+ [TCG_COND_GE] = "ge",
+ [TCG_COND_LE] = "le",
+ [TCG_COND_GT] = "gt",
+ [TCG_COND_LTU] = "ltu",
+ [TCG_COND_GEU] = "geu",
+ [TCG_COND_LEU] = "leu",
+ [TCG_COND_GTU] = "gtu",
+ };
+
+ assert((unsigned)c < ARRAY_SIZE(cond));
+ assert(cond[c][0] != 0);
+ return cond[c];
+}
+
+/* Disassemble TCI bytecode. */
+int print_insn_tci(bfd_vma addr, disassemble_info *info)
+{
+ uint8_t buf[256];
+ int length, status;
+ const TCGOpDef *def;
+ const char *op_name;
+ TCGOpcode op;
+ TCGReg r0, r1, r2, r3;
+#if TCG_TARGET_REG_BITS == 32
+ TCGReg r4, r5;
+#endif
+ tcg_target_ulong i1;
+ int32_t s2;
+ TCGCond c;
+ TCGMemOpIdx oi;
+ uint8_t pos, len;
+ void *ptr;
+ const uint8_t *tb_ptr;
+
+ status = info->read_memory_func(addr, buf, 2, info);
+ if (status != 0) {
+ info->memory_error_func(status, addr, info);
+ return -1;
+ }
+ op = buf[0];
+ length = buf[1];
+
+ if (length < 2) {
+ info->fprintf_func(info->stream, "invalid length %d", length);
+ return 1;
+ }
+
+ status = info->read_memory_func(addr + 2, buf + 2, length - 2, info);
+ if (status != 0) {
+ info->memory_error_func(status, addr + 2, info);
+ return -1;
+ }
+
+ def = &tcg_op_defs[op];
+ op_name = def->name;
+ tb_ptr = buf + 2;
+
+ switch (op) {
+ case INDEX_op_br:
+ case INDEX_op_call:
+ case INDEX_op_exit_tb:
+ case INDEX_op_goto_tb:
+ tci_args_l(&tb_ptr, &ptr);
+ info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
+ break;
+
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ tci_args_rrcl(&tb_ptr, &r0, &r1, &c, &ptr);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %p",
+ op_name, str_r(r0), str_r(r1), str_c(c), ptr);
+ break;
+
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ tci_args_rrrc(&tb_ptr, &r0, &r1, &r2, &c);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
+ break;
+
+ case INDEX_op_tci_movi_i32:
+ tci_args_ri(&tb_ptr, &r0, &i1);
+ info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
+ op_name, str_r(r0), i1);
+ break;
+
+#if TCG_TARGET_REG_BITS == 64
+ case INDEX_op_tci_movi_i64:
+ tci_args_rI(&tb_ptr, &r0, &i1);
+ info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
+ op_name, str_r(r0), i1);
+ break;
+#endif
+
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8u_i64:
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld8s_i64:
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16u_i64:
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld16s_i64:
+ case INDEX_op_ld32u_i64:
+ case INDEX_op_ld32s_i64:
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld_i64:
+ case INDEX_op_st8_i32:
+ case INDEX_op_st8_i64:
+ case INDEX_op_st16_i32:
+ case INDEX_op_st16_i64:
+ case INDEX_op_st32_i64:
+ case INDEX_op_st_i32:
+ case INDEX_op_st_i64:
+ tci_args_rrs(&tb_ptr, &r0, &r1, &s2);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %d",
+ op_name, str_r(r0), str_r(r1), s2);
+ break;
+
+ case INDEX_op_mov_i32:
+ case INDEX_op_mov_i64:
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext8s_i64:
+ case INDEX_op_ext8u_i32:
+ case INDEX_op_ext8u_i64:
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
+ case INDEX_op_ext16u_i32:
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap32_i32:
+ case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap64_i64:
+ case INDEX_op_not_i32:
+ case INDEX_op_not_i64:
+ case INDEX_op_neg_i32:
+ case INDEX_op_neg_i64:
+ tci_args_rr(&tb_ptr, &r0, &r1);
+ info->fprintf_func(info->stream, "%-12s %s, %s",
+ op_name, str_r(r0), str_r(r1));
+ break;
+
+ case INDEX_op_add_i32:
+ case INDEX_op_add_i64:
+ case INDEX_op_sub_i32:
+ case INDEX_op_sub_i64:
+ case INDEX_op_mul_i32:
+ case INDEX_op_mul_i64:
+ case INDEX_op_and_i32:
+ case INDEX_op_and_i64:
+ case INDEX_op_or_i32:
+ case INDEX_op_or_i64:
+ case INDEX_op_xor_i32:
+ case INDEX_op_xor_i64:
+ case INDEX_op_div_i32:
+ case INDEX_op_div_i64:
+ case INDEX_op_rem_i32:
+ case INDEX_op_rem_i64:
+ case INDEX_op_divu_i32:
+ case INDEX_op_divu_i64:
+ case INDEX_op_remu_i32:
+ case INDEX_op_remu_i64:
+ case INDEX_op_shl_i32:
+ case INDEX_op_shl_i64:
+ case INDEX_op_shr_i32:
+ case INDEX_op_shr_i64:
+ case INDEX_op_sar_i32:
+ case INDEX_op_sar_i64:
+ case INDEX_op_rotl_i32:
+ case INDEX_op_rotl_i64:
+ case INDEX_op_rotr_i32:
+ case INDEX_op_rotr_i64:
+ tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s",
+ op_name, str_r(r0), str_r(r1), str_r(r2));
+ break;
+
+ case INDEX_op_deposit_i32:
+ case INDEX_op_deposit_i64:
+ tci_args_rrrbb(&tb_ptr, &r0, &r1, &r2, &pos, &len);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
+ op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
+ break;
+
+#if TCG_TARGET_REG_BITS == 32
+ case INDEX_op_setcond2_i32:
+ tci_args_rrrrrc(&tb_ptr, &r0, &r1, &r2, &r3, &r4, &c);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1), str_r(r2),
+ str_r(r3), str_r(r4), str_c(c));
+ break;
+
+ case INDEX_op_brcond2_i32:
+ tci_args_rrrrcl(&tb_ptr, &r0, &r1, &r2, &r3, &c, &ptr);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %p",
+ op_name, str_r(r0), str_r(r1),
+ str_r(r2), str_r(r3), str_c(c), ptr);
+ break;
+
+ case INDEX_op_mulu2_i32:
+ tci_args_rrrr(&tb_ptr, &r0, &r1, &r2, &r3);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1),
+ str_r(r2), str_r(r3));
+ break;
+
+ case INDEX_op_add2_i32:
+ case INDEX_op_sub2_i32:
+ tci_args_rrrrrr(&tb_ptr, &r0, &r1, &r2, &r3, &r4, &r5);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1), str_r(r2),
+ str_r(r3), str_r(r4), str_r(r5));
+ break;
+#endif
+
+ case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_st_i64:
+ len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
+ goto do_qemu_ldst;
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_st_i32:
+ len = 1;
+ do_qemu_ldst:
+ len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS);
+ switch (len) {
+ case 2:
+ tci_args_rrm(&tb_ptr, &r0, &r1, &oi);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %x",
+ op_name, str_r(r0), str_r(r1), oi);
+ break;
+ case 3:
+ tci_args_rrrm(&tb_ptr, &r0, &r1, &r2, &oi);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
+ op_name, str_r(r0), str_r(r1), str_r(r2), oi);
+ break;
+ case 4:
+ tci_args_rrrrm(&tb_ptr, &r0, &r1, &r2, &r3, &oi);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %x",
+ op_name, str_r(r0), str_r(r1),
+ str_r(r2), str_r(r3), oi);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+
+ default:
+ info->fprintf_func(info->stream, "illegal opcode %d", op);
+ break;
+ }
+
+ return length;
}
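
The encode side has to agree byte for byte with the tci_args_* decoders above. Here
is a hedged sketch of that half, again not QEMU code: Ctx, out8, and out_op_rrr are
invented stand-ins. It shows the one idiom every tcg_out_op_* helper in the
tcg/tci/tcg-target.c.inc hunks below shares: emit the opcode, reserve the size byte,
append the operands, then backpatch the size with old_code_ptr[1].

#include <stdint.h>

/* Invented stand-in for TCGContext: just a write cursor. */
typedef struct {
    uint8_t *code_ptr;
} Ctx;

static void out8(Ctx *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

/*
 * Emit the "rrr" format: opcode, placeholder size byte, three register
 * numbers; then backpatch the size byte with the bytes actually written.
 */
static void out_op_rrr(Ctx *s, uint8_t op, uint8_t r0, uint8_t r1, uint8_t r2)
{
    uint8_t *old_code_ptr = s->code_ptr;

    out8(s, op);
    out8(s, 0);                 /* size entry, patched below */
    out8(s, r0);
    out8(s, r1);
    out8(s, r2);

    old_code_ptr[1] = s->code_ptr - old_code_ptr;   /* == 5 here */
}

Centralizing this in per-format emitters is why the series can delete the open-coded
tcg_out_r/tcg_out32 sequences from tcg_out_ld, tcg_out_mov, tcg_out_movi, and the
big tcg_out_op switch in the hunks that follow.
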
diff --git a/tcg/tci/tcg-target-con-set.h b/tcg/tci/tcg-target-con-set.h
index f51b7bcb13..316730f32c 100644
--- a/tcg/tci/tcg-target-con-set.h
+++ b/tcg/tci/tcg-target-con-set.h
@@ -13,7 +13,6 @@ C_O0_I2(r, r)
C_O0_I3(r, r, r)
C_O0_I4(r, r, r, r)
C_O1_I1(r, r)
-C_O1_I2(r, 0, r)
C_O1_I2(r, r, r)
C_O1_I4(r, r, r, r, r)
C_O2_I1(r, r, r)
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index c79f9c32d8..ee6cdfec71 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -126,11 +126,9 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_rotr_i64:
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
- return C_O1_I2(r, r, r);
-
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
- return C_O1_I2(r, 0, r);
+ return C_O1_I2(r, r, r);
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
@@ -255,16 +253,6 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
return true;
}
-#if defined(CONFIG_DEBUG_TCG_INTERPRETER)
-/* Show current bytecode. Used by tcg interpreter. */
-void tci_disas(uint8_t opc)
-{
- const TCGOpDef *def = &tcg_op_defs[opc];
- fprintf(stderr, "TCG %s %u, %u, %u\n",
- def->name, def->nb_oargs, def->nb_iargs, def->nb_cargs);
-}
-#endif
-
/* Write value (native size). */
static void tcg_out_i(TCGContext *s, tcg_target_ulong v)
{
@@ -309,67 +297,300 @@ static void stack_bounds_check(TCGReg base, target_long offset)
}
}
-static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
- intptr_t arg2)
+static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
uint8_t *old_code_ptr = s->code_ptr;
- stack_bounds_check(arg1, arg2);
- if (type == TCG_TYPE_I32) {
- tcg_out_op_t(s, INDEX_op_ld_i32);
- tcg_out_r(s, ret);
- tcg_out_r(s, arg1);
- tcg_out32(s, arg2);
- } else {
- tcg_debug_assert(type == TCG_TYPE_I64);
+ tcg_out_op_t(s, op);
+ tci_out_label(s, l0);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
+static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_i(s, (uintptr_t)p0);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
+static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
+static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out32(s, i1);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
#if TCG_TARGET_REG_BITS == 64
- tcg_out_op_t(s, INDEX_op_ld_i64);
- tcg_out_r(s, ret);
- tcg_out_r(s, arg1);
- tcg_debug_assert(arg2 == (int32_t)arg2);
- tcg_out32(s, arg2);
-#else
- TODO();
+static void tcg_out_op_rI(TCGContext *s, TCGOpcode op,
+ TCGReg r0, uint64_t i1)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out64(s, i1);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
#endif
- }
+
+static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out_r(s, r1);
+
old_code_ptr[1] = s->code_ptr - old_code_ptr;
}
-static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
+static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGArg m2)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out_r(s, r1);
+ tcg_out32(s, m2);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
+static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGReg r2)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out_r(s, r1);
+ tcg_out_r(s, r2);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
+static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, intptr_t i2)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out_r(s, r1);
+ tcg_debug_assert(i2 == (int32_t)i2);
+ tcg_out32(s, i2);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
+static void tcg_out_op_rrcl(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGCond c2, TCGLabel *l3)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out_r(s, r1);
+ tcg_out8(s, c2);
+ tci_out_label(s, l3);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
+static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out_r(s, r1);
+ tcg_out_r(s, r2);
+ tcg_out8(s, c3);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
+static void tcg_out_op_rrrm(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGReg r2, TCGArg m3)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out_r(s, r1);
+ tcg_out_r(s, r2);
+ tcg_out32(s, m3);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
+static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
+ TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
uint8_t *old_code_ptr = s->code_ptr;
- tcg_debug_assert(ret != arg);
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out_r(s, r1);
+ tcg_out_r(s, r2);
+ tcg_out8(s, b3);
+ tcg_out8(s, b4);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
+static void tcg_out_op_rrrrm(TCGContext *s, TCGOpcode op, TCGReg r0,
+ TCGReg r1, TCGReg r2, TCGReg r3, TCGArg m4)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out_r(s, r1);
+ tcg_out_r(s, r2);
+ tcg_out_r(s, r3);
+ tcg_out32(s, m4);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
#if TCG_TARGET_REG_BITS == 32
- tcg_out_op_t(s, INDEX_op_mov_i32);
-#else
- tcg_out_op_t(s, INDEX_op_mov_i64);
-#endif
- tcg_out_r(s, ret);
- tcg_out_r(s, arg);
+static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out_r(s, r1);
+ tcg_out_r(s, r2);
+ tcg_out_r(s, r3);
+
old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
+static void tcg_out_op_rrrrcl(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3,
+ TCGCond c4, TCGLabel *l5)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out_r(s, r1);
+ tcg_out_r(s, r2);
+ tcg_out_r(s, r3);
+ tcg_out8(s, c4);
+ tci_out_label(s, l5);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
+static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGReg r2,
+ TCGReg r3, TCGReg r4, TCGCond c5)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out_r(s, r1);
+ tcg_out_r(s, r2);
+ tcg_out_r(s, r3);
+ tcg_out_r(s, r4);
+ tcg_out8(s, c5);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+
+static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGReg r2,
+ TCGReg r3, TCGReg r4, TCGReg r5)
+{
+ uint8_t *old_code_ptr = s->code_ptr;
+
+ tcg_out_op_t(s, op);
+ tcg_out_r(s, r0);
+ tcg_out_r(s, r1);
+ tcg_out_r(s, r2);
+ tcg_out_r(s, r3);
+ tcg_out_r(s, r4);
+ tcg_out_r(s, r5);
+
+ old_code_ptr[1] = s->code_ptr - old_code_ptr;
+}
+#endif
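+
+/*
+ * The four wide formats above (rrrr, rrrrcl, rrrrrc, rrrrrr) are only
+ * needed when TCG_TARGET_REG_BITS == 32: there a 64-bit value occupies
+ * a register pair, so ops such as add2/sub2, brcond2, setcond2 and
+ * mulu2 take the low and high halves as separate register operands.
+ */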
+
+static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
+ intptr_t offset)
+{
+ stack_bounds_check(base, offset);
+ switch (type) {
+ case TCG_TYPE_I32:
+ tcg_out_op_rrs(s, INDEX_op_ld_i32, val, base, offset);
+ break;
+#if TCG_TARGET_REG_BITS == 64
+ case TCG_TYPE_I64:
+ tcg_out_op_rrs(s, INDEX_op_ld_i64, val, base, offset);
+ break;
+#endif
+ default:
+ g_assert_not_reached();
+ }
+}
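+
+/*
+ * Example (illustrative register and offset values): spilling a
+ * 32-bit temp from the frame at offset 16 would emit
+ *
+ *     tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R2, TCG_REG_R1, 16);
+ *
+ * i.e. INDEX_op_ld_i32 followed by two register bytes and a 32-bit
+ * offset, after stack_bounds_check() has validated base and offset.
+ */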
+
+static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
+{
+ switch (type) {
+ case TCG_TYPE_I32:
+ tcg_out_op_rr(s, INDEX_op_mov_i32, ret, arg);
+ break;
+#if TCG_TARGET_REG_BITS == 64
+ case TCG_TYPE_I64:
+ tcg_out_op_rr(s, INDEX_op_mov_i64, ret, arg);
+ break;
+#endif
+ default:
+ g_assert_not_reached();
+ }
return true;
}
static void tcg_out_movi(TCGContext *s, TCGType type,
- TCGReg t0, tcg_target_long arg)
+ TCGReg ret, tcg_target_long arg)
{
- uint8_t *old_code_ptr = s->code_ptr;
- uint32_t arg32 = arg;
- if (type == TCG_TYPE_I32 || arg == arg32) {
- tcg_out_op_t(s, INDEX_op_tci_movi_i32);
- tcg_out_r(s, t0);
- tcg_out32(s, arg32);
- } else {
- tcg_debug_assert(type == TCG_TYPE_I64);
+ switch (type) {
+ case TCG_TYPE_I32:
+ tcg_out_op_ri(s, INDEX_op_tci_movi_i32, ret, arg);
+ break;
#if TCG_TARGET_REG_BITS == 64
- tcg_out_op_t(s, INDEX_op_tci_movi_i64);
- tcg_out_r(s, t0);
- tcg_out64(s, arg);
-#else
- TODO();
+ case TCG_TYPE_I64:
+ tcg_out_op_rI(s, INDEX_op_tci_movi_i64, ret, arg);
+ break;
#endif
+ default:
+ g_assert_not_reached();
}
- old_code_ptr[1] = s->code_ptr - old_code_ptr;
}
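+
+/*
+ * Encoding sizes differ by type here, assuming the r[iI] helpers
+ * follow the same pattern as the rest of the family: tcg_out_op_ri()
+ * stores a 32-bit immediate for tci_movi_i32, while tcg_out_op_rI()
+ * stores a full 64-bit immediate for tci_movi_i64. A 64-bit constant
+ * that happens to fit in 32 bits is no longer narrowed to the i32
+ * form; each type maps to exactly one format.
+ */
+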
static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
@@ -392,52 +613,34 @@ static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
# define CASE_64(x)
#endif
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
- const int *const_args)
+static void tcg_out_op(TCGContext *s, TCGOpcode opc,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
- uint8_t *old_code_ptr = s->code_ptr;
-
- tcg_out_op_t(s, opc);
-
switch (opc) {
case INDEX_op_exit_tb:
- tcg_out64(s, args[0]);
+ tcg_out_op_p(s, opc, (void *)args[0]);
break;
case INDEX_op_goto_tb:
- if (s->tb_jmp_insn_offset) {
- /* Direct jump method. */
- /* Align for atomic patching and thread safety */
- s->code_ptr = QEMU_ALIGN_PTR_UP(s->code_ptr, 4);
- s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
- tcg_out32(s, 0);
- } else {
- /* Indirect jump method. */
- TODO();
- }
+        tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
+        /* Indirect jump method. */
+ tcg_out_op_p(s, opc, s->tb_jmp_target_addr + args[0]);
set_jmp_reset_offset(s, args[0]);
break;
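+    /*
+     * With TCG_TARGET_HAS_direct_jump now 0 (see tcg-target.h below),
+     * goto_tb always uses the indirect method: the bytecode carries a
+     * pointer into s->tb_jmp_target_addr[], and the interpreter
+     * presumably reloads the current target from that slot on every
+     * execution. Retargeting a TB is then a plain pointer store; no
+     * code patching and no icache flush are needed.
+     */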
case INDEX_op_br:
- tci_out_label(s, arg_label(args[0]));
+ tcg_out_op_l(s, opc, arg_label(args[0]));
break;
CASE_32_64(setcond)
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
- tcg_out8(s, args[3]); /* condition */
+ tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
break;
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_setcond2_i32:
- /* setcond2_i32 cond, t0, t1_low, t1_high, t2_low, t2_high */
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
- tcg_out_r(s, args[3]);
- tcg_out_r(s, args[4]);
- tcg_out8(s, args[5]); /* condition */
+ tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
+ args[3], args[4], args[5]);
break;
#endif
@@ -455,10 +658,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
CASE_64(st32)
CASE_64(st)
stack_bounds_check(args[1], args[2]);
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_debug_assert(args[2] == (int32_t)args[2]);
- tcg_out32(s, args[2]);
+ tcg_out_op_rrs(s, opc, args[0], args[1], args[2]);
break;
CASE_32_64(add)
@@ -481,26 +681,23 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
CASE_32_64(divu) /* Optional (TCG_TARGET_HAS_div_*). */
CASE_32_64(rem) /* Optional (TCG_TARGET_HAS_div_*). */
CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
+ tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
break;
CASE_32_64(deposit) /* Optional (TCG_TARGET_HAS_deposit_*). */
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
- tcg_debug_assert(args[3] <= UINT8_MAX);
- tcg_out8(s, args[3]);
- tcg_debug_assert(args[4] <= UINT8_MAX);
- tcg_out8(s, args[4]);
+ {
+ TCGArg pos = args[3], len = args[4];
+ TCGArg max = opc == INDEX_op_deposit_i32 ? 32 : 64;
+
+ tcg_debug_assert(pos < max);
+ tcg_debug_assert(pos + len <= max);
+
+ tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], pos, len);
+ }
break;
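+    /*
+     * Worked example (hypothetical operands): deposit_i32 with
+     * pos = 8, len = 8 replaces bits [15:8] of args[1] with the low
+     * 8 bits of args[2], writing the result to args[0]. The asserts
+     * above guarantee pos < 32 and pos + len <= 32 (64 for the i64
+     * variant), so both fields always fit the single bytes emitted
+     * by tcg_out_op_rrrbb().
+     */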
CASE_32_64(brcond)
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out8(s, args[2]); /* condition */
- tci_out_label(s, arg_label(args[3]));
+ tcg_out_op_rrcl(s, opc, args[0], args[1], args[2], arg_label(args[3]));
break;
CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */
@@ -516,60 +713,47 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
CASE_32_64(bswap16) /* Optional (TCG_TARGET_HAS_bswap16_*). */
CASE_32_64(bswap32) /* Optional (TCG_TARGET_HAS_bswap32_*). */
CASE_64(bswap64) /* Optional (TCG_TARGET_HAS_bswap64_i64). */
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
+ tcg_out_op_rr(s, opc, args[0], args[1]);
break;
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
- tcg_out_r(s, args[3]);
- tcg_out_r(s, args[4]);
- tcg_out_r(s, args[5]);
+ tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
+ args[3], args[4], args[5]);
break;
case INDEX_op_brcond2_i32:
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
- tcg_out_r(s, args[3]);
- tcg_out8(s, args[4]); /* condition */
- tci_out_label(s, arg_label(args[5]));
+ tcg_out_op_rrrrcl(s, opc, args[0], args[1], args[2],
+ args[3], args[4], arg_label(args[5]));
break;
case INDEX_op_mulu2_i32:
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
- tcg_out_r(s, args[3]);
+ tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
break;
#endif
case INDEX_op_qemu_ld_i32:
case INDEX_op_qemu_st_i32:
- tcg_out_r(s, *args++);
- tcg_out_r(s, *args++);
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- tcg_out_r(s, *args++);
+ if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+ tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
+ } else {
+ tcg_out_op_rrrm(s, opc, args[0], args[1], args[2], args[3]);
}
- tcg_out_i(s, *args++);
break;
case INDEX_op_qemu_ld_i64:
case INDEX_op_qemu_st_i64:
- tcg_out_r(s, *args++);
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_r(s, *args++);
- }
- tcg_out_r(s, *args++);
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- tcg_out_r(s, *args++);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
+ } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+ tcg_out_op_rrrm(s, opc, args[0], args[1], args[2], args[3]);
+ } else {
+ tcg_out_op_rrrrm(s, opc, args[0], args[1],
+ args[2], args[3], args[4]);
}
- tcg_out_i(s, *args++);
break;
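+    /*
+     * Three operand shapes cover the qemu_ld/st_i64 combinations; the
+     * final 32-bit operand is the memory-op descriptor (TCGMemOpIdx):
+     *
+     *   64-bit host:                    value, addr            -> rrm
+     *   32-bit host, 32-bit guest addr: value lo/hi, addr      -> rrrm
+     *   32-bit host, 64-bit guest addr: value lo/hi, addr lo/hi -> rrrrm
+     */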
case INDEX_op_mb:
+ tcg_out_op_v(s, opc);
break;
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
@@ -578,32 +762,24 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
default:
tcg_abort();
}
- old_code_ptr[1] = s->code_ptr - old_code_ptr;
}
-static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
- intptr_t arg2)
+static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
+ intptr_t offset)
{
- uint8_t *old_code_ptr = s->code_ptr;
-
- stack_bounds_check(arg1, arg2);
- if (type == TCG_TYPE_I32) {
- tcg_out_op_t(s, INDEX_op_st_i32);
- tcg_out_r(s, arg);
- tcg_out_r(s, arg1);
- tcg_out32(s, arg2);
- } else {
- tcg_debug_assert(type == TCG_TYPE_I64);
+ stack_bounds_check(base, offset);
+ switch (type) {
+ case TCG_TYPE_I32:
+ tcg_out_op_rrs(s, INDEX_op_st_i32, val, base, offset);
+ break;
#if TCG_TARGET_REG_BITS == 64
- tcg_out_op_t(s, INDEX_op_st_i64);
- tcg_out_r(s, arg);
- tcg_out_r(s, arg1);
- tcg_out32(s, arg2);
-#else
- TODO();
+ case TCG_TYPE_I64:
+ tcg_out_op_rrs(s, INDEX_op_st_i64, val, base, offset);
+ break;
#endif
+ default:
+ g_assert_not_reached();
}
- old_code_ptr[1] = s->code_ptr - old_code_ptr;
}
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index 9c0021a26f..52af6d8bc5 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -87,7 +87,7 @@
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_goto_ptr 0
-#define TCG_TARGET_HAS_direct_jump 1
+#define TCG_TARGET_HAS_direct_jump 0
#define TCG_TARGET_HAS_qemu_st8_i32 0
#if TCG_TARGET_REG_BITS == 64
@@ -163,8 +163,6 @@ typedef enum {
#define TCG_TARGET_CALL_STACK_OFFSET 0
#define TCG_TARGET_STACK_ALIGN 16
-void tci_disas(uint8_t opc);
-
#define HAVE_TCG_QEMU_TB_EXEC
/* We could notice __i386__ or __s390x__ and reduce the barriers depending
@@ -174,12 +172,7 @@ void tci_disas(uint8_t opc);
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
-static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
- uintptr_t jmp_rw, uintptr_t addr)
-{
- /* patch the branch destination */
- qatomic_set((int32_t *)jmp_rw, addr - (jmp_rx + 4));
- /* no need to flush icache explicitly */
-}
+/* Not defined; any remaining call should be eliminated at compile time. */
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
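+
+/*
+ * Leaving the function declared but undefined is deliberate: with
+ * TCG_TARGET_HAS_direct_jump 0 the generic code should never reach a
+ * call to it, and any path that survives dead-code elimination fails
+ * at link time instead of silently patching nothing.
+ */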
#endif /* TCG_TARGET_H */