-rw-r--r--  tcg/aarch64/tcg-target.inc.c  16
-rw-r--r--  tcg/arm/tcg-target.inc.c      16
-rw-r--r--  tcg/i386/tcg-target.inc.c      6
-rw-r--r--  tcg/mips/tcg-target.inc.c      6
-rw-r--r--  tcg/ppc/tcg-target.inc.c      14
-rw-r--r--  tcg/riscv/tcg-target.inc.c    16
-rw-r--r--  tcg/s390/tcg-target.inc.c     20
-rw-r--r--  tcg/tcg-ldst.inc.c            18
-rw-r--r--  tcg/tcg.c                      7
9 files changed, 75 insertions, 44 deletions
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index 8b93598bce..eefa929948 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -1395,14 +1395,15 @@ static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target)
tcg_out_insn(s, 3406, ADR, rd, offset);
}
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
TCGMemOp size = opc & MO_SIZE;
- bool ok = reloc_pc19(lb->label_ptr[0], s->code_ptr);
- tcg_debug_assert(ok);
+ if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) {
+ return false;
+ }
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
@@ -1416,16 +1417,18 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
}
tcg_out_goto(s, lb->raddr);
+ return true;
}
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
TCGMemOp size = opc & MO_SIZE;
- bool ok = reloc_pc19(lb->label_ptr[0], s->code_ptr);
- tcg_debug_assert(ok);
+ if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) {
+ return false;
+ }
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
@@ -1434,6 +1437,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_adr(s, TCG_REG_X4, lb->raddr);
tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
tcg_out_goto(s, lb->raddr);
+ return true;
}
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
index 6873b0cf95..12a65cc9a6 100644
--- a/tcg/arm/tcg-target.inc.c
+++ b/tcg/arm/tcg-target.inc.c
@@ -1372,15 +1372,16 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
label->label_ptr[0] = label_ptr;
}
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
TCGReg argreg, datalo, datahi;
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
void *func;
- bool ok = reloc_pc24(lb->label_ptr[0], s->code_ptr);
- tcg_debug_assert(ok);
+ if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
+ return false;
+ }
argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
if (TARGET_LONG_BITS == 64) {
@@ -1432,16 +1433,18 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
}
tcg_out_goto(s, COND_AL, lb->raddr);
+ return true;
}
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
TCGReg argreg, datalo, datahi;
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
- bool ok = reloc_pc24(lb->label_ptr[0], s->code_ptr);
- tcg_debug_assert(ok);
+ if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
+ return false;
+ }
argreg = TCG_REG_R0;
argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
@@ -1474,6 +1477,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
/* Tail-call to the helper, which will return to the fast path. */
tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
+ return true;
}
#endif /* SOFTMMU */
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index 1fa833840e..d5ed9f1ffd 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -1730,7 +1730,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
/*
* Generate code for the slow path for a load at the end of block
*/
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
TCGMemOpIdx oi = l->oi;
TCGMemOp opc = get_memop(oi);
@@ -1809,12 +1809,13 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
/* Jump to the code corresponding to next IR of qemu_st */
tcg_out_jmp(s, l->raddr);
+ return true;
}
/*
* Generate code for the slow path for a store at the end of block
*/
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
TCGMemOpIdx oi = l->oi;
TCGMemOp opc = get_memop(oi);
@@ -1877,6 +1878,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
/* "Tail call" to the helper, with the return address back inline. */
tcg_out_push(s, retaddr);
tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
+ return true;
}
#elif TCG_TARGET_REG_BITS == 32
# define x86_guest_base_seg 0
diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c
index 8a92e916dd..412cacdcb9 100644
--- a/tcg/mips/tcg-target.inc.c
+++ b/tcg/mips/tcg-target.inc.c
@@ -1338,7 +1338,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
}
}
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
TCGMemOpIdx oi = l->oi;
TCGMemOp opc = get_memop(oi);
@@ -1385,9 +1385,10 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
} else {
tcg_out_opc_reg(s, OPC_OR, v0, TCG_REG_V0, TCG_REG_ZERO);
}
+ return true;
}
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
TCGMemOpIdx oi = l->oi;
TCGMemOp opc = get_memop(oi);
@@ -1435,6 +1436,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true);
/* delay slot */
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+ return true;
}
#endif
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index 773690f1d9..c0923ced4f 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -1653,13 +1653,15 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
label->label_ptr[0] = lptr;
}
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
TCGReg hi, lo, arg = TCG_REG_R3;
- **lb->label_ptr |= reloc_pc14_val(*lb->label_ptr, s->code_ptr);
+ if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
+ return false;
+ }
tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
@@ -1695,16 +1697,19 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
}
tcg_out_b(s, 0, lb->raddr);
+ return true;
}
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
TCGMemOp s_bits = opc & MO_SIZE;
TCGReg hi, lo, arg = TCG_REG_R3;
- **lb->label_ptr |= reloc_pc14_val(*lb->label_ptr, s->code_ptr);
+ if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
+ return false;
+ }
tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
@@ -1753,6 +1758,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
tcg_out_b(s, 0, lb->raddr);
+ return true;
}
#endif /* SOFTMMU */
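The ppc hunks above drop the old in-place OR of reloc_pc14_val() and instead call a bool-returning reloc_pc14() that can report an out-of-range displacement. That helper is not shown in this patch; the sketch below is a minimal illustration of what such a relocation could look like, assuming a signed 16-bit, 4-byte-aligned conditional-branch displacement field (the name, mask, and layout are assumptions, not taken from this diff).

/* Sketch only: a bool-returning 14-bit branch relocation in the spirit
 * of the reloc_pc14() called above.  Encoding details are assumed for
 * illustration and are not part of this patch. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint32_t tcg_insn_unit;

static bool reloc_pc14(tcg_insn_unit *pc, const tcg_insn_unit *target)
{
    ptrdiff_t disp = (const char *)target - (const char *)pc;

    /* A conditional branch carries a signed, 4-byte-aligned 16-bit
       displacement, so the target must lie within +/-32 KiB of pc. */
    if (disp == (int16_t)(disp & ~3)) {
        *pc = (*pc & ~0xfffc) | (disp & 0xfffc);
        return true;    /* patched in place */
    }
    return false;       /* out of range: the slow path returns false
                           and TB generation must be restarted */
}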
diff --git a/tcg/riscv/tcg-target.inc.c b/tcg/riscv/tcg-target.inc.c
index b785f4acb7..2932505094 100644
--- a/tcg/riscv/tcg-target.inc.c
+++ b/tcg/riscv/tcg-target.inc.c
@@ -1065,7 +1065,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
label->label_ptr[0] = label_ptr[0];
}
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
TCGMemOpIdx oi = l->oi;
TCGMemOp opc = get_memop(oi);
@@ -1080,7 +1080,10 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
}
/* resolve label address */
- patch_reloc(l->label_ptr[0], R_RISCV_BRANCH, (intptr_t) s->code_ptr, 0);
+ if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH,
+ (intptr_t) s->code_ptr, 0)) {
+ return false;
+ }
/* call load helper */
tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
@@ -1092,9 +1095,10 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);
tcg_out_goto(s, l->raddr);
+ return true;
}
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
TCGMemOpIdx oi = l->oi;
TCGMemOp opc = get_memop(oi);
@@ -1111,7 +1115,10 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
}
/* resolve label address */
- patch_reloc(l->label_ptr[0], R_RISCV_BRANCH, (intptr_t) s->code_ptr, 0);
+ if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH,
+ (intptr_t) s->code_ptr, 0)) {
+ return false;
+ }
/* call store helper */
tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
@@ -1133,6 +1140,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
tcg_out_goto(s, l->raddr);
+ return true;
}
#endif /* CONFIG_SOFTMMU */
diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
index 7db90b3bae..3d6150b10e 100644
--- a/tcg/s390/tcg-target.inc.c
+++ b/tcg/s390/tcg-target.inc.c
@@ -1609,16 +1609,17 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
label->label_ptr[0] = label_ptr;
}
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
TCGReg addr_reg = lb->addrlo_reg;
TCGReg data_reg = lb->datalo_reg;
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
- bool ok = patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
- (intptr_t)s->code_ptr, 2);
- tcg_debug_assert(ok);
+ if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
+ (intptr_t)s->code_ptr, 2)) {
+ return false;
+ }
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
if (TARGET_LONG_BITS == 64) {
@@ -1630,18 +1631,20 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
+ return true;
}
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
TCGReg addr_reg = lb->addrlo_reg;
TCGReg data_reg = lb->datalo_reg;
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
- bool ok = patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
- (intptr_t)s->code_ptr, 2);
- tcg_debug_assert(ok);
+ if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
+ (intptr_t)s->code_ptr, 2)) {
+ return false;
+ }
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
if (TARGET_LONG_BITS == 64) {
@@ -1668,6 +1671,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
+ return true;
}
#else
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
diff --git a/tcg/tcg-ldst.inc.c b/tcg/tcg-ldst.inc.c
index 47f41b921b..05f9b3ccd6 100644
--- a/tcg/tcg-ldst.inc.c
+++ b/tcg/tcg-ldst.inc.c
@@ -38,19 +38,19 @@ typedef struct TCGLabelQemuLdst {
* Generate TB finalization at the end of block
*/
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
-static bool tcg_out_ldst_finalize(TCGContext *s)
+static int tcg_out_ldst_finalize(TCGContext *s)
{
TCGLabelQemuLdst *lb;
/* qemu_ld/st slow paths */
QSIMPLEQ_FOREACH(lb, &s->ldst_labels, next) {
- if (lb->is_ld) {
- tcg_out_qemu_ld_slow_path(s, lb);
- } else {
- tcg_out_qemu_st_slow_path(s, lb);
+ if (lb->is_ld
+ ? !tcg_out_qemu_ld_slow_path(s, lb)
+ : !tcg_out_qemu_st_slow_path(s, lb)) {
+ return -2;
}
/* Test for (pending) buffer overflow. The assumption is that any
@@ -58,10 +58,10 @@ static bool tcg_out_ldst_finalize(TCGContext *s)
the buffer completely. Thus we can test for overflow after
generating code without having to check during generation. */
if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
- return false;
+ return -1;
}
}
- return true;
+ return 0;
}
/*
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 0394e8ab34..f7bef51de8 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -128,7 +128,7 @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
static int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
-static bool tcg_out_ldst_finalize(TCGContext *s);
+static int tcg_out_ldst_finalize(TCGContext *s);
#endif
#define TCG_HIGHWATER 1024
@@ -4000,8 +4000,9 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
/* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
- if (!tcg_out_ldst_finalize(s)) {
- return -1;
+ i = tcg_out_ldst_finalize(s);
+ if (i < 0) {
+ return i;
}
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
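Taken together, tcg_out_ldst_finalize() now reports two distinct failures, -2 for a slow-path relocation that cannot be encoded and -1 for crossing the code buffer's high-water mark, and tcg_gen_code() forwards either value unchanged. How the outer translator reacts is outside this patch; the enum below merely gives the codes symbolic names as a reading aid (the identifiers and the recovery actions suggested in the comments are assumptions, not code from this series).

/* Sketch only: symbolic names for the return values introduced above.
 * The patch itself uses the bare constants 0, -1 and -2. */
enum ldst_finalize_result {
    LDST_OK             =  0,   /* every slow path emitted and patched  */
    LDST_BUFFER_FULL    = -1,   /* code_ptr crossed code_gen_highwater;
                                   presumably flush and retranslate     */
    LDST_RELOC_OVERFLOW = -2,   /* a slow-path branch did not fit;
                                   presumably retry with a smaller TB   */
};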