author     Richard Henderson    2021-08-26 22:08:54 +0200
committer  Richard Henderson    2021-10-29 05:55:07 +0200
commit     3f2b1f8376c11327ca2ea54cdc1085d4d4c1d97c (patch)
tree       6a9a946e64cc4c95b87ed68675d7de108648cb12
parent     tcg/optimize: Optimize sign extensions (diff)
tcg/optimize: Propagate sign info for logical operations
Sign repetitions are perforce all identical, whether they are 1 or 0.
Bitwise operations preserve the relative quantity of the repetitions.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
-rw-r--r--  tcg/optimize.c  29
1 file changed, 29 insertions, 0 deletions
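The claim in the commit message can be checked with a small standalone sketch (plain C, not QEMU code; s_mask_of() is a hypothetical helper that models an s_mask as the left-aligned run of bits equal to the sign bit). Within the intersection of the two inputs' runs, both inputs are constant, so the result of any bitwise operation is constant there too; the intersection therefore remains a valid run for the result, which is the "s1 & s2" rule each hunk below applies:

/* Standalone sketch, not QEMU code: model an s_mask as the left-aligned
 * run of bits that are copies of the sign bit (sign bit included). */
#include <stdint.h>
#include <stdio.h>

static uint64_t s_mask_of(int64_t v)
{
    uint64_t sign = (uint64_t)v >> 63;
    uint64_t m = 0;

    for (int i = 63; i >= 0; i--) {
        if ((((uint64_t)v >> i) & 1) != sign) {
            break;
        }
        m |= 1ull << i;
    }
    return m;
}

int main(void)
{
    int64_t a = -5;   /* ...11111011: sign bit repeated down to bit 3 */
    int64_t b = 100;  /* ...01100100: sign bit repeated down to bit 7 */
    uint64_t s = s_mask_of(a) & s_mask_of(b);   /* run down to bit 7 */

    /* Within s both inputs are constant runs, so any bitwise result is
     * a constant run there as well: s stays valid for the output. */
    printf("and: %d\n", (s_mask_of(a & b) & s) == s);
    printf("or:  %d\n", (s_mask_of(a | b) & s) == s);
    printf("xor: %d\n", (s_mask_of(a ^ b) & s) == s);
    return 0;
}

All three checks print 1. This is only a model: QEMU tracks s_mask per temp inside the optimizer and maintains it incrementally, but the intersection rule is the same one the patch installs.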
diff --git a/tcg/optimize.c b/tcg/optimize.c
index ef202abbcb..de1abd9cc3 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -968,6 +968,13 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
     ctx->z_mask = z1 & z2;
 
     /*
+     * Sign repetitions are perforce all identical, whether they are 1 or 0.
+     * Bitwise operations preserve the relative quantity of the repetitions.
+     */
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
+
+    /*
      * Known-zeros does not imply known-ones. Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
@@ -1002,6 +1009,8 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
     }
     ctx->z_mask = z1;
 
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
     return fold_masks(ctx, op);
 }
@@ -1300,6 +1309,9 @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
+
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
     return false;
 }
@@ -1487,6 +1499,8 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
     ctx->z_mask = arg_info(op->args[3])->z_mask
                 | arg_info(op->args[4])->z_mask;
+    ctx->s_mask = arg_info(op->args[3])->s_mask
+                & arg_info(op->args[4])->s_mask;
 
     if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
         uint64_t tv = arg_info(op->args[3])->val;
@@ -1585,6 +1599,9 @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
         fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
+
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
     return false;
 }
@@ -1614,6 +1631,9 @@ static bool fold_nor(OptContext *ctx, TCGOp *op)
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
+
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
     return false;
 }
@@ -1623,6 +1643,8 @@ static bool fold_not(OptContext *ctx, TCGOp *op)
         return true;
     }
 
+    ctx->s_mask = arg_info(op->args[1])->s_mask;
+
     /* Because of fold_to_not, we want to always return true, via finish. */
     finish_folding(ctx, op);
     return true;
@@ -1638,6 +1660,8 @@ static bool fold_or(OptContext *ctx, TCGOp *op)
     ctx->z_mask = arg_info(op->args[1])->z_mask
                 | arg_info(op->args[2])->z_mask;
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
     return fold_masks(ctx, op);
 }
@@ -1649,6 +1673,9 @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
         fold_ix_to_not(ctx, op, 0)) {
         return true;
     }
+
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
     return false;
 }
@@ -1922,6 +1949,8 @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
     ctx->z_mask = arg_info(op->args[1])->z_mask
                 | arg_info(op->args[2])->z_mask;
+    ctx->s_mask = arg_info(op->args[1])->s_mask
+                & arg_info(op->args[2])->s_mask;
     return fold_masks(ctx, op);
 }
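The complemented forms patched above (andc, orc, nand, nor, eqv, not) rely on the additional fact that complement flips every bit, so a run of sign-bit copies stays a run of the same length. A quick check under the same hypothetical model, repeated here so it compiles on its own:

/* Standalone sketch, same hypothetical s_mask_of() model as above. */
#include <assert.h>
#include <stdint.h>

static uint64_t s_mask_of(int64_t v)
{
    uint64_t sign = (uint64_t)v >> 63;
    uint64_t m = 0;

    for (int i = 63; i >= 0; i--) {
        if ((((uint64_t)v >> i) & 1) != sign) {
            break;
        }
        m |= 1ull << i;
    }
    return m;
}

int main(void)
{
    int64_t a = -5, b = 100;
    uint64_t s = s_mask_of(a) & s_mask_of(b);

    assert(s_mask_of(~a) == s_mask_of(a));      /* not: run unchanged */
    assert((s_mask_of(~(a & b)) & s) == s);     /* nand */
    assert((s_mask_of(~(a | b)) & s) == s);     /* nor  */
    assert((s_mask_of(~(a ^ b)) & s) == s);     /* eqv  */
    assert((s_mask_of(a & ~b) & s) == s);       /* andc */
    assert((s_mask_of(a | ~b) & s) == s);       /* orc  */
    return 0;
}

This matches the patch: every two-operand form takes the intersection of the operands' s_masks, and fold_not simply forwards the operand's s_mask unchanged.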