Diffstat (limited to 'tcg/tcg-op-gvec.c')
-rw-r--r-- tcg/tcg-op-gvec.c | 61
1 file changed, 54 insertions(+), 7 deletions(-)
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index 3707c0effb..7ebd9e8298 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -1570,18 +1570,16 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
tcg_temp_free_i64(in);
}
- } else {
+ } else if (vece == 4) {
/* 128-bit duplicate. */
- /* ??? Dup to 256-bit vector. */
int i;

- tcg_debug_assert(vece == 4);
tcg_debug_assert(oprsz >= 16);
if (TCG_TARGET_HAS_v128) {
TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V128);

tcg_gen_ld_vec(in, cpu_env, aofs);
- for (i = 0; i < oprsz; i += 16) {
+ for (i = (aofs == dofs) * 16; i < oprsz; i += 16) {
tcg_gen_st_vec(in, cpu_env, dofs + i);
}
tcg_temp_free_vec(in);
@@ -1591,7 +1589,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,

tcg_gen_ld_i64(in0, cpu_env, aofs);
tcg_gen_ld_i64(in1, cpu_env, aofs + 8);
- for (i = 0; i < oprsz; i += 16) {
+ for (i = (aofs == dofs) * 16; i < oprsz; i += 16) {
tcg_gen_st_i64(in0, cpu_env, dofs + i);
tcg_gen_st_i64(in1, cpu_env, dofs + i + 8);
}
@@ -1601,6 +1599,54 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
}
if (oprsz < maxsz) {
expand_clr(dofs + oprsz, maxsz - oprsz);
}
+ } else if (vece == 5) {
+ /* 256-bit duplicate. */
+ int i;
+
+ tcg_debug_assert(oprsz >= 32);
+ tcg_debug_assert(oprsz % 32 == 0);
+ if (TCG_TARGET_HAS_v256) {
+ TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V256);
+
+ tcg_gen_ld_vec(in, cpu_env, aofs);
+ for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
+ tcg_gen_st_vec(in, cpu_env, dofs + i);
+ }
+ tcg_temp_free_vec(in);
+ } else if (TCG_TARGET_HAS_v128) {
+ TCGv_vec in0 = tcg_temp_new_vec(TCG_TYPE_V128);
+ TCGv_vec in1 = tcg_temp_new_vec(TCG_TYPE_V128);
+
+ tcg_gen_ld_vec(in0, cpu_env, aofs);
+ tcg_gen_ld_vec(in1, cpu_env, aofs + 16);
+ for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
+ tcg_gen_st_vec(in0, cpu_env, dofs + i);
+ tcg_gen_st_vec(in1, cpu_env, dofs + i + 16);
+ }
+ tcg_temp_free_vec(in0);
+ tcg_temp_free_vec(in1);
+ } else {
+ TCGv_i64 in[4];
+ int j;
+
+ for (j = 0; j < 4; ++j) {
+ in[j] = tcg_temp_new_i64();
+ tcg_gen_ld_i64(in[j], cpu_env, aofs + j * 8);
+ }
+ for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
+ for (j = 0; j < 4; ++j) {
+ tcg_gen_st_i64(in[j], cpu_env, dofs + i + j * 8);
+ }
+ }
+ for (j = 0; j < 4; ++j) {
+ tcg_temp_free_i64(in[j]);
+ }
+ }
+ if (oprsz < maxsz) {
+ expand_clr(dofs + oprsz, maxsz - oprsz);
+ }
+ } else {
+ g_assert_not_reached();
}
}
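The `(aofs == dofs) * 16` and `(aofs == dofs) * 32` loop initializers introduced above encode an in-place special case: the source block is loaded into temporaries before any store is emitted, and when the source sits at the start of the destination the first block already holds the value, so the redundant first store is skipped. A host-side C sketch of the store pattern for the 256-bit case (a hypothetical dup_256 helper for illustration only; the patch itself emits TCG loads and stores against cpu_env):

#include <stdint.h>
#include <string.h>

/* Hypothetical illustration: broadcast the 32-byte block at env+aofs
 * across oprsz bytes starting at env+dofs. */
static void dup_256(uint8_t *env, uint32_t dofs, uint32_t aofs,
                    uint32_t oprsz)
{
    uint8_t block[32];

    memcpy(block, env + aofs, 32);      /* load once, before any store */

    /* If aofs == dofs, bytes [0, 32) already hold the value, so the
     * loop starts at 32 and skips the redundant first store. */
    for (uint32_t i = (aofs == dofs) * 32; i < oprsz; i += 32) {
        memcpy(env + dofs + i, block, 32);
    }
}

The vece == 4 path is the same pattern with a 16-byte block.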
@@ -2264,12 +2310,13 @@ static void gen_absv_mask(TCGv_i64 d, TCGv_i64 b, unsigned vece)
tcg_gen_muli_i64(t, t, (1 << nbit) - 1);

/*
- * Invert (via xor -1) and add one (via sub -1).
+ * Invert (via xor -1) and add one.
* Because of the ordering the msb is cleared,
* so we never have carry into the next element.
*/
tcg_gen_xor_i64(d, b, t);
- tcg_gen_sub_i64(d, d, t);
+ tcg_gen_andi_i64(t, t, dup_const(vece, 1));
+ tcg_gen_add_i64(d, d, t);
tcg_temp_free_i64(t);
}
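The gen_absv_mask change fixes a cross-element borrow. `t` holds all-ones in each negative element, and the old sequence computed abs as `(b ^ t) - t`, intending `~x + 1` per element; but the subtraction is a single 64-bit op, and subtracting all-ones from an element whose msb was just cleared by the xor always borrows from the element above it. The new sequence instead adds `t & dup_const(vece, 1)`, a one in each negative element, which cannot carry out of an element precisely because the xor cleared that element's msb. A self-contained check of the MO_8 case, mirroring the masks the function builds (an editorial example, not QEMU code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t dup1 = 0x0101010101010101ull;  /* dup_const(MO_8, 1) */
    uint64_t b = 0x0180;        /* byte 0 = 0x80 (-128), byte 1 = 0x01 (+1) */

    uint64_t t = (b >> 7) & dup1;   /* shri + andi: 1 per negative byte */
    t *= 0xff;                      /* muli: all-ones per negative byte */

    uint64_t x = b ^ t;             /* invert the negative bytes */
    uint64_t via_sub = x - t;       /* old code */
    uint64_t via_add = x + (t & dup1);  /* new code */

    printf("sub: %#06llx  add: %#06llx\n",
           (unsigned long long)via_sub, (unsigned long long)via_add);
    /* Prints "sub: 0x0080  add: 0x0180": the subtraction's borrow out of
     * byte 0 corrupts byte 1, while the masked add leaves both bytes
     * correct (abs(-128) wraps to 0x80, abs(+1) = 0x01). */
    return 0;
}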