Diffstat (limited to 'tcg/tcg-op-gvec.c')
-rw-r--r--  tcg/tcg-op-gvec.c  164
1 file changed, 103 insertions(+), 61 deletions(-)
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index 327d9588e0..049a55e700 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -326,11 +326,34 @@ void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
    in units of LNSZ.  This limits the expansion of inline code.  */
 static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz)
 {
-    if (oprsz % lnsz == 0) {
-        uint32_t lnct = oprsz / lnsz;
-        return lnct >= 1 && lnct <= MAX_UNROLL;
+    uint32_t q, r;
+
+    if (oprsz < lnsz) {
+        return false;
+    }
+
+    q = oprsz / lnsz;
+    r = oprsz % lnsz;
+    tcg_debug_assert((r & 7) == 0);
+
+    if (lnsz < 16) {
+        /* For sizes below 16, accept no remainder. */
+        if (r != 0) {
+            return false;
+        }
+    } else {
+        /*
+         * Recall that ARM SVE allows vector sizes that are not a
+         * power of 2, but always a multiple of 16.  The intent is
+         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
+         * In addition, expand_clr needs to handle a multiple of 8.
+         * Thus we can handle the tail with one more operation per
+         * diminishing power of 2.
+         */
+        q += ctpop32(r);
     }
-    return false;
+
+    return q <= MAX_UNROLL;
 }
 
 static void expand_clr(uint32_t dofs, uint32_t maxsz);
@@ -402,22 +425,31 @@ static void gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
 static TCGType choose_vector_type(const TCGOpcode *list, unsigned vece,
                                   uint32_t size, bool prefer_i64)
 {
-    if (TCG_TARGET_HAS_v256 && check_size_impl(size, 32)) {
-        /*
-         * Recall that ARM SVE allows vector sizes that are not a
-         * power of 2, but always a multiple of 16.  The intent is
-         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
-         * It is hard to imagine a case in which v256 is supported
-         * but v128 is not, but check anyway.
-         */
-        if (tcg_can_emit_vecop_list(list, TCG_TYPE_V256, vece)
-            && (size % 32 == 0
-                || tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece))) {
-            return TCG_TYPE_V256;
-        }
-    }
-    if (TCG_TARGET_HAS_v128 && check_size_impl(size, 16)
-        && tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece)) {
+    /*
+     * Recall that ARM SVE allows vector sizes that are not a
+     * power of 2, but always a multiple of 16.  The intent is
+     * that e.g. size == 80 would be expanded with 2x32 + 1x16.
+     * It is hard to imagine a case in which v256 is supported
+     * but v128 is not, but check anyway.
+     * In addition, expand_clr needs to handle a multiple of 8.
+     */
+    if (TCG_TARGET_HAS_v256 &&
+        check_size_impl(size, 32) &&
+        tcg_can_emit_vecop_list(list, TCG_TYPE_V256, vece) &&
+        (!(size & 16) ||
+         (TCG_TARGET_HAS_v128 &&
+          tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece))) &&
+        (!(size & 8) ||
+         (TCG_TARGET_HAS_v64 &&
+          tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)))) {
+        return TCG_TYPE_V256;
+    }
+    if (TCG_TARGET_HAS_v128 &&
+        check_size_impl(size, 16) &&
+        tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece) &&
+        (!(size & 8) ||
+         (TCG_TARGET_HAS_v64 &&
+          tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)))) {
         return TCG_TYPE_V128;
     }
     if (TCG_TARGET_HAS_v64 && !prefer_i64 && check_size_impl(size, 8)
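[Editor's note: a minimal standalone sketch of the new check_size_impl() accounting, for readers skimming the diff. It assumes MAX_UNROLL == 4, the value defined in tcg/tcg-op-gvec.c, and models ctpop32() with __builtin_popcount(); the helper name and the main() driver are illustrative, not part of the patch.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_UNROLL 4   /* assumed: the limit used by tcg/tcg-op-gvec.c */

/* Sketch of the patched check_size_impl(), minus the debug assert. */
static bool check_size_sketch(uint32_t oprsz, uint32_t lnsz)
{
    uint32_t q, r;

    if (oprsz < lnsz) {
        return false;
    }
    q = oprsz / lnsz;
    r = oprsz % lnsz;
    if (lnsz < 16) {
        /* For sizes below 16, accept no remainder. */
        return r == 0 && q <= MAX_UNROLL;
    }
    /* One extra op per set bit of the tail, e.g. 80 = 2x32 + 1x16. */
    q += __builtin_popcount(r);
    return q <= MAX_UNROLL;
}

int main(void)
{
    printf("%d\n", check_size_sketch(80, 32));   /* 1: 2x32 + 1x16 = 3 ops */
    printf("%d\n", check_size_sketch(24, 16));   /* 1: 1x16 + 1x8 = 2 ops */
    printf("%d\n", check_size_sketch(160, 32));  /* 0: 5 ops > MAX_UNROLL */
    return 0;
}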
@@ -432,6 +464,18 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
 {
     uint32_t i = 0;
 
+    tcg_debug_assert(oprsz >= 8);
+
+    /*
+     * This may be expand_clr for the tail of an operation, e.g.
+     * oprsz == 8 && maxsz == 64.  The first 8 bytes of this store
+     * are misaligned wrt the maximum vector size, so do that first.
+     */
+    if (dofs & 8) {
+        tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V64);
+        i += 8;
+    }
+
     switch (type) {
     case TCG_TYPE_V256:
         /*
@@ -619,17 +663,22 @@ static void expand_clr(uint32_t dofs, uint32_t maxsz)
 
 /* Expand OPSZ bytes worth of two-operand operations using i32 elements.  */
 static void expand_2_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
-                         void (*fni)(TCGv_i32, TCGv_i32))
+                         bool load_dest, void (*fni)(TCGv_i32, TCGv_i32))
 {
     TCGv_i32 t0 = tcg_temp_new_i32();
+    TCGv_i32 t1 = tcg_temp_new_i32();
     uint32_t i;
 
     for (i = 0; i < oprsz; i += 4) {
         tcg_gen_ld_i32(t0, cpu_env, aofs + i);
-        fni(t0, t0);
-        tcg_gen_st_i32(t0, cpu_env, dofs + i);
+        if (load_dest) {
+            tcg_gen_ld_i32(t1, cpu_env, dofs + i);
+        }
+        fni(t1, t0);
+        tcg_gen_st_i32(t1, cpu_env, dofs + i);
     }
     tcg_temp_free_i32(t0);
+    tcg_temp_free_i32(t1);
 }
 
 static void expand_2i_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
@@ -749,17 +798,22 @@ static void expand_4_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
 
 /* Expand OPSZ bytes worth of two-operand operations using i64 elements.  */
 static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
-                         void (*fni)(TCGv_i64, TCGv_i64))
+                         bool load_dest, void (*fni)(TCGv_i64, TCGv_i64))
 {
     TCGv_i64 t0 = tcg_temp_new_i64();
+    TCGv_i64 t1 = tcg_temp_new_i64();
     uint32_t i;
 
     for (i = 0; i < oprsz; i += 8) {
         tcg_gen_ld_i64(t0, cpu_env, aofs + i);
-        fni(t0, t0);
-        tcg_gen_st_i64(t0, cpu_env, dofs + i);
+        if (load_dest) {
+            tcg_gen_ld_i64(t1, cpu_env, dofs + i);
+        }
+        fni(t1, t0);
+        tcg_gen_st_i64(t1, cpu_env, dofs + i);
     }
     tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
 }
 
 static void expand_2i_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
@@ -880,17 +934,23 @@ static void expand_4_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
 
 /* Expand OPSZ bytes worth of two-operand operations using host vectors.  */
 static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t tysz, TCGType type,
+                         bool load_dest,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec))
 {
     TCGv_vec t0 = tcg_temp_new_vec(type);
+    TCGv_vec t1 = tcg_temp_new_vec(type);
     uint32_t i;
 
     for (i = 0; i < oprsz; i += tysz) {
         tcg_gen_ld_vec(t0, cpu_env, aofs + i);
-        fni(vece, t0, t0);
-        tcg_gen_st_vec(t0, cpu_env, dofs + i);
+        if (load_dest) {
+            tcg_gen_ld_vec(t1, cpu_env, dofs + i);
+        }
+        fni(vece, t1, t0);
+        tcg_gen_st_vec(t1, cpu_env, dofs + i);
     }
     tcg_temp_free_vec(t0);
+    tcg_temp_free_vec(t1);
 }
 
 /* Expand OPSZ bytes worth of two-vector operands and an immediate operand
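[Editor's note: the load_dest flag threaded through the expand_2_* helpers lets a two-operand op observe the previous destination value, which read-modify-write patterns need. A hypothetical front-end use might look like the sketch below; the op name gen_acc8_i64 and this GVecGen2 instance are illustrative, not part of the patch, and a complete op would also supply .fniv/.fno fallbacks.]

/* Hypothetical: accumulate bytes from aofs into dofs in place. */
static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a)
{
    /* With load_dest set, d arrives holding the old destination. */
    tcg_gen_vec_add8_i64(d, d, a);
}

static const GVecGen2 acc8_op = {
    .fni8 = gen_acc8_i64,
    .load_dest = true,    /* expand_2_i64 loads dofs + i into t1 first */
    .vece = MO_8,
};

/* ... tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &acc8_op); */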
@@ -1044,7 +1104,8 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
          * that e.g. size == 80 would be expanded with 2x32 + 1x16.
          */
         some = QEMU_ALIGN_DOWN(oprsz, 32);
-        expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, g->fniv);
+        expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
+                     g->load_dest, g->fniv);
         if (some == oprsz) {
             break;
         }
@@ -1054,17 +1115,19 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
         maxsz -= some;
         /* fallthru */
     case TCG_TYPE_V128:
-        expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, g->fniv);
+        expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
+                     g->load_dest, g->fniv);
         break;
     case TCG_TYPE_V64:
-        expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, g->fniv);
+        expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
+                     g->load_dest, g->fniv);
         break;
     case 0:
         if (g->fni8 && check_size_impl(oprsz, 8)) {
-            expand_2_i64(dofs, aofs, oprsz, g->fni8);
+            expand_2_i64(dofs, aofs, oprsz, g->load_dest, g->fni8);
         } else if (g->fni4 && check_size_impl(oprsz, 4)) {
-            expand_2_i32(dofs, aofs, oprsz, g->fni4);
+            expand_2_i32(dofs, aofs, oprsz, g->load_dest, g->fni4);
         } else {
             assert(g->fno != NULL);
             tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
@@ -1541,32 +1604,11 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
     }
 }
 
-void tcg_gen_gvec_dup64i(uint32_t dofs, uint32_t oprsz,
-                         uint32_t maxsz, uint64_t x)
-{
-    check_size_align(oprsz, maxsz, dofs);
-    do_dup(MO_64, dofs, oprsz, maxsz, NULL, NULL, x);
-}
-
-void tcg_gen_gvec_dup32i(uint32_t dofs, uint32_t oprsz,
-                         uint32_t maxsz, uint32_t x)
-{
-    check_size_align(oprsz, maxsz, dofs);
-    do_dup(MO_32, dofs, oprsz, maxsz, NULL, NULL, x);
-}
-
-void tcg_gen_gvec_dup16i(uint32_t dofs, uint32_t oprsz,
-                         uint32_t maxsz, uint16_t x)
-{
-    check_size_align(oprsz, maxsz, dofs);
-    do_dup(MO_16, dofs, oprsz, maxsz, NULL, NULL, x);
-}
-
-void tcg_gen_gvec_dup8i(uint32_t dofs, uint32_t oprsz,
-                        uint32_t maxsz, uint8_t x)
+void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t oprsz,
+                          uint32_t maxsz, uint64_t x)
 {
     check_size_align(oprsz, maxsz, dofs);
-    do_dup(MO_8, dofs, oprsz, maxsz, NULL, NULL, x);
+    do_dup(vece, dofs, oprsz, maxsz, NULL, NULL, x);
 }
 
 void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -2319,7 +2361,7 @@ void tcg_gen_gvec_xor(unsigned vece, uint32_t dofs, uint32_t aofs,
     };
 
     if (aofs == bofs) {
-        tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, 0);
+        tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, 0);
     } else {
         tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
     }
@@ -2336,7 +2378,7 @@ void tcg_gen_gvec_andc(unsigned vece, uint32_t dofs, uint32_t aofs,
     };
 
     if (aofs == bofs) {
-        tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, 0);
+        tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, 0);
     } else {
         tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
     }
@@ -2353,7 +2395,7 @@ void tcg_gen_gvec_orc(unsigned vece, uint32_t dofs, uint32_t aofs,
     };
 
     if (aofs == bofs) {
-        tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, -1);
+        tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, -1);
     } else {
         tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
     }
@@ -2404,7 +2446,7 @@ void tcg_gen_gvec_eqv(unsigned vece, uint32_t dofs, uint32_t aofs,
     };
 
     if (aofs == bofs) {
-        tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, -1);
+        tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, -1);
     } else {
         tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
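[Editor's note: the four size-specific dup entry points collapse into one; callers now pass the element size explicitly. The call sites below are illustrative, not from this diff.]

/* Before: one entry point per element size. */
tcg_gen_gvec_dup32i(dofs, oprsz, maxsz, 0xdeadbeef);

/* After: the element size is the vece argument. */
tcg_gen_gvec_dup_imm(MO_32, dofs, oprsz, maxsz, 0xdeadbeef);

/*
 * For all-zeros or all-ones constants the element size does not change
 * the bytes written, which is why the xor/andc/orc/eqv callers above
 * can simply use MO_64.
 */
tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, 0);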