author    | Paul Brook    | 2022-04-25 00:01:38 +0200
committer | Paolo Bonzini | 2022-09-01 20:16:33 +0200
commit    | 6f218d6e994bd8b229d6522899b6ac6cd98bdb47 (patch)
tree      | 1a8808fd1d1fe9acd815a462cc7d96fb45fa90bf /target
parent    | target/i386: reimplement AVX comparison helpers (diff)
target/i386: Dot product AVX helper prep
Make the dpps and dppd helpers AVX-ready.
I can't see any obvious reason why dppd shouldn't work on 256-bit ymm
registers, but both AMD and Intel agree that it's xmm-only.
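
For readers unfamiliar with the instruction: per 128-bit lane, dpps multiplies the element pairs selected by bits 4-7 of the immediate, sums the products pairwise, and broadcasts the sum to the result slots selected by bits 0-3. A minimal scalar sketch of those semantics (illustrative only, not QEMU code; the name dpps_lane is made up here, and the real helper uses softfloat so every operation rounds exactly once):

#include <stdint.h>

/* One 128-bit lane of DPPS, sketched with host floats. */
static void dpps_lane(float dst[4], const float src[4], uint8_t imm)
{
    float p[4];
    for (int j = 0; j < 4; j++) {
        /* Bits 4..7 of the immediate select which products contribute. */
        p[j] = (imm & (1 << (4 + j))) ? dst[j] * src[j] : 0.0f;
    }
    /* Pairwise sum, matching the (A+B)+(C+D) order the helper uses. */
    float sum = (p[0] + p[1]) + (p[2] + p[3]);
    for (int j = 0; j < 4; j++) {
        /* Bits 0..3 select which result elements receive the sum. */
        dst[j] = (imm & (1 << j)) ? sum : 0.0f;
    }
}

A ymm dpps simply runs this once per 128-bit lane with the same immediate, which is what the loop added by this patch implements.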
Signed-off-by: Paul Brook <paul@nowt.org>
Message-Id: <20220424220204.2493824-17-paul@nowt.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'target')
-rw-r--r-- | target/i386/ops_sse.h | 80
1 file changed, 45 insertions, 35 deletions
diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
index 7463ff1599..c9737e16b9 100644
--- a/target/i386/ops_sse.h
+++ b/target/i386/ops_sse.h
@@ -1903,55 +1903,64 @@ SSE_HELPER_I(helper_blendps, L, 4, FBLENDP)
 SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP)
 SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP)
 
-void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
+void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                               uint32_t mask)
 {
+    Reg *v = d;
     float32 prod1, prod2, temp2, temp3, temp4;
+    int i;
 
-    /*
-     * We must evaluate (A+B)+(C+D), not ((A+B)+C)+D
-     * to correctly round the intermediate results
-     */
-    if (mask & (1 << 4)) {
-        prod1 = float32_mul(d->ZMM_S(0), s->ZMM_S(0), &env->sse_status);
-    } else {
-        prod1 = float32_zero;
-    }
-    if (mask & (1 << 5)) {
-        prod2 = float32_mul(d->ZMM_S(1), s->ZMM_S(1), &env->sse_status);
-    } else {
-        prod2 = float32_zero;
-    }
-    temp2 = float32_add(prod1, prod2, &env->sse_status);
-    if (mask & (1 << 6)) {
-        prod1 = float32_mul(d->ZMM_S(2), s->ZMM_S(2), &env->sse_status);
-    } else {
-        prod1 = float32_zero;
-    }
-    if (mask & (1 << 7)) {
-        prod2 = float32_mul(d->ZMM_S(3), s->ZMM_S(3), &env->sse_status);
-    } else {
-        prod2 = float32_zero;
-    }
-    temp3 = float32_add(prod1, prod2, &env->sse_status);
-    temp4 = float32_add(temp2, temp3, &env->sse_status);
+    for (i = 0; i < 2 << SHIFT; i += 4) {
+        /*
+         * We must evaluate (A+B)+(C+D), not ((A+B)+C)+D
+         * to correctly round the intermediate results
+         */
+        if (mask & (1 << 4)) {
+            prod1 = float32_mul(v->ZMM_S(i), s->ZMM_S(i), &env->sse_status);
+        } else {
+            prod1 = float32_zero;
+        }
+        if (mask & (1 << 5)) {
+            prod2 = float32_mul(v->ZMM_S(i+1), s->ZMM_S(i+1), &env->sse_status);
+        } else {
+            prod2 = float32_zero;
+        }
+        temp2 = float32_add(prod1, prod2, &env->sse_status);
+        if (mask & (1 << 6)) {
+            prod1 = float32_mul(v->ZMM_S(i+2), s->ZMM_S(i+2), &env->sse_status);
+        } else {
+            prod1 = float32_zero;
+        }
+        if (mask & (1 << 7)) {
+            prod2 = float32_mul(v->ZMM_S(i+3), s->ZMM_S(i+3), &env->sse_status);
+        } else {
+            prod2 = float32_zero;
+        }
+        temp3 = float32_add(prod1, prod2, &env->sse_status);
+        temp4 = float32_add(temp2, temp3, &env->sse_status);
 
-    d->ZMM_S(0) = (mask & (1 << 0)) ? temp4 : float32_zero;
-    d->ZMM_S(1) = (mask & (1 << 1)) ? temp4 : float32_zero;
-    d->ZMM_S(2) = (mask & (1 << 2)) ? temp4 : float32_zero;
-    d->ZMM_S(3) = (mask & (1 << 3)) ? temp4 : float32_zero;
+        d->ZMM_S(i) = (mask & (1 << 0)) ? temp4 : float32_zero;
+        d->ZMM_S(i+1) = (mask & (1 << 1)) ? temp4 : float32_zero;
+        d->ZMM_S(i+2) = (mask & (1 << 2)) ? temp4 : float32_zero;
+        d->ZMM_S(i+3) = (mask & (1 << 3)) ? tem4 : float32_zero;
+    }
 }
 
-void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
+#if SHIFT == 1
+/* Oddly, there is no ymm version of dppd */
+void glue(helper_dppd, SUFFIX)(CPUX86State *env,
+                               Reg *d, Reg *s, uint32_t mask)
 {
+    Reg *v = d;
     float64 prod1, prod2, temp2;
 
     if (mask & (1 << 4)) {
-        prod1 = float64_mul(d->ZMM_D(0), s->ZMM_D(0), &env->sse_status);
+        prod1 = float64_mul(v->ZMM_D(0), s->ZMM_D(0), &env->sse_status);
     } else {
         prod1 = float64_zero;
     }
     if (mask & (1 << 5)) {
-        prod2 = float64_mul(d->ZMM_D(1), s->ZMM_D(1), &env->sse_status);
+        prod2 = float64_mul(v->ZMM_D(1), s->ZMM_D(1), &env->sse_status);
     } else {
         prod2 = float64_zero;
     }
@@ -1959,6 +1968,7 @@ void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
     d->ZMM_D(0) = (mask & (1 << 0)) ? temp2 : float64_zero;
     d->ZMM_D(1) = (mask & (1 << 1)) ? temp2 : float64_zero;
 }
+#endif
 
 void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
                                   uint32_t offset)
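
The comment preserved in the loop body, evaluate (A+B)+(C+D) rather than ((A+B)+C)+D, matters because float32 addition is not associative. A small standalone demonstration (plain C, compiled without -ffast-math so the parentheses are honored):

#include <stdio.h>

int main(void)
{
    /* Above 2^24 the float32 spacing becomes 2, so adding 1.0f to
     * 16777216.0f rounds back down under round-to-nearest-even. */
    float a = 16777216.0f, b = 1.0f, c = 1.0f, d = 1.0f;

    float seq  = ((a + b) + c) + d;  /* each +1.0f is rounded away: 16777216 */
    float pair = (a + b) + (c + d);  /* c + d = 2.0f survives:      16777218 */

    printf("sequential: %.1f\n", seq);
    printf("pairwise:   %.1f\n", pair);
    return 0;
}

Note also the new loop bound 2 << SHIFT: with SHIFT == 1 (xmm) it covers 4 floats, and with SHIFT == 2 (ymm) 8 floats, so the same helper body serves both widths. The #if SHIFT == 1 guard around helper_dppd then encodes the xmm-only restriction the commit message describes.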