From 636ddeb15c086e67c29751764eb7fa77450a1d66 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Mon, 24 May 2021 18:03:16 -0700
Subject: target/arm: Pass separate addend to FCMLA helpers

For SVE, we potentially have a 4th argument coming from the
movprfx instruction.  Currently we do not optimize movprfx,
so the problem is not visible.

Reviewed-by: Peter Maydell
Signed-off-by: Richard Henderson
Message-id: 20210525010358.152808-51-richard.henderson@linaro.org
Signed-off-by: Peter Maydell
---
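Note (editor's illustration, not part of the commit): the functional
change below is that each helper reads its fused-multiply-add addend
from a separate operand 'va' instead of reusing the destination 'vd',
so that an SVE movprfx-supplied addend register may later differ from
the destination; the existing callers keep behaviour unchanged by
passing the destination as the addend.  A minimal standalone C sketch
of the two forms, using hypothetical function names:

    #include <stdio.h>

    /* old form: the destination doubles as the addend */
    static float fcmla_step_old(float d, float n, float m)
    {
        return n * m + d;
    }

    /* new form: the addend 'a' may come from a register initialized
     * by movprfx, distinct from the destination */
    static float fcmla_step_new(float a, float n, float m)
    {
        return n * m + a;
    }

    int main(void)
    {
        float d = 1.0f, a = 10.0f;
        printf("old: %f\n", fcmla_step_old(d, 2.0f, 3.0f)); /* 2*3 + 1  = 7  */
        printf("new: %f\n", fcmla_step_new(a, 2.0f, 3.0f)); /* 2*3 + 10 = 16 */
        return 0;
    }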
 target/arm/vec_helper.c | 50 ++++++++++++++++++++------------------------------
 1 file changed, 20 insertions(+), 30 deletions(-)

(limited to 'target/arm/vec_helper.c')

diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index f88e572132..b19877e0d3 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -657,13 +657,11 @@ void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm,
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, void *va,
                          void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float16 *d = vd;
-    float16 *n = vn;
-    float16 *m = vm;
+    float16 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -680,19 +678,17 @@ void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
         float16 e4 = e2;
         float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag;
 
-        d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst);
-        d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst);
+        d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], 0, fpst);
+        d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], 0, fpst);
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, void *va,
                              void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float16 *d = vd;
-    float16 *n = vn;
-    float16 *m = vm;
+    float16 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -716,20 +712,18 @@ void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
             float16 e2 = n[H2(j + flip)];
             float16 e4 = e2;
 
-            d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst);
-            d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst);
+            d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], 0, fpst);
+            d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], 0, fpst);
         }
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, void *va,
                          void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float32 *d = vd;
-    float32 *n = vn;
-    float32 *m = vm;
+    float32 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -746,19 +740,17 @@ void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
         float32 e4 = e2;
         float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag;
 
-        d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst);
-        d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst);
+        d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], 0, fpst);
+        d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], 0, fpst);
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, void *va,
                              void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float32 *d = vd;
-    float32 *n = vn;
-    float32 *m = vm;
+    float32 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -782,20 +774,18 @@ void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
             float32 e2 = n[H4(j + flip)];
             float32 e4 = e2;
 
-            d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst);
-            d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst);
+            d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], 0, fpst);
+            d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], 0, fpst);
         }
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, void *va,
                          void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float64 *d = vd;
-    float64 *n = vn;
-    float64 *m = vm;
+    float64 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -812,8 +802,8 @@ void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
         float64 e4 = e2;
         float64 e3 = m[i + 1 - flip] ^ neg_imag;
 
-        d[i] = float64_muladd(e2, e1, d[i], 0, fpst);
-        d[i + 1] = float64_muladd(e4, e3, d[i + 1], 0, fpst);
+        d[i] = float64_muladd(e2, e1, a[i], 0, fpst);
+        d[i + 1] = float64_muladd(e4, e3, a[i + 1], 0, fpst);
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
-- 
cgit v1.2.3-55-g7522