author | Richard Henderson <richard.henderson@linaro.org> | 2022-07-08 17:15:21 +0200
---|---|---
committer | Peter Maydell <peter.maydell@linaro.org> | 2022-07-11 14:43:47 +0200
commit | 3916841ac75e74f54fb8cce0292e7e1e9851fb22 (patch) | |
tree | 01bb64f8925d2079b99818f885bd929457ab58c4 /target/arm/sme_helper.c | |
parent | target/arm: Implement BFMOPA, BFMOPS (diff) | |
target/arm: Implement FMOPA, FMOPS (widening)
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-27-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target/arm/sme_helper.c')
-rw-r--r-- | target/arm/sme_helper.c | 74
1 file changed, 74 insertions, 0 deletions
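The helper added in the diff below implements the widening variant of FMOPA/FMOPS: each fp32 element of the ZA tile accumulates the dot product of a pair of fp16 elements from the corresponding row of Zn and column of Zm. As a rough reference model of those semantics (a sketch with hypothetical names; it widens the fp16 inputs to host floats for clarity, skips inactive lanes instead of zeroing them, and is not bit-exact, because the real helper rounds the dot product exactly once):

```c
#include <stdbool.h>

/*
 * Illustrative reference model, not from the patch: every fp32 tile
 * element ZA[r][c] accumulates the dot product of the fp16 pair in
 * row r of Zn with the fp16 pair in column c of Zm.  Inputs are
 * widened to float for clarity; edge cases (NaN/inf propagation
 * through masked lanes) and the single-rounding FPDot behaviour of
 * the real helper are deliberately ignored here.
 */
static void fmopa_h_ref(float *za, int dim,
                        const float (*zn)[2], const float (*zm)[2],
                        const bool (*pn)[2], const bool (*pm)[2],
                        bool negate /* FMOPS rather than FMOPA */)
{
    for (int r = 0; r < dim; r++) {
        for (int c = 0; c < dim; c++) {
            double dot = 0.0;
            for (int k = 0; k < 2; k++) {
                /* A product contributes only when both operands
                 * are predicated active. */
                if (pn[r][k] && pm[c][k]) {
                    double e1 = negate ? -zn[r][k] : zn[r][k];
                    dot += e1 * (double)zm[c][k];
                }
            }
            za[r * dim + c] += (float)dot;
        }
    }
}
```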
```diff
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index 690a53eee2..302f89c30b 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -1008,6 +1008,80 @@ static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
     return pair;
 }
 
+static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2,
+                          float_status *s_std, float_status *s_odd)
+{
+    float64 e1r = float16_to_float64(e1 & 0xffff, true, s_std);
+    float64 e1c = float16_to_float64(e1 >> 16, true, s_std);
+    float64 e2r = float16_to_float64(e2 & 0xffff, true, s_std);
+    float64 e2c = float16_to_float64(e2 >> 16, true, s_std);
+    float64 t64;
+    float32 t32;
+
+    /*
+     * The ARM pseudocode function FPDot performs both multiplies
+     * and the add with a single rounding operation.  Emulate this
+     * by performing the first multiply in round-to-odd, then doing
+     * the second multiply as fused multiply-add, and rounding to
+     * float32 all in one step.
+     */
+    t64 = float64_mul(e1r, e2r, s_odd);
+    t64 = float64r32_muladd(e1c, e2c, t64, 0, s_std);
+
+    /* This conversion is exact, because we've already rounded. */
+    t32 = float64_to_float32(t64, s_std);
+
+    /* The final accumulation step is not fused. */
+    return float32_add(sum, t32, s_std);
+}
+
+void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn,
+                         void *vpm, void *vst, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_maxsz(desc);
+    uint32_t neg = simd_data(desc) * 0x80008000u;
+    uint16_t *pn = vpn, *pm = vpm;
+    float_status fpst_odd, fpst_std;
+
+    /*
+     * Make a copy of float_status because this operation does not
+     * update the cumulative fp exception status.  It also produces
+     * default nans.  Make a second copy with round-to-odd --
+     * see above.
+     */
+    fpst_std = *(float_status *)vst;
+    set_default_nan_mode(true, &fpst_std);
+    fpst_odd = fpst_std;
+    set_float_rounding_mode(float_round_to_odd, &fpst_odd);
+
+    for (row = 0; row < oprsz; ) {
+        uint16_t prow = pn[H2(row >> 4)];
+        do {
+            void *vza_row = vza + tile_vslice_offset(row);
+            uint32_t n = *(uint32_t *)(vzn + H1_4(row));
+
+            n = f16mop_adj_pair(n, prow, neg);
+
+            for (col = 0; col < oprsz; ) {
+                uint16_t pcol = pm[H2(col >> 4)];
+                do {
+                    if (prow & pcol & 0b0101) {
+                        uint32_t *a = vza_row + H1_4(col);
+                        uint32_t m = *(uint32_t *)(vzm + H1_4(col));
+
+                        m = f16mop_adj_pair(m, pcol, 0);
+                        *a = f16_dotadd(*a, n, m, &fpst_std, &fpst_odd);
+                    }
+                    col += 4;
+                    pcol >>= 4;
+                } while (col & 15);
+            }
+            row += 4;
+            prow >>= 4;
+        } while (row & 15);
+    }
+}
+
 void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
                         void *vpm, uint32_t desc)
 {
```
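The round-to-odd rounding mode in f16_dotadd is what makes the two-step evaluation behave like the single-rounded FPDot of the pseudocode. A minimal standalone demonstration of the double-rounding hazard it guards against, using host float/double rather than QEMU's softfloat (illustrative, not from the patch):

```c
#include <stdio.h>

/*
 * Double-rounding demo (illustrative, not from the patch).  The exact
 * value 1 + 2^-24 + 2^-60 rounds directly to float as 1 + 2^-23, but
 * it is not representable in double: rounding it to nearest-double
 * first drops the 2^-60 tail, and the second rounding then lands on
 * a tie that resolves to even, giving 1.0f instead.
 */
int main(void)
{
    /* 1 + 2^-24 + 2^-60 rounded to nearest double: the tail is lost. */
    double nearest = 1.0 + 0x1p-24;
    /* The same value rounded to double with round-to-odd: the sticky
     * low (2^-52) bit records that the true value was above the tie. */
    double odd = 1.0 + 0x1p-24 + 0x1p-52;

    printf("rounded twice: %a\n", (double)(float)nearest); /* 0x1p+0 */
    printf("rounded once:  %a\n", (double)(float)odd);     /* 0x1.000002p+0 */
    return 0;
}
```

Round-to-odd ("sticky" rounding) forces the lowest retained bit to 1 whenever any discarded bits were nonzero, so the intermediate result can never sit exactly on a tie that the second rounding would resolve the wrong way.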
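The loop structure leans on the SVE/SME predicate layout: predicates carry one bit per vector byte, so each uint16_t of pn/pm governs a 16-byte slice; a 4-byte pair of fp16 elements consumes 4 predicate bits, and each fp16 element is governed by the bit of its lowest byte, i.e. bits 0 and 2 of the group, which is what the `prow & pcol & 0b0101` test checks. A sketch of that test as a standalone helper (hypothetical, not in the patch):

```c
#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical helper, not in the patch.  row_pred/col_pred are one
 * uint16_t each of predicate bits (one bit per byte of a 16-byte
 * vector slice); byte_off is the byte offset of a 4-byte fp16 pair
 * within that slice (0, 4, 8 or 12).
 */
static bool pair_any_active(uint16_t row_pred, uint16_t col_pred,
                            int byte_off)
{
    /* Bits 0 and 2 of each 4-bit group select the two fp16 halves. */
    uint16_t row = (row_pred >> byte_off) & 0x5; /* 0b0101 */
    uint16_t col = (col_pred >> byte_off) & 0x5;

    /* True if at least one half of the pair is active in both the
     * row and the column predicate. */
    return (row & col) != 0;
}
```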