summaryrefslogtreecommitdiffstats
path: root/fpu
diff options
context:
space:
mode:
authorRichard Henderson2020-11-22 04:20:23 +0100
committerRichard Henderson2021-06-03 23:09:02 +0200
commit572c4d862ff2b5f1525044639aa60ec5854c813d (patch)
tree77ad0b8e4cb5913d8bb99009c78e504b6f057e90 /fpu
parentsoftfloat: Convert floatx80 compare to FloatParts (diff)
downloadqemu-572c4d862ff2b5f1525044639aa60ec5854c813d.tar.gz
qemu-572c4d862ff2b5f1525044639aa60ec5854c813d.tar.xz
qemu-572c4d862ff2b5f1525044639aa60ec5854c813d.zip
softfloat: Convert float32_exp2 to FloatParts
Keep the intermediate results in FloatParts instead of converting back and forth between float64. Use muladd instead of separate mul+add. Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'fpu')
-rw-r--r--fpu/softfloat.c53
1 files changed, 23 insertions, 30 deletions
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index c32b1c7113..27306d6a93 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -5210,47 +5210,40 @@ static const float64 float32_exp2_coefficients[15] =
/*
 * Compute 2^a for a float32, using FloatParts64 intermediates.
 *
 * The polynomial approximation evaluates
 *     2^a = e^(a*ln2) ~= 1 + sum_{i=0}^{14} c[i] * (a*ln2)^(i+1)
 * with coefficients from float32_exp2_coefficients[].
 *
 * Special cases: NaN propagates (sNaN raises invalid), +inf -> +inf,
 * -inf -> +0, +/-0 -> 1.  All finite inputs raise float_flag_inexact.
 *
 * Fixes over the original conversion:
 *  - float64_ln2 is unpacked into 'tp' (the operand actually passed to
 *    parts_mul); previously it was unpacked into 'xnp' while 'tp' was
 *    used uninitialized (undefined behavior).
 *  - The Horner-style accumulation multiplies each coefficient by the
 *    running power 'xnp' (x^(i+1)), not the constant 'xp' (x); using
 *    'xp' collapsed the series to a linear term and gave wrong results.
 */
float32 float32_exp2(float32 a, float_status *status)
{
    FloatParts64 xp, xnp, tp, rp;
    int i;

    float32_unpack_canonical(&xp, a, status);
    if (unlikely(xp.cls != float_class_normal)) {
        switch (xp.cls) {
        case float_class_snan:
        case float_class_qnan:
            parts_return_nan(&xp, status);
            return float32_round_pack_canonical(&xp, status);
        case float_class_inf:
            /* 2^+inf = +inf; 2^-inf = +0.  */
            return xp.sign ? float32_zero : a;
        case float_class_zero:
            /* 2^0 = 1 exactly.  */
            return float32_one;
        default:
            break;
        }
        g_assert_not_reached();
    }

    float_raise(float_flag_inexact, status);

    /* x = a * ln2, the exponent of e for the series below.  */
    float64_unpack_canonical(&tp, float64_ln2, status);
    xp = *parts_mul(&xp, &tp, status);
    /* xnp tracks the running power x^(i+1) across iterations.  */
    xnp = xp;

    float64_unpack_canonical(&rp, float64_one, status);
    for (i = 0 ; i < 15 ; i++) {
        float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
        /* r += c[i] * x^(i+1); fused muladd avoids a double rounding.  */
        rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
        xnp = *parts_mul(&xnp, &xp, status);
    }

    return float32_round_pack_canonical(&rp, status);
}
/*----------------------------------------------------------------------------