author:    Richard Henderson  2018-10-03 16:35:51 +0200
committer: Richard Henderson  2018-10-05 19:57:41 +0200
commit:    5dfbc9e4903c0121140f2945f05df48cea72dd82 (patch)
tree:      a3919c99dfd9c4144513dc134836ffc902977a72  /include/fpu/softfloat-macros.h
parent:    softfloat: Replace countLeadingZeros32/64 with clz32/64 (diff)
softfloat: Fix division
The __udiv_qrnnd primitive that we nicked from gmp requires its inputs to
be normalized.  We were not doing that.  Because the inputs are nearly
normalized already, finishing that is trivial.

Replace div128to64 with a "proper" udiv_qrnnd, so that this remains a
reusable primitive.

Fixes: cf07323d494
Fixes: https://bugs.launchpad.net/qemu/+bug/1793119
Tested-by: Emilio G. Cota <cota@braap.org>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
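[Editorial sketch, not part of the patch: the calling convention the fix
establishes is that the divisor must be normalized (most-significant bit
set) before udiv_qrnnd() is invoked, with the 128-bit numerator shifted by
the same amount.  udiv_qrnnd() and shift128Left() are the helpers from the
diff below; clz64() is QEMU's count-leading-zeros helper; div128_norm() is
a hypothetical wrapper invented for this illustration.]

/* Hypothetical wrapper, for illustration only: divide the 128-bit value
 * n1:n0 by d, assuming d != 0 and n1 < d so the quotient fits in 64 bits. */
static uint64_t div128_norm(uint64_t n1, uint64_t n0, uint64_t d,
                            uint64_t *rem)
{
    int shift = clz64(d);               /* bits needed to normalize d */
    uint64_t q, r;

    if (shift) {
        d <<= shift;                    /* MSB of d is now set */
        shift128Left(n1, n0, shift, &n1, &n0);  /* shift numerator too */
    }
    q = udiv_qrnnd(&r, n1, n0, d);      /* requires a normalized divisor */
    *rem = r >> shift;                  /* undo the normalization */
    return q;
}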
Diffstat (limited to 'include/fpu/softfloat-macros.h')
-rw-r--r--  include/fpu/softfloat-macros.h | 34
1 file changed, 25 insertions, 9 deletions
diff --git a/include/fpu/softfloat-macros.h b/include/fpu/softfloat-macros.h
index edc682139e..a1d99c730d 100644
--- a/include/fpu/softfloat-macros.h
+++ b/include/fpu/softfloat-macros.h
@@ -329,15 +329,30 @@ static inline void
| pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/
-static inline void
- shortShift128Left(
-     uint64_t a0, uint64_t a1, int count, uint64_t *z0Ptr, uint64_t *z1Ptr)
+static inline void shortShift128Left(uint64_t a0, uint64_t a1, int count,
+                                     uint64_t *z0Ptr, uint64_t *z1Ptr)
{
+    *z1Ptr = a1 << count;
+    *z0Ptr = count == 0 ? a0 : (a0 << count) | (a1 >> (-count & 63));
+}

-    *z1Ptr = a1<<count;
-    *z0Ptr =
-        ( count == 0 ) ? a0 : ( a0<<count ) | ( a1>>( ( - count ) & 63 ) );
+/*----------------------------------------------------------------------------
+| Shifts the 128-bit value formed by concatenating `a0' and `a1' left by the
+| number of bits given in `count'. Any bits shifted off are lost. The value
+| of `count' may be greater than 64. The result is broken into two 64-bit
+| pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
+*----------------------------------------------------------------------------*/
+static inline void shift128Left(uint64_t a0, uint64_t a1, int count,
+                                uint64_t *z0Ptr, uint64_t *z1Ptr)
+{
+    if (count < 64) {
+        *z1Ptr = a1 << count;
+        *z0Ptr = count == 0 ? a0 : (a0 << count) | (a1 >> (-count & 63));
+    } else {
+        *z1Ptr = 0;
+        *z0Ptr = a1 << (count - 64);
+    }
}
/*----------------------------------------------------------------------------
@@ -619,7 +634,8 @@ static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b)
*
* Licensed under the GPLv2/LGPLv3
*/
-static inline uint64_t div128To64(uint64_t n0, uint64_t n1, uint64_t d)
+static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
+                                  uint64_t n0, uint64_t d)
{
    uint64_t d0, d1, q0, q1, r1, r0, m;
@@ -658,8 +674,8 @@ static inline uint64_t div128To64(uint64_t n0, uint64_t n1, uint64_t d)
    }
    r0 -= m;
-    /* Return remainder in LSB */
-    return (q1 << 32) | q0 | (r0 != 0);
+    *r = r0;
+    return (q1 << 32) | q0;
}
/*----------------------------------------------------------------------------
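[Editorial usage note, not part of the commit: the new shift128Left()
differs from shortShift128Left() in accepting counts of 64 or more, where
the whole low word migrates into the high word.  A tiny self-check, under
the assumption that the helpers from the diff above are pasted into scope:]

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t hi, lo;

    /* count < 64: the high bits of a1 spill into z0. */
    shift128Left(0x1, 0xF000000000000000ULL, 4, &hi, &lo);
    assert(hi == 0x1F && lo == 0);

    /* count == 64: the old low word becomes the high word. */
    shift128Left(0, 0xABCD, 64, &hi, &lo);
    assert(hi == 0xABCD && lo == 0);
    return 0;
}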