author     Richard Henderson  2018-05-15 15:58:43 +0200
committer  Peter Maydell      2018-05-15 15:58:43 +0200
commit     564a0632504fad840491aa9a59453f4e64a316c4 (patch)
tree       21a983526571d7c54e2aa192a741b882cd30533e /target/arm/helper.c
parent     target/arm: Early exit after unallocated_encoding in disas_fp_int_conv (diff)
target/arm: Implement FCVT (scalar, integer) for fp16
Cc: qemu-stable@nongnu.org
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20180512003217.9105-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
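
As a standalone illustration (not part of the patch), the double-rounding argument in the comment this patch extends can be checked with ordinary C arithmetic for the shift == 0 case that FCVT (scalar, integer) uses: the first int64 whose conversion to f64 is inexact lies far beyond the largest finite f16 value, so the intermediate f64 rounding can never change the final f16 result.

/* Hypothetical standalone sketch, not QEMU code: why int64 -> f64 -> f16
 * cannot double-round for a plain integer convert (shift == 0). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* f64 has a 53-bit significand, so every integer of magnitude
     * <= 2^53 converts exactly; 2^53 + 1 is the first that must round. */
    int64_t first_inexact = (INT64_C(1) << 53) + 1;

    /* Largest finite f16 value (5-bit exponent, 11-bit significand). */
    double f16_max = 65504.0;

    printf("first inexact int64 -> f64 input: %" PRId64 "\n", first_inexact);
    printf("largest finite f16 value:         %.0f\n", f16_max);

    /* 2^53 + 1 is about 9.0e15, vastly above 65504: any input large
     * enough to round in the f64 step already saturates to f16 infinity,
     * so the only rounding that matters is the final f64 -> f16 step. */
    return 0;
}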
Diffstat (limited to 'target/arm/helper.c')
-rw-r--r--  target/arm/helper.c | 38 ++++++++++++++++++++++++++++++++++++--
1 file changed, 36 insertions(+), 2 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 817f9d81a0..c6fd7f9479 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -11427,8 +11427,12 @@ VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
#undef VFP_CONV_FIX_A64
/* Conversion to/from f16 can overflow to infinity before/after scaling.
- * Therefore we convert to f64 (which does not round), scale,
- * and then convert f64 to f16 (which may round).
+ * Therefore we convert to f64, scale, and then convert f64 to f16; or
+ * vice versa for conversion to integer.
+ *
+ * For 16- and 32-bit integers, the conversion to f64 never rounds.
+ * For 64-bit integers, any integer that would cause rounding will also
+ * overflow to f16 infinity, so there is no double rounding problem.
*/
static float16 do_postscale_fp16(float64 f, int shift, float_status *fpst)
@@ -11446,6 +11450,16 @@ float16 HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
return do_postscale_fp16(uint32_to_float64(x, fpst), shift, fpst);
}
+float16 HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
+{
+ return do_postscale_fp16(int64_to_float64(x, fpst), shift, fpst);
+}
+
+float16 HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
+{
+ return do_postscale_fp16(uint64_to_float64(x, fpst), shift, fpst);
+}
+
static float64 do_prescale_fp16(float16 f, int shift, float_status *fpst)
{
if (unlikely(float16_is_any_nan(f))) {
@@ -11475,6 +11489,26 @@ uint32_t HELPER(vfp_touhh)(float16 x, uint32_t shift, void *fpst)
return float64_to_uint16(do_prescale_fp16(x, shift, fpst), fpst);
}
+uint32_t HELPER(vfp_toslh)(float16 x, uint32_t shift, void *fpst)
+{
+ return float64_to_int32(do_prescale_fp16(x, shift, fpst), fpst);
+}
+
+uint32_t HELPER(vfp_toulh)(float16 x, uint32_t shift, void *fpst)
+{
+ return float64_to_uint32(do_prescale_fp16(x, shift, fpst), fpst);
+}
+
+uint64_t HELPER(vfp_tosqh)(float16 x, uint32_t shift, void *fpst)
+{
+ return float64_to_int64(do_prescale_fp16(x, shift, fpst), fpst);
+}
+
+uint64_t HELPER(vfp_touqh)(float16 x, uint32_t shift, void *fpst)
+{
+ return float64_to_uint64(do_prescale_fp16(x, shift, fpst), fpst);
+}
+
/* Set the current fp rounding mode and return the old one.
* The argument is a softfloat float_round_ value.
*/
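
For context, the body of do_postscale_fp16() falls outside the hunks shown above. A minimal sketch of how such a helper could be composed, assuming QEMU's softfloat primitives float64_scalbn() and float64_to_float16() (the name sketch_postscale_fp16 and this body are illustrative assumptions, not the committed code):

/* Hypothetical sketch assuming QEMU's softfloat API; not the actual
 * committed body of do_postscale_fp16(). */
static float16 sketch_postscale_fp16(float64 f, int shift, float_status *fpst)
{
    /* Scale by 2^-shift while still in f64, then round exactly once
     * when narrowing to f16 (the 'true' selects IEEE half-precision). */
    return float64_to_float16(float64_scalbn(f, -shift, fpst), true, fpst);
}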