path: root/target/arm/translate.c
author    Peter Maydell  2021-06-28 15:58:32 +0200
committer Peter Maydell  2021-07-02 12:48:37 +0200
commit    f4ae6c8cbda8d9b21290e9b8ae21b785ca24aace (patch)
tree      e48fada5b87faf37247fafdd739889b5ea6a44f0 /target/arm/translate.c
parent    target/arm: Implement MVE VADDLV (diff)
target/arm: Implement MVE long shifts by immediate
The MVE extension to v8.1M includes some new shift instructions which sit entirely within the non-coprocessor part of the encoding space and which operate only on general-purpose registers. They take up the space which was previously UNPREDICTABLE MOVS and ORRS encodings with Rm == 13 or 15.

Implement the long shifts by immediate, which perform shifts on a pair of general-purpose registers treated as a 64-bit quantity, with an immediate shift count between 1 and 32.

Awkwardly, because the MOVS and ORRS trans functions do not UNDEF for the Rm==13,15 case, we need to explicitly emit code to UNDEF for the cases where v8.1M now requires that. (Trying to change MOVS and ORRS is too difficult, because the functions that generate the code are shared between a dozen different kinds of arithmetic or logical instruction for all A32, T16 and T32 encodings, and for some insns and some encodings Rm==13,15 are valid.)

We make the helper functions we need for UQSHLL and SQSHLL take a 32-bit value which the helper casts to int8_t because we'll need these helpers also for the shift-by-register insns, where the shift count might be < 0 or > 32.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-16-peter.maydell@linaro.org
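As a side note on the casting behaviour described in the last paragraph: the effect of taking a 32-bit count and narrowing it to int8_t can be illustrated with a small standalone C model. This is a hedged sketch of the saturating-shift semantics only, not QEMU's actual helper implementation, and the name model_uqshll is invented for illustration.

/*
 * Standalone model of an unsigned saturating 64-bit left shift whose
 * count arrives as a 32-bit value and is cast to int8_t, so that the
 * same routine could also serve shift-by-register forms where the
 * count may be negative or greater than 32.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t model_uqshll(uint64_t n, uint32_t shift32, bool *sat)
{
    int8_t shift = (int8_t)shift32;   /* the cast called out above */

    if (shift <= -64) {
        return 0;                     /* shifted entirely out to the right */
    } else if (shift < 0) {
        return n >> -shift;           /* negative count acts as a right shift */
    } else if (shift >= 64) {
        if (n != 0) {
            *sat = true;              /* every set bit lost: saturate */
            return UINT64_MAX;
        }
        return 0;
    } else {
        uint64_t r = n << shift;
        if ((r >> shift) != n) {      /* high bits lost: saturate */
            *sat = true;
            return UINT64_MAX;
        }
        return r;
    }
}

int main(void)
{
    bool sat = false;
    /* 1 << 32 fits in 64 bits: no saturation */
    printf("%016llx sat=%d\n",
           (unsigned long long)model_uqshll(1, 32, &sat), sat);
    /* all-ones << 1 loses the top bit: saturates */
    printf("%016llx sat=%d\n",
           (unsigned long long)model_uqshll(UINT64_MAX, 1, &sat), sat);
    return 0;
}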
Diffstat (limited to 'target/arm/translate.c')
-rw-r--r--  target/arm/translate.c | 90
1 file changed, 90 insertions, 0 deletions
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 66b24ab56e..e0a481fed9 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -5703,6 +5703,96 @@ static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
}
/*
+ * v8.1M MVE wide-shifts
+ */
+static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a,
+ WideShiftImmFn *fn)
+{
+ TCGv_i64 rda;
+ TCGv_i32 rdalo, rdahi;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+ return false;
+ }
+ if (a->rdahi == 15) {
+ /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
+ return false;
+ }
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+ a->rdahi == 13) {
+ /* RdaHi == 13 is UNPREDICTABLE; we choose to UNDEF */
+ unallocated_encoding(s);
+ return true;
+ }
+
+ if (a->shim == 0) {
+ a->shim = 32;
+ }
+
+ rda = tcg_temp_new_i64();
+ rdalo = load_reg(s, a->rdalo);
+ rdahi = load_reg(s, a->rdahi);
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+
+ fn(rda, rda, a->shim);
+
+ tcg_gen_extrl_i64_i32(rdalo, rda);
+ tcg_gen_extrh_i64_i32(rdahi, rda);
+ store_reg(s, a->rdalo, rdalo);
+ store_reg(s, a->rdahi, rdahi);
+ tcg_temp_free_i64(rda);
+
+ return true;
+}
+
+static bool trans_ASRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+ return do_mve_shl_ri(s, a, tcg_gen_sari_i64);
+}
+
+static bool trans_LSLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+ return do_mve_shl_ri(s, a, tcg_gen_shli_i64);
+}
+
+static bool trans_LSRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+ return do_mve_shl_ri(s, a, tcg_gen_shri_i64);
+}
+
+static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
+{
+ gen_helper_mve_sqshll(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+ return do_mve_shl_ri(s, a, gen_mve_sqshll);
+}
+
+static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
+{
+ gen_helper_mve_uqshll(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+ return do_mve_shl_ri(s, a, gen_mve_uqshll);
+}
+
+static bool trans_SRSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+ return do_mve_shl_ri(s, a, gen_srshr64_i64);
+}
+
+static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+ return do_mve_shl_ri(s, a, gen_urshr64_i64);
+}
+
+/*
* Multiply and multiply accumulate
*/
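
For reference, the data flow through the 64-bit temporary in do_mve_shl_ri above (concatenate RdaLo/RdaHi, shift, split back, with a zero immediate field meaning a count of 32) can be modelled in a few lines of plain C. This is a sketch assuming the LSLL case; model_lsll_ri is a made-up name, not a QEMU function.

/*
 * Standalone model of the register-pair round trip: RdaLo and RdaHi
 * are glued into one 64-bit value, shifted left, and split back.
 */
#include <stdint.h>
#include <stdio.h>

static void model_lsll_ri(uint32_t *rdalo, uint32_t *rdahi, int shim)
{
    if (shim == 0) {
        shim = 32;                            /* encoding quirk: 0 means 32 */
    }
    uint64_t rda = ((uint64_t)*rdahi << 32) | *rdalo; /* tcg_gen_concat_i32_i64 */
    rda <<= shim;                             /* the WideShiftImmFn, here LSLL */
    *rdalo = (uint32_t)rda;                   /* tcg_gen_extrl_i64_i32 */
    *rdahi = (uint32_t)(rda >> 32);           /* tcg_gen_extrh_i64_i32 */
}

int main(void)
{
    uint32_t lo = 0x80000001, hi = 0;
    model_lsll_ri(&lo, &hi, 1);
    printf("hi=%08x lo=%08x\n", hi, lo);      /* prints hi=00000001 lo=00000002 */
    return 0;
}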