| field | value |
|---|---|
| author | Peter Maydell, 2021-06-28 15:58:33 +0200 |
| committer | Peter Maydell, 2021-07-02 12:48:37 +0200 |
| commit | 0aa4b4c358bfced42306de697e6408cabf922cf5 |
| tree | cc52069dd0a0862fd25527e878b7e3e74b5037d3 /target/arm/translate.c |
| parent | target/arm: Implement MVE long shifts by immediate |
target/arm: Implement MVE long shifts by register
Implement the MVE long shifts by register, which perform shifts on a
pair of general-purpose registers treated as a 64-bit quantity; the
shift count, which may be either positive or negative, is taken from
another general-purpose register.
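To make the operand layout concrete, here is a minimal illustrative sketch (not part of the patch): RdaLo supplies bits [31:0] of the 64-bit quantity and RdaHi bits [63:32], which is what the tcg_gen_concat_i32_i64() and tcg_gen_extrl/extrh_i64_i32() calls in the translator below implement. The standalone helper names here are hypothetical.

```c
/*
 * Illustration only, not QEMU code: the 64-bit operand is built from
 * the two 32-bit general-purpose registers RdaLo (bits [31:0]) and
 * RdaHi (bits [63:32]), shifted, and then split back into the pair.
 */
#include <stdint.h>

static uint64_t pack_rda(uint32_t rdalo, uint32_t rdahi)
{
    return ((uint64_t)rdahi << 32) | rdalo;
}

static void unpack_rda(uint64_t rda, uint32_t *rdalo, uint32_t *rdahi)
{
    *rdalo = (uint32_t)rda;          /* low half back to RdaLo */
    *rdahi = (uint32_t)(rda >> 32);  /* high half back to RdaHi */
}
```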
As with the long shifts by immediate, these encodings sit in the space
that was previously the UNPREDICTABLE MOVS/ORRS with Rm==13,15.
Because LSLL_rr and ASRL_rr overlap both with MOV_rxri/ORR_rrri and
with CSEL (as one of the previously-UNPREDICTABLE Rm==13 cases),
we have to move the CSEL pattern into the same decodetree group.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-17-peter.maydell@linaro.org
Diffstat (limited to 'target/arm/translate.c')
-rw-r--r-- | target/arm/translate.c | 69 |
1 file changed, 69 insertions, 0 deletions
diff --git a/target/arm/translate.c b/target/arm/translate.c
index e0a481fed9..f123752431 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -5792,6 +5792,75 @@ static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
     return do_mve_shl_ri(s, a, gen_urshr64_i64);
 }
 
+static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn)
+{
+    TCGv_i64 rda;
+    TCGv_i32 rdalo, rdahi;
+
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (a->rdahi == 15) {
+        /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rdahi == 13 || a->rm == 13 || a->rm == 15 ||
+        a->rm == a->rdahi || a->rm == a->rdalo) {
+        /* These rdahi/rdalo/rm cases are UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    rda = tcg_temp_new_i64();
+    rdalo = load_reg(s, a->rdalo);
+    rdahi = load_reg(s, a->rdahi);
+    tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+
+    /* The helper takes care of the sign-extension of the low 8 bits of Rm */
+    fn(rda, cpu_env, rda, cpu_R[a->rm]);
+
+    tcg_gen_extrl_i64_i32(rdalo, rda);
+    tcg_gen_extrh_i64_i32(rdahi, rda);
+    store_reg(s, a->rdalo, rdalo);
+    store_reg(s, a->rdahi, rdahi);
+    tcg_temp_free_i64(rda);
+
+    return true;
+}
+
+static bool trans_LSLL_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_ushll);
+}
+
+static bool trans_ASRL_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_sshrl);
+}
+
+static bool trans_UQRSHLL64_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll);
+}
+
+static bool trans_SQRSHRL64_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl);
+}
+
+static bool trans_UQRSHLL48_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll48);
+}
+
+static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
+}
+
 /*
  * Multiply and multiply accumulate
  */
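The in-diff comment about sign-extending the low 8 bits of Rm means the shift count is the low byte of Rm interpreted as a signed value, so a single encoding can shift in either direction. Below is a minimal, illustrative sketch of the plain (non-saturating, non-rounding) LSLL case under that reading; the real helpers (gen_helper_mve_ushll and friends) live in mve_helper.c, outside this diff, and sketch_ushll is a hypothetical name. Clearing the result for counts of magnitude 64 or more is likewise an assumption of this sketch.

```c
/*
 * Hypothetical sketch, not QEMU code: plain LSLL semantics with a
 * register shift count.  Only the low byte of Rm is used, as a signed
 * count; positive counts shift left, negative counts shift right.
 */
#include <stdint.h>

static uint64_t sketch_ushll(uint64_t rda, uint32_t rm)
{
    int8_t shift = (int8_t)(rm & 0xff);  /* sign-extend the low 8 bits of Rm */

    if (shift >= 0) {
        /* Assumed here: counts of 64 or more clear the result */
        return shift >= 64 ? 0 : rda << shift;
    } else {
        /* Negative count: logical shift right by the magnitude */
        return shift <= -64 ? 0 : rda >> -shift;
    }
}
```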