author     Peter Maydell    2019-06-11 17:39:44 +0200
committer  Peter Maydell    2019-06-13 16:14:04 +0200
commit     a9ab50011aeda2dd012da99069e078379315ea18
tree       25557aea6118b2c1db20cef68970cc1f94f0aadc /target/arm/translate-vfp.inc.c
parent     target/arm: Convert "double-precision" register moves to decodetree
target/arm: Convert "single-precision" register moves to decodetree
Convert the "single-precision" register moves to decodetree:
 * VMSR
 * VMRS
 * VMOV between general purpose register and single precision

Note that the VMSR/VMRS conversions make our handling of the
"should this UNDEF?" checks consistent between the two instructions:
 * VMSR to MVFR0, MVFR1, MVFR2 now UNDEFs from EL0
   (previously was a nop)
 * VMSR to FPSID now UNDEFs from EL0 or if VFPv3 or better
   (previously was a nop)
 * VMSR to FPINST and FPINST2 now UNDEFs if VFPv3 or better
   (previously would write to the register, which had no
   guest-visible effect because we always UNDEF reads)

We also tighten up the decode: we were previously underdecoding
some SBZ or SBO bits.

The conversion of VMOV_single includes the expansion out of the
gen_mov_F0_vreg()/gen_vfp_mrs() and gen_mov_vreg_F0()/gen_vfp_msr()
sequences into the simpler direct load/store of the TCG temp via
neon_{load,store}_reg32(): we know in the new function that we're
always single-precision, we don't need to use the old-and-deprecated
cpu_F0* TCG globals, and we don't happen to have the declaration of
gen_vfp_msr() and gen_vfp_mrs() at the point in the file where the
new function is.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
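For reference, the decodetree patterns this commit adds to target/arm/vfp.decode
look roughly like the sketch below. The field layout is reconstructed from the
A32 encodings of VMRS/VMSR and VMOV (core <-> single); the actual vfp.decode
hunk is not part of this diffstat-limited view, so treat the exact bit patterns
as illustrative:

    VMSR_VMRS    ---- 1110 111 l:1 reg:4 rt:4 1010 0001 0000
    VMOV_single  ---- 1110 000 l:1 .... rt:4 1010 . 001 0000 \
                 vn=%vn_sp

Here %vn_sp is the usual helper field that glues the Vn and N bits together
into a single-precision register number.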
Diffstat (limited to 'target/arm/translate-vfp.inc.c')
-rw-r--r--  target/arm/translate-vfp.inc.c | 161
1 file changed, 161 insertions(+), 0 deletions(-)
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index 8b0899fa05..74c10f9024 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -622,3 +622,164 @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
return true;
}
+
+static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
+{
+ TCGv_i32 tmp;
+ bool ignore_vfp_enabled = false;
+
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ /*
+ * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
+ * Writes to R15 are UNPREDICTABLE; we choose to undef.
+ */
+ if (a->rt == 15 || a->reg != ARM_VFP_FPSCR) {
+ return false;
+ }
+ }
+
+ switch (a->reg) {
+ case ARM_VFP_FPSID:
+ /*
+ * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
+ * all ID registers to privileged access only.
+ */
+ if (IS_USER(s) && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return false;
+ }
+ ignore_vfp_enabled = true;
+ break;
+ case ARM_VFP_MVFR0:
+ case ARM_VFP_MVFR1:
+ if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
+ return false;
+ }
+ ignore_vfp_enabled = true;
+ break;
+ case ARM_VFP_MVFR2:
+ if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
+ return false;
+ }
+ ignore_vfp_enabled = true;
+ break;
+ case ARM_VFP_FPSCR:
+ break;
+ case ARM_VFP_FPEXC:
+ if (IS_USER(s)) {
+ return false;
+ }
+ ignore_vfp_enabled = true;
+ break;
+ case ARM_VFP_FPINST:
+ case ARM_VFP_FPINST2:
+ /* Not present in VFPv3 */
+ if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return false;
+ }
+ break;
+ default:
+ return false;
+ }
+
+ if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
+ return true;
+ }
+
+ if (a->l) {
+ /* VMRS, move VFP special register to gp register */
+ switch (a->reg) {
+ case ARM_VFP_FPSID:
+ case ARM_VFP_FPEXC:
+ case ARM_VFP_FPINST:
+ case ARM_VFP_FPINST2:
+ case ARM_VFP_MVFR0:
+ case ARM_VFP_MVFR1:
+ case ARM_VFP_MVFR2:
+ tmp = load_cpu_field(vfp.xregs[a->reg]);
+ break;
+ case ARM_VFP_FPSCR:
+ if (a->rt == 15) {
+ tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
+ tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
+ } else {
+ tmp = tcg_temp_new_i32();
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (a->rt == 15) {
+ /* Set the 4 flag bits in the CPSR. */
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp);
+ } else {
+ store_reg(s, a->rt, tmp);
+ }
+ } else {
+ /* VMSR, move gp register to VFP special register */
+ switch (a->reg) {
+ case ARM_VFP_FPSID:
+ case ARM_VFP_MVFR0:
+ case ARM_VFP_MVFR1:
+ case ARM_VFP_MVFR2:
+ /* Writes are ignored. */
+ break;
+ case ARM_VFP_FPSCR:
+ tmp = load_reg(s, a->rt);
+ gen_helper_vfp_set_fpscr(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_lookup_tb(s);
+ break;
+ case ARM_VFP_FPEXC:
+ /*
+ * TODO: VFP subarchitecture support.
+ * For now, keep the EN bit only
+ */
+ tmp = load_reg(s, a->rt);
+ tcg_gen_andi_i32(tmp, tmp, 1 << 30);
+ store_cpu_field(tmp, vfp.xregs[a->reg]);
+ gen_lookup_tb(s);
+ break;
+ case ARM_VFP_FPINST:
+ case ARM_VFP_FPINST2:
+ tmp = load_reg(s, a->rt);
+ store_cpu_field(tmp, vfp.xregs[a->reg]);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ return true;
+}
+
+static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
+{
+ TCGv_i32 tmp;
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (a->l) {
+ /* VFP to general purpose register */
+ tmp = tcg_temp_new_i32();
+ neon_load_reg32(tmp, a->vn);
+ if (a->rt == 15) {
+ /* Set the 4 flag bits in the CPSR. */
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp);
+ } else {
+ store_reg(s, a->rt, tmp);
+ }
+ } else {
+ /* general purpose register to VFP */
+ tmp = load_reg(s, a->rt);
+ neon_store_reg32(tmp, a->vn);
+ tcg_temp_free_i32(tmp);
+ }
+
+ return true;
+}
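For context, the neon_load_reg32()/neon_store_reg32() accessors used by
trans_VMOV_single() are plain 32-bit TCG loads/stores of the VFP register file
in CPUARMState. A minimal sketch of how such helpers can be written follows;
the real definitions live elsewhere in target/arm and are not part of this
diff, so take the exact offset helper as an assumption:

    /*
     * Sketch: copy a single-precision VFP register to/from a TCG temp.
     * vfp_reg_offset(dp, reg) is assumed to return the byte offset of
     * VFP register 'reg' within CPUARMState (dp selects double precision).
     */
    static inline void neon_load_reg32(TCGv_i32 var, int reg)
    {
        tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
    }

    static inline void neon_store_reg32(TCGv_i32 var, int reg)
    {
        tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
    }

This is why the new function can avoid the old cpu_F0* globals entirely: the
value only ever lives in a short-lived TCG temp between the register file and
the general purpose register.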