author    Peter Maydell 2019-08-16 18:21:39 +0200
committer Peter Maydell 2019-08-16 18:21:40 +0200
commit    afd760539308a5524accf964107cdb1d54a059e3 (patch)
tree      010ff88dcad4835f0d863e38515119ff0de1a550 /target/arm
parent    Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging (diff)
parent    target/arm: Use tcg_gen_extrh_i64_i32 to extract the high word (diff)
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20190816' into staging
target-arm queue:
 * target/arm: generate a custom MIDR for -cpu max
 * hw/misc/zynq_slcr: refactor to use standard register definition
 * Set ENET_BD_BDU in I.MX FEC controller
 * target/arm: Fix routing of singlestep exceptions
 * refactor a32/t32 decoder handling of PC
 * minor optimisations/cleanups of some a32/t32 codegen
 * target/arm/cpu64: Ensure kvm really supports aarch64=off
 * target/arm/cpu: Ensure we can use the pmu with kvm
 * target/arm: Minor cleanups preparatory to KVM SVE support

# gpg: Signature made Fri 16 Aug 2019 14:15:55 BST
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20190816: (29 commits)
  target/arm: Use tcg_gen_extrh_i64_i32 to extract the high word
  target/arm: Simplify SMMLA, SMMLAR, SMMLS, SMMLSR
  target/arm: Use tcg_gen_rotri_i32 for gen_swap_half
  target/arm: Use ror32 instead of open-coding the operation
  target/arm: Remove redundant shift tests
  target/arm: Use tcg_gen_deposit_i32 for PKHBT, PKHTB
  target/arm: Use tcg_gen_extract_i32 for shifter_out_im
  target/arm/kvm64: Move the get/put of fpsimd registers out
  target/arm/kvm64: Fix error returns
  target/arm/cpu: Use div-round-up to determine predicate register array size
  target/arm/helper: zcr: Add build bug next to value range assumption
  target/arm/cpu: Ensure we can use the pmu with kvm
  target/arm/cpu64: Ensure kvm really supports aarch64=off
  target/arm: Remove helper_double_saturate
  target/arm: Use unallocated_encoding for aarch32
  target/arm: Remove offset argument to gen_exception_bkpt_insn
  target/arm: Replace offset with pc in gen_exception_internal_insn
  target/arm: Replace offset with pc in gen_exception_insn
  target/arm: Replace s->pc with s->base.pc_next
  target/arm: Remove redundant s->pc & ~1
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target/arm')
-rw-r--r--  target/arm/cpu.c               |  30
-rw-r--r--  target/arm/cpu.h               |  13
-rw-r--r--  target/arm/cpu64.c             |  31
-rw-r--r--  target/arm/helper.c            |   7
-rw-r--r--  target/arm/helper.h            |   1
-rw-r--r--  target/arm/kvm.c               |   7
-rw-r--r--  target/arm/kvm64.c             | 157
-rw-r--r--  target/arm/kvm_arm.h           |  28
-rw-r--r--  target/arm/op_helper.c         |  15
-rw-r--r--  target/arm/translate-a64.c     | 130
-rw-r--r--  target/arm/translate-a64.h     |   4
-rw-r--r--  target/arm/translate-vfp.inc.c |  45
-rw-r--r--  target/arm/translate.c         | 572
-rw-r--r--  target/arm/translate.h         |  39
14 files changed, 540 insertions(+), 539 deletions(-)
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index ec2ab95dbe..2399c14471 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -994,10 +994,6 @@ static Property arm_cpu_has_el3_property =
static Property arm_cpu_cfgend_property =
DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);
-/* use property name "pmu" to match other archs and virt tools */
-static Property arm_cpu_has_pmu_property =
- DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true);
-
static Property arm_cpu_has_vfp_property =
DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);
@@ -1020,6 +1016,29 @@ static Property arm_cpu_pmsav7_dregion_property =
pmsav7_dregion,
qdev_prop_uint32, uint32_t);
+static bool arm_get_pmu(Object *obj, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ return cpu->has_pmu;
+}
+
+static void arm_set_pmu(Object *obj, bool value, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ if (value) {
+ if (kvm_enabled() && !kvm_arm_pmu_supported(CPU(cpu))) {
+ error_setg(errp, "'pmu' feature not supported by KVM on this host");
+ return;
+ }
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
+ } else {
+ unset_feature(&cpu->env, ARM_FEATURE_PMU);
+ }
+ cpu->has_pmu = value;
+}
+
static void arm_get_init_svtor(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
@@ -1094,7 +1113,8 @@ void arm_cpu_post_init(Object *obj)
}
if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
- qdev_property_add_static(DEVICE(obj), &arm_cpu_has_pmu_property,
+ cpu->has_pmu = true;
+ object_property_add_bool(obj, "pmu", arm_get_pmu, arm_set_pmu,
&error_abort);
}
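
Note: a sketch of how the new dynamic "pmu" property behaves from the QOM
side (hypothetical caller; uses the property-setter signature of this era,
error handling elided). Enabling the property re-checks the host, so under
KVM without KVM_CAP_ARM_PMU_V3 the setter fails rather than silently
flipping the feature bit:

    Error *err = NULL;
    /* Disabling the PMU is always accepted. */
    object_property_set_bool(OBJECT(cpu), false, "pmu", &err);
    /* Enabling it may fail: arm_set_pmu() sets err instead of
     * ARM_FEATURE_PMU when the KVM host lacks PMU support. */
    object_property_set_bool(OBJECT(cpu), true, "pmu", &err);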
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 2cdde6c4bc..0981303170 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -195,7 +195,7 @@ typedef struct ARMVectorReg {
#ifdef TARGET_AARCH64
/* In AArch32 mode, predicate registers do not exist at all. */
typedef struct ARMPredicateReg {
- uint64_t p[2 * ARM_MAX_VQ / 8] QEMU_ALIGNED(16);
+ uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16);
} ARMPredicateReg;
/* In AArch32 mode, PAC keys do not exist at all. */
@@ -1611,6 +1611,12 @@ FIELD(V7M_FPCCR, ASPEN, 31, 1)
/*
* System register ID fields.
*/
+FIELD(MIDR_EL1, REVISION, 0, 4)
+FIELD(MIDR_EL1, PARTNUM, 4, 12)
+FIELD(MIDR_EL1, ARCHITECTURE, 16, 4)
+FIELD(MIDR_EL1, VARIANT, 20, 4)
+FIELD(MIDR_EL1, IMPLEMENTER, 24, 8)
+
FIELD(ID_ISAR0, SWAP, 0, 4)
FIELD(ID_ISAR0, BITCOUNT, 4, 4)
FIELD(ID_ISAR0, BITFIELD, 8, 4)
@@ -3142,6 +3148,11 @@ FIELD(TBFLAG_ANY, PSTATE_SS, 26, 1)
/* Target EL if we take a floating-point-disabled exception */
FIELD(TBFLAG_ANY, FPEXC_EL, 24, 2)
FIELD(TBFLAG_ANY, BE_DATA, 23, 1)
+/*
+ * For A-profile only, target EL for debug exceptions.
+ * Note that this overlaps with the M-profile-only HANDLER and STACKCHECK bits.
+ */
+FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 21, 2)
/* Bit usage when in AArch32 state: */
FIELD(TBFLAG_A32, THUMB, 0, 1)
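
Note: why the DIV_ROUND_UP form matters. With ARM_MAX_VQ at 16 both
expressions evaluate to 4, but plain division truncates when the numerator
is not a multiple of 8. A worked case with a hypothetical smaller
ARM_MAX_VQ:

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    /* ARM_MAX_VQ = 16: 2*16/8 == 4 and DIV_ROUND_UP(32, 8) == 4 -- same. */
    /* ARM_MAX_VQ = 2:  2*2/8  == 0 but DIV_ROUND_UP(4, 8)  == 1, the one
     * uint64_t actually needed to hold the predicate bits.               */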
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index e4edff548e..d7f5bf610a 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -295,6 +295,25 @@ static void aarch64_max_initfn(Object *obj)
uint32_t u;
aarch64_a57_initfn(obj);
+ /*
+ * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real
+ * one and try to apply errata workarounds or use impdef features we
+ * don't provide.
+ * An IMPLEMENTER field of 0 means "reserved for software use";
+ * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers
+ * to see which features are present";
+ * the VARIANT, PARTNUM and REVISION fields are all implementation
+ * defined and we choose to define PARTNUM just in case guest
+ * code needs to distinguish this QEMU CPU from other software
+ * implementations, though this shouldn't be needed.
+ */
+ t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0);
+ t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf);
+ t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q');
+ t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
+ t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
+ cpu->midr = t;
+
t = cpu->isar.id_aa64isar0;
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
@@ -406,13 +425,13 @@ static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
* restriction allows us to avoid fixing up functionality that assumes a
* uniform execution state like do_interrupt.
*/
- if (!kvm_enabled()) {
- error_setg(errp, "'aarch64' feature cannot be disabled "
- "unless KVM is enabled");
- return;
- }
-
if (value == false) {
+ if (!kvm_enabled() || !kvm_arm_aarch32_supported(CPU(cpu))) {
+ error_setg(errp, "'aarch64' feature cannot be disabled "
+ "unless KVM is enabled and 32-bit EL1 "
+ "is supported");
+ return;
+ }
unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
} else {
set_feature(&cpu->env, ARM_FEATURE_AARCH64);
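
Note: for concreteness, the FIELD_DP64 chain above composes the following
value (a plain-C sketch of the same arithmetic, with the bit positions
taken from the FIELD(MIDR_EL1, ...) definitions added to cpu.h):

    uint64_t midr = 0;
    midr |= (uint64_t)0x00 << 24;   /* IMPLEMENTER: reserved for software */
    midr |= (uint64_t)0x0  << 20;   /* VARIANT */
    midr |= (uint64_t)0xf  << 16;   /* ARCHITECTURE: "check ID registers" */
    midr |= (uint64_t)'Q'  << 4;    /* PARTNUM = 0x51 */
    midr |= (uint64_t)0x0  << 0;    /* REVISION */
    /* i.e. cpu->midr == 0x000f0510 for the 'max' CPU */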
diff --git a/target/arm/helper.c b/target/arm/helper.c
index edee110ae1..7e0d5398ab 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -5302,6 +5302,7 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
int new_len;
/* Bits other than [3:0] are RAZ/WI. */
+ QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
raw_write(env, ri, value & 0xf);
/*
@@ -11172,6 +11173,12 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
}
}
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ int target_el = arm_debug_target_el(env);
+
+ flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL, target_el);
+ }
+
*pflags = flags;
*cs_base = 0;
}
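
Note: the DEBUG_TARGET_EL flag cached here is consumed by the new
gen_swstep_exception() helper, which is not shown in this diff. The sketch
below is a hypothetical reconstruction of its shape, inferred from the
ss_same_el removals later in this series; see translate.h in the actual
tree for the real definition:

    static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
    {
        bool same_el = (s->debug_target_el == s->current_el);
        gen_exception(EXCP_UDEF, syn_swstep(same_el, isv, ex),
                      s->debug_target_el);
    }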
diff --git a/target/arm/helper.h b/target/arm/helper.h
index 132aa1682e..1fb2cb5a77 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -6,7 +6,6 @@ DEF_HELPER_3(add_saturate, i32, env, i32, i32)
DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
-DEF_HELPER_2(double_saturate, i32, env, s32)
DEF_HELPER_FLAGS_2(sdiv, TCG_CALL_NO_RWG_SE, s32, s32, s32)
DEF_HELPER_FLAGS_2(udiv, TCG_CALL_NO_RWG_SE, i32, i32, i32)
DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index b5713e3ffe..b2eaa50b8d 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -164,6 +164,13 @@ void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
env->features = arm_host_cpu_features.features;
}
+bool kvm_arm_pmu_supported(CPUState *cpu)
+{
+ KVMState *s = KVM_STATE(current_machine->accelerator);
+
+ return kvm_check_extension(s, KVM_CAP_ARM_PMU_V3);
+}
+
int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
{
KVMState *s = KVM_STATE(ms->accelerator);
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index 59ed579d02..28f6db57d5 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -24,7 +24,9 @@
#include "qemu/main-loop.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm.h"
+#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
+#include "hw/boards.h"
#include "internals.h"
static bool have_guest_debug;
@@ -593,6 +595,13 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
return true;
}
+bool kvm_arm_aarch32_supported(CPUState *cpu)
+{
+ KVMState *s = KVM_STATE(current_machine->accelerator);
+
+ return kvm_check_extension(s, KVM_CAP_ARM_EL1_32BIT);
+}
+
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
int kvm_arch_init_vcpu(CPUState *cs)
@@ -710,13 +719,53 @@ int kvm_arm_cpreg_level(uint64_t regidx)
#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
-int kvm_arch_put_registers(CPUState *cs, int level)
+static int kvm_arch_put_fpsimd(CPUState *cs)
{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
struct kvm_one_reg reg;
uint32_t fpr;
+ int i, ret;
+
+ for (i = 0; i < 32; i++) {
+ uint64_t *q = aa64_vfp_qreg(env, i);
+#ifdef HOST_WORDS_BIGENDIAN
+ uint64_t fp_val[2] = { q[1], q[0] };
+ reg.addr = (uintptr_t)fp_val;
+#else
+ reg.addr = (uintptr_t)q;
+#endif
+ reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ reg.addr = (uintptr_t)(&fpr);
+ fpr = vfp_get_fpsr(env);
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.addr = (uintptr_t)(&fpr);
+ fpr = vfp_get_fpcr(env);
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ return 0;
+}
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+ struct kvm_one_reg reg;
uint64_t val;
- int i;
- int ret;
+ int i, ret;
unsigned int el;
ARMCPU *cpu = ARM_CPU(cs);
@@ -806,61 +855,75 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
}
- /* Advanced SIMD and FP registers. */
+ ret = kvm_arch_put_fpsimd(cs);
+ if (ret) {
+ return ret;
+ }
+
+ ret = kvm_put_vcpu_events(cpu);
+ if (ret) {
+ return ret;
+ }
+
+ write_cpustate_to_list(cpu, true);
+
+ if (!write_list_to_kvmstate(cpu, level)) {
+ return -EINVAL;
+ }
+
+ kvm_arm_sync_mpstate_to_kvm(cpu);
+
+ return ret;
+}
+
+static int kvm_arch_get_fpsimd(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ uint32_t fpr;
+ int i, ret;
+
for (i = 0; i < 32; i++) {
uint64_t *q = aa64_vfp_qreg(env, i);
-#ifdef HOST_WORDS_BIGENDIAN
- uint64_t fp_val[2] = { q[1], q[0] };
- reg.addr = (uintptr_t)fp_val;
-#else
- reg.addr = (uintptr_t)q;
-#endif
reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ reg.addr = (uintptr_t)q;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret) {
return ret;
+ } else {
+#ifdef HOST_WORDS_BIGENDIAN
+ uint64_t t;
+ t = q[0], q[0] = q[1], q[1] = t;
+#endif
}
}
reg.addr = (uintptr_t)(&fpr);
- fpr = vfp_get_fpsr(env);
reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret) {
return ret;
}
+ vfp_set_fpsr(env, fpr);
- fpr = vfp_get_fpcr(env);
+ reg.addr = (uintptr_t)(&fpr);
reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
- if (ret) {
- return ret;
- }
-
- ret = kvm_put_vcpu_events(cpu);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret) {
return ret;
}
+ vfp_set_fpcr(env, fpr);
- write_cpustate_to_list(cpu, true);
-
- if (!write_list_to_kvmstate(cpu, level)) {
- return EINVAL;
- }
-
- kvm_arm_sync_mpstate_to_kvm(cpu);
-
- return ret;
+ return 0;
}
int kvm_arch_get_registers(CPUState *cs)
{
struct kvm_one_reg reg;
uint64_t val;
- uint32_t fpr;
unsigned int el;
- int i;
- int ret;
+ int i, ret;
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
@@ -949,36 +1012,10 @@ int kvm_arch_get_registers(CPUState *cs)
env->spsr = env->banked_spsr[i];
}
- /* Advanced SIMD and FP registers */
- for (i = 0; i < 32; i++) {
- uint64_t *q = aa64_vfp_qreg(env, i);
- reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
- reg.addr = (uintptr_t)q;
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
- if (ret) {
- return ret;
- } else {
-#ifdef HOST_WORDS_BIGENDIAN
- uint64_t t;
- t = q[0], q[0] = q[1], q[1] = t;
-#endif
- }
- }
-
- reg.addr = (uintptr_t)(&fpr);
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_arch_get_fpsimd(cs);
if (ret) {
return ret;
}
- vfp_set_fpsr(env, fpr);
-
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
- if (ret) {
- return ret;
- }
- vfp_set_fpcr(env, fpr);
ret = kvm_get_vcpu_events(cpu);
if (ret) {
@@ -986,7 +1023,7 @@ int kvm_arch_get_registers(CPUState *cs)
}
if (!write_kvmstate_to_list(cpu)) {
- return EINVAL;
+ return -EINVAL;
}
/* Note that it's OK to have registers which aren't in CPUState,
* so we can ignore a failure return here.
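
Note on the HOST_WORDS_BIGENDIAN swap in kvm_arch_put_fpsimd() /
kvm_arch_get_fpsimd(): QEMU keeps each 128-bit vector register as two
uint64_t with q[0] holding the architecturally low doubleword, while the
KVM_{SET,GET}_ONE_REG buffer is a 16-byte memory image, so on a big-endian
host the two doublewords must be exchanged. A round-trip sketch (my
reading of the convention, not normative):

    uint64_t q[2] = { 0x0011223344556677ULL, 0x8899aabbccddeeffULL };
    #ifdef HOST_WORDS_BIGENDIAN
    uint64_t buf[2] = { q[1], q[0] };   /* swap before KVM_SET_ONE_REG */
    #else
    uint64_t *buf = q;                  /* layout already matches */
    #endif

Note also the EINVAL -> -EINVAL changes in this file: the
kvm_arch_{get,put}_registers callbacks are expected to return a negative
errno on failure.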
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index 2a07333c61..b3106c8600 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -208,6 +208,24 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf);
void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu);
/**
+ * kvm_arm_aarch32_supported:
+ * @cs: CPUState
+ *
+ * Returns: true if the KVM VCPU can enable AArch32 mode
+ * and false otherwise.
+ */
+bool kvm_arm_aarch32_supported(CPUState *cs);
+
+/**
+ * bool kvm_arm_pmu_supported:
+ * @cs: CPUState
+ *
+ * Returns: true if the KVM VCPU can enable its PMU
+ * and false otherwise.
+ */
+bool kvm_arm_pmu_supported(CPUState *cs);
+
+/**
* kvm_arm_get_max_vm_ipa_size - Returns the number of bits in the
* IPA address space supported by KVM
*
@@ -247,6 +265,16 @@ static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
cpu->host_cpu_probe_failed = true;
}
+static inline bool kvm_arm_aarch32_supported(CPUState *cs)
+{
+ return false;
+}
+
+static inline bool kvm_arm_pmu_supported(CPUState *cs)
+{
+ return false;
+}
+
static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
{
return -ENOENT;
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index 5e1625a1c8..0fd4bd0238 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -135,21 +135,6 @@ uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
return res;
}
-uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
-{
- uint32_t res;
- if (val >= 0x40000000) {
- res = ~SIGNBIT;
- env->QF = 1;
- } else if (val <= (int32_t)0xc0000000) {
- res = SIGNBIT;
- env->QF = 1;
- } else {
- res = val << 1;
- }
- return res;
-}
-
uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t res = a + b;
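
Note: removing helper_double_saturate is safe because doubling with signed
saturation is the same operation as a saturating add of a value to itself,
which is what the callers now emit (gen_helper_add_saturate(tmp, cpu_env,
tmp, tmp)). A standalone sketch of the equivalence (QF flag handling
elided):

    #include <stdint.h>
    static uint32_t sat_add(int32_t a, int32_t b)
    {
        int64_t r = (int64_t)a + b;
        if (r > INT32_MAX) { r = INT32_MAX; }   /* 0x7fffffff, QF = 1 */
        if (r < INT32_MIN) { r = INT32_MIN; }   /* 0x80000000, QF = 1 */
        return (uint32_t)r;
    }
    /* sat_add(v, v) matches the removed helper: it saturates for
     * v >= 0x40000000 and v <= -0x40000000, else returns v << 1. */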
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index d3231477a2..fc3e5f5c38 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -253,40 +253,26 @@ static void gen_exception_internal(int excp)
tcg_temp_free_i32(tcg_excp);
}
-static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
+static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
{
- TCGv_i32 tcg_excp = tcg_const_i32(excp);
- TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
- TCGv_i32 tcg_el = tcg_const_i32(target_el);
-
- gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
- tcg_syn, tcg_el);
- tcg_temp_free_i32(tcg_el);
- tcg_temp_free_i32(tcg_syn);
- tcg_temp_free_i32(tcg_excp);
-}
-
-static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
-{
- gen_a64_set_pc_im(s->pc - offset);
+ gen_a64_set_pc_im(pc);
gen_exception_internal(excp);
s->base.is_jmp = DISAS_NORETURN;
}
-static void gen_exception_insn(DisasContext *s, int offset, int excp,
+static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
uint32_t syndrome, uint32_t target_el)
{
- gen_a64_set_pc_im(s->pc - offset);
+ gen_a64_set_pc_im(pc);
gen_exception(excp, syndrome, target_el);
s->base.is_jmp = DISAS_NORETURN;
}
-static void gen_exception_bkpt_insn(DisasContext *s, int offset,
- uint32_t syndrome)
+static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
TCGv_i32 tcg_syn;
- gen_a64_set_pc_im(s->pc - offset);
+ gen_a64_set_pc_im(s->pc_curr);
tcg_syn = tcg_const_i32(syndrome);
gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
tcg_temp_free_i32(tcg_syn);
@@ -305,8 +291,7 @@ static void gen_step_complete_exception(DisasContext *s)
* of the exception, and our syndrome information is always correct.
*/
gen_ss_advance(s);
- gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
- default_exception_el(s));
+ gen_swstep_exception(s, 1, s->is_ldex);
s->base.is_jmp = DISAS_NORETURN;
}
@@ -353,13 +338,6 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
}
}
-void unallocated_encoding(DisasContext *s)
-{
- /* Unallocated and reserved encodings are uncategorized */
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
-}
-
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
@@ -1128,8 +1106,8 @@ static inline bool fp_access_check(DisasContext *s)
return true;
}
- gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
- s->fp_excp_el);
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
return false;
}
@@ -1139,7 +1117,7 @@ static inline bool fp_access_check(DisasContext *s)
bool sve_access_check(DisasContext *s)
{
if (s->sve_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_sve_access_trap(),
s->sve_excp_el);
return false;
}
@@ -1248,11 +1226,11 @@ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
*/
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
- uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
+ uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;
if (insn & (1U << 31)) {
/* BL Branch with link */
- tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
}
/* B Branch / BL Branch with link */
@@ -1276,7 +1254,7 @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
sf = extract32(insn, 31, 1);
op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
rt = extract32(insn, 0, 5);
- addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
+ addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
tcg_cmp = read_cpu_reg(s, rt, sf);
label_match = gen_new_label();
@@ -1285,7 +1263,7 @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
tcg_cmp, 0, label_match);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
gen_set_label(label_match);
gen_goto_tb(s, 1, addr);
}
@@ -1305,7 +1283,7 @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
- addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
+ addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
rt = extract32(insn, 0, 5);
tcg_cmp = tcg_temp_new_i64();
@@ -1316,7 +1294,7 @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
tcg_cmp, 0, label_match);
tcg_temp_free_i64(tcg_cmp);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
gen_set_label(label_match);
gen_goto_tb(s, 1, addr);
}
@@ -1336,7 +1314,7 @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
- addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
+ addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
cond = extract32(insn, 0, 4);
reset_btype(s);
@@ -1344,7 +1322,7 @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
/* genuinely conditional branches */
TCGLabel *label_match = gen_new_label();
arm_gen_test_cc(cond, label_match);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
gen_set_label(label_match);
gen_goto_tb(s, 1, addr);
} else {
@@ -1505,7 +1483,7 @@ static void handle_sync(DisasContext *s, uint32_t insn,
* any pending interrupts immediately.
*/
reset_btype(s);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
return;
case 7: /* SB */
@@ -1517,7 +1495,7 @@ static void handle_sync(DisasContext *s, uint32_t insn,
* MB and end the TB instead.
*/
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
return;
default:
@@ -1720,7 +1698,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
TCGv_i32 tcg_syn, tcg_isread;
uint32_t syndrome;
- gen_a64_set_pc_im(s->pc - 4);
+ gen_a64_set_pc_im(s->pc_curr);
tmpptr = tcg_const_ptr(ri);
syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
tcg_syn = tcg_const_i32(syndrome);
@@ -1873,8 +1851,8 @@ static void disas_exc(DisasContext *s, uint32_t insn)
switch (op2_ll) {
case 1: /* SVC */
gen_ss_advance(s);
- gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
- default_exception_el(s));
+ gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
+ syn_aa64_svc(imm16), default_exception_el(s));
break;
case 2: /* HVC */
if (s->current_el == 0) {
@@ -1884,22 +1862,24 @@ static void disas_exc(DisasContext *s, uint32_t insn)
/* The pre HVC helper handles cases when HVC gets trapped
* as an undefined insn by runtime configuration.
*/
- gen_a64_set_pc_im(s->pc - 4);
+ gen_a64_set_pc_im(s->pc_curr);
gen_helper_pre_hvc(cpu_env);
gen_ss_advance(s);
- gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
+ gen_exception_insn(s, s->base.pc_next, EXCP_HVC,
+ syn_aa64_hvc(imm16), 2);
break;
case 3: /* SMC */
if (s->current_el == 0) {
unallocated_encoding(s);
break;
}
- gen_a64_set_pc_im(s->pc - 4);
+ gen_a64_set_pc_im(s->pc_curr);
tmp = tcg_const_i32(syn_aa64_smc(imm16));
gen_helper_pre_smc(cpu_env, tmp);
tcg_temp_free_i32(tmp);
gen_ss_advance(s);
- gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
+ gen_exception_insn(s, s->base.pc_next, EXCP_SMC,
+ syn_aa64_smc(imm16), 3);
break;
default:
unallocated_encoding(s);
@@ -1912,7 +1892,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
break;
}
/* BRK */
- gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16));
+ gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
break;
case 2:
if (op2_ll != 0) {
@@ -1936,7 +1916,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
break;
}
#endif
- gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
+ gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
} else {
unsupported_encoding(s, insn);
}
@@ -2029,7 +2009,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
gen_a64_set_pc(s, dst);
/* BLR also needs to load return address */
if (opc == 1) {
- tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
}
break;
@@ -2056,7 +2036,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
gen_a64_set_pc(s, dst);
/* BLRAA also needs to load return address */
if (opc == 9) {
- tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
}
break;
@@ -2615,7 +2595,7 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
tcg_rt = cpu_reg(s, rt);
- clean_addr = tcg_const_i64((s->pc - 4) + imm);
+ clean_addr = tcg_const_i64(s->pc_curr + imm);
if (is_vector) {
do_fp_ld(s, rt, clean_addr, size);
} else {
@@ -3594,7 +3574,7 @@ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
offset = sextract64(insn, 5, 19);
offset = offset << 2 | extract32(insn, 29, 2);
rd = extract32(insn, 0, 5);
- base = s->pc - 4;
+ base = s->pc_curr;
if (page) {
/* ADRP (page based) */
@@ -11533,7 +11513,7 @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
break;
default:
fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
- __func__, insn, fpopcode, s->pc);
+ __func__, insn, fpopcode, s->pc_curr);
g_assert_not_reached();
}
@@ -14044,9 +14024,10 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
uint32_t insn;
- insn = arm_ldl_code(env, s->pc, s->sctlr_b);
+ s->pc_curr = s->base.pc_next;
+ insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
s->insn = insn;
- s->pc += 4;
+ s->base.pc_next += 4;
s->fp_access_checked = false;
@@ -14077,7 +14058,8 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
if (s->btype != 0
&& s->guarded_page
&& !btype_destination_ok(insn, s->bt, s->btype)) {
- gen_exception_insn(s, 4, EXCP_UDEF, syn_btitrap(s->btype),
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_btitrap(s->btype),
default_exception_el(s));
return;
}
@@ -14143,7 +14125,6 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
int bound, core_mmu_idx;
dc->isar = &arm_cpu->isar;
- dc->pc = dc->base.pc_first;
dc->condjmp = 0;
dc->aarch64 = 1;
@@ -14194,7 +14175,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
dc->is_ldex = false;
- dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);
+ dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
/* Bound the number of insns to execute to those left on the page. */
bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
@@ -14216,7 +14197,7 @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- tcg_gen_insn_start(dc->pc, 0, 0);
+ tcg_gen_insn_start(dc->base.pc_next, 0, 0);
dc->insn_start = tcg_last_op();
}
@@ -14226,18 +14207,18 @@ static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
DisasContext *dc = container_of(dcbase, DisasContext, base);
if (bp->flags & BP_CPU) {
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
gen_helper_check_breakpoints(cpu_env);
/* End the TB early; it likely won't be executed */
dc->base.is_jmp = DISAS_TOO_MANY;
} else {
- gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+ gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
/* The address covered by the breakpoint must be
included in [tb->pc, tb->pc + tb->size) in order
to for it to be properly cleared -- thus we
increment the PC here so that the logic setting
tb->size below does the right thing. */
- dc->pc += 4;
+ dc->base.pc_next += 4;
dc->base.is_jmp = DISAS_NORETURN;
}
@@ -14261,14 +14242,12 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
* bits should be zero.
*/
assert(dc->base.num_insns == 1);
- gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
- default_exception_el(dc));
+ gen_swstep_exception(dc, 0, 0);
dc->base.is_jmp = DISAS_NORETURN;
} else {
disas_a64_insn(env, dc);
}
- dc->base.pc_next = dc->pc;
translator_loop_temp_check(&dc->base);
}
@@ -14284,7 +14263,7 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
*/
switch (dc->base.is_jmp) {
default:
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
/* fall through */
case DISAS_EXIT:
case DISAS_JUMP:
@@ -14301,11 +14280,11 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
switch (dc->base.is_jmp) {
case DISAS_NEXT:
case DISAS_TOO_MANY:
- gen_goto_tb(dc, 1, dc->pc);
+ gen_goto_tb(dc, 1, dc->base.pc_next);
break;
default:
case DISAS_UPDATE:
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
/* fall through */
case DISAS_EXIT:
tcg_gen_exit_tb(NULL, 0);
@@ -14317,11 +14296,11 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
case DISAS_SWI:
break;
case DISAS_WFE:
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
gen_helper_wfe(cpu_env);
break;
case DISAS_YIELD:
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
gen_helper_yield(cpu_env);
break;
case DISAS_WFI:
@@ -14331,7 +14310,7 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
*/
TCGv_i32 tmp = tcg_const_i32(4);
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
gen_helper_wfi(cpu_env, tmp);
tcg_temp_free_i32(tmp);
/* The helper doesn't necessarily throw an exception, but we
@@ -14342,9 +14321,6 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
}
}
}
-
- /* Functions above can change dc->pc, so re-align db->pc_next */
- dc->base.pc_next = dc->pc;
}
static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index 9ab40872d8..12ad8ac6ed 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -18,14 +18,12 @@
#ifndef TARGET_ARM_TRANSLATE_A64_H
#define TARGET_ARM_TRANSLATE_A64_H
-void unallocated_encoding(DisasContext *s);
-
#define unsupported_encoding(s, insn) \
do { \
qemu_log_mask(LOG_UNIMP, \
"%s:%d: unsupported instruction encoding 0x%08x " \
"at pc=%016" PRIx64 "\n", \
- __FILE__, __LINE__, insn, s->pc - 4); \
+ __FILE__, __LINE__, insn, s->pc_curr); \
unallocated_encoding(s); \
} while (0)
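
Note: the pattern running through the translate-a64.c and translate.c
hunks in this pull is that the per-insn field s->pc, which pointed past
the current instruction (hence the recurring "s->pc - 4" and
"s->pc - offset" arithmetic), is replaced by two unambiguous values:

    s->pc_curr        /* address of the instruction being decoded */
    s->base.pc_next   /* address of the following instruction     */

so exception and syndrome paths use pc_curr directly, while link-register
and next-PC paths use base.pc_next, with no offset bookkeeping.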
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index 092eb5ec53..3e8ea80493 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -96,10 +96,10 @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
if (s->fp_excp_el) {
if (arm_dc_feature(s, ARM_FEATURE_M)) {
- gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
s->fp_excp_el);
} else {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_fp_access_trap(1, 0xe, false),
s->fp_excp_el);
}
@@ -108,8 +108,7 @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
if (!s->vfp_enabled && !ignore_vfp_enabled) {
assert(!arm_dc_feature(s, ARM_FEATURE_M));
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
return false;
}
@@ -941,14 +940,8 @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
offset = -offset;
}
- if (s->thumb && a->rn == 15) {
- /* This is actually UNPREDICTABLE */
- addr = tcg_temp_new_i32();
- tcg_gen_movi_i32(addr, s->pc & ~2);
- } else {
- addr = load_reg(s, a->rn);
- }
- tcg_gen_addi_i32(addr, addr, offset);
+ /* For thumb, use of PC is UNPREDICTABLE. */
+ addr = add_reg_for_lit(s, a->rn, offset);
tmp = tcg_temp_new_i32();
if (a->l) {
gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
@@ -983,14 +976,8 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
offset = -offset;
}
- if (s->thumb && a->rn == 15) {
- /* This is actually UNPREDICTABLE */
- addr = tcg_temp_new_i32();
- tcg_gen_movi_i32(addr, s->pc & ~2);
- } else {
- addr = load_reg(s, a->rn);
- }
- tcg_gen_addi_i32(addr, addr, offset);
+ /* For thumb, use of PC is UNPREDICTABLE. */
+ addr = add_reg_for_lit(s, a->rn, offset);
tmp = tcg_temp_new_i64();
if (a->l) {
gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
@@ -1029,13 +1016,8 @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
return true;
}
- if (s->thumb && a->rn == 15) {
- /* This is actually UNPREDICTABLE */
- addr = tcg_temp_new_i32();
- tcg_gen_movi_i32(addr, s->pc & ~2);
- } else {
- addr = load_reg(s, a->rn);
- }
+ /* For thumb, use of PC is UNPREDICTABLE. */
+ addr = add_reg_for_lit(s, a->rn, 0);
if (a->p) {
/* pre-decrement */
tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
@@ -1112,13 +1094,8 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
return true;
}
- if (s->thumb && a->rn == 15) {
- /* This is actually UNPREDICTABLE */
- addr = tcg_temp_new_i32();
- tcg_gen_movi_i32(addr, s->pc & ~2);
- } else {
- addr = load_reg(s, a->rn);
- }
+ /* For thumb, use of PC is UNPREDICTABLE. */
+ addr = add_reg_for_lit(s, a->rn, 0);
if (a->p) {
/* pre-decrement */
tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 7853462b21..d948757131 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -196,17 +196,17 @@ static inline void store_cpu_offset(TCGv_i32 var, int offset)
#define store_cpu_field(var, name) \
store_cpu_offset(var, offsetof(CPUARMState, name))
+/* The architectural value of PC. */
+static uint32_t read_pc(DisasContext *s)
+{
+ return s->pc_curr + (s->thumb ? 4 : 8);
+}
+
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
if (reg == 15) {
- uint32_t addr;
- /* normally, since we updated PC, we need only to add one insn */
- if (s->thumb)
- addr = (long)s->pc + 2;
- else
- addr = (long)s->pc + 4;
- tcg_gen_movi_i32(var, addr);
+ tcg_gen_movi_i32(var, read_pc(s));
} else {
tcg_gen_mov_i32(var, cpu_R[reg]);
}
@@ -220,6 +220,23 @@ static inline TCGv_i32 load_reg(DisasContext *s, int reg)
return tmp;
}
+/*
+ * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
+ * This is used for load/store for which use of PC implies (literal),
+ * or ADD that implies ADR.
+ */
+static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ if (reg == 15) {
+ tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
+ } else {
+ tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
+ }
+ return tmp;
+}
+
/* Set a CPU register. The source must be a temporary and will be
marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
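
Note: a worked example of the two helpers above, with hypothetical
addresses. For an A32 instruction at 0x1000, read_pc() returns 0x1008
(for a Thumb instruction at 0x1002 it returns 0x1006), matching the
architectural two-instructions-ahead PC; add_reg_for_lit() then applies
the literal-access rule Align(PC, 4) + imm when reg == 15:

    /* A32 insn at 0x1000: read_pc() == 0x1008, already aligned   */
    TCGv_i32 a = add_reg_for_lit(s, 15, 8);  /* movi 0x1010       */
    /* Thumb insn at 0x1002: read_pc() == 0x1006, aligns to 0x1004 */
    TCGv_i32 b = add_reg_for_lit(s, 15, 8);  /* movi 0x100c       */
    TCGv_i32 c = add_reg_for_lit(s, 2, 8);   /* plain r2 + 8      */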
@@ -282,20 +299,6 @@ static void gen_exception_internal(int excp)
tcg_temp_free_i32(tcg_excp);
}
-static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
-{
- TCGv_i32 tcg_excp = tcg_const_i32(excp);
- TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
- TCGv_i32 tcg_el = tcg_const_i32(target_el);
-
- gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
- tcg_syn, tcg_el);
-
- tcg_temp_free_i32(tcg_el);
- tcg_temp_free_i32(tcg_syn);
- tcg_temp_free_i32(tcg_excp);
-}
-
static void gen_step_complete_exception(DisasContext *s)
{
/* We just completed step of an insn. Move from Active-not-pending
@@ -308,8 +311,7 @@ static void gen_step_complete_exception(DisasContext *s)
* of the exception, and our syndrome information is always correct.
*/
gen_ss_advance(s);
- gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
- default_exception_el(s));
+ gen_swstep_exception(s, 1, s->is_ldex);
s->base.is_jmp = DISAS_NORETURN;
}
@@ -374,34 +376,6 @@ static void gen_revsh(TCGv_i32 var)
tcg_gen_ext16s_i32(var, var);
}
-/* Return (b << 32) + a. Mark inputs as dead */
-static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
-{
- TCGv_i64 tmp64 = tcg_temp_new_i64();
-
- tcg_gen_extu_i32_i64(tmp64, b);
- tcg_temp_free_i32(b);
- tcg_gen_shli_i64(tmp64, tmp64, 32);
- tcg_gen_add_i64(a, tmp64, a);
-
- tcg_temp_free_i64(tmp64);
- return a;
-}
-
-/* Return (b << 32) - a. Mark inputs as dead. */
-static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
-{
- TCGv_i64 tmp64 = tcg_temp_new_i64();
-
- tcg_gen_extu_i32_i64(tmp64, b);
- tcg_temp_free_i32(b);
- tcg_gen_shli_i64(tmp64, tmp64, 32);
- tcg_gen_sub_i64(a, tmp64, a);
-
- tcg_temp_free_i64(tmp64);
- return a;
-}
-
/* 32x32->64 multiply. Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
@@ -442,11 +416,7 @@ static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv_i32 var)
{
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_shri_i32(tmp, var, 16);
- tcg_gen_shli_i32(var, var, 16);
- tcg_gen_or_i32(var, var, tmp);
- tcg_temp_free_i32(tmp);
+ tcg_gen_rotri_i32(var, var, 16);
}
/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
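
Note: gen_swap_half() reduces to a single rotate because exchanging the
two halfwords of a 32-bit value is a rotation by 16 in either direction:

    uint32_t swap_half(uint32_t x)
    {
        return (x >> 16) | (x << 16);   /* rotr(x, 16) == rotl(x, 16) */
    }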
@@ -603,14 +573,7 @@ static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
static void shifter_out_im(TCGv_i32 var, int shift)
{
- if (shift == 0) {
- tcg_gen_andi_i32(cpu_CF, var, 1);
- } else {
- tcg_gen_shri_i32(cpu_CF, var, shift);
- if (shift != 31) {
- tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
- }
- }
+ tcg_gen_extract_i32(cpu_CF, var, shift, 1);
}
/* Shift by immediate. Includes special handling for shift == 0. */
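
Note: the single tcg_gen_extract_i32(cpu_CF, var, shift, 1) covers all
three branches of the old shifter_out_im(), since the shifter carry-out
is just bit 'shift' of the operand:

    uint32_t shifter_carry_out(uint32_t var, int shift)
    {
        return (var >> shift) & 1;   /* valid for shift 0..31 */
    }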
@@ -1034,7 +997,7 @@ static inline void gen_blxns(DisasContext *s, int rm)
* We do however need to set the PC, because the blxns helper reads it.
* The blxns helper may throw an exception.
*/
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
gen_helper_v7m_blxns(cpu_env, var);
tcg_temp_free_i32(var);
s->base.is_jmp = DISAS_EXIT;
@@ -1212,7 +1175,7 @@ static inline void gen_hvc(DisasContext *s, int imm16)
* as an undefined insn by runtime configuration (ie before
* the insn really executes).
*/
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
gen_helper_pre_hvc(cpu_env);
/* Otherwise we will treat this as a real exception which
* happens after execution of the insn. (The distinction matters
@@ -1220,7 +1183,7 @@ static inline void gen_hvc(DisasContext *s, int imm16)
* for single stepping.)
*/
s->svc_imm = imm16;
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_HVC;
}
@@ -1231,47 +1194,54 @@ static inline void gen_smc(DisasContext *s)
*/
TCGv_i32 tmp;
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
tmp = tcg_const_i32(syn_aa32_smc());
gen_helper_pre_smc(cpu_env, tmp);
tcg_temp_free_i32(tmp);
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_SMC;
}
-static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
+static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
{
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - offset);
+ gen_set_pc_im(s, pc);
gen_exception_internal(excp);
s->base.is_jmp = DISAS_NORETURN;
}
-static void gen_exception_insn(DisasContext *s, int offset, int excp,
+static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
int syn, uint32_t target_el)
{
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - offset);
+ gen_set_pc_im(s, pc);
gen_exception(excp, syn, target_el);
s->base.is_jmp = DISAS_NORETURN;
}
-static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
+static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
{
TCGv_i32 tcg_syn;
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - offset);
+ gen_set_pc_im(s, s->pc_curr);
tcg_syn = tcg_const_i32(syn);
gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
tcg_temp_free_i32(tcg_syn);
s->base.is_jmp = DISAS_NORETURN;
}
+void unallocated_encoding(DisasContext *s)
+{
+ /* Unallocated and reserved encodings are uncategorized */
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
+}
+
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
- tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
+ tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
s->base.is_jmp = DISAS_EXIT;
}
@@ -1294,12 +1264,11 @@ static inline void gen_hlt(DisasContext *s, int imm)
s->current_el != 0 &&
#endif
(imm == (s->thumb ? 0x3c : 0xf000))) {
- gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
+ gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
return;
}
- gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
@@ -1777,8 +1746,7 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
if (insn & ARM_CP_RW_BIT) { /* TMRRC */
iwmmxt_load_reg(cpu_V0, wrd);
tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
- tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
+ tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
} else { /* TMCRR */
tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
iwmmxt_store_reg(cpu_V0, wrd);
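
Note: tcg_gen_extrh_i64_i32, used here and in the later hunks of this
commit, fetches the high word of a 64-bit value in one TCG op, instead of
a shift (which clobbers the 64-bit source) followed by a truncating move:

    uint32_t extrh(uint64_t x)
    {
        return (uint32_t)(x >> 32);
    }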
@@ -2823,8 +2791,7 @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn)
if (insn & ARM_CP_RW_BIT) { /* MRA */
iwmmxt_load_reg(cpu_V0, acc);
tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
- tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
+ tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
} else { /* MAR */
tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
@@ -2907,7 +2874,7 @@ static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
- ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+ ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
@@ -3175,7 +3142,8 @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
undef:
/* If we get here then some access check did not pass */
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_uncategorized(), exc_target);
return false;
}
@@ -3190,7 +3158,7 @@ static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
/* Sync state because msr_banked() can raise exceptions */
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
tcg_reg = load_reg(s, rn);
tcg_tgtmode = tcg_const_i32(tgtmode);
tcg_regno = tcg_const_i32(regno);
@@ -3212,7 +3180,7 @@ static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
/* Sync state because mrs_banked() can raise exceptions */
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
tcg_reg = tcg_temp_new_i32();
tcg_tgtmode = tcg_const_i32(tgtmode);
tcg_regno = tcg_const_i32(regno);
@@ -3277,17 +3245,17 @@ static void gen_nop_hint(DisasContext *s, int val)
*/
case 1: /* yield */
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_YIELD;
}
break;
case 3: /* wfi */
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_WFI;
break;
case 2: /* wfe */
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_WFE;
}
break;
@@ -3569,7 +3537,7 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
@@ -4840,7 +4808,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
@@ -6020,8 +5988,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
break;
case 2:
- tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_extrl_i64_i32(tmp, cpu_V0);
+ tcg_gen_extrh_i64_i32(tmp, cpu_V0);
break;
default: abort();
}
@@ -6035,8 +6002,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
break;
case 2:
tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
- tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_extrl_i64_i32(tmp, cpu_V0);
+ tcg_gen_extrh_i64_i32(tmp, cpu_V0);
break;
default: abort();
}
@@ -6968,7 +6934,7 @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
}
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
@@ -7091,7 +7057,7 @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
off_rm = vfp_reg_offset(0, rm);
}
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
@@ -7219,7 +7185,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
}
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
tmpptr = tcg_const_ptr(ri);
tcg_syn = tcg_const_i32(syndrome);
tcg_isread = tcg_const_i32(isread);
@@ -7238,7 +7204,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
if (isread) {
return 1;
}
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_WFI;
return 0;
default:
@@ -7269,9 +7235,8 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
tmp = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(tmp, tmp64);
store_reg(s, rt, tmp);
- tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(tmp, tmp64);
+ tcg_gen_extrh_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
store_reg(s, rt2, tmp);
} else {
@@ -7380,8 +7345,7 @@ static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
tcg_gen_extrl_i64_i32(tmp, val);
store_reg(s, rlow, tmp);
tmp = tcg_temp_new_i32();
- tcg_gen_shri_i64(val, val, 32);
- tcg_gen_extrl_i64_i32(tmp, val);
+ tcg_gen_extrh_i64_i32(tmp, val);
store_reg(s, rhigh, tmp);
}
@@ -7584,7 +7548,7 @@ static void gen_srs(DisasContext *s,
* For the UNPREDICTABLE cases we choose to UNDEF.
*/
if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
return;
}
@@ -7620,8 +7584,7 @@ static void gen_srs(DisasContext *s,
}
if (undef) {
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
return;
}
@@ -7629,7 +7592,7 @@ static void gen_srs(DisasContext *s,
tmp = tcg_const_i32(mode);
/* get_r13_banked() will raise an exception if called from System mode */
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
gen_helper_get_r13_banked(addr, cpu_env, tmp);
tcg_temp_free_i32(tmp);
switch (amode) {
@@ -7711,7 +7674,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
* UsageFault exception.
*/
if (arm_dc_feature(s, ARM_FEATURE_M)) {
- gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
default_exception_el(s));
return;
}
@@ -7802,7 +7765,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
* self-modifying code correctly and also to take
* any pending interrupts immediately.
*/
- gen_goto_tb(s, 0, s->pc & ~1);
+ gen_goto_tb(s, 0, s->base.pc_next);
return;
case 7: /* sb */
if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
@@ -7813,7 +7776,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
* for TCG; MB and end the TB instead.
*/
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
- gen_goto_tb(s, 0, s->pc & ~1);
+ gen_goto_tb(s, 0, s->base.pc_next);
return;
default:
goto illegal_op;
@@ -7868,16 +7831,14 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
/* branch link and change to thumb (blx <offset>) */
int32_t offset;
- val = (uint32_t)s->pc;
tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, val);
+ tcg_gen_movi_i32(tmp, s->base.pc_next);
store_reg(s, 14, tmp);
/* Sign-extend the 24-bit offset */
offset = (((int32_t)insn) << 8) >> 8;
+ val = read_pc(s);
/* offset * 4 + bit24 * 2 + (thumb bit) */
val += (offset << 2) | ((insn >> 23) & 2) | 1;
- /* pipeline offset */
- val += 4;
/* protected by ARCH(5); above, near the start of uncond block */
gen_bx_im(s, val);
return;
@@ -7965,8 +7926,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
/* CPSR = immediate */
val = insn & 0xff;
shift = ((insn >> 8) & 0xf) * 2;
- if (shift)
- val = (val >> shift) | (val << (32 - shift));
+ val = ror32(val, shift);
i = ((insn & (1 << 22)) != 0);
if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
i, val)) {
@@ -8056,7 +8016,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
/* branch link/exchange thumb (blx) */
tmp = load_reg(s, rm);
tmp2 = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp2, s->pc);
+ tcg_gen_movi_i32(tmp2, s->base.pc_next);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
break;
@@ -8101,7 +8061,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rn);
if (op1 & 2)
- gen_helper_double_saturate(tmp2, cpu_env, tmp2);
+ gen_helper_add_saturate(tmp2, cpu_env, tmp2, tmp2);
if (op1 & 1)
gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
else
@@ -8139,7 +8099,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
case 1:
/* bkpt */
ARCH(5);
- gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
+ gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm16, false));
break;
case 2:
/* Hypervisor call (v7) */
@@ -8229,9 +8189,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
/* immediate operand */
val = insn & 0xff;
shift = ((insn >> 8) & 0xf) * 2;
- if (shift) {
- val = (val >> shift) | (val << (32 - shift));
- }
+ val = ror32(val, shift);
tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, val);
if (logic_cc && shift) {
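
Note: ror32() (from include/qemu/bitops.h) replaces the open-coded
"if (shift)" guard because it already yields the identity for shift == 0.
A sketch of an equivalent UB-free rotate (the exact upstream formulation
may differ):

    static inline uint32_t my_ror32(uint32_t word, unsigned shift)
    {
        return (word >> (shift & 31)) | (word << (-shift & 31));
    }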
@@ -8755,19 +8713,16 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
shift = (insn >> 7) & 0x1f;
if (insn & (1 << 6)) {
/* pkhtb */
- if (shift == 0)
+ if (shift == 0) {
shift = 31;
+ }
tcg_gen_sari_i32(tmp2, tmp2, shift);
- tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
- tcg_gen_ext16u_i32(tmp2, tmp2);
+ tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
} else {
/* pkhbt */
- if (shift)
- tcg_gen_shli_i32(tmp2, tmp2, shift);
- tcg_gen_ext16u_i32(tmp, tmp);
- tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
}
- tcg_gen_or_i32(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00200020) == 0x00200000) {
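
Note: tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16) writes tmp2[15:0] into
tmp[15:0] while preserving tmp[31:16], which is exactly PKHTB ("top from
Rn, bottom from shifted Rm") in one operation; with the operands swapped
it is PKHBT. Equivalent plain C:

    uint32_t pkhtb(uint32_t top, uint32_t bottom)
    {
        return (top & 0xffff0000u) | (bottom & 0x0000ffffu);
    }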
@@ -8815,8 +8770,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
shift = (insn >> 10) & 3;
/* ??? In many cases it's not necessary to do a
rotate, a shift is sufficient. */
- if (shift != 0)
- tcg_gen_rotri_i32(tmp, tmp, shift * 8);
+ tcg_gen_rotri_i32(tmp, tmp, shift * 8);
op1 = (insn >> 20) & 7;
switch (op1) {
case 0: gen_sxtb16(tmp); break;
@@ -8869,23 +8823,27 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
(SMMUL, SMMLA, SMMLS) */
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rs);
- tmp64 = gen_muls_i64_i32(tmp, tmp2);
+ tcg_gen_muls2_i32(tmp2, tmp, tmp, tmp2);
if (rd != 15) {
- tmp = load_reg(s, rd);
+ tmp3 = load_reg(s, rd);
if (insn & (1 << 6)) {
- tmp64 = gen_subq_msw(tmp64, tmp);
+ tcg_gen_sub_i32(tmp, tmp, tmp3);
} else {
- tmp64 = gen_addq_msw(tmp64, tmp);
+ tcg_gen_add_i32(tmp, tmp, tmp3);
}
+ tcg_temp_free_i32(tmp3);
}
if (insn & (1 << 5)) {
- tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
+ /*
+ * Adding 0x80000000 to the 64-bit quantity
+ * means that we have carry in to the high
+ * word when the low word has the high bit set.
+ */
+ tcg_gen_shri_i32(tmp2, tmp2, 31);
+ tcg_gen_add_i32(tmp, tmp, tmp2);
}
- tcg_gen_shri_i64(tmp64, tmp64, 32);
- tmp = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(tmp, tmp64);
- tcg_temp_free_i64(tmp64);
+ tcg_temp_free_i32(tmp2);
store_reg(s, rn, tmp);
break;
case 0:
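
Note: the rounding comment added above is worth a worked case. SMMLAR
returns the high word of P + 0x80000000 for the 64-bit product P, and
adding 0x80000000 carries into the high word exactly when bit 31 of the
low word is set, so hi(P + 0x80000000) == hi(P) + (lo(P) >> 31):

    uint64_t p = 0x0000000180000000ULL;      /* low word has bit 31 set */
    uint32_t hi = (uint32_t)(p >> 32)
                + ((uint32_t)p >> 31);       /* 1 + 1 == 2 */
    /* check: (p + 0x80000000ULL) >> 32 == 2 */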
@@ -9153,10 +9111,8 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
} else {
/* store */
if (i == 15) {
- /* special case: r15 = PC + 8 */
- val = (long)s->pc + 4;
tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, val);
+ tcg_gen_movi_i32(tmp, read_pc(s));
} else if (user) {
tmp = tcg_temp_new_i32();
tmp2 = tcg_const_i32(i);
@@ -9222,15 +9178,13 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
int32_t offset;
/* branch (and link) */
- val = (int32_t)s->pc;
if (insn & (1 << 24)) {
tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, val);
+ tcg_gen_movi_i32(tmp, s->base.pc_next);
store_reg(s, 14, tmp);
}
offset = sextract32(insn << 2, 0, 26);
- val += offset + 4;
- gen_jmp(s, val);
+ gen_jmp(s, read_pc(s) + offset);
}
break;
case 0xc:
@@ -9248,24 +9202,23 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
break;
case 0xf:
/* swi */
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->svc_imm = extract32(insn, 0, 24);
s->base.is_jmp = DISAS_SWI;
break;
default:
illegal_op:
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
break;
}
}
}
-static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
+static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
{
- /* Return true if this is a 16 bit instruction. We must be precise
- * about this (matching the decode). We assume that s->pc still
- * points to the first 16 bits of the insn.
+ /*
+ * Return true if this is a 16 bit instruction. We must be precise
+ * about this (matching the decode).
*/
if ((insn >> 11) < 0x1d) {
/* Definitely a 16-bit instruction */
@@ -9285,7 +9238,7 @@ static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
return false;
}
- if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
+ if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
/* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
* is not on the next page; we merge this into a 32-bit
* insn.
@@ -9478,16 +9431,12 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
*/
bool wback = extract32(insn, 21, 1);
- if (rn == 15) {
- if (insn & (1 << 21)) {
- /* UNPREDICTABLE */
- goto illegal_op;
- }
- addr = tcg_temp_new_i32();
- tcg_gen_movi_i32(addr, s->pc & ~3);
- } else {
- addr = load_reg(s, rn);
+ if (rn == 15 && (insn & (1 << 21))) {
+ /* UNPREDICTABLE */
+ goto illegal_op;
}
+
+ addr = add_reg_for_lit(s, rn, 0);
offset = (insn & 0xff) * 4;
if ((insn & (1 << 23)) == 0) {
offset = -offset;
@@ -9588,12 +9537,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
tcg_temp_free_i32(addr);
} else if ((insn & (7 << 5)) == 0) {
/* Table Branch. */
- if (rn == 15) {
- addr = tcg_temp_new_i32();
- tcg_gen_movi_i32(addr, s->pc);
- } else {
- addr = load_reg(s, rn);
- }
+ addr = load_reg(s, rn);
tmp = load_reg(s, rm);
tcg_gen_add_i32(addr, addr, tmp);
if (insn & (1 << 4)) {
@@ -9609,7 +9553,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
}
tcg_temp_free_i32(addr);
tcg_gen_shli_i32(tmp, tmp, 1);
- tcg_gen_addi_i32(tmp, tmp, s->pc);
+ tcg_gen_addi_i32(tmp, tmp, read_pc(s));
store_reg(s, 15, tmp);
} else {
bool is_lasr = false;
@@ -9817,19 +9761,16 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
if (insn & (1 << 5)) {
/* pkhtb */
- if (shift == 0)
+ if (shift == 0) {
shift = 31;
+ }
tcg_gen_sari_i32(tmp2, tmp2, shift);
- tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
- tcg_gen_ext16u_i32(tmp2, tmp2);
+ tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
} else {
/* pkhbt */
- if (shift)
- tcg_gen_shli_i32(tmp2, tmp2, shift);
- tcg_gen_ext16u_i32(tmp, tmp);
- tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
}
- tcg_gen_or_i32(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else {
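
The PKHBT/PKHTB rewrite relies on the identity deposit(a, b, 0, 16) == (a & 0xffff0000) | (b & 0xffff), which lets one TCG op replace the mask/extend/OR sequence. A standalone check of that identity:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t deposit32(uint32_t a, uint32_t b, int pos, int len)
    {
        uint32_t mask = (((uint32_t)1 << len) - 1) << pos;
        return (a & ~mask) | ((b << pos) & mask);
    }

    int main(void)
    {
        uint32_t t = 0x12345678, t2 = 0xcafebabe;
        /* PKHTB keeps the top half of t and the bottom half of t2 */
        printf("%08x\n", deposit32(t, t2, 0, 16));   /* prints 1234babe */
        return 0;
    }
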
@@ -9910,8 +9851,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
shift = (insn >> 4) & 3;
/* ??? In many cases it's not necessary to do a
rotate; a shift is sufficient. */
- if (shift != 0)
- tcg_gen_rotri_i32(tmp, tmp, shift * 8);
+ tcg_gen_rotri_i32(tmp, tmp, shift * 8);
op = (insn >> 20) & 7;
switch (op) {
case 0: gen_sxth(tmp); break;
@@ -9958,7 +9898,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
if (op & 1)
- gen_helper_double_saturate(tmp, cpu_env, tmp);
+ gen_helper_add_saturate(tmp, cpu_env, tmp, tmp);
if (op & 2)
gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
else
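
The removed helper_double_saturate only ever computed a saturated 2*x, which is exactly a saturating add of a value to itself; hence the substitution of gen_helper_add_saturate(tmp, cpu_env, tmp, tmp) above. An illustrative 32-bit model of the operation (the real helper additionally sets the Q flag on saturation):

    #include <stdint.h>

    static int32_t add_saturate(int32_t a, int32_t b)
    {
        int64_t r = (int64_t)a + b;
        if (r > INT32_MAX) {
            r = INT32_MAX;      /* the real helper would also set QF here */
        } else if (r < INT32_MIN) {
            r = INT32_MIN;
        }
        return (int32_t)r;
    }
    /* double_saturate(x) == add_saturate(x, x) */
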
@@ -10129,22 +10069,26 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
}
break;
case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
- tmp64 = gen_muls_i64_i32(tmp, tmp2);
+ tcg_gen_muls2_i32(tmp2, tmp, tmp, tmp2);
if (rs != 15) {
- tmp = load_reg(s, rs);
+ tmp3 = load_reg(s, rs);
if (insn & (1 << 20)) {
- tmp64 = gen_addq_msw(tmp64, tmp);
+ tcg_gen_add_i32(tmp, tmp, tmp3);
} else {
- tmp64 = gen_subq_msw(tmp64, tmp);
+ tcg_gen_sub_i32(tmp, tmp, tmp3);
}
+ tcg_temp_free_i32(tmp3);
}
if (insn & (1 << 4)) {
- tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
+ /*
+ * Adding 0x80000000 to the 64-bit quantity
+ * means that we have carry in to the high
+ * word when the low word has the high bit set.
+ */
+ tcg_gen_shri_i32(tmp2, tmp2, 31);
+ tcg_gen_add_i32(tmp, tmp, tmp2);
}
- tcg_gen_shri_i64(tmp64, tmp64, 32);
- tmp = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(tmp, tmp64);
- tcg_temp_free_i64(tmp64);
+ tcg_temp_free_i32(tmp2);
break;
case 7: /* Unsigned sum of absolute differences. */
gen_helper_usad8(tmp, tmp, tmp2);
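
The SMMLAR/SMMLSR rounding trick above is worth spelling out: adding 0x80000000 to a 64-bit value can only propagate into the high word when bit 31 of the low word is set, so the 64-bit add collapses to hi += lo >> 31 and the whole operation stays in 32-bit TCG ops. A quick standalone check of the equivalence:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t hi = 0xdeadbeef, lo = 0x80000001;
        uint64_t x = ((uint64_t)hi << 32) | lo;

        uint32_t via64 = (uint32_t)((x + 0x80000000u) >> 32);
        uint32_t via32 = hi + (lo >> 31);

        printf("%08x %08x\n", via64, via32);   /* both print deadbef0 */
        return 0;
    }
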
@@ -10288,7 +10232,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
}
/* All other insns: NOCP */
- gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
default_exception_el(s));
break;
}
@@ -10339,10 +10283,10 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
if (insn & (1 << 14)) {
/* Branch and link. */
- tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
+ tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
}
- offset += s->pc;
+ offset += read_pc(s);
if (insn & (1 << 12)) {
/* b/bl */
gen_jmp(s, offset);
@@ -10462,7 +10406,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
* and also to take any pending interrupts
* immediately.
*/
- gen_goto_tb(s, 0, s->pc & ~1);
+ gen_goto_tb(s, 0, s->base.pc_next);
break;
case 7: /* sb */
if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
@@ -10473,7 +10417,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
* for TCG; MB and end the TB instead.
*/
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
- gen_goto_tb(s, 0, s->pc & ~1);
+ gen_goto_tb(s, 0, s->base.pc_next);
break;
default:
goto illegal_op;
@@ -10583,7 +10527,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
offset |= (insn & (1 << 11)) << 8;
/* jump to the offset */
- gen_jmp(s, s->pc + offset);
+ gen_jmp(s, read_pc(s) + offset);
}
} else {
/*
@@ -10638,11 +10582,10 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
case 7:
goto illegal_op;
default: /* Saturate. */
- if (shift) {
- if (op & 1)
- tcg_gen_sari_i32(tmp, tmp, shift);
- else
- tcg_gen_shli_i32(tmp, tmp, shift);
+ if (op & 1) {
+ tcg_gen_sari_i32(tmp, tmp, shift);
+ } else {
+ tcg_gen_shli_i32(tmp, tmp, shift);
}
tmp2 = tcg_const_i32(imm);
if (op & 4) {
@@ -10693,27 +10636,15 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
store_reg(s, rd, tmp);
} else {
/* Add/sub 12-bit immediate. */
- if (rn == 15) {
- offset = s->pc & ~(uint32_t)3;
- if (insn & (1 << 23))
- offset -= imm;
- else
- offset += imm;
- tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, offset);
- store_reg(s, rd, tmp);
+ if (insn & (1 << 23)) {
+ imm = -imm;
+ }
+ tmp = add_reg_for_lit(s, rn, imm);
+ if (rn == 13 && rd == 13) {
+ /* ADD SP, SP, imm or SUB SP, SP, imm */
+ store_sp_checked(s, tmp);
} else {
- tmp = load_reg(s, rn);
- if (insn & (1 << 23))
- tcg_gen_subi_i32(tmp, tmp, imm);
- else
- tcg_gen_addi_i32(tmp, tmp, imm);
- if (rn == 13 && rd == 13) {
- /* ADD SP, SP, imm or SUB SP, SP, imm */
- store_sp_checked(s, tmp);
- } else {
- store_reg(s, rd, tmp);
- }
+ store_reg(s, rd, tmp);
}
}
}
@@ -10827,61 +10758,51 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
}
}
memidx = get_mem_index(s);
- if (rn == 15) {
- addr = tcg_temp_new_i32();
- /* PC relative. */
- /* s->pc has already been incremented by 4. */
- imm = s->pc & 0xfffffffc;
- if (insn & (1 << 23))
- imm += insn & 0xfff;
- else
- imm -= insn & 0xfff;
- tcg_gen_movi_i32(addr, imm);
+ imm = insn & 0xfff;
+ if (insn & (1 << 23)) {
+ /* PC relative or Positive offset. */
+ addr = add_reg_for_lit(s, rn, imm);
+ } else if (rn == 15) {
+ /* PC relative with negative offset. */
+ addr = add_reg_for_lit(s, rn, -imm);
} else {
addr = load_reg(s, rn);
- if (insn & (1 << 23)) {
- /* Positive offset. */
- imm = insn & 0xfff;
- tcg_gen_addi_i32(addr, addr, imm);
- } else {
- imm = insn & 0xff;
- switch ((insn >> 8) & 0xf) {
- case 0x0: /* Shifted Register. */
- shift = (insn >> 4) & 0xf;
- if (shift > 3) {
- tcg_temp_free_i32(addr);
- goto illegal_op;
- }
- tmp = load_reg(s, rm);
- if (shift)
- tcg_gen_shli_i32(tmp, tmp, shift);
- tcg_gen_add_i32(addr, addr, tmp);
- tcg_temp_free_i32(tmp);
- break;
- case 0xc: /* Negative offset. */
- tcg_gen_addi_i32(addr, addr, -imm);
- break;
- case 0xe: /* User privilege. */
- tcg_gen_addi_i32(addr, addr, imm);
- memidx = get_a32_user_mem_index(s);
- break;
- case 0x9: /* Post-decrement. */
- imm = -imm;
- /* Fall through. */
- case 0xb: /* Post-increment. */
- postinc = 1;
- writeback = 1;
- break;
- case 0xd: /* Pre-decrement. */
- imm = -imm;
- /* Fall through. */
- case 0xf: /* Pre-increment. */
- writeback = 1;
- break;
- default:
+ imm = insn & 0xff;
+ switch ((insn >> 8) & 0xf) {
+ case 0x0: /* Shifted Register. */
+ shift = (insn >> 4) & 0xf;
+ if (shift > 3) {
tcg_temp_free_i32(addr);
goto illegal_op;
}
+ tmp = load_reg(s, rm);
+ tcg_gen_shli_i32(tmp, tmp, shift);
+ tcg_gen_add_i32(addr, addr, tmp);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0xc: /* Negative offset. */
+ tcg_gen_addi_i32(addr, addr, -imm);
+ break;
+ case 0xe: /* User privilege. */
+ tcg_gen_addi_i32(addr, addr, imm);
+ memidx = get_a32_user_mem_index(s);
+ break;
+ case 0x9: /* Post-decrement. */
+ imm = -imm;
+ /* Fall through. */
+ case 0xb: /* Post-increment. */
+ postinc = 1;
+ writeback = 1;
+ break;
+ case 0xd: /* Pre-decrement. */
+ imm = -imm;
+ /* Fall through. */
+ case 0xf: /* Pre-increment. */
+ writeback = 1;
+ break;
+ default:
+ tcg_temp_free_i32(addr);
+ goto illegal_op;
}
}
@@ -10972,8 +10893,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
}
return;
illegal_op:
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
}
static void disas_thumb_insn(DisasContext *s, uint32_t insn)
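
unallocated_encoding() replaces the repeated gen_exception_insn(..., EXCP_UDEF, syn_uncategorized(), ...) boilerplate at the illegal_op labels. Its body is plausibly just that call folded into one place, along these lines (a sketch consistent with the gen_exception_insn signature used elsewhere in this series):

    void unallocated_encoding(DisasContext *s)
    {
        /* unallocated and reserved encodings are UNDEFINED */
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
    }
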
@@ -11077,10 +10997,7 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
if (insn & (1 << 11)) {
rd = (insn >> 8) & 7;
/* load pc-relative. Bit 1 of PC is ignored. */
- val = s->pc + 2 + ((insn & 0xff) * 4);
- val &= ~(uint32_t)2;
- addr = tcg_temp_new_i32();
- tcg_gen_movi_i32(addr, val);
+ addr = add_reg_for_lit(s, 15, (insn & 0xff) * 4);
tmp = tcg_temp_new_i32();
gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
rd | ISSIs16Bit);
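
The Thumb LDR (literal) case folds into add_reg_for_lit as well; the address arithmetic the generated code performs is, in standalone form (illustrative only):

    #include <stdint.h>

    static uint32_t thumb_ldr_literal_addr(uint32_t insn_addr, uint32_t insn)
    {
        uint32_t pc = (insn_addr + 4) & ~(uint32_t)3;   /* PC read word-aligned */
        return pc + (insn & 0xff) * 4;                  /* imm8, word-scaled */
    }
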
@@ -11157,7 +11074,7 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
/* BLX/BX */
tmp = load_reg(s, rm);
if (link) {
- val = (uint32_t)s->pc | 1;
+ val = (uint32_t)s->base.pc_next | 1;
tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, val);
store_reg(s, 14, tmp2);
@@ -11458,16 +11375,8 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
* - Add PC/SP (immediate)
*/
rd = (insn >> 8) & 7;
- if (insn & (1 << 11)) {
- /* SP */
- tmp = load_reg(s, 13);
- } else {
- /* PC. bit 1 is ignored. */
- tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
- }
val = (insn & 0xff) * 4;
- tcg_gen_addi_i32(tmp, tmp, val);
+ tmp = add_reg_for_lit(s, insn & (1 << 11) ? 13 : 15, val);
store_reg(s, rd, tmp);
break;
@@ -11584,9 +11493,7 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
tcg_temp_free_i32(tmp);
offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
- val = (uint32_t)s->pc + 2;
- val += offset;
- gen_jmp(s, val);
+ gen_jmp(s, read_pc(s) + offset);
break;
case 15: /* IT, nop-hint. */
@@ -11611,7 +11518,7 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
{
int imm8 = extract32(insn, 0, 8);
ARCH(5);
- gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
+ gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm8, true));
break;
}
@@ -11741,7 +11648,7 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
if (cond == 0xf) {
/* swi */
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->svc_imm = extract32(insn, 0, 8);
s->base.is_jmp = DISAS_SWI;
break;
@@ -11750,7 +11657,7 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
arm_skip_unless(s, cond);
/* jump to the offset */
- val = (uint32_t)s->pc + 2;
+ val = read_pc(s);
offset = ((int32_t)insn << 24) >> 24;
val += offset << 1;
gen_jmp(s, val);
@@ -11770,15 +11677,15 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
tmp2 = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp2, s->pc | 1);
+ tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
break;
}
/* unconditional branch */
- val = (uint32_t)s->pc;
+ val = read_pc(s);
offset = ((int32_t)insn << 21) >> 21;
- val += (offset << 1) + 2;
+ val += offset << 1;
gen_jmp(s, val);
break;
@@ -11795,36 +11702,35 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
tcg_gen_addi_i32(tmp, tmp, offset);
tmp2 = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp2, s->pc | 1);
+ tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
} else {
/* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
uint32_t uoffset = ((int32_t)insn << 21) >> 9;
- tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
+ tcg_gen_movi_i32(cpu_R[14], read_pc(s) + uoffset);
}
break;
}
return;
illegal_op:
undef:
- gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
}
static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
- /* Return true if the insn at dc->pc might cross a page boundary.
+ /* Return true if the insn at dc->base.pc_next might cross a page boundary.
* (False positives are OK, false negatives are not.)
* We know this is a Thumb insn, and our caller ensures we are
- * only called if dc->pc is less than 4 bytes from the page
+ * only called if dc->base.pc_next is less than 4 bytes from the page
* boundary, so we cross the page if the first 16 bits indicate
* that this is a 32 bit insn.
*/
- uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
+ uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
- return !thumb_insn_is_16bit(s, insn);
+ return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
}
static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
@@ -11836,7 +11742,6 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
uint32_t condexec, core_mmu_idx;
dc->isar = &cpu->isar;
- dc->pc = dc->base.pc_first;
dc->condjmp = 0;
dc->aarch64 = 0;
@@ -11897,7 +11802,9 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
dc->is_ldex = false;
- dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
+ }
dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
@@ -11966,7 +11873,7 @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- tcg_gen_insn_start(dc->pc,
+ tcg_gen_insn_start(dc->base.pc_next,
(dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
0);
dc->insn_start = tcg_last_op();
@@ -11979,12 +11886,12 @@ static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
if (bp->flags & BP_CPU) {
gen_set_condexec(dc);
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
gen_helper_check_breakpoints(cpu_env);
/* End the TB early; it's likely not going to be executed */
dc->base.is_jmp = DISAS_TOO_MANY;
} else {
- gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+ gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
/* The address covered by the breakpoint must be
included in [tb->pc, tb->pc + tb->size) in order
for it to be properly cleared -- thus we
@@ -11992,7 +11899,7 @@ static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
tb->size below does the right thing. */
/* TODO: Advance PC by correct instruction length to
* avoid disassembler error messages */
- dc->pc += 2;
+ dc->base.pc_next += 2;
dc->base.is_jmp = DISAS_NORETURN;
}
@@ -12003,7 +11910,7 @@ static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
/* Intercept jump to the magic kernel page. */
- if (dc->pc >= 0xffff0000) {
+ if (dc->base.pc_next >= 0xffff0000) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
gen_exception_internal(EXCP_KERNEL_TRAP);
@@ -12024,8 +11931,7 @@ static bool arm_pre_translate_insn(DisasContext *dc)
* bits should be zero.
*/
assert(dc->base.num_insns == 1);
- gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
- default_exception_el(dc));
+ gen_swstep_exception(dc, 0, 0);
dc->base.is_jmp = DISAS_NORETURN;
return true;
}
@@ -12039,7 +11945,6 @@ static void arm_post_translate_insn(DisasContext *dc)
gen_set_label(dc->condlabel);
dc->condjmp = 0;
}
- dc->base.pc_next = dc->pc;
translator_loop_temp_check(&dc->base);
}
@@ -12053,9 +11958,10 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
return;
}
- insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
+ dc->pc_curr = dc->base.pc_next;
+ insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
dc->insn = insn;
- dc->pc += 4;
+ dc->base.pc_next += 4;
disas_arm_insn(dc, insn);
arm_post_translate_insn(dc);
@@ -12121,14 +12027,15 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
return;
}
- insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
- is_16bit = thumb_insn_is_16bit(dc, insn);
- dc->pc += 2;
+ dc->pc_curr = dc->base.pc_next;
+ insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
+ is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
+ dc->base.pc_next += 2;
if (!is_16bit) {
- uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
+ uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
insn = insn << 16 | insn2;
- dc->pc += 2;
+ dc->base.pc_next += 2;
}
dc->insn = insn;
@@ -12176,8 +12083,8 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
* but isn't very efficient).
*/
if (dc->base.is_jmp == DISAS_NEXT
- && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
- || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
+ && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
+ || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
&& insn_crosses_page(env, dc)))) {
dc->base.is_jmp = DISAS_TOO_MANY;
}
@@ -12222,7 +12129,7 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
case DISAS_NEXT:
case DISAS_TOO_MANY:
case DISAS_UPDATE:
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
/* fall through */
default:
/* FIXME: Single stepping a WFI insn will not halt the CPU. */
@@ -12243,13 +12150,13 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
switch(dc->base.is_jmp) {
case DISAS_NEXT:
case DISAS_TOO_MANY:
- gen_goto_tb(dc, 1, dc->pc);
+ gen_goto_tb(dc, 1, dc->base.pc_next);
break;
case DISAS_JUMP:
gen_goto_ptr();
break;
case DISAS_UPDATE:
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
/* fall through */
default:
/* indicate that the hash table must be used to find the next TB */
@@ -12295,15 +12202,12 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
gen_set_label(dc->condlabel);
gen_set_condexec(dc);
if (unlikely(is_singlestepping(dc))) {
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
gen_singlestep_exception(dc);
} else {
- gen_goto_tb(dc, 1, dc->pc);
+ gen_goto_tb(dc, 1, dc->base.pc_next);
}
}
-
- /* Functions above can change dc->pc, so re-align db->pc_next */
- dc->base.pc_next = dc->pc;
}
static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index a20f6e2056..92ef790be9 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -2,6 +2,7 @@
#define TARGET_ARM_TRANSLATE_H
#include "exec/translator.h"
+#include "internals.h"
/* internal defines */
@@ -9,7 +10,8 @@ typedef struct DisasContext {
DisasContextBase base;
const ARMISARegisters *isar;
- target_ulong pc;
+ /* The address of the current instruction being translated. */
+ target_ulong pc_curr;
target_ulong page_start;
uint32_t insn;
/* Nonzero if this instruction has been conditionally skipped. */
@@ -49,6 +51,8 @@ typedef struct DisasContext {
uint32_t svc_imm;
int aarch64;
int current_el;
+ /* Debug target exception level for single-step exceptions */
+ int debug_target_el;
GHashTable *cp_regs;
uint64_t features; /* CPU features bits */
/* Because unallocated encodings generate different exception syndrome
@@ -69,8 +73,6 @@ typedef struct DisasContext {
* ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
*/
bool is_ldex;
- /* True if a single-step exception will be taken to the current EL */
- bool ss_same_el;
/* True if v8.3-PAuth is active. */
bool pauth_active;
/* True with v8.5-BTI and SCTLR_ELx.BT* set. */
@@ -97,6 +99,8 @@ typedef struct DisasCompare {
bool value_global;
} DisasCompare;
+void unallocated_encoding(DisasContext *s);
+
/* Share the TCG temporaries common between 32 and 64 bit modes. */
extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
extern TCGv_i64 cpu_exclusive_addr;
@@ -232,6 +236,35 @@ static inline void gen_ss_advance(DisasContext *s)
}
}
+static inline void gen_exception(int excp, uint32_t syndrome,
+ uint32_t target_el)
+{
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
+ TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
+ TCGv_i32 tcg_el = tcg_const_i32(target_el);
+
+ gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
+ tcg_syn, tcg_el);
+
+ tcg_temp_free_i32(tcg_el);
+ tcg_temp_free_i32(tcg_syn);
+ tcg_temp_free_i32(tcg_excp);
+}
+
+/* Generate an architectural singlestep exception */
+static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
+{
+ bool same_el = (s->debug_target_el == s->current_el);
+
+ /*
+ * If singlestep is targeting a lower EL than the current one,
+ * then s->ss_active must be false and we can never get here.
+ */
+ assert(s->debug_target_el >= s->current_el);
+
+ gen_exception(EXCP_UDEF, syn_swstep(same_el, isv, ex), s->debug_target_el);
+}
+
/*
* Given a VFP floating point constant encoded into an 8 bit immediate in an
* instruction, expand it to the actual constant value of the specified