Diffstat (limited to 'target')
-rw-r--r--  target/arm/helper.c        | 49
-rw-r--r--  target/arm/helper.h        |  1
-rw-r--r--  target/arm/kvm.c           | 14
-rw-r--r--  target/arm/kvm32.c         | 15
-rw-r--r--  target/arm/kvm64.c         | 15
-rw-r--r--  target/arm/kvm_arm.h       |  3
-rw-r--r--  target/arm/translate-a64.c | 11
-rw-r--r--  target/arm/translate.c     | 14
8 files changed, 98 insertions(+), 24 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index f91e5d5345..b61ee73d18 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -11780,7 +11780,40 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
/* Definitely a real MMU, not an MPU */
if (regime_translation_disabled(env, mmu_idx)) {
- /* MMU disabled. */
+ /*
+ * MMU disabled. S1 addresses within aa64 translation regimes are
+ * still checked for bounds -- see AArch64.TranslateAddressS1Off.
+ */
+ if (mmu_idx != ARMMMUIdx_Stage2) {
+ int r_el = regime_el(env, mmu_idx);
+ if (arm_el_is_aa64(env, r_el)) {
+ int pamax = arm_pamax(env_archcpu(env));
+ uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
+ int addrtop, tbi;
+
+ tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
+ if (access_type == MMU_INST_FETCH) {
+ tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
+ }
+ tbi = (tbi >> extract64(address, 55, 1)) & 1;
+ addrtop = (tbi ? 55 : 63);
+
+ if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
+ fi->type = ARMFault_AddressSize;
+ fi->level = 0;
+ fi->stage2 = false;
+ return 1;
+ }
+
+ /*
+ * When TBI is disabled, we've just validated that all of the
+ * bits above PAMax are zero, so logically we only need to
+ * clear the top byte for TBI. But it's clearer to follow
+ * the pseudocode set of addrdesc.paddress.
+ */
+ address = extract64(address, 0, 52);
+ }
+ }
*phys_ptr = address;
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
*page_size = TARGET_PAGE_SIZE;
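
As a reference for the check the hunk above introduces, here is a standalone C sketch (not QEMU code; extract64() is re-implemented locally and the fault bookkeeping is omitted) of the MMU-off bounds test: with TBI in effect only bits [55:PAMax] must be zero, otherwise bits [63:PAMax] must be.

    #include <stdbool.h>
    #include <stdint.h>

    /* Local stand-in for QEMU's extract64(). */
    static inline uint64_t sketch_extract64(uint64_t value, int start, int length)
    {
        return (value >> start) & (~0ULL >> (64 - length));
    }

    /* True if the MMU-off address would raise an address-size fault. */
    static bool mmu_off_addr_faults(uint64_t address, int pamax, bool tbi)
    {
        int addrtop = tbi ? 55 : 63;

        return sketch_extract64(address, pamax, addrtop - pamax + 1) != 0;
    }
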
@@ -12468,6 +12501,18 @@ void arm_rebuild_hflags(CPUARMState *env)
env->hflags = rebuild_hflags_internal(env);
}
+/*
+ * If we have triggered a EL state change we can't rely on the
+ * translator having passed it to us, we need to recompute.
+ */
+void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
+{
+ int el = arm_current_el(env);
+ int fp_el = fp_exception_el(env, el);
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+ env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
+}
+
void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
int fp_el = fp_exception_el(env, el);
@@ -12478,7 +12523,7 @@ void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
/*
* If we have triggered a EL state change we can't rely on the
- * translator having passed it too us, we need to recompute.
+ * translator having passed it to us, we need to recompute.
*/
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
diff --git a/target/arm/helper.h b/target/arm/helper.h
index 72eb9e6a1a..f37b8670a5 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -90,6 +90,7 @@ DEF_HELPER_4(msr_banked, void, env, i32, i32, i32)
DEF_HELPER_2(get_user_reg, i32, env, i32)
DEF_HELPER_3(set_user_reg, void, env, i32, i32)
+DEF_HELPER_FLAGS_1(rebuild_hflags_m32_newel, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(rebuild_hflags_m32, TCG_CALL_NO_RWG, void, env, int)
DEF_HELPER_FLAGS_1(rebuild_hflags_a32_newel, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(rebuild_hflags_a32, TCG_CALL_NO_RWG, void, env, int)
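
For readers less familiar with the helper macros, roughly what the new one-line declaration provides (a simplified sketch; the real expansion lives in the generic helper headers):

    /* The C-side prototype that the translator's generated call targets: */
    void helper_rebuild_hflags_m32_newel(CPUARMState *env);

    /*
     * ...plus a matching emitter, gen_helper_rebuild_hflags_m32_newel(cpu_env),
     * used from translate.c below. TCG_CALL_NO_RWG declares that the helper
     * neither reads nor writes TCG globals, so TCG does not have to spill and
     * reload global values around the call.
     */
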
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 85860e6f95..390077c518 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -874,15 +874,17 @@ int kvm_arch_irqchip_create(KVMState *s)
int kvm_arm_vgic_probe(void)
{
+ int val = 0;
+
if (kvm_create_device(kvm_state,
KVM_DEV_TYPE_ARM_VGIC_V3, true) == 0) {
- return 3;
- } else if (kvm_create_device(kvm_state,
- KVM_DEV_TYPE_ARM_VGIC_V2, true) == 0) {
- return 2;
- } else {
- return 0;
+ val |= KVM_ARM_VGIC_V3;
+ }
+ if (kvm_create_device(kvm_state,
+ KVM_DEV_TYPE_ARM_VGIC_V2, true) == 0) {
+ val |= KVM_ARM_VGIC_V2;
}
+ return val;
}
int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level)
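
A hypothetical caller sketch (variable names are illustrative) showing how the probe's new bitmask return, built from the KVM_ARM_VGIC_V2/V3 flags added in kvm_arm.h below, lets both in-kernel GIC versions be reported at once instead of only the newest one:

    int vgic = kvm_arm_vgic_probe();

    if (vgic & KVM_ARM_VGIC_V3) {
        /* An in-kernel GICv3 can be instantiated. */
    }
    if (vgic & KVM_ARM_VGIC_V2) {
        /* An in-kernel GICv2 can be instantiated. */
    }
    if (!vgic) {
        /* No in-kernel VGIC support at all. */
    }
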
diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c
index f703c4fcad..f271181ab8 100644
--- a/target/arm/kvm32.c
+++ b/target/arm/kvm32.c
@@ -409,17 +409,22 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
- ret = kvm_put_vcpu_events(cpu);
- if (ret) {
- return ret;
- }
-
write_cpustate_to_list(cpu, true);
if (!write_list_to_kvmstate(cpu, level)) {
return EINVAL;
}
+ /*
+ * Setting VCPU events should be triggered after syncing the registers
+ * to avoid overwriting potential changes made by KVM upon calling
+ * KVM_SET_VCPU_EVENTS ioctl
+ */
+ ret = kvm_put_vcpu_events(cpu);
+ if (ret) {
+ return ret;
+ }
+
kvm_arm_sync_mpstate_to_kvm(cpu);
return ret;
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index 93ba1448da..be5b31c2b0 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -1094,17 +1094,22 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
- ret = kvm_put_vcpu_events(cpu);
- if (ret) {
- return ret;
- }
-
write_cpustate_to_list(cpu, true);
if (!write_list_to_kvmstate(cpu, level)) {
return -EINVAL;
}
+ /*
+ * Setting VCPU events should be triggered after syncing the registers
+ * to avoid overwriting potential changes made by KVM upon calling
+ * KVM_SET_VCPU_EVENTS ioctl
+ */
+ ret = kvm_put_vcpu_events(cpu);
+ if (ret) {
+ return ret;
+ }
+
kvm_arm_sync_mpstate_to_kvm(cpu);
return ret;
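
The same reordering is made in kvm32.c above and kvm64.c here. A condensed sketch of the resulting kvm_arch_put_registers() sequence (error handling elided, not a literal excerpt):

    write_cpustate_to_list(cpu, true);   /* QEMU cpreg values -> list           */
    write_list_to_kvmstate(cpu, level);  /* list -> kernel via KVM_SET_ONE_REG  */
    kvm_put_vcpu_events(cpu);            /* KVM_SET_VCPU_EVENTS goes last so the
                                          * register writes above cannot clobber
                                          * state the kernel updates while
                                          * handling this ioctl                 */
    kvm_arm_sync_mpstate_to_kvm(cpu);
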
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index ae9e075d75..48bf5e16d5 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -15,6 +15,9 @@
#include "exec/memory.h"
#include "qemu/error-report.h"
+#define KVM_ARM_VGIC_V2 (1 << 0)
+#define KVM_ARM_VGIC_V3 (1 << 1)
+
/**
* kvm_arm_vcpu_init:
* @cs: CPUState
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index fefe8af7f5..8fffb52203 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -228,7 +228,18 @@ static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
static TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
TCGv_i64 clean = new_tmp_a64(s);
+ /*
+ * In order to get the correct value in the FAR_ELx register,
+ * we must present the memory subsystem with the "dirty" address
+ * including the TBI. In system mode we can make this work via
+ * the TLB, dropping the TBI during translation. But for user-only
+ * mode we don't have that option, and must remove the top byte now.
+ */
+#ifdef CONFIG_USER_ONLY
gen_top_byte_ignore(s, clean, addr, s->tbid);
+#else
+ tcg_gen_mov_i64(clean, addr);
+#endif
return clean;
}
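
For reference, a standalone sketch (not QEMU code) of what the cleaning performed by gen_top_byte_ignore() amounts to in the common two-VA-range case when TBI applies: bits [63:56] are replaced by a sign extension of bit 55. The user-only path above still performs this, while the system-mode path now passes the dirty address through so FAR_ELx can report it.

    #include <stdint.h>

    /* Replace bits [63:56] with the sign extension of bit 55. */
    static uint64_t tbi_clean(uint64_t dirty_addr)
    {
        uint64_t low56 = dirty_addr & ((1ULL << 56) - 1);

        return (dirty_addr & (1ULL << 55)) ? (low56 | ~((1ULL << 56) - 1))
                                           : low56;
    }
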
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 6259064ea7..9f9f4e19e0 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -7296,7 +7296,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
/*
- * A write to any coprocessor regiser that ends a TB
+ * A write to any coprocessor register that ends a TB
* must rebuild the hflags for the next TB.
*/
TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
@@ -8551,7 +8551,7 @@ static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
{
- TCGv_i32 addr, reg, el;
+ TCGv_i32 addr, reg;
if (!arm_dc_feature(s, ARM_FEATURE_M)) {
return false;
@@ -8561,9 +8561,8 @@ static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
gen_helper_v7m_msr(cpu_env, addr, reg);
tcg_temp_free_i32(addr);
tcg_temp_free_i32(reg);
- el = tcg_const_i32(s->current_el);
- gen_helper_rebuild_hflags_m32(cpu_env, el);
- tcg_temp_free_i32(el);
+ /* If we wrote to CONTROL, the EL might have changed */
+ gen_helper_rebuild_hflags_m32_newel(cpu_env);
gen_lookup_tb(s);
return true;
}
@@ -10590,7 +10589,7 @@ static bool trans_CPS(DisasContext *s, arg_CPS *a)
static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
{
- TCGv_i32 tmp, addr;
+ TCGv_i32 tmp, addr, el;
if (!arm_dc_feature(s, ARM_FEATURE_M)) {
return false;
@@ -10613,6 +10612,9 @@ static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
gen_helper_v7m_msr(cpu_env, addr, tmp);
tcg_temp_free_i32(addr);
}
+ el = tcg_const_i32(s->current_el);
+ gen_helper_rebuild_hflags_m32(cpu_env, el);
+ tcg_temp_free_i32(el);
tcg_temp_free_i32(tmp);
gen_lookup_tb(s);
return true;
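
Taken together, the two translate.c changes choose the rebuild helper according to whether the instruction can change the current EL; a sketch of the two emission patterns (not a literal excerpt):

    /*
     * MSR can write CONTROL and may therefore change the EL, so the variant
     * that recomputes the EL inside the helper is used:
     *
     *     gen_helper_rebuild_hflags_m32_newel(cpu_env);
     *
     * CPS only touches PRIMASK/FAULTMASK and cannot change the EL, so the
     * translation-time EL can be passed in directly:
     *
     *     el = tcg_const_i32(s->current_el);
     *     gen_helper_rebuild_hflags_m32(cpu_env, el);
     *     tcg_temp_free_i32(el);
     */
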