Diffstat (limited to 'target/arm/helper.c')
 target/arm/helper.c | 101
 1 file changed, 33 insertions(+), 68 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 96301930cc..0da1424f72 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -3155,7 +3155,7 @@ static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = ENV_GET_CPU(env);
if (tlb_force_broadcast(env)) {
- tlbi_aa64_vmalle1_write(env, NULL, value);
+ tlbi_aa64_vmalle1is_write(env, NULL, value);
return;
}
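
This hunk fixes an accidental infinite recursion: when tlb_force_broadcast() demanded a broadcast, tlbi_aa64_vmalle1_write() called itself instead of its Inner Shareable counterpart tlbi_aa64_vmalle1is_write(). For context, a minimal sketch of what tlb_force_broadcast() checks (illustrative only; the real helper is defined earlier in helper.c):

```c
/*
 * Illustrative sketch, not the exact QEMU body: HCR_EL2.FB forces
 * TLB maintenance performed at EL1 to be broadcast to the Inner
 * Shareable domain, i.e. routed to the *is variant of the handler.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return arm_current_el(env) == 1 && (env->cp15.hcr_el2 & HCR_FB);
}
```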
@@ -3931,7 +3931,6 @@ static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
- CPUState *cs = ENV_GET_CPU(env);
uint64_t valid_mask = HCR_MASK;
if (arm_feature(env, ARM_FEATURE_EL3)) {
@@ -3950,28 +3949,6 @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* Clear RES0 bits. */
value &= valid_mask;
- /*
- * VI and VF are kept in cs->interrupt_request. Modifying that
- * requires that we have the iothread lock, which is done by
- * marking the reginfo structs as ARM_CP_IO.
- * Note that if a write to HCR pends a VIRQ or VFIQ it is never
- * possible for it to be taken immediately, because VIRQ and
- * VFIQ are masked unless running at EL0 or EL1, and HCR
- * can only be written at EL2.
- */
- g_assert(qemu_mutex_iothread_locked());
- if (value & HCR_VI) {
- cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
- } else {
- cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
- }
- if (value & HCR_VF) {
- cs->interrupt_request |= CPU_INTERRUPT_VFIQ;
- } else {
- cs->interrupt_request &= ~CPU_INTERRUPT_VFIQ;
- }
- value &= ~(HCR_VI | HCR_VF);
-
/* These bits change the MMU setup:
* HCR_VM enables stage 2 translation
* HCR_PTW forbids certain page-table setups
@@ -3981,6 +3958,21 @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
tlb_flush(CPU(cpu));
}
env->cp15.hcr_el2 = value;
+
+ /*
+ * Updates to VI and VF require us to update the status of
+ * virtual interrupts, which are the logical OR of these bits
+ * and the state of the input lines from the GIC. (This requires
+ * that we have the iothread lock, which is done by marking the
+ * reginfo structs as ARM_CP_IO.)
+ * Note that if a write to HCR pends a VIRQ or VFIQ it is never
+ * possible for it to be taken immediately, because VIRQ and
+ * VFIQ are masked unless running at EL0 or EL1, and HCR
+ * can only be written at EL2.
+ */
+ g_assert(qemu_mutex_iothread_locked());
+ arm_cpu_update_virq(cpu);
+ arm_cpu_update_vfiq(cpu);
}
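
These hcr_write() hunks stop writing CPU_INTERRUPT_VIRQ/VFIQ into cs->interrupt_request directly; doing so overwrote the GIC's input line state instead of ORing with it. The new arm_cpu_update_virq()/arm_cpu_update_vfiq() helpers live in target/arm/cpu.c; a sketch of the VIRQ case, under the assumption that the GIC line level is tracked in env->irq_line_state (the VFIQ case is analogous):

```c
void arm_cpu_update_virq(ARMCPU *cpu)
{
    /*
     * VIRQ level = HCR_EL2.VI OR the virtual IRQ input line from the
     * GIC. Raise or lower the pending interrupt only on a level change.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VI) ||
        (env->irq_line_state & CPU_INTERRUPT_VIRQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);   /* needs iothread lock */
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
    }
}
```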
static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3999,32 +3991,17 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
hcr_write(env, NULL, value);
}
-static uint64_t hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- /* The VI and VF bits live in cs->interrupt_request */
- uint64_t ret = env->cp15.hcr_el2 & ~(HCR_VI | HCR_VF);
- CPUState *cs = ENV_GET_CPU(env);
-
- if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
- ret |= HCR_VI;
- }
- if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
- ret |= HCR_VF;
- }
- return ret;
-}
-
static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_IO,
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
- .writefn = hcr_write, .readfn = hcr_read },
+ .writefn = hcr_write },
{ .name = "HCR", .state = ARM_CP_STATE_AA32,
.type = ARM_CP_ALIAS | ARM_CP_IO,
.cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
- .writefn = hcr_writelow, .readfn = hcr_read },
+ .writefn = hcr_writelow },
{ .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
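
Dropping the .readfn works because VI and VF now live in env->cp15.hcr_el2 itself, so the generic read path can return the field named by .fieldoffset unmodified. That generic path is roughly (simplified sketch of helper.c's raw_read()):

```c
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* No readfn: return the backing CPUARMState field directly */
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}
```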
@@ -6455,13 +6432,14 @@ static void switch_mode(CPUARMState *env, int mode)
i = bank_number(old_mode);
env->banked_r13[i] = env->regs[13];
- env->banked_r14[i] = env->regs[14];
env->banked_spsr[i] = env->spsr;
i = bank_number(mode);
env->regs[13] = env->banked_r13[i];
- env->regs[14] = env->banked_r14[i];
env->spsr = env->banked_spsr[i];
+
+ env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
+ env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}
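
switch_mode() now banks R14 separately from R13 and SPSR because Hyp mode is asymmetric: it has a banked SP but no banked LR (exception returns use ELR_hyp instead), so Hyp's R14 must map to the User/System bank. The r14_bank_number() helper introduced for this is essentially:

```c
/*
 * Map a CPU mode to its R14 (LR) bank index. Unlike R13, Hyp mode
 * has no banked LR of its own and shares the User/System bank.
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
```

The aarch64_sync_32_to_64()/aarch64_sync_64_to_32() hunks below are the mechanical fallout: every R14 bank index switches to r14_bank_number(), while the R13 indices keep using bank_number().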
/* Physical Interrupt Target EL Lookup Table
@@ -8040,7 +8018,7 @@ void aarch64_sync_32_to_64(CPUARMState *env)
if (mode == ARM_CPU_MODE_HYP) {
env->xregs[14] = env->regs[14];
} else {
- env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
+ env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
}
}
@@ -8054,7 +8032,7 @@ void aarch64_sync_32_to_64(CPUARMState *env)
env->xregs[16] = env->regs[14];
env->xregs[17] = env->regs[13];
} else {
- env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
+ env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
}
@@ -8062,7 +8040,7 @@ void aarch64_sync_32_to_64(CPUARMState *env)
env->xregs[18] = env->regs[14];
env->xregs[19] = env->regs[13];
} else {
- env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
+ env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
}
@@ -8070,7 +8048,7 @@ void aarch64_sync_32_to_64(CPUARMState *env)
env->xregs[20] = env->regs[14];
env->xregs[21] = env->regs[13];
} else {
- env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
+ env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
}
@@ -8078,7 +8056,7 @@ void aarch64_sync_32_to_64(CPUARMState *env)
env->xregs[22] = env->regs[14];
env->xregs[23] = env->regs[13];
} else {
- env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
+ env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
}
@@ -8095,7 +8073,7 @@ void aarch64_sync_32_to_64(CPUARMState *env)
env->xregs[i] = env->fiq_regs[i - 24];
}
env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
- env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
+ env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
}
env->pc = env->regs[15];
@@ -8145,7 +8123,7 @@ void aarch64_sync_64_to_32(CPUARMState *env)
if (mode == ARM_CPU_MODE_HYP) {
env->regs[14] = env->xregs[14];
} else {
- env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
+ env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
}
}
@@ -8159,7 +8137,7 @@ void aarch64_sync_64_to_32(CPUARMState *env)
env->regs[14] = env->xregs[16];
env->regs[13] = env->xregs[17];
} else {
- env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
+ env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
}
@@ -8167,7 +8145,7 @@ void aarch64_sync_64_to_32(CPUARMState *env)
env->regs[14] = env->xregs[18];
env->regs[13] = env->xregs[19];
} else {
- env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
+ env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
}
@@ -8175,7 +8153,7 @@ void aarch64_sync_64_to_32(CPUARMState *env)
env->regs[14] = env->xregs[20];
env->regs[13] = env->xregs[21];
} else {
- env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
+ env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
}
@@ -8183,7 +8161,7 @@ void aarch64_sync_64_to_32(CPUARMState *env)
env->regs[14] = env->xregs[22];
env->regs[13] = env->xregs[23];
} else {
- env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
+ env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
}
@@ -8200,7 +8178,7 @@ void aarch64_sync_64_to_32(CPUARMState *env)
env->fiq_regs[i - 24] = env->xregs[i];
}
env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
- env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
+ env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
}
env->regs[15] = env->pc;
@@ -8378,7 +8356,6 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
return;
}
- /* TODO: Vectored interrupt controller. */
switch (cs->exception_index) {
case EXCP_UDEF:
new_mode = ARM_CPU_MODE_UND;
@@ -10560,18 +10537,6 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
txattrs, prot, &mpu_is_subpage, fi, NULL);
- /*
- * TODO: this is a temporary hack to ignore the fact that the SAU region
- * is smaller than a page if this is an executable region. We never
- * supported small MPU regions, but we did (accidentally) allow small
- * SAU regions, and if we now made small SAU regions not be executable
- * then this would break previously working guest code. We can't
- * remove this until/unless we implement support for execution from
- * small regions.
- */
- if (*prot & PAGE_EXEC) {
- sattrs.subpage = false;
- }
*page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
return ret;
}
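
The last hunk deletes the workaround that forced executable sub-page SAU regions to report a full page. By this point QEMU's core TLB supports execution from regions smaller than TARGET_PAGE_SIZE, so get_phys_addr_pmsav8() can honestly report a sub-page mapping via the line that remains:

```c
/*
 * A 1-byte page size signals "smaller than a page" to the core TLB
 * code, for sub-page SAU regions and sub-page MPU regions alike.
 */
*page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
```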