Diffstat (limited to 'target/arm/helper.c')
-rw-r--r-- | target/arm/helper.c | 339 |
1 files changed, 240 insertions, 99 deletions
diff --git a/target/arm/helper.c b/target/arm/helper.c
index c9bce1efcb..088f452716 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -3754,11 +3754,11 @@ static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
       .access = PL2_RW,
       .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
-    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
+    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
       .type = ARM_CP_NO_RAW,
       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
       .access = PL2_RW,
-      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
+      .type = ARM_CP_CONST, .resetvalue = 0 },
     { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
       .access = PL2_RW,
@@ -3857,6 +3857,15 @@ static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
     REGINFO_SENTINEL
 };

+/* Ditto, but for registers which exist in ARMv8 but not v7 */
+static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
+    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
+      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
+      .access = PL2_RW,
+      .type = ARM_CP_CONST, .resetvalue = 0 },
+    REGINFO_SENTINEL
+};
+
 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 {
     ARMCPU *cpu = arm_env_get_cpu(env);
@@ -3883,10 +3892,26 @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
      * HCR_PTW forbids certain page-table setups
      * HCR_DC Disables stage1 and enables stage2 translation
      */
-    if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
+    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
         tlb_flush(CPU(cpu));
     }
-    raw_write(env, ri, value);
+    env->cp15.hcr_el2 = value;
+}
+
+static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
+                          uint64_t value)
+{
+    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
+    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
+    hcr_write(env, NULL, value);
+}
+
+static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
+                         uint64_t value)
+{
+    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
+    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
+    hcr_write(env, NULL, value);
 }

 static const ARMCPRegInfo el2_cp_reginfo[] = {
@@ -3894,6 +3919,11 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
       .writefn = hcr_write },
+    { .name = "HCR", .state = ARM_CP_STATE_AA32,
+      .type = ARM_CP_ALIAS,
+      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
+      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
+      .writefn = hcr_writelow },
     { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
       .type = ARM_CP_ALIAS,
       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
@@ -4128,6 +4158,16 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
     REGINFO_SENTINEL
 };

+static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
+    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
+      .type = ARM_CP_ALIAS,
+      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
+      .access = PL2_RW,
+      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
+      .writefn = hcr_writehigh },
+    REGINFO_SENTINEL
+};
+
 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
 {
@@ -5179,6 +5219,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         };
         define_arm_cp_regs(cpu, vpidr_regs);
         define_arm_cp_regs(cpu, el2_cp_reginfo);
+        if (arm_feature(env, ARM_FEATURE_V8)) {
+            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
+        }
         /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
         if (!arm_feature(env, ARM_FEATURE_EL3)) {
             ARMCPRegInfo rvbar = {
@@ -5211,6 +5254,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
             };
             define_arm_cp_regs(cpu, vpidr_regs);
             define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
+            if (arm_feature(env, ARM_FEATURE_V8)) {
+                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
+            }
         }
     }
     if (arm_feature(env, ARM_FEATURE_EL3)) {
@@ -5459,6 +5505,16 @@ void register_cp_regs_for_features(ARMCPU *cpu)
             REGINFO_SENTINEL
         };
         define_arm_cp_regs(cpu, auxcr_reginfo);
+        if (arm_feature(env, ARM_FEATURE_V8)) {
+            /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
+            ARMCPRegInfo hactlr2_reginfo = {
+                .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
+                .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
+                .access = PL2_RW, .type = ARM_CP_CONST,
+                .resetvalue = 0
+            };
+            define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
+        }
     }

     if (arm_feature(env, ARM_FEATURE_CBAR)) {
@@ -7977,6 +8033,125 @@ void aarch64_sync_64_to_32(CPUARMState *env)
     env->regs[15] = env->pc;
 }

+static void take_aarch32_exception(CPUARMState *env, int new_mode,
+                                   uint32_t mask, uint32_t offset,
+                                   uint32_t newpc)
+{
+    /* Change the CPU state so as to actually take the exception. */
+    switch_mode(env, new_mode);
+    /*
+     * For exceptions taken to AArch32 we must clear the SS bit in both
+     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
+     */
+    env->uncached_cpsr &= ~PSTATE_SS;
+    env->spsr = cpsr_read(env);
+    /* Clear IT bits.  */
+    env->condexec_bits = 0;
+    /* Switch to the new mode, and to the correct instruction set.  */
+    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
+    /* Set new mode endianness */
+    env->uncached_cpsr &= ~CPSR_E;
+    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
+        env->uncached_cpsr |= CPSR_E;
+    }
+    /* J and IL must always be cleared for exception entry */
+    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
+    env->daif |= mask;
+
+    if (new_mode == ARM_CPU_MODE_HYP) {
+        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
+        env->elr_el[2] = env->regs[15];
+    } else {
+        /*
+         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
+         * and we should just guard the thumb mode on V4
+         */
+        if (arm_feature(env, ARM_FEATURE_V4T)) {
+            env->thumb =
+                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
+        }
+        env->regs[14] = env->regs[15] + offset;
+    }
+    env->regs[15] = newpc;
+}
+
+static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
+{
+    /*
+     * Handle exception entry to Hyp mode; this is sufficiently
+     * different to entry to other AArch32 modes that we handle it
+     * separately here.
+     *
+     * The vector table entry used is always the 0x14 Hyp mode entry point,
+     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
+     * The offset applied to the preferred return address is always zero
+     * (see DDI0487C.a section G1.12.3).
+     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
+     */
+    uint32_t addr, mask;
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+
+    switch (cs->exception_index) {
+    case EXCP_UDEF:
+        addr = 0x04;
+        break;
+    case EXCP_SWI:
+        addr = 0x14;
+        break;
+    case EXCP_BKPT:
+        /* Fall through to prefetch abort. */
+    case EXCP_PREFETCH_ABORT:
+        env->cp15.ifar_s = env->exception.vaddress;
+        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
+                      (uint32_t)env->exception.vaddress);
+        addr = 0x0c;
+        break;
+    case EXCP_DATA_ABORT:
+        env->cp15.dfar_s = env->exception.vaddress;
+        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
+                      (uint32_t)env->exception.vaddress);
+        addr = 0x10;
+        break;
+    case EXCP_IRQ:
+        addr = 0x18;
+        break;
+    case EXCP_FIQ:
+        addr = 0x1c;
+        break;
+    case EXCP_HVC:
+        addr = 0x08;
+        break;
+    case EXCP_HYP_TRAP:
+        addr = 0x14;
+    default:
+        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
+    }
+
+    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
+        env->cp15.esr_el[2] = env->exception.syndrome;
+    }
+
+    if (arm_current_el(env) != 2 && addr < 0x14) {
+        addr = 0x14;
+    }
+
+    mask = 0;
+    if (!(env->cp15.scr_el3 & SCR_EA)) {
+        mask |= CPSR_A;
+    }
+    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
+        mask |= CPSR_I;
+    }
+    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
+        mask |= CPSR_F;
+    }
+
+    addr += env->cp15.hvbar;
+
+    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
+}
+
 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
 {
     ARMCPU *cpu = ARM_CPU(cs);
@@ -8012,6 +8187,11 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
         env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
     }

+    if (env->exception.target_el == 2) {
+        arm_cpu_do_interrupt_aarch32_hyp(cs);
+        return;
+    }
+
     /* TODO: Vectored interrupt controller. */
     switch (cs->exception_index) {
     case EXCP_UDEF:
@@ -8119,29 +8299,7 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
         env->cp15.scr_el3 &= ~SCR_NS;
     }

-    switch_mode (env, new_mode);
-    /* For exceptions taken to AArch32 we must clear the SS bit in both
-     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
-     */
-    env->uncached_cpsr &= ~PSTATE_SS;
-    env->spsr = cpsr_read(env);
-    /* Clear IT bits.  */
-    env->condexec_bits = 0;
-    /* Switch to the new mode, and to the correct instruction set.  */
-    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
-    /* Set new mode endianness */
-    env->uncached_cpsr &= ~CPSR_E;
-    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
-        env->uncached_cpsr |= CPSR_E;
-    }
-    env->daif |= mask;
-    /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
-     * and we should just guard the thumb mode on V4 */
-    if (arm_feature(env, ARM_FEATURE_V4T)) {
-        env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
-    }
-    env->regs[14] = env->regs[15] + offset;
-    env->regs[15] = addr;
+    take_aarch32_exception(env, new_mode, mask, offset, addr);
 }

 /* Handle exception entry to a target EL which is using AArch64 */
@@ -11564,45 +11722,30 @@ float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
 #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
 float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t  x, uint32_t shift, \
                                      void *fpstp) \
-{ \
-    float_status *fpst = fpstp; \
-    float##fsz tmp; \
-    tmp = itype##_to_##float##fsz(x, fpst); \
-    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
-}
+{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); }

-/* Notice that we want only input-denormal exception flags from the
- * scalbn operation: the other possible flags (overflow+inexact if
- * we overflow to infinity, output-denormal) aren't correct for the
- * complete scale-and-convert operation.
- */
-#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
-uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
-                                             uint32_t shift, \
-                                             void *fpstp) \
-{ \
-    float_status *fpst = fpstp; \
-    int old_exc_flags = get_float_exception_flags(fpst); \
-    float##fsz tmp; \
-    if (float##fsz##_is_any_nan(x)) { \
-        float_raise(float_flag_invalid, fpst); \
-        return 0; \
-    } \
-    tmp = float##fsz##_scalbn(x, shift, fpst); \
-    old_exc_flags |= get_float_exception_flags(fpst) \
-                     & float_flag_input_denormal; \
-    set_float_exception_flags(old_exc_flags, fpst); \
-    return float##fsz##_to_##itype##round(tmp, fpst); \
+#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ROUND, suff) \
+uint##isz##_t HELPER(vfp_to##name##p##suff)(float##fsz x, uint32_t shift, \
+                                            void *fpst) \
+{ \
+    if (unlikely(float##fsz##_is_any_nan(x))) { \
+        float_raise(float_flag_invalid, fpst); \
+        return 0; \
+    } \
+    return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst); \
 }

 #define VFP_CONV_FIX(name, p, fsz, isz, itype) \
 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
-VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
-VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \
+                         float_round_to_zero, _round_to_zero) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \
+                         get_float_rounding_mode(fpst), )

 #define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
-VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \
+                         get_float_rounding_mode(fpst), )

 VFP_CONV_FIX(sh, d, 64, 64, int16)
 VFP_CONV_FIX(sl, d, 64, 64, int32)
@@ -11622,87 +11765,84 @@ VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
 #undef VFP_CONV_FLOAT_FIX_ROUND
 #undef VFP_CONV_FIX_A64

-/* Conversion to/from f16 can overflow to infinity before/after scaling.
- * Therefore we convert to f64, scale, and then convert f64 to f16; or
- * vice versa for conversion to integer.
- *
- * For 16- and 32-bit integers, the conversion to f64 never rounds.
- * For 64-bit integers, any integer that would cause rounding will also
- * overflow to f16 infinity, so there is no double rounding problem.
- */
-
-static float16 do_postscale_fp16(float64 f, int shift, float_status *fpst)
-{
-    return float64_to_float16(float64_scalbn(f, -shift, fpst), true, fpst);
-}
-
 uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
 {
-    return do_postscale_fp16(int32_to_float64(x, fpst), shift, fpst);
+    return int32_to_float16_scalbn(x, -shift, fpst);
 }

 uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
 {
-    return do_postscale_fp16(uint32_to_float64(x, fpst), shift, fpst);
+    return uint32_to_float16_scalbn(x, -shift, fpst);
 }

 uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
 {
-    return do_postscale_fp16(int64_to_float64(x, fpst), shift, fpst);
+    return int64_to_float16_scalbn(x, -shift, fpst);
 }

 uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
 {
-    return do_postscale_fp16(uint64_to_float64(x, fpst), shift, fpst);
+    return uint64_to_float16_scalbn(x, -shift, fpst);
 }

-static float64 do_prescale_fp16(float16 f, int shift, float_status *fpst)
+uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst)
 {
-    if (unlikely(float16_is_any_nan(f))) {
+    if (unlikely(float16_is_any_nan(x))) {
         float_raise(float_flag_invalid, fpst);
         return 0;
-    } else {
-        int old_exc_flags = get_float_exception_flags(fpst);
-        float64 ret;
-
-        ret = float16_to_float64(f, true, fpst);
-        ret = float64_scalbn(ret, shift, fpst);
-        old_exc_flags |= get_float_exception_flags(fpst)
-                         & float_flag_input_denormal;
-        set_float_exception_flags(old_exc_flags, fpst);
-
-        return ret;
     }
-}
-
-uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst)
-{
-    return float64_to_int16(do_prescale_fp16(x, shift, fpst), fpst);
+    return float16_to_int16_scalbn(x, get_float_rounding_mode(fpst),
+                                   shift, fpst);
 }

 uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst)
 {
-    return float64_to_uint16(do_prescale_fp16(x, shift, fpst), fpst);
+    if (unlikely(float16_is_any_nan(x))) {
+        float_raise(float_flag_invalid, fpst);
+        return 0;
+    }
+    return float16_to_uint16_scalbn(x, get_float_rounding_mode(fpst),
+                                    shift, fpst);
 }

 uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst)
 {
-    return float64_to_int32(do_prescale_fp16(x, shift, fpst), fpst);
+    if (unlikely(float16_is_any_nan(x))) {
+        float_raise(float_flag_invalid, fpst);
+        return 0;
+    }
+    return float16_to_int32_scalbn(x, get_float_rounding_mode(fpst),
+                                   shift, fpst);
 }

 uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst)
 {
-    return float64_to_uint32(do_prescale_fp16(x, shift, fpst), fpst);
+    if (unlikely(float16_is_any_nan(x))) {
+        float_raise(float_flag_invalid, fpst);
+        return 0;
+    }
+    return float16_to_uint32_scalbn(x, get_float_rounding_mode(fpst),
+                                    shift, fpst);
 }

 uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst)
 {
-    return float64_to_int64(do_prescale_fp16(x, shift, fpst), fpst);
+    if (unlikely(float16_is_any_nan(x))) {
+        float_raise(float_flag_invalid, fpst);
+        return 0;
+    }
+    return float16_to_int64_scalbn(x, get_float_rounding_mode(fpst),
+                                   shift, fpst);
 }

 uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst)
 {
-    return float64_to_uint64(do_prescale_fp16(x, shift, fpst), fpst);
+    if (unlikely(float16_is_any_nan(x))) {
+        float_raise(float_flag_invalid, fpst);
+        return 0;
+    }
+    return float16_to_uint64_scalbn(x, get_float_rounding_mode(fpst),
+                                    shift, fpst);
 }

 /* Set the current fp rounding mode and return the old one.
@@ -12331,6 +12471,7 @@ int arm_rmode_to_sf(int rmode)
         /* FIXME: add support for TIEAWAY and ODD */
         qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                       rmode);
+        /* fall through for now */
     case FPROUNDING_TIEEVEN:
     default:
         rmode = float_round_nearest_even;
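As a reading aid (not part of the patch): the reworked fixed-point conversion macros are easier to follow when expanded by hand. The sketch below is roughly what VFP_CONV_FIX(sl, d, 64, 64, int32) expands to after this change, derived from the macro definitions above, with whitespace added and the HELPER() wrapper left unexpanded; it is an illustration, not text that appears in the file.

/* From VFP_CONV_FIX_FLOAT(sl, d, 64, 64, int32): int32 -> float64, scaled by 2^-shift */
float64 HELPER(vfp_sltod)(uint64_t x, uint32_t shift, void *fpstp)
{ return int32_to_float64_scalbn(x, -shift, fpstp); }

/* From VFP_CONV_FLOAT_FIX_ROUND(sl, d, 64, 64, int32, float_round_to_zero, _round_to_zero) */
uint64_t HELPER(vfp_tosld_round_to_zero)(float64 x, uint32_t shift, void *fpst)
{
    if (unlikely(float64_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float64_to_int32_scalbn(x, float_round_to_zero, shift, fpst);
}

/* From VFP_CONV_FLOAT_FIX_ROUND(sl, d, 64, 64, int32, get_float_rounding_mode(fpst), ) */
uint64_t HELPER(vfp_tosld)(float64 x, uint32_t shift, void *fpst)
{
    if (unlikely(float64_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float64_to_int32_scalbn(x, get_float_rounding_mode(fpst), shift, fpst);
}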