Diffstat (limited to 'target/arm/helper.c')
-rw-r--r-- | target/arm/helper.c | 332
1 file changed, 206 insertions, 126 deletions
diff --git a/target/arm/helper.c b/target/arm/helper.c
index db3b1ea72d..dde64a487a 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1752,8 +1752,9 @@ static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 {
     /* Begin with base v8.0 state. */
-    uint32_t valid_mask = 0x3fff;
+    uint64_t valid_mask = 0x3fff;
     ARMCPU *cpu = env_archcpu(env);
+    uint64_t changed;
 
     /*
      * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
@@ -1789,6 +1790,9 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
         if (cpu_isar_feature(aa64_doublefault, cpu)) {
             valid_mask |= SCR_EASE | SCR_NMEA;
         }
+        if (cpu_isar_feature(aa64_sme, cpu)) {
+            valid_mask |= SCR_ENTP2;
+        }
     } else {
         valid_mask &= ~(SCR_RW | SCR_ST);
         if (cpu_isar_feature(aa32_ras, cpu)) {
@@ -1813,7 +1817,22 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
     /* Clear all-context RES0 bits. */
     value &= valid_mask;
 
-    raw_write(env, ri, value);
+    changed = env->cp15.scr_el3 ^ value;
+    env->cp15.scr_el3 = value;
+
+    /*
+     * If SCR_EL3.NS changes, i.e. arm_is_secure_below_el3, then
+     * we must invalidate all TLBs below EL3.
+     */
+    if (changed & SCR_NS) {
+        tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
+                                           ARMMMUIdxBit_E20_0 |
+                                           ARMMMUIdxBit_E10_1 |
+                                           ARMMMUIdxBit_E20_2 |
+                                           ARMMMUIdxBit_E10_1_PAN |
+                                           ARMMMUIdxBit_E20_2_PAN |
+                                           ARMMMUIdxBit_E2));
+    }
 }
 
 static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -2644,9 +2663,6 @@ static int gt_phys_redir_timeridx(CPUARMState *env)
     case ARMMMUIdx_E20_0:
     case ARMMMUIdx_E20_2:
     case ARMMMUIdx_E20_2_PAN:
-    case ARMMMUIdx_SE20_0:
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
         return GTIMER_HYP;
     default:
         return GTIMER_PHYS;
@@ -2659,9 +2675,6 @@ static int gt_virt_redir_timeridx(CPUARMState *env)
     case ARMMMUIdx_E20_0:
     case ARMMMUIdx_E20_2:
     case ARMMMUIdx_E20_2_PAN:
-    case ARMMMUIdx_SE20_0:
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
         return GTIMER_HYPVIRT;
     default:
         return GTIMER_VIRT;
@@ -3188,7 +3201,8 @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
 
 #ifdef CONFIG_TCG
 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
-                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
+                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                             bool is_secure)
 {
     bool ret;
     uint64_t par64;
@@ -3196,7 +3210,8 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
     ARMMMUFaultInfo fi = {};
     GetPhysAddrResult res = {};
 
-    ret = get_phys_addr(env, value, access_type, mmu_idx, &res, &fi);
+    ret = get_phys_addr_with_secure(env, value, access_type, mmu_idx,
+                                    is_secure, &res, &fi);
 
     /*
      * ATS operations only do S1 or S1+S2 translations, so we never
@@ -3308,8 +3323,8 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
         /* Create a 64-bit PAR */
         par64 = (1 << 11); /* LPAE bit always set */
         if (!ret) {
-            par64 |= res.phys & ~0xfffULL;
-            if (!res.attrs.secure) {
+            par64 |= res.f.phys_addr & ~0xfffULL;
+            if (!res.f.attrs.secure) {
                 par64 |= (1 << 9); /* NS */
             }
             par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
@@ -3333,13 +3348,13 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
          */
         if (!ret) {
             /* We do not set any attribute bits in the PAR */
-            if (res.page_size == (1 << 24)
+            if (res.f.lg_page_size == 24
                 && arm_feature(env, ARM_FEATURE_V7)) {
-                par64 = (res.phys & 0xff000000) | (1 << 1);
+                par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
             } else {
-                par64 = res.phys & 0xfffff000;
+                par64 = res.f.phys_addr & 0xfffff000;
             }
-            if (!res.attrs.secure) {
+            if (!res.f.attrs.secure) {
                 par64 |= (1 << 9); /* NS */
             }
         } else {
@@ -3367,17 +3382,17 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
         /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
         switch (el) {
         case 3:
-            mmu_idx = ARMMMUIdx_SE3;
+            mmu_idx = ARMMMUIdx_E3;
+            secure = true;
             break;
         case 2:
            g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */
            /* fall through */
         case 1:
             if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
-                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
-                           : ARMMMUIdx_Stage1_E1_PAN);
+                mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
             } else {
-                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
+                mmu_idx = ARMMMUIdx_Stage1_E1;
             }
             break;
         default:
@@ -3388,14 +3403,15 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
         /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
         switch (el) {
         case 3:
-            mmu_idx = ARMMMUIdx_SE10_0;
+            mmu_idx = ARMMMUIdx_E10_0;
+            secure = true;
             break;
         case 2:
             g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */
             mmu_idx = ARMMMUIdx_Stage1_E0;
             break;
         case 1:
-            mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
+            mmu_idx = ARMMMUIdx_Stage1_E0;
             break;
         default:
             g_assert_not_reached();
@@ -3404,16 +3420,18 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
     case 4:
         /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
         mmu_idx = ARMMMUIdx_E10_1;
+        secure = false;
         break;
     case 6:
         /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
         mmu_idx = ARMMMUIdx_E10_0;
+        secure = false;
         break;
     default:
         g_assert_not_reached();
     }
 
-    par64 = do_ats_write(env, value, access_type, mmu_idx);
+    par64 = do_ats_write(env, value, access_type, mmu_idx, secure);
 
     A32_BANKED_CURRENT_REG_SET(env, par, par64);
 #else
@@ -3429,7 +3447,8 @@ static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
     uint64_t par64;
 
-    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);
+    /* There is no SecureEL2 for AArch32. */
+    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2, false);
 
     A32_BANKED_CURRENT_REG_SET(env, par, par64);
 #else
@@ -3461,36 +3480,37 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
         switch (ri->opc1) {
         case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
             if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
-                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
-                           : ARMMMUIdx_Stage1_E1_PAN);
+                mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
             } else {
-                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
+                mmu_idx = ARMMMUIdx_Stage1_E1;
             }
             break;
         case 4: /* AT S1E2R, AT S1E2W */
-            mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
+            mmu_idx = ARMMMUIdx_E2;
             break;
         case 6: /* AT S1E3R, AT S1E3W */
-            mmu_idx = ARMMMUIdx_SE3;
+            mmu_idx = ARMMMUIdx_E3;
+            secure = true;
             break;
         default:
             g_assert_not_reached();
         }
         break;
     case 2: /* AT S1E0R, AT S1E0W */
-        mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
+        mmu_idx = ARMMMUIdx_Stage1_E0;
        break;
     case 4: /* AT S12E1R, AT S12E1W */
-        mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
+        mmu_idx = ARMMMUIdx_E10_1;
        break;
     case 6: /* AT S12E0R, AT S12E0W */
-        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
+        mmu_idx = ARMMMUIdx_E10_0;
         break;
     default:
         g_assert_not_reached();
     }
 
-    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
+    env->cp15.par_el[1] = do_ats_write(env, value, access_type,
+                                       mmu_idx, secure);
 #else
     /* Handled by hardware accelerator. */
     g_assert_not_reached();
@@ -3753,11 +3773,6 @@ static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
         uint16_t mask = ARMMMUIdxBit_E20_2 |
                         ARMMMUIdxBit_E20_2_PAN |
                         ARMMMUIdxBit_E20_0;
-
-        if (arm_is_secure_below_el3(env)) {
-            mask >>= ARM_MMU_IDX_A_NS;
-        }
-
         tlb_flush_by_mmuidx(env_cpu(env), mask);
     }
     raw_write(env, ri, value);
 }
@@ -3777,11 +3792,6 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
         uint16_t mask = ARMMMUIdxBit_E10_1 |
                         ARMMMUIdxBit_E10_1_PAN |
                         ARMMMUIdxBit_E10_0;
-
-        if (arm_is_secure_below_el3(env)) {
-            mask >>= ARM_MMU_IDX_A_NS;
-        }
-
         tlb_flush_by_mmuidx(cs, mask);
         raw_write(env, ri, value);
     }
@@ -4252,11 +4262,6 @@ static int vae1_tlbmask(CPUARMState *env)
                ARMMMUIdxBit_E10_1_PAN |
                ARMMMUIdxBit_E10_0;
     }
-
-    if (arm_is_secure_below_el3(env)) {
-        mask >>= ARM_MMU_IDX_A_NS;
-    }
-
     return mask;
 }
 
@@ -4283,10 +4288,6 @@ static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
         mmu_idx = ARMMMUIdx_E10_0;
     }
 
-    if (arm_is_secure_below_el3(env)) {
-        mmu_idx &= ~ARM_MMU_IDX_A_NS;
-    }
-
     return tlbbits_for_regime(env, mmu_idx, addr);
 }
 
@@ -4319,30 +4320,17 @@ static int alle1_tlbmask(CPUARMState *env)
      * stage 2 translations, whereas most other scopes only invalidate
      * stage 1 translations.
      */
-    if (arm_is_secure_below_el3(env)) {
-        return ARMMMUIdxBit_SE10_1 |
-               ARMMMUIdxBit_SE10_1_PAN |
-               ARMMMUIdxBit_SE10_0;
-    } else {
-        return ARMMMUIdxBit_E10_1 |
-               ARMMMUIdxBit_E10_1_PAN |
-               ARMMMUIdxBit_E10_0;
-    }
+    return (ARMMMUIdxBit_E10_1 |
+            ARMMMUIdxBit_E10_1_PAN |
+            ARMMMUIdxBit_E10_0);
 }
 
 static int e2_tlbmask(CPUARMState *env)
 {
-    if (arm_is_secure_below_el3(env)) {
-        return ARMMMUIdxBit_SE20_0 |
-               ARMMMUIdxBit_SE20_2 |
-               ARMMMUIdxBit_SE20_2_PAN |
-               ARMMMUIdxBit_SE2;
-    } else {
-        return ARMMMUIdxBit_E20_0 |
-               ARMMMUIdxBit_E20_2 |
-               ARMMMUIdxBit_E20_2_PAN |
-               ARMMMUIdxBit_E2;
-    }
+    return (ARMMMUIdxBit_E20_0 |
+            ARMMMUIdxBit_E20_2 |
+            ARMMMUIdxBit_E20_2_PAN |
+            ARMMMUIdxBit_E2);
 }
 
 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4369,7 +4357,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
     ARMCPU *cpu = env_archcpu(env);
     CPUState *cs = CPU(cpu);
 
-    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
+    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
 }
 
 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4395,7 +4383,7 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = env_cpu(env);
 
-    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
+    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
 }
 
 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4423,7 +4411,7 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
     CPUState *cs = CPU(cpu);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
 
-    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
+    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
 }
 
 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4462,12 +4450,10 @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = env_cpu(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
-    bool secure = arm_is_secure_below_el3(env);
-    int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
-    int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
-                                  pageaddr);
+    int bits = tlbbits_for_regime(env, ARMMMUIdx_E2, pageaddr);
 
-    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
+    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                                  ARMMMUIdxBit_E2, bits);
 }
 
 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4475,10 +4461,10 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = env_cpu(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
-    int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
+    int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
 
     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
-                                                  ARMMMUIdxBit_SE3, bits);
+                                                  ARMMMUIdxBit_E3, bits);
 }
 
 #ifdef TARGET_AARCH64
@@ -4487,6 +4473,24 @@ typedef struct {
     uint64_t length;
 } TLBIRange;
 
+static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg)
+{
+    /*
+     * Note that the TLBI range TG field encoding differs from both
+     * TG0 and TG1 encodings.
+     */
+    switch (tg) {
+    case 1:
+        return Gran4K;
+    case 2:
+        return Gran16K;
+    case 3:
+        return Gran64K;
+    default:
+        return GranInvalid;
+    }
+}
+
 static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
                                      uint64_t value)
 {
@@ -4495,17 +4499,19 @@ static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
     uint64_t select = sextract64(value, 36, 1);
     ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true);
     TLBIRange ret = { };
+    ARMGranuleSize gran;
 
     page_size_granule = extract64(value, 46, 2);
+    gran = tlbi_range_tg_to_gran_size(page_size_granule);
 
     /* The granule encoded in value must match the granule in use. */
-    if (page_size_granule != (param.using64k ? 3 : param.using16k ? 2 : 1)) {
+    if (gran != param.gran) {
         qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
                       page_size_granule);
         return ret;
     }
 
-    page_shift = (page_size_granule - 1) * 2 + 12;
+    page_shift = arm_granule_bits(gran);
     num = extract64(value, 39, 5);
     scale = extract64(value, 44, 2);
     exponent = (5 * scale) + 1;
@@ -4584,8 +4590,7 @@ static void tlbi_aa64_rvae1is_write(CPUARMState *env,
 
 static int vae2_tlbmask(CPUARMState *env)
 {
-    return (arm_is_secure_below_el3(env)
-            ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
+    return ARMMMUIdxBit_E2;
 }
 
 static void tlbi_aa64_rvae2_write(CPUARMState *env,
@@ -4631,8 +4636,7 @@ static void tlbi_aa64_rvae3_write(CPUARMState *env,
      * flush-last-level-only.
      */
 
-    do_rvae_write(env, value, ARMMMUIdxBit_SE3,
-                  tlb_force_broadcast(env));
+    do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
 }
 
 static void tlbi_aa64_rvae3is_write(CPUARMState *env,
@@ -4646,7 +4650,7 @@ static void tlbi_aa64_rvae3is_write(CPUARMState *env,
      * flush-last-level-only or inner/outer specific flushes.
      */
 
-    do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
+    do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
 }
 #endif
 
@@ -5245,15 +5249,15 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
 }
 
 /*
- * Return the effective value of HCR_EL2.
+ * Return the effective value of HCR_EL2, at the given security state.
  * Bits that are not included here:
  * RW (read from SCR_EL3.RW as needed)
  */
-uint64_t arm_hcr_el2_eff(CPUARMState *env)
+uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure)
 {
     uint64_t ret = env->cp15.hcr_el2;
 
-    if (!arm_is_el2_enabled(env)) {
+    if (!arm_is_el2_enabled_secstate(env, secure)) {
         /*
          * "This register has no effect if EL2 is not enabled in the
          * current Security state". This is ARMv8.4-SecEL2 speak for
@@ -5312,6 +5316,11 @@ uint64_t arm_hcr_el2_eff(CPUARMState *env)
     return ret;
 }
 
+uint64_t arm_hcr_el2_eff(CPUARMState *env)
+{
+    return arm_hcr_el2_eff_secstate(env, arm_is_secure_below_el3(env));
+}
+
 /*
  * Corresponds to ARM pseudocode function ELIsInHost().
  */
@@ -10259,8 +10268,7 @@ uint64_t arm_sctlr(CPUARMState *env, int el)
     /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
     if (el == 0) {
         ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
-        el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
-             ? 2 : 1;
+        el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
     }
     return env->cp15.sctlr_el[el];
 }
@@ -10299,20 +10307,105 @@ static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
     }
 }
 
+static ARMGranuleSize tg0_to_gran_size(int tg)
+{
+    switch (tg) {
+    case 0:
+        return Gran4K;
+    case 1:
+        return Gran64K;
+    case 2:
+        return Gran16K;
+    default:
+        return GranInvalid;
+    }
+}
+
+static ARMGranuleSize tg1_to_gran_size(int tg)
+{
+    switch (tg) {
+    case 1:
+        return Gran16K;
+    case 2:
+        return Gran4K;
+    case 3:
+        return Gran64K;
+    default:
+        return GranInvalid;
+    }
+}
+
+static inline bool have4k(ARMCPU *cpu, bool stage2)
+{
+    return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
+                  : cpu_isar_feature(aa64_tgran4, cpu);
+}
+
+static inline bool have16k(ARMCPU *cpu, bool stage2)
+{
+    return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
+                  : cpu_isar_feature(aa64_tgran16, cpu);
+}
+
+static inline bool have64k(ARMCPU *cpu, bool stage2)
+{
+    return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
+                  : cpu_isar_feature(aa64_tgran64, cpu);
+}
+
+static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
+                                         bool stage2)
+{
+    switch (gran) {
+    case Gran4K:
+        if (have4k(cpu, stage2)) {
+            return gran;
+        }
+        break;
+    case Gran16K:
+        if (have16k(cpu, stage2)) {
+            return gran;
+        }
+        break;
+    case Gran64K:
+        if (have64k(cpu, stage2)) {
+            return gran;
+        }
+        break;
+    case GranInvalid:
+        break;
+    }
+    /*
+     * If the guest selects a granule size that isn't implemented,
+     * the architecture requires that we behave as if it selected one
+     * that is (with an IMPDEF choice of which one to pick). We choose
+     * to implement the smallest supported granule size.
+     */
+    if (have4k(cpu, stage2)) {
+        return Gran4K;
+    }
+    if (have16k(cpu, stage2)) {
+        return Gran16K;
+    }
+    assert(have64k(cpu, stage2));
+    return Gran64K;
+}
+
 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                    ARMMMUIdx mmu_idx, bool data)
 {
     uint64_t tcr = regime_tcr(env, mmu_idx);
-    bool epd, hpd, using16k, using64k, tsz_oob, ds;
+    bool epd, hpd, tsz_oob, ds;
     int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
+    ARMGranuleSize gran;
     ARMCPU *cpu = env_archcpu(env);
+    bool stage2 = mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
 
     if (!regime_has_2_ranges(mmu_idx)) {
         select = 0;
         tsz = extract32(tcr, 0, 6);
-        using64k = extract32(tcr, 14, 1);
-        using16k = extract32(tcr, 15, 1);
-        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+        gran = tg0_to_gran_size(extract32(tcr, 14, 2));
+        if (stage2) {
             /* VTCR_EL2 */
             hpd = false;
         } else {
@@ -10330,16 +10423,13 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
         select = extract64(va, 55, 1);
         if (!select) {
             tsz = extract32(tcr, 0, 6);
+            gran = tg0_to_gran_size(extract32(tcr, 14, 2));
             epd = extract32(tcr, 7, 1);
             sh = extract32(tcr, 12, 2);
-            using64k = extract32(tcr, 14, 1);
-            using16k = extract32(tcr, 15, 1);
             hpd = extract64(tcr, 41, 1);
         } else {
-            int tg = extract32(tcr, 30, 2);
-            using16k = tg == 1;
-            using64k = tg == 3;
             tsz = extract32(tcr, 16, 6);
+            gran = tg1_to_gran_size(extract32(tcr, 30, 2));
             epd = extract32(tcr, 23, 1);
             sh = extract32(tcr, 28, 2);
             hpd = extract64(tcr, 42, 1);
@@ -10348,8 +10438,10 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
         ds = extract64(tcr, 59, 1);
     }
 
+    gran = sanitize_gran_size(cpu, gran, stage2);
+
     if (cpu_isar_feature(aa64_st, cpu)) {
-        max_tsz = 48 - using64k;
+        max_tsz = 48 - (gran == Gran64K);
     } else {
         max_tsz = 39;
     }
@@ -10359,7 +10451,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
      * adjust the effective value of DS, as documented.
      */
     min_tsz = 16;
-    if (using64k) {
+    if (gran == Gran64K) {
         if (cpu_isar_feature(aa64_lva, cpu)) {
             min_tsz = 12;
         }
@@ -10368,14 +10460,14 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
         switch (mmu_idx) {
         case ARMMMUIdx_Stage2:
         case ARMMMUIdx_Stage2_S:
-            if (using16k) {
+            if (gran == Gran16K) {
                 ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
             } else {
                 ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
             }
             break;
         default:
-            if (using16k) {
+            if (gran == Gran16K) {
                 ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
             } else {
                 ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
@@ -10412,10 +10504,9 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
         .tbi = tbi,
         .epd = epd,
         .hpd = hpd,
-        .using16k = using16k,
-        .using64k = using64k,
         .tsz_oob = tsz_oob,
         .ds = ds,
+        .gran = gran,
     };
 }
 
@@ -10804,22 +10895,15 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
     switch (mmu_idx) {
     case ARMMMUIdx_E10_0:
     case ARMMMUIdx_E20_0:
-    case ARMMMUIdx_SE10_0:
-    case ARMMMUIdx_SE20_0:
         return 0;
     case ARMMMUIdx_E10_1:
     case ARMMMUIdx_E10_1_PAN:
-    case ARMMMUIdx_SE10_1:
-    case ARMMMUIdx_SE10_1_PAN:
         return 1;
     case ARMMMUIdx_E2:
     case ARMMMUIdx_E20_2:
     case ARMMMUIdx_E20_2_PAN:
-    case ARMMMUIdx_SE2:
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
         return 2;
-    case ARMMMUIdx_SE3:
+    case ARMMMUIdx_E3:
         return 3;
     default:
         g_assert_not_reached();
@@ -10872,15 +10956,11 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
         }
         break;
     case 3:
-        return ARMMMUIdx_SE3;
+        return ARMMMUIdx_E3;
     default:
         g_assert_not_reached();
     }
 
-    if (arm_is_secure_below_el3(env)) {
-        idx &= ~ARM_MMU_IDX_A_NS;
-    }
-
     return idx;
 }
 
@@ -10945,6 +11025,10 @@ static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
         DP_TBFLAG_M32(flags, STACKCHECK, 1);
     }
 
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
+        DP_TBFLAG_M32(flags, SECURE, 1);
+    }
+
     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
 }
 
@@ -11079,15 +11163,11 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
     switch (mmu_idx) {
     case ARMMMUIdx_E10_1:
     case ARMMMUIdx_E10_1_PAN:
-    case ARMMMUIdx_SE10_1:
-    case ARMMMUIdx_SE10_1_PAN:
         /* TODO: ARMv8.3-NV */
         DP_TBFLAG_A64(flags, UNPRIV, 1);
         break;
     case ARMMMUIdx_E20_2:
     case ARMMMUIdx_E20_2_PAN:
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
         /*
          * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
          * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.