Diffstat (limited to 'target/arm/helper.c')
-rw-r--r--  target/arm/helper.c  421
1 file changed, 328 insertions, 93 deletions
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 972a766730..dc9c29f998 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -44,7 +44,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
bool s1_is_el0,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr,
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+ __attribute__((nonnull));
#endif
static void switch_mode(CPUARMState *env, int mode);
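Note on the hunk above: __attribute__((nonnull)) lets gcc/clang warn at call sites that pass a literal NULL for any pointer parameter, which is exactly the calling pattern the later cacheattrs changes remove. A minimal standalone sketch of the attribute (illustrative, not QEMU code):

    #include <stdio.h>

    static void fill(int *out) __attribute__((nonnull));

    static void fill(int *out)
    {
        *out = 42; /* safe to dereference: callers may not pass NULL */
    }

    int main(void)
    {
        int x;
        fill(&x);          /* OK */
        /* fill(NULL); */  /* with -Wall, gcc/clang warn: null passed to nonnull parameter */
        printf("%d\n", x);
        return 0;
    }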
@@ -2011,9 +2012,19 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
uint32_t valid_mask = 0x3fff;
ARMCPU *cpu = env_archcpu(env);
- if (arm_el_is_aa64(env, 3)) {
+ if (ri->state == ARM_CP_STATE_AA64) {
value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
valid_mask &= ~SCR_NET;
+
+ if (cpu_isar_feature(aa64_lor, cpu)) {
+ valid_mask |= SCR_TLOR;
+ }
+ if (cpu_isar_feature(aa64_pauth, cpu)) {
+ valid_mask |= SCR_API | SCR_APK;
+ }
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ valid_mask |= SCR_ATA;
+ }
} else {
valid_mask &= ~(SCR_RW | SCR_ST);
}
@@ -2032,12 +2043,6 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
valid_mask &= ~SCR_SMD;
}
}
- if (cpu_isar_feature(aa64_lor, cpu)) {
- valid_mask |= SCR_TLOR;
- }
- if (cpu_isar_feature(aa64_pauth, cpu)) {
- valid_mask |= SCR_API | SCR_APK;
- }
/* Clear all-context RES0 bits. */
value &= valid_mask;
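The valid_mask idiom above (build the set of writable bits from the implemented features, then mask) guarantees that guest writes to RES0 bits are dropped before the value is committed. A hedged sketch of the pattern, with illustrative bit positions and names rather than QEMU's real definitions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit assignments, not the architectural ones. */
    #define SCR_TLOR (1u << 14)
    #define SCR_API  (1u << 17)

    static uint32_t scr_effective(uint32_t value, bool have_lor, bool have_pauth)
    {
        uint32_t valid_mask = 0x3fff;  /* baseline writable bits */

        if (have_lor) {
            valid_mask |= SCR_TLOR;    /* writable only when the feature exists */
        }
        if (have_pauth) {
            valid_mask |= SCR_API;
        }
        return value & valid_mask;     /* clear RES0 bits */
    }

    int main(void)
    {
        /* Without LOR, a guest write of SCR_TLOR|1 stores just 1. */
        printf("%#x\n", scr_effective(SCR_TLOR | 1, false, true));
        return 0;
    }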
@@ -4697,6 +4702,22 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
ARMCPU *cpu = env_archcpu(env);
+ if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
+ /* M bit is RAZ/WI for PMSA with no MPU implemented */
+ value &= ~SCTLR_M;
+ }
+
+ /* ??? Lots of these bits are not implemented. */
+
+ if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
+ if (ri->opc1 == 6) { /* SCTLR_EL3 */
+ value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
+ } else {
+ value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
+ SCTLR_ATA0 | SCTLR_ATA);
+ }
+ }
+
if (raw_read(env, ri) == value) {
/* Skip the TLB flush if nothing actually changed; Linux likes
* to do a lot of pointless SCTLR writes.
@@ -4704,13 +4725,8 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
return;
}
- if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
- /* M bit is RAZ/WI for PMSA with no MPU implemented */
- value &= ~SCTLR_M;
- }
-
raw_write(env, ri, value);
- /* ??? Lots of these bits are not implemented. */
+
/* This may enable/disable the MMU, so do a TLB flush. */
tlb_flush(CPU(cpu));
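The point of hoisting the PMSA and MTE masking above the raw_read(env, ri) == value early return is that the comparison must see the value that will actually be stored; masking after the compare would let ignored bits defeat the "nothing changed" short-circuit. A standalone sketch of the ordering (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t stored = 0x1;   /* stand-in for raw_read() */

    /* Strip unimplemented bits first, then test for a no-op write. */
    static int write_reg(uint64_t value, uint64_t impl_mask)
    {
        value &= impl_mask;
        if (stored == value) {
            return 0;               /* skip the expensive TLB flush */
        }
        stored = value;
        return 1;                   /* would tlb_flush() here */
    }

    int main(void)
    {
        /* A write differing only in RAZ/WI bits is a no-op: no flush. */
        printf("%d\n", write_reg(0x1 | (1ull << 63), 0xffffffffull));
        return 0;
    }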
@@ -5236,17 +5252,22 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
if (cpu_isar_feature(aa64_pauth, cpu)) {
valid_mask |= HCR_API | HCR_APK;
}
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
+ }
}
/* Clear RES0 bits. */
value &= valid_mask;
- /* These bits change the MMU setup:
+ /*
+ * These bits change the MMU setup:
* HCR_VM enables stage 2 translation
* HCR_PTW forbids certain page-table setups
- * HCR_DC Disables stage1 and enables stage2 translation
+ * HCR_DC disables stage1 and enables stage2 translation
+ * HCR_DCT enables tagging on (disabled) stage1 translation
*/
- if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
+ if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT)) {
tlb_flush(CPU(cpu));
}
env->cp15.hcr_el2 = value;
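The flush condition uses the standard changed-bits test: XOR old and new values, then intersect with the bits that affect translation (now including HCR_DCT). A sketch, with illustrative bit positions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative positions, not the architectural definitions. */
    #define HCR_VM  (1ULL << 0)
    #define HCR_PTW (1ULL << 2)
    #define HCR_DC  (1ULL << 12)
    #define HCR_DCT (1ULL << 57)

    static bool needs_tlb_flush(uint64_t old_hcr, uint64_t new_hcr)
    {
        /* A bit is set in old ^ new iff it differs between the two. */
        return (old_hcr ^ new_hcr) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT);
    }

    int main(void)
    {
        printf("%d\n", needs_tlb_flush(0, HCR_DCT));      /* 1: flush */
        printf("%d\n", needs_tlb_flush(HCR_VM, HCR_VM));  /* 0: unchanged */
        return 0;
    }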
@@ -5861,6 +5882,9 @@ static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
{ K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
"ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
+ { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0),
+ "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
+
/* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
/* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
};
@@ -6835,6 +6859,165 @@ static const ARMCPRegInfo dcpodp_reg[] = {
};
#endif /*CONFIG_USER_ONLY*/
+static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 &&
+ arm_feature(env, ARM_FEATURE_EL2) &&
+ !(arm_hcr_el2_eff(env) & HCR_ATA)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 &&
+ arm_feature(env, ARM_FEATURE_EL3) &&
+ !(env->cp15.scr_el3 & SCR_ATA)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return env->pstate & PSTATE_TCO;
+}
+
+static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
+{
+ env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
+}
+
+static const ARMCPRegInfo mte_reginfo[] = {
+ { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_mte,
+ .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
+ { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_mte,
+ .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
+ { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
+ .access = PL2_RW, .accessfn = access_mte,
+ .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
+ { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
+ .access = PL3_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
+ { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
+ .access = PL1_RW, .accessfn = access_mte,
+ .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
+ { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
+ .access = PL1_RW, .accessfn = access_mte,
+ .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
+ { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
+ .access = PL1_R, .accessfn = access_aa64_tid5,
+ .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
+ { .name = "TCO", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
+ .type = ARM_CP_NO_RAW,
+ .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
+ { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
+ .type = ARM_CP_NOP, .access = PL1_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
+ .type = ARM_CP_NOP, .access = PL1_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
+ { .name = "TCO", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
+ .type = ARM_CP_CONST, .access = PL0_RW, },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
+ { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
+ .access = PL0_W, .type = ARM_CP_DC_GVA,
+#ifndef CONFIG_USER_ONLY
+ /* Avoid overhead of an access check that always passes in user-mode */
+ .accessfn = aa64_zva_access,
+#endif
+ },
+ { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
+ .access = PL0_W, .type = ARM_CP_DC_GZVA,
+#ifndef CONFIG_USER_ONLY
+ /* Avoid overhead of an access check that always passes in user-mode */
+ .accessfn = aa64_zva_access,
+#endif
+ },
+ REGINFO_SENTINEL
+};
+
#endif
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
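In access_mte above, the EL2 check is made before the EL3 check, so when both HCR_EL2.ATA and SCR_EL3.ATA would trap a given access the exception is taken to EL2. A standalone sketch of that ordering (enum values and parameters are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    enum { ACCESS_OK, TRAP_EL2, TRAP_EL3 };

    static int check_mte_access(int el, bool have_el2, bool hcr_ata,
                                bool have_el3, bool scr_ata)
    {
        /* The EL2 configurable trap has priority over the EL3 one. */
        if (el < 2 && have_el2 && !hcr_ata) {
            return TRAP_EL2;
        }
        if (el < 3 && have_el3 && !scr_ata) {
            return TRAP_EL3;
        }
        return ACCESS_OK;
    }

    int main(void)
    {
        /* An EL1 access with both traps armed goes to EL2, not EL3. */
        printf("%d\n", check_mte_access(1, true, false, true, false)); /* 1 */
        return 0;
    }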
@@ -7960,6 +8143,19 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}
}
#endif /*CONFIG_USER_ONLY*/
+
+ /*
+ * If full MTE is enabled, add all of the system registers.
+ * If only "instructions available at EL0" are enabled,
+ * then define only a RAZ/WI version of PSTATE.TCO.
+ */
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ define_arm_cp_regs(cpu, mte_reginfo);
+ define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
+ } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
+ define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
+ define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
+ }
#endif
if (cpu_isar_feature(any_predinv, cpu)) {
@@ -9509,6 +9705,9 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
break;
}
}
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ new_mode |= PSTATE_TCO;
+ }
pstate_write(env, PSTATE_DAIF | new_mode);
env->aarch64 = 1;
@@ -9614,42 +9813,6 @@ void arm_cpu_do_interrupt(CPUState *cs)
}
#endif /* !CONFIG_USER_ONLY */
-/* Return the exception level which controls this address translation regime */
-static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
- switch (mmu_idx) {
- case ARMMMUIdx_E20_0:
- case ARMMMUIdx_E20_2:
- case ARMMMUIdx_E20_2_PAN:
- case ARMMMUIdx_Stage2:
- case ARMMMUIdx_E2:
- return 2;
- case ARMMMUIdx_SE3:
- return 3;
- case ARMMMUIdx_SE10_0:
- return arm_el_is_aa64(env, 3) ? 1 : 3;
- case ARMMMUIdx_SE10_1:
- case ARMMMUIdx_SE10_1_PAN:
- case ARMMMUIdx_Stage1_E0:
- case ARMMMUIdx_Stage1_E1:
- case ARMMMUIdx_Stage1_E1_PAN:
- case ARMMMUIdx_E10_0:
- case ARMMMUIdx_E10_1:
- case ARMMMUIdx_E10_1_PAN:
- case ARMMMUIdx_MPrivNegPri:
- case ARMMMUIdx_MUserNegPri:
- case ARMMMUIdx_MPriv:
- case ARMMMUIdx_MUser:
- case ARMMMUIdx_MSPrivNegPri:
- case ARMMMUIdx_MSUserNegPri:
- case ARMMMUIdx_MSPriv:
- case ARMMMUIdx_MSUser:
- return 1;
- default:
- g_assert_not_reached();
- }
-}
-
uint64_t arm_sctlr(CPUARMState *env, int el)
{
/* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
@@ -9732,15 +9895,6 @@ static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
#endif /* !CONFIG_USER_ONLY */
-/* Return the TCR controlling this translation regime */
-static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
- if (mmu_idx == ARMMMUIdx_Stage2) {
- return &env->cp15.vtcr_el2;
- }
- return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
-}
-
/* Convert a possible stage1+2 MMU index into the appropriate
* stage 1 MMU index
*/
@@ -10541,6 +10695,16 @@ static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
}
}
+static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
+{
+ if (regime_has_2_ranges(mmu_idx)) {
+ return extract64(tcr, 57, 2);
+ } else {
+ /* Replicate the single TCMA bit so we always have 2 bits. */
+ return extract32(tcr, 30, 1) * 3;
+ }
+}
+
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
ARMMMUIdx mmu_idx, bool data)
{
@@ -10935,22 +11099,19 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
}
/* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
- txattrs->target_tlb_bit0 = true;
+ arm_tlb_bti_gp(txattrs) = true;
}
- if (cacheattrs != NULL) {
- if (mmu_idx == ARMMMUIdx_Stage2) {
- cacheattrs->attrs = convert_stage2_attrs(env,
- extract32(attrs, 0, 4));
- } else {
- /* Index into MAIR registers for cache attributes */
- uint8_t attrindx = extract32(attrs, 0, 3);
- uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
- assert(attrindx <= 7);
- cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
- }
- cacheattrs->shareability = extract32(attrs, 6, 2);
+ if (mmu_idx == ARMMMUIdx_Stage2) {
+ cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
+ } else {
+ /* Index into MAIR registers for cache attributes */
+ uint8_t attrindx = extract32(attrs, 0, 3);
+ uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+ assert(attrindx <= 7);
+ cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
}
+ cacheattrs->shareability = extract32(attrs, 6, 2);
*phys_ptr = descaddr;
*page_size_ptr = page_size;
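For reference, the MAIR lookup in the non-stage-2 branch above selects one attribute byte out of eight: the 3-bit attribute index from the descriptor picks byte attrindx of the 64-bit MAIR_ELx. An equivalent sketch (mair_attr is a hypothetical helper, mirroring the extract64(mair, attrindx * 8, 8) call):

    #include <stdint.h>
    #include <stdio.h>

    /* MAIR_ELx holds eight 8-bit memory attribute encodings. */
    static uint8_t mair_attr(uint64_t mair, unsigned attrindx)
    {
        return (mair >> (attrindx * 8)) & 0xff;
    }

    int main(void)
    {
        uint64_t mair = 0x00000000004404ffull; /* attrs 0..2: 0xff, 0x04, 0x44 */
        printf("%#x\n", mair_attr(mair, 2));   /* -> 0x44 */
        return 0;
    }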
@@ -11673,9 +11834,19 @@ static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
*/
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
- uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
- uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
+ uint8_t s1lo, s2lo, s1hi, s2hi;
ARMCacheAttrs ret;
+ bool tagged = false;
+
+ if (s1.attrs == 0xf0) {
+ tagged = true;
+ s1.attrs = 0xff;
+ }
+
+ s1lo = extract32(s1.attrs, 0, 4);
+ s2lo = extract32(s2.attrs, 0, 4);
+ s1hi = extract32(s1.attrs, 4, 4);
+ s2hi = extract32(s2.attrs, 4, 4);
/* Combine shareability attributes (table D4-43) */
if (s1.shareability == 2 || s2.shareability == 2) {
@@ -11723,6 +11894,11 @@ static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
}
}
+ /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
+ if (tagged && ret.attrs == 0xff) {
+ ret.attrs = 0xf0;
+ }
+
return ret;
}
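The shape of the Tagged handling above: attr 0xf0 (Tagged Normal WB) takes part in the stage-1/stage-2 combination as if it were plain 0xff, and the Tagged encoding is restored only when the combined result is still fully write-back. Schematically (the real CombineS1S2Desc nibble rules are elided; the AND below is only a placeholder):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t combine_attrs(uint8_t s1, uint8_t s2)
    {
        bool tagged = false;

        if (s1 == 0xf0) {       /* Tagged Normal WB: combine as 0xff */
            tagged = true;
            s1 = 0xff;
        }

        /* Placeholder for the real nibble-combining rules. */
        uint8_t ret = s1 & s2;

        if (tagged && ret == 0xff) {
            ret = 0xf0;         /* still WB on both stages: stays Tagged */
        }
        return ret;
    }

    int main(void)
    {
        printf("%#x\n", combine_attrs(0xf0, 0xff)); /* 0xf0: Tagged survives */
        printf("%#x\n", combine_attrs(0xf0, 0x44)); /* demoted, no longer Tagged */
        return 0;
    }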
@@ -11785,28 +11961,32 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2,
mmu_idx == ARMMMUIdx_E10_0,
phys_ptr, attrs, &s2_prot,
- page_size, fi,
- cacheattrs != NULL ? &cacheattrs2 : NULL);
+ page_size, fi, &cacheattrs2);
fi->s2addr = ipa;
/* Combine the S1 and S2 perms. */
*prot &= s2_prot;
- /* Combine the S1 and S2 cache attributes, if needed */
- if (!ret && cacheattrs != NULL) {
- if (env->cp15.hcr_el2 & HCR_DC) {
- /*
- * HCR.DC forces the first stage attributes to
- * Normal Non-Shareable,
- * Inner Write-Back Read-Allocate Write-Allocate,
- * Outer Write-Back Read-Allocate Write-Allocate.
- */
+ /* If S2 fails, return early. */
+ if (ret) {
+ return ret;
+ }
+
+ /* Combine the S1 and S2 cache attributes. */
+ if (env->cp15.hcr_el2 & HCR_DC) {
+ /*
+ * HCR.DC forces the first stage attributes to
+ * Normal Non-Shareable,
+ * Inner Write-Back Read-Allocate Write-Allocate,
+ * Outer Write-Back Read-Allocate Write-Allocate.
+ * Do not overwrite Tagged within attrs.
+ */
+ if (cacheattrs->attrs != 0xf0) {
cacheattrs->attrs = 0xff;
- cacheattrs->shareability = 0;
}
- *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
+ cacheattrs->shareability = 0;
}
-
- return ret;
+ *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
+ return 0;
} else {
/*
* For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
@@ -11867,6 +12047,9 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
/* Definitely a real MMU, not an MPU */
if (regime_translation_disabled(env, mmu_idx)) {
+ uint64_t hcr;
+ uint8_t memattr;
+
/*
* MMU disabled. S1 addresses within aa64 translation regimes are
* still checked for bounds -- see AArch64.TranslateAddressS1Off.
@@ -11904,6 +12087,27 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
*phys_ptr = address;
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
*page_size = TARGET_PAGE_SIZE;
+
+ /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
+ hcr = arm_hcr_el2_eff(env);
+ cacheattrs->shareability = 0;
+ if (hcr & HCR_DC) {
+ if (hcr & HCR_DCT) {
+ memattr = 0xf0; /* Tagged, Normal, WB, RWA */
+ } else {
+ memattr = 0xff; /* Normal, WB, RWA */
+ }
+ } else if (access_type == MMU_INST_FETCH) {
+ if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
+ memattr = 0xee; /* Normal, WT, RA, NT */
+ } else {
+ memattr = 0x44; /* Normal, NC, No */
+ }
+ cacheattrs->shareability = 2; /* outer shareable */
+ } else {
+ memattr = 0x00; /* Device, nGnRnE */
+ }
+ cacheattrs->attrs = memattr;
return 0;
}
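The constants chosen in the MMU-off path above are MAIR-format attribute bytes. A decode sketch covering just the cases used here (illustrative, not the full MAIR decode):

    #include <stdint.h>
    #include <stdio.h>

    static const char *memattr_name(uint8_t attr)
    {
        switch (attr) {
        case 0xf0: return "Tagged Normal, WB, RWA";
        case 0xff: return "Normal, WB, RWA";
        case 0xee: return "Normal, WT, RA, NT";
        case 0x44: return "Normal, NC";
        case 0x00: return "Device-nGnRnE";
        default:   return "other";
        }
    }

    int main(void)
    {
        printf("%s\n", memattr_name(0xee)); /* instruction fetch, SCTLR.I set */
        return 0;
    }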
@@ -11931,11 +12135,12 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
bool ret;
ARMMMUFaultInfo fi = {};
ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+ ARMCacheAttrs cacheattrs = {};
*attrs = (MemTxAttrs) {};
ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
- attrs, &prot, &page_size, &fi, NULL);
+ attrs, &prot, &page_size, &fi, &cacheattrs);
if (ret) {
return -1;
@@ -12565,6 +12770,36 @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
}
}
+ if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
+ /*
+ * Set MTE_ACTIVE if any access may be Checked, and leave clear
+ * if all accesses must be Unchecked:
+ * 1) If no TBI, then there are no tags in the address to check,
+ * 2) If Tag Check Override, then all accesses are Unchecked,
+ * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
+ * 4) If no Allocation Tag Access, then all accesses are Unchecked.
+ */
+ if (allocation_tag_access_enabled(env, el, sctlr)) {
+ flags = FIELD_DP32(flags, TBFLAG_A64, ATA, 1);
+ if (tbid
+ && !(env->pstate & PSTATE_TCO)
+ && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
+ flags = FIELD_DP32(flags, TBFLAG_A64, MTE_ACTIVE, 1);
+ }
+ }
+ /* And again for unprivileged accesses, if required. */
+ if (FIELD_EX32(flags, TBFLAG_A64, UNPRIV)
+ && tbid
+ && !(env->pstate & PSTATE_TCO)
+ && (sctlr & SCTLR_TCF0)
+ && allocation_tag_access_enabled(env, 0, sctlr)) {
+ flags = FIELD_DP32(flags, TBFLAG_A64, MTE0_ACTIVE, 1);
+ }
+ /* Cache TCMA as well as TBI. */
+ flags = FIELD_DP32(flags, TBFLAG_A64, TCMA,
+ aa64_va_parameter_tcma(tcr, mmu_idx));
+ }
+
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
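The four numbered conditions in the comment above collapse to one conjunction: tag checks can only fire when TBI provides tags in the address, TCO is clear, the TCF policy is non-zero, and allocation-tag access is enabled. As a standalone sketch (mte_active is a hypothetical helper, not QEMU's):

    #include <stdbool.h>
    #include <stdio.h>

    /* All four conditions must hold for any access to be Checked;
     * otherwise MTE_ACTIVE stays clear. */
    static bool mte_active(bool tbi, bool tco, unsigned tcf, bool ata)
    {
        return ata && tbi && !tco && tcf != 0;
    }

    int main(void)
    {
        printf("%d\n", mte_active(true, false, 1, true)); /* 1: checked */
        printf("%d\n", mte_active(true, true, 1, true));  /* 0: TCO overrides */
        return 0;
    }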