| author | Richard Henderson | 2020-02-07 15:04:24 +0100 |
|---|---|---|
| committer | Peter Maydell | 2020-02-07 15:04:24 +0100 |
| commit | b9f6033c1a5fb7da55ed353794db8ec064f78bb2 (patch) | |
| tree | b080257e1a02574681a1ae5ae29fb43c7fea5590 /target/arm/cpu.h | |
| parent | target/arm: Tidy ARMMMUIdx m-profile definitions (diff) | |
target/arm: Reorganize ARMMMUIdx
Prepare for, but do not yet implement, the EL2&0 regime.
This involves adding the new MMUIdx enumerators and adjusting
some of the MMUIdx related predicates to match.
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200206105448.4726-20-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
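Editor's note: a quick illustration of why the core-index mask widens in this patch. With the EL2&0 entries added there are nine A-profile mmu_idx values, which no longer fit in the old 3-bit core index, so ARM_MMU_IDX_COREIDX_MASK grows from 0x7 to 0xf and the type mask is built from the type bits explicitly. The standalone snippet below is illustrative only, not QEMU code; the `main()` harness and assertions are this note's own, but the constants are taken verbatim from the patch.

```c
/*
 * Minimal standalone sketch (not QEMU code) of the index layout this
 * patch introduces.  The constants match the new definitions in cpu.h.
 */
#include <assert.h>
#include <stdio.h>

#define ARM_MMU_IDX_A     0x10  /* A profile */
#define ARM_MMU_IDX_NOTLB 0x20  /* does not have a TLB */
#define ARM_MMU_IDX_M     0x40  /* M profile */

#define ARM_MMU_IDX_TYPE_MASK \
    (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
#define ARM_MMU_IDX_COREIDX_MASK 0xf

int main(void)
{
    /* The nine A-profile indexes occupy core indexes 0..8. */
    for (int core = 0; core <= 8; core++) {
        int idx = core | ARM_MMU_IDX_A;
        assert((idx & ARM_MMU_IDX_COREIDX_MASK) == core);
        assert((idx & ARM_MMU_IDX_TYPE_MASK) == ARM_MMU_IDX_A);
    }
    /* Core index 8 would not fit in the old 3-bit mask. */
    assert((8 & 0x7) != 8);
    printf("4-bit core-index mask covers all 9 A-profile indexes\n");
    return 0;
}
```

Defining ARM_MMU_IDX_TYPE_MASK in terms of the type bits, rather than the old `(~0x7)`, keeps the mask correct now that the core-index field occupies four bits.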
Diffstat (limited to 'target/arm/cpu.h')
-rw-r--r-- | target/arm/cpu.h | 134 |
1 file changed, 58 insertions, 76 deletions
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index ad92873943..3fc0e6e746 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -2819,18 +2819,21 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  *  + NonSecure EL1 & 0 stage 1
  *  + NonSecure EL1 & 0 stage 2
  *  + NonSecure EL2
- *  + Secure EL1 & EL0
+ *  + NonSecure EL2 & 0 (ARMv8.1-VHE)
+ *  + Secure EL1 & 0
  *  + Secure EL3
  * If EL3 is 32-bit:
  *  + NonSecure PL1 & 0 stage 1
  *  + NonSecure PL1 & 0 stage 2
  *  + NonSecure PL2
- *  + Secure PL0 & PL1
+ *  + Secure PL0
+ *  + Secure PL1
  * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
  *
  * For QEMU, an mmu_idx is not quite the same as a translation regime because:
- *  1. we need to split the "EL1 & 0" regimes into two mmu_idxes, because they
- *     may differ in access permissions even if the VA->PA map is the same
+ *  1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
+ *     because they may differ in access permissions even if the VA->PA map is
+ *     the same
  *  2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
  *     translation, which means that we have one mmu_idx that deals with two
  *     concatenated translation regimes [this sort of combined s1+2 TLB is
@@ -2842,19 +2845,23 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  *  4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
  *     translation regimes, because they map reasonably well to each other
  *     and they can't both be active at the same time.
- * This gives us the following list of mmu_idx values:
+ *  5. we want to be able to use the TLB for accesses done as part of a
+ *     stage1 page table walk, rather than having to walk the stage2 page
+ *     table over and over.
  *
- * NS EL0 (aka NS PL0) stage 1+2
- * NS EL1 (aka NS PL1) stage 1+2
+ * This gives us the following list of cases:
+ *
+ * NS EL0 EL1&0 stage 1+2 (aka NS PL0)
+ * NS EL1 EL1&0 stage 1+2 (aka NS PL1)
+ * NS EL0 EL2&0
+ * NS EL2 EL2&0
  * NS EL2 (aka NS PL2)
+ * S EL0 EL1&0 (aka S PL0)
+ * S EL1 EL1&0 (not used if EL3 is 32 bit)
  * S EL3 (aka S PL1)
- * S EL0 (aka S PL0)
- * S EL1 (not used if EL3 is 32 bit)
- * NS EL0+1 stage 2
+ * NS EL1&0 stage 2
  *
- * (The last of these is an mmu_idx because we want to be able to use the TLB
- * for the accesses done as part of a stage 1 page table walk, rather than
- * having to walk the stage 2 page table over and over.)
+ * for a total of 9 different mmu_idx.
  *
  * R profile CPUs have an MPU, but can use the same set of MMU indexes
  * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
@@ -2892,26 +2899,47 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  * For M profile we arrange them to have a bit for priv, a bit for negpri
  * and a bit for secure.
  */
-#define ARM_MMU_IDX_A 0x10 /* A profile */
-#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
-#define ARM_MMU_IDX_M 0x40 /* M profile */
+#define ARM_MMU_IDX_A     0x10  /* A profile */
+#define ARM_MMU_IDX_NOTLB 0x20  /* does not have a TLB */
+#define ARM_MMU_IDX_M     0x40  /* M profile */
 
-/* meanings of the bits for M profile mmu idx values */
-#define ARM_MMU_IDX_M_PRIV 0x1
+/* Meanings of the bits for M profile mmu idx values */
+#define ARM_MMU_IDX_M_PRIV   0x1
 #define ARM_MMU_IDX_M_NEGPRI 0x2
-#define ARM_MMU_IDX_M_S 0x4
+#define ARM_MMU_IDX_M_S      0x4  /* Secure */
 
-#define ARM_MMU_IDX_TYPE_MASK (~0x7)
-#define ARM_MMU_IDX_COREIDX_MASK 0x7
+#define ARM_MMU_IDX_TYPE_MASK \
+    (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
+#define ARM_MMU_IDX_COREIDX_MASK 0xf
 
 typedef enum ARMMMUIdx {
-    ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
-    ARMMMUIdx_E10_1 = 1 | ARM_MMU_IDX_A,
-    ARMMMUIdx_E2 = 2 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE3 = 3 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE10_0 = 4 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE10_1 = 5 | ARM_MMU_IDX_A,
-    ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_A,
+    /*
+     * A-profile.
+     */
+    ARMMMUIdx_E10_0     = 0 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E20_0     = 1 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_E10_1     = 2 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_E2        = 3 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E20_2     = 4 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_SE10_0    = 5 | ARM_MMU_IDX_A,
+    ARMMMUIdx_SE10_1    = 6 | ARM_MMU_IDX_A,
+    ARMMMUIdx_SE3       = 7 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_Stage2    = 8 | ARM_MMU_IDX_A,
+
+    /*
+     * These are not allocated TLBs and are used only for AT system
+     * instructions or for the first stage of an S12 page table walk.
+     */
+    ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
+    ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
+
+    /*
+     * M-profile.
+     */
     ARMMMUIdx_MUser = ARM_MMU_IDX_M,
     ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
     ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
@@ -2920,11 +2948,6 @@ typedef enum ARMMMUIdx {
     ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
     ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
     ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
-    /* Indexes below here don't have TLBs and are used only for AT system
-     * instructions or for the first stage of an S12 page table walk.
-     */
-    ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
-    ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
 } ARMMMUIdx;
 
 /*
@@ -2936,8 +2959,10 @@ typedef enum ARMMMUIdx {
 
 typedef enum ARMMMUIdxBit {
     TO_CORE_BIT(E10_0),
+    TO_CORE_BIT(E20_0),
     TO_CORE_BIT(E10_1),
     TO_CORE_BIT(E2),
+    TO_CORE_BIT(E20_2),
     TO_CORE_BIT(SE10_0),
     TO_CORE_BIT(SE10_1),
     TO_CORE_BIT(SE3),
@@ -2957,49 +2982,6 @@ typedef enum ARMMMUIdxBit {
 
 #define MMU_USER_IDX 0
 
-static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
-{
-    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
-}
-
-static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
-{
-    if (arm_feature(env, ARM_FEATURE_M)) {
-        return mmu_idx | ARM_MMU_IDX_M;
-    } else {
-        return mmu_idx | ARM_MMU_IDX_A;
-    }
-}
-
-/* Return the exception level we're running at if this is our mmu_idx */
-static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
-{
-    switch (mmu_idx & ARM_MMU_IDX_TYPE_MASK) {
-    case ARM_MMU_IDX_A:
-        return mmu_idx & 3;
-    case ARM_MMU_IDX_M:
-        return mmu_idx & ARM_MMU_IDX_M_PRIV;
-    default:
-        g_assert_not_reached();
-    }
-}
-
-/*
- * Return the MMU index for a v7M CPU with all relevant information
- * manually specified.
- */
-ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
-                              bool secstate, bool priv, bool negpri);
-
-/* Return the MMU index for a v7M CPU in the specified security and
- * privilege state.
- */
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
-                                                bool secstate, bool priv);
-
-/* Return the MMU index for a v7M CPU in the specified security state */
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
-
 /**
  * cpu_mmu_index:
  * @env: The cpu environment
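Editor's note on one consequence of the new numbering: the arm_mmu_idx_to_el() helper removed from cpu.h above relied on `mmu_idx & 3`, which no longer yields the exception level once ARMMMUIdx_E20_0 (core index 1, runs at EL0) or ARMMMUIdx_SE10_1 (core index 6, runs at EL1) exist. Any replacement has to map the A-profile indexes explicitly. The sketch below is hypothetical — `sketch_mmu_idx_to_el()` is this note's own name, not the helper QEMU actually uses — but the enumerator values are taken from the patch:

```c
#include <assert.h>
#include <stdio.h>

/* A-profile enumerators as introduced by this patch (M-profile omitted). */
#define ARM_MMU_IDX_A 0x10

typedef enum ARMMMUIdx {
    ARMMMUIdx_E10_0     = 0 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_0     = 1 | ARM_MMU_IDX_A,
    ARMMMUIdx_E10_1     = 2 | ARM_MMU_IDX_A,
    ARMMMUIdx_E2        = 3 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_2     = 4 | ARM_MMU_IDX_A,
    ARMMMUIdx_SE10_0    = 5 | ARM_MMU_IDX_A,
    ARMMMUIdx_SE10_1    = 6 | ARM_MMU_IDX_A,
    ARMMMUIdx_SE3       = 7 | ARM_MMU_IDX_A,
    ARMMMUIdx_Stage2    = 8 | ARM_MMU_IDX_A,
} ARMMMUIdx;

/* Hypothetical helper: explicit EL mapping for the regime-bearing indexes. */
static int sketch_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE10_0:
        return 0;
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_SE10_1:
        return 1;
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E20_2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    default:
        return -1;  /* Stage2 walks would need separate treatment. */
    }
}

int main(void)
{
    /* The old "mmu_idx & 3" trick now disagrees with the actual EL. */
    assert((ARMMMUIdx_E20_0  & 3) == 1 && sketch_mmu_idx_to_el(ARMMMUIdx_E20_0)  == 0);
    assert((ARMMMUIdx_SE10_0 & 3) == 1 && sketch_mmu_idx_to_el(ARMMMUIdx_SE10_0) == 0);
    assert((ARMMMUIdx_SE10_1 & 3) == 2 && sketch_mmu_idx_to_el(ARMMMUIdx_SE10_1) == 1);
    printf("explicit mapping required with the reorganized indexes\n");
    return 0;
}
```

This also explains why the commit drops these inline helpers from cpu.h rather than patching them in place: the low bits of an A-profile mmu_idx now encode position in the enumeration, not the exception level.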