path: root/target/arm/cpu.h
author      Peter Maydell   2018-02-15 19:29:37 +0100
committer   Peter Maydell   2018-02-15 19:29:49 +0100
commit      43bbce7fbef22adf687dd84934fd0b2f8df807a8 (patch)
tree        6b723768dea3f977f642b110ca3a87d75ca8b10a /target/arm/cpu.h
parent      hw/intc/armv7m_nvic: Implement v8M CPPWR register (diff)
hw/intc/armv7m_nvic: Implement cache ID registers
M profile cores have a similar setup for cache ID registers to A profile:
 * Cache Level ID Register (CLIDR) is a fixed value
 * Cache Type Register (CTR) is a fixed value
 * Cache Size ID Registers (CCSIDR) are a bank of registers; which one
   you see is selected by the Cache Size Selection Register (CSSELR)

The only difference is that they're in the NVIC memory mapped register
space rather than being coprocessor registers. Implement the M profile
view of them.

Since neither Cortex-M3 nor Cortex-M4 implement caches, we don't need
to update their init functions and can leave the ctr/clidr/ccsidr[]
fields in their ARMCPU structs at zero. Newer cores (like the
Cortex-M33) will want to be able to set these ID registers to non-zero
values, though.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180209165810.6668-6-peter.maydell@linaro.org
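For illustration only: this diff touches only cpu.h, and the memory-mapped
handling lives in hw/intc/armv7m_nvic.c, which is not shown here. Below is a
minimal standalone C sketch of the register model the message describes
(fixed CLIDR/CTR values, a bank of CCSIDR values selected by a guest-written
CSSELR, and CSSELR treated as RAZ/WI when CLIDR reports no caches). All names
and the 0xf selector mask in this sketch are hypothetical and not part of the
patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model, not QEMU code: fixed cache ID values plus a
 * CCSIDR bank indexed by the low bits of CSSELR (InD + Level).
 */
struct demo_cache_ids {
    uint32_t clidr;        /* fixed Cache Level ID value */
    uint32_t ctr;          /* fixed Cache Type value */
    uint32_t ccsidr[16];   /* bank of Cache Size ID values */
    uint32_t csselr;       /* selector written by the guest */
};

static uint32_t demo_read_ccsidr(const struct demo_cache_ids *c)
{
    /* Assumed behaviour for the sketch: if CLIDR reports no caches,
     * CSSELR is RAZ/WI and CCSIDR reads as zero; otherwise the
     * selector picks which CCSIDR entry is visible.
     */
    if (c->clidr == 0) {
        return 0;
    }
    return c->ccsidr[c->csselr & 0xf];
}

int main(void)
{
    struct demo_cache_ids c = { .clidr = 1, .ccsidr = { [0] = 0x1234 } };

    c.csselr = 0;  /* select the level 1 data/unified cache */
    printf("CCSIDR = 0x%" PRIx32 "\n", demo_read_ccsidr(&c));
    return 0;
}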
Diffstat (limited to 'target/arm/cpu.h')
-rw-r--r--   target/arm/cpu.h   26
1 file changed, 26 insertions, 0 deletions
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 51a3e16275..8938a7c953 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -496,6 +496,7 @@ typedef struct CPUARMState {
uint32_t faultmask[M_REG_NUM_BANKS];
uint32_t aircr; /* only holds r/w state if security extn implemented */
uint32_t secure; /* Is CPU in Secure state? (not guest visible) */
+ uint32_t csselr[M_REG_NUM_BANKS];
} v7m;
/* Information associated with an exception about to be taken:
@@ -1325,6 +1326,23 @@ FIELD(V7M_MPU_CTRL, ENABLE, 0, 1)
FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1)
FIELD(V7M_MPU_CTRL, PRIVDEFENA, 2, 1)
+/* v7M CLIDR bits */
+FIELD(V7M_CLIDR, CTYPE_ALL, 0, 21)
+FIELD(V7M_CLIDR, LOUIS, 21, 3)
+FIELD(V7M_CLIDR, LOC, 24, 3)
+FIELD(V7M_CLIDR, LOUU, 27, 3)
+FIELD(V7M_CLIDR, ICB, 30, 2)
+
+FIELD(V7M_CSSELR, IND, 0, 1)
+FIELD(V7M_CSSELR, LEVEL, 1, 3)
+/* We use the combination of InD and Level to index into cpu->ccsidr[];
+ * define a mask for this and check that it doesn't permit running off
+ * the end of the array.
+ */
+FIELD(V7M_CSSELR, INDEX, 0, 4)
+
+QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);
+
/* If adding a feature bit which corresponds to a Linux ELF
* HWCAP bit, remember to update the feature-bit-to-hwcap
* mapping in linux-user/elfload.c:get_elf_hwcap().
@@ -2487,6 +2505,14 @@ static inline int arm_debug_target_el(CPUARMState *env)
}
}
+static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu)
+{
+ /* If all the CLIDR.Ctypem bits are 0 there are no caches, and
+ * CSSELR is RAZ/WI.
+ */
+ return (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0;
+}
+
static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
{
if (arm_is_secure(env)) {
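
An aside on the R_V7M_CSSELR_INDEX field and the QEMU_BUILD_BUG_ON added in
the hunk above: the combined InD+Level value is used directly as an index
into cpu->ccsidr[], and the build-time assertion guarantees the bank is large
enough for any value the mask can produce. The following rough, standalone
sketch shows the same pattern outside QEMU; the names are invented, and C11
_Static_assert stands in for QEMU_BUILD_BUG_ON.

#include <stdint.h>

/* 4-bit selector: InD in bit 0, Level in bits 3:1, used directly as
 * an index into the CCSIDR bank.
 */
#define DEMO_CSSELR_INDEX_MASK 0xf

static uint32_t demo_ccsidr[16];

/* Build-time bound check, in the spirit of QEMU_BUILD_BUG_ON: the
 * bank must have more entries than the largest maskable index.
 */
_Static_assert(sizeof(demo_ccsidr) / sizeof(demo_ccsidr[0]) >
               DEMO_CSSELR_INDEX_MASK,
               "ccsidr bank too small for CSSELR index mask");

static uint32_t demo_select_ccsidr(uint32_t csselr)
{
    /* Masking before indexing makes out-of-range access impossible. */
    return demo_ccsidr[csselr & DEMO_CSSELR_INDEX_MASK];
}

int main(void)
{
    demo_ccsidr[1] = 0xf003e019u;  /* arbitrary example value */
    return demo_select_ccsidr(1) == 0xf003e019u ? 0 : 1;
}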