-rw-r--r--  accel/tcg/translate-all.c               13
-rw-r--r--  hw/adc/npcm7xx_adc.c                     4
-rw-r--r--  hw/arm/bcm2835_peripherals.c            26
-rw-r--r--  hw/intc/armv7m_nvic.c                    9
-rw-r--r--  include/hw/arm/bcm2835_peripherals.h     2
-rw-r--r--  target/arm/cpu.c                         2
-rw-r--r--  target/arm/cpu.h                        38
-rw-r--r--  target/arm/debug_helper.c                2
-rw-r--r--  target/arm/helper.c                    128
-rw-r--r--  target/arm/internals.h                  34
-rw-r--r--  target/arm/ptw.c                        38
-rw-r--r--  target/arm/tlb_helper.c                  2
-rw-r--r--  target/arm/translate-a64.c               4
-rw-r--r--  tests/qtest/bcm2835-dma-test.c         118
-rw-r--r--  tests/qtest/meson.build                  3
-rw-r--r--  tests/qtest/npcm7xx_adc-test.c           2
16 files changed, 306 insertions, 119 deletions
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 8fd23a9d05..ef62a199c7 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -2256,6 +2256,15 @@ int page_get_flags(target_ulong address)
return p->flags;
}
+/*
+ * Allow the target to decide if PAGE_TARGET_[12] may be reset.
+ * By default, they are not kept.
+ */
+#ifndef PAGE_TARGET_STICKY
+#define PAGE_TARGET_STICKY 0
+#endif
+#define PAGE_STICKY (PAGE_ANON | PAGE_TARGET_STICKY)
+
/* Modify the flags of a page and invalidate the code if necessary.
The flag PAGE_WRITE_ORG is positioned automatically depending
on PAGE_WRITE. The mmap_lock should already be held. */
@@ -2299,8 +2308,8 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
p->target_data = NULL;
p->flags = flags;
} else {
- /* Using mprotect on a page does not change MAP_ANON. */
- p->flags = (p->flags & PAGE_ANON) | flags;
+ /* Using mprotect on a page does not change sticky bits. */
+ p->flags = (p->flags & PAGE_STICKY) | flags;
}
}
}
diff --git a/hw/adc/npcm7xx_adc.c b/hw/adc/npcm7xx_adc.c
index 0f0a9f63e2..bc6f3f55e6 100644
--- a/hw/adc/npcm7xx_adc.c
+++ b/hw/adc/npcm7xx_adc.c
@@ -36,7 +36,7 @@ REG32(NPCM7XX_ADC_DATA, 0x4)
#define NPCM7XX_ADC_CON_INT BIT(18)
#define NPCM7XX_ADC_CON_EN BIT(17)
#define NPCM7XX_ADC_CON_RST BIT(16)
-#define NPCM7XX_ADC_CON_CONV BIT(14)
+#define NPCM7XX_ADC_CON_CONV BIT(13)
#define NPCM7XX_ADC_CON_DIV(rv) extract32(rv, 1, 8)
#define NPCM7XX_ADC_MAX_RESULT 1023
@@ -242,7 +242,7 @@ static void npcm7xx_adc_init(Object *obj)
for (i = 0; i < NPCM7XX_ADC_NUM_INPUTS; ++i) {
object_property_add_uint32_ptr(obj, "adci[*]",
- &s->adci[i], OBJ_PROP_FLAG_WRITE);
+ &s->adci[i], OBJ_PROP_FLAG_READWRITE);
}
object_property_add_uint32_ptr(obj, "vref",
&s->vref, OBJ_PROP_FLAG_WRITE);
diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c
index 48538c9360..3c2a4160cd 100644
--- a/hw/arm/bcm2835_peripherals.c
+++ b/hw/arm/bcm2835_peripherals.c
@@ -23,6 +23,13 @@
/* Capabilities for SD controller: no DMA, high-speed, default clocks etc. */
#define BCM2835_SDHC_CAPAREG 0x52134b4
+/*
+ * According to the Linux driver & DTS, DMA channels 0--10 have separate IRQs,
+ * while channels 11--14 share one IRQ:
+ */
+#define SEPARATE_DMA_IRQ_MAX 10
+#define ORGATED_DMA_IRQ_COUNT 4
+
static void create_unimp(BCM2835PeripheralState *ps,
UnimplementedDeviceState *uds,
const char *name, hwaddr ofs, hwaddr size)
@@ -101,6 +108,11 @@ static void bcm2835_peripherals_init(Object *obj)
/* DMA Channels */
object_initialize_child(obj, "dma", &s->dma, TYPE_BCM2835_DMA);
+ object_initialize_child(obj, "orgated-dma-irq",
+ &s->orgated_dma_irq, TYPE_OR_IRQ);
+ object_property_set_int(OBJECT(&s->orgated_dma_irq), "num-lines",
+ ORGATED_DMA_IRQ_COUNT, &error_abort);
+
object_property_add_const_link(OBJECT(&s->dma), "dma-mr",
OBJECT(&s->gpu_bus_mr));
@@ -322,12 +334,24 @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
memory_region_add_subregion(&s->peri_mr, DMA15_OFFSET,
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->dma), 1));
- for (n = 0; n <= 12; n++) {
+ for (n = 0; n <= SEPARATE_DMA_IRQ_MAX; n++) {
sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma), n,
qdev_get_gpio_in_named(DEVICE(&s->ic),
BCM2835_IC_GPU_IRQ,
INTERRUPT_DMA0 + n));
}
+ if (!qdev_realize(DEVICE(&s->orgated_dma_irq), NULL, errp)) {
+ return;
+ }
+ for (n = 0; n < ORGATED_DMA_IRQ_COUNT; n++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma),
+ SEPARATE_DMA_IRQ_MAX + 1 + n,
+ qdev_get_gpio_in(DEVICE(&s->orgated_dma_irq), n));
+ }
+ qdev_connect_gpio_out(DEVICE(&s->orgated_dma_irq), 0,
+ qdev_get_gpio_in_named(DEVICE(&s->ic),
+ BCM2835_IC_GPU_IRQ,
+ INTERRUPT_DMA0 + SEPARATE_DMA_IRQ_MAX + 1));
/* THERMAL */
if (!sysbus_realize(SYS_BUS_DEVICE(&s->thermal), errp)) {
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 13df002ce4..1f7763964c 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -2389,8 +2389,15 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
+ /*
+ * Note that if the input line is still held high and the interrupt
+ * is not active then rule R_CVJS requires that the Pending state
+ * remains set; in that case we mustn't let it be cleared.
+ */
if (value & (1 << i) &&
- (attrs.secure || s->itns[startvec + i])) {
+ (attrs.secure || s->itns[startvec + i]) &&
+ !(setval == 0 && s->vectors[startvec + i].level &&
+ !s->vectors[startvec + i].active)) {
s->vectors[startvec + i].pending = setval;
}
}
diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h
index d864879421..c9d25d493e 100644
--- a/include/hw/arm/bcm2835_peripherals.h
+++ b/include/hw/arm/bcm2835_peripherals.h
@@ -17,6 +17,7 @@
#include "hw/char/bcm2835_aux.h"
#include "hw/display/bcm2835_fb.h"
#include "hw/dma/bcm2835_dma.h"
+#include "hw/or-irq.h"
#include "hw/intc/bcm2835_ic.h"
#include "hw/misc/bcm2835_property.h"
#include "hw/misc/bcm2835_rng.h"
@@ -55,6 +56,7 @@ struct BCM2835PeripheralState {
BCM2835AuxState aux;
BCM2835FBState fb;
BCM2835DMAState dma;
+ qemu_or_irq orgated_dma_irq;
BCM2835ICState ic;
BCM2835PropertyState property;
BCM2835RngState rng;
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 5de7e097e9..1b7b3d76bb 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -226,7 +226,7 @@ static void arm_cpu_reset(DeviceState *dev)
* Enable TBI0 but not TBI1.
* Note that this must match useronly_clean_ptr.
*/
- env->cp15.tcr_el[1].raw_tcr = 5 | (1ULL << 37);
+ env->cp15.tcr_el[1] = 5 | (1ULL << 37);
/* Enable MTE */
if (cpu_isar_feature(aa64_mte, cpu)) {
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 1e36a839ee..e890ee074d 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -166,12 +166,6 @@ typedef struct ARMGenericTimer {
#define GTIMER_HYPVIRT 4
#define NUM_GTIMERS 5
-typedef struct {
- uint64_t raw_tcr;
- uint32_t mask;
- uint32_t base_mask;
-} TCR;
-
#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
@@ -339,9 +333,9 @@ typedef struct CPUArchState {
uint64_t vttbr_el2; /* Virtualization Translation Table Base. */
uint64_t vsttbr_el2; /* Secure Virtualization Translation Table. */
/* MMU translation table base control. */
- TCR tcr_el[4];
- TCR vtcr_el2; /* Virtualization Translation Control. */
- TCR vstcr_el2; /* Secure Virtualization Translation Control. */
+ uint64_t tcr_el[4];
+ uint64_t vtcr_el2; /* Virtualization Translation Control. */
+ uint64_t vstcr_el2; /* Secure Virtualization Translation Control. */
uint32_t c2_data; /* MPU data cacheable bits. */
uint32_t c2_insn; /* MPU instruction cacheable bits. */
union { /* MMU domain access control register
@@ -1418,6 +1412,25 @@ FIELD(CPTR_EL3, TCPAC, 31, 1)
#define TTBCR_SH1 (1U << 28)
#define TTBCR_EAE (1U << 31)
+FIELD(VTCR, T0SZ, 0, 6)
+FIELD(VTCR, SL0, 6, 2)
+FIELD(VTCR, IRGN0, 8, 2)
+FIELD(VTCR, ORGN0, 10, 2)
+FIELD(VTCR, SH0, 12, 2)
+FIELD(VTCR, TG0, 14, 2)
+FIELD(VTCR, PS, 16, 3)
+FIELD(VTCR, VS, 19, 1)
+FIELD(VTCR, HA, 21, 1)
+FIELD(VTCR, HD, 22, 1)
+FIELD(VTCR, HWU59, 25, 1)
+FIELD(VTCR, HWU60, 26, 1)
+FIELD(VTCR, HWU61, 27, 1)
+FIELD(VTCR, HWU62, 28, 1)
+FIELD(VTCR, NSW, 29, 1)
+FIELD(VTCR, NSA, 30, 1)
+FIELD(VTCR, DS, 32, 1)
+FIELD(VTCR, SL2, 33, 1)
+
/* Bit definitions for ARMv8 SPSR (PSTATE) format.
* Only these are valid when in AArch64 mode; in
* AArch32 mode SPSRs are basically CPSR-format.
@@ -3392,9 +3405,12 @@ static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
/*
* AArch64 usage of the PAGE_TARGET_* bits for linux-user.
+ * Note that with the Linux kernel, PROT_MTE may not be cleared by
+ * mprotect but PROT_BTI may be cleared. C.f. the kernel's VM_ARCH_CLEAR.
*/
-#define PAGE_BTI PAGE_TARGET_1
-#define PAGE_MTE PAGE_TARGET_2
+#define PAGE_BTI PAGE_TARGET_1
+#define PAGE_MTE PAGE_TARGET_2
+#define PAGE_TARGET_STICKY PAGE_MTE
#ifdef TARGET_TAGGED_ADDRESSES
/**
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
index d09fccb0a4..c21739242c 100644
--- a/target/arm/debug_helper.c
+++ b/target/arm/debug_helper.c
@@ -439,7 +439,7 @@ static uint32_t arm_debug_exception_fsr(CPUARMState *env)
using_lpae = true;
} else {
if (arm_feature(env, ARM_FEATURE_LPAE) &&
- (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
+ (env->cp15.tcr_el[target_el] & TTBCR_EAE)) {
using_lpae = true;
}
}
diff --git a/target/arm/helper.c b/target/arm/helper.c
index cfcad97ce0..1a8b06410e 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -3606,19 +3606,21 @@ static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
};
-static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
+static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
{
- TCR *tcr = raw_ptr(env, ri);
- int maskshift = extract32(value, 0, 3);
+ ARMCPU *cpu = env_archcpu(env);
if (!arm_feature(env, ARM_FEATURE_V8)) {
if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
- /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
- * using Long-desciptor translation table format */
+ /*
+ * Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
+ * using Long-descriptor translation table format
+ */
value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
} else if (arm_feature(env, ARM_FEATURE_EL3)) {
- /* In an implementation that includes the Security Extensions
+ /*
+ * In an implementation that includes the Security Extensions
* TTBCR has additional fields PD0 [4] and PD1 [5] for
* Short-descriptor translation table format.
*/
@@ -3628,55 +3630,23 @@ static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
}
- /* Update the masks corresponding to the TCR bank being written
- * Note that we always calculate mask and base_mask, but
- * they are only used for short-descriptor tables (ie if EAE is 0);
- * for long-descriptor tables the TCR fields are used differently
- * and the mask and base_mask values are meaningless.
- */
- tcr->raw_tcr = value;
- tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
- tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
-}
-
-static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- ARMCPU *cpu = env_archcpu(env);
- TCR *tcr = raw_ptr(env, ri);
-
if (arm_feature(env, ARM_FEATURE_LPAE)) {
/* With LPAE the TTBCR could result in a change of ASID
* via the TTBCR.A1 bit, so do a TLB flush.
*/
tlb_flush(CPU(cpu));
}
- /* Preserve the high half of TCR_EL1, set via TTBCR2. */
- value = deposit64(tcr->raw_tcr, 0, 32, value);
- vmsa_ttbcr_raw_write(env, ri, value);
-}
-
-static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- TCR *tcr = raw_ptr(env, ri);
-
- /* Reset both the TCR as well as the masks corresponding to the bank of
- * the TCR being reset.
- */
- tcr->raw_tcr = 0;
- tcr->mask = 0;
- tcr->base_mask = 0xffffc000u;
+ raw_write(env, ri, value);
}
static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = env_archcpu(env);
- TCR *tcr = raw_ptr(env, ri);
/* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
tlb_flush(CPU(cpu));
- tcr->raw_tcr = value;
+ raw_write(env, ri, value);
}
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3780,15 +3750,15 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
.opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL1_RW, .accessfn = access_tvm_trvm,
.writefn = vmsa_tcr_el12_write,
- .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
+ .raw_writefn = raw_write,
+ .resetvalue = 0,
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
{ .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL1_RW, .accessfn = access_tvm_trvm,
.type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
- .raw_writefn = vmsa_ttbcr_raw_write,
- /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. */
- .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]),
- offsetof(CPUARMState, cp15.tcr_el[1])} },
+ .raw_writefn = raw_write,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
+ offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
};
/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
@@ -3799,8 +3769,8 @@ static const ARMCPRegInfo ttbcr2_reginfo = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.type = ARM_CP_ALIAS,
.bank_fieldoffsets = {
- offsetofhigh32(CPUARMState, cp15.tcr_el[3].raw_tcr),
- offsetofhigh32(CPUARMState, cp15.tcr_el[1].raw_tcr),
+ offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
+ offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
},
};
@@ -4216,7 +4186,7 @@ static int vae1_tlbmask(CPUARMState *env)
static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
uint64_t addr)
{
- uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+ uint64_t tcr = regime_tcr(env, mmu_idx);
int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
int select = extract64(addr, 55, 1);
@@ -5403,19 +5373,16 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
.access = PL2_RW, .writefn = vmsa_tcr_el12_write,
- /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
{ .name = "VTCR", .state = ARM_CP_STATE_AA32,
.cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
.type = ARM_CP_ALIAS,
.access = PL2_RW, .accessfn = access_el3_aa32ns,
- .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
{ .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
.access = PL2_RW,
- /* no .writefn needed as this can't cause an ASID change;
- * no .raw_writefn or .resetfn needed as we never use mask/base_mask
- */
+ /* no .writefn needed as this can't cause an ASID change */
.fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
{ .name = "VTTBR", .state = ARM_CP_STATE_AA32,
.cp = 15, .opc1 = 6, .crm = 2,
@@ -5645,12 +5612,8 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
{ .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
.access = PL3_RW,
- /* no .writefn needed as this can't cause an ASID change;
- * we must provide a .raw_writefn and .resetfn because we handle
- * reset and migration for the AArch32 TTBCR(S), which might be
- * using mask and base_mask.
- */
- .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
+ /* no .writefn needed as this can't cause an ASID change */
+ .resetvalue = 0,
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
{ .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
@@ -10158,7 +10121,7 @@ static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
ARMMMUIdx mmu_idx, bool data)
{
- uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+ uint64_t tcr = regime_tcr(env, mmu_idx);
bool epd, hpd, using16k, using64k, tsz_oob, ds;
int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
ARMCPU *cpu = env_archcpu(env);
@@ -10849,7 +10812,7 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
{
CPUARMTBFlags flags = {};
ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
- uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+ uint64_t tcr = regime_tcr(env, mmu_idx);
uint64_t sctlr;
int tbii, tbid;
@@ -10882,13 +10845,19 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
}
if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
int sme_el = sme_exception_el(env, el);
+ bool sm = FIELD_EX64(env->svcr, SVCR, SM);
DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
if (sme_el == 0) {
/* Similarly, do not compute SVL if SME is disabled. */
- DP_TBFLAG_A64(flags, SVL, sve_vqm1_for_el_sm(env, el, true));
+ int svl = sve_vqm1_for_el_sm(env, el, true);
+ DP_TBFLAG_A64(flags, SVL, svl);
+ if (sm) {
+ /* If SVE is disabled, we will not have set VL above. */
+ DP_TBFLAG_A64(flags, VL, svl);
+ }
}
- if (FIELD_EX64(env->svcr, SVCR, SM)) {
+ if (sm) {
DP_TBFLAG_A64(flags, PSTATE_SM, 1);
DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
}
@@ -11222,6 +11191,21 @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
}
}
+static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
+{
+ int exc_el;
+
+ if (sm) {
+ exc_el = sme_exception_el(env, el);
+ } else {
+ exc_el = sve_exception_el(env, el);
+ }
+ if (exc_el) {
+ return 0; /* disabled */
+ }
+ return sve_vqm1_for_el_sm(env, el, sm);
+}
+
/*
* Notice a change in SVE vector size when changing EL.
*/
@@ -11230,7 +11214,7 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
{
ARMCPU *cpu = env_archcpu(env);
int old_len, new_len;
- bool old_a64, new_a64;
+ bool old_a64, new_a64, sm;
/* Nothing to do if no SVE. */
if (!cpu_isar_feature(aa64_sve, cpu)) {
@@ -11250,7 +11234,8 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
* invoke ResetSVEState when taking an exception from, or
* returning to, AArch32 state when PSTATE.SM is enabled.
*/
- if (old_a64 != new_a64 && FIELD_EX64(env->svcr, SVCR, SM)) {
+ sm = FIELD_EX64(env->svcr, SVCR, SM);
+ if (old_a64 != new_a64 && sm) {
arm_reset_sve_state(env);
return;
}
@@ -11267,10 +11252,13 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
* we already have the correct register contents when encountering the
* vq0->vq0 transition between EL0->EL1.
*/
- old_len = (old_a64 && !sve_exception_el(env, old_el)
- ? sve_vqm1_for_el(env, old_el) : 0);
- new_len = (new_a64 && !sve_exception_el(env, new_el)
- ? sve_vqm1_for_el(env, new_el) : 0);
+ old_len = new_len = 0;
+ if (old_a64) {
+ old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
+ }
+ if (new_a64) {
+ new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
+ }
/* When changing vector length, clear inaccessible state. */
if (new_len < old_len) {
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 00e2e710f6..b8fefdff67 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -252,9 +252,9 @@ unsigned int arm_pamax(ARMCPU *cpu);
*/
static inline bool extended_addresses_enabled(CPUARMState *env)
{
- TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
+ uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
return arm_el_is_aa64(env, 1) ||
- (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
+ (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}
/* Update a QEMU watchpoint based on the information the guest has set in the
@@ -777,20 +777,36 @@ static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
-/* Return the TCR controlling this translation regime */
-static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
+/*
+ * These are the fields in VTCR_EL2 which affect both the Secure stage 2
+ * and the Non-Secure stage 2 translation regimes (and hence which are
+ * not present in VSTCR_EL2).
+ */
+#define VTCR_SHARED_FIELD_MASK \
+ (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
+ R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
+ R_VTCR_DS_MASK)
+
+/* Return the value of the TCR controlling this translation regime */
+static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
if (mmu_idx == ARMMMUIdx_Stage2) {
- return &env->cp15.vtcr_el2;
+ return env->cp15.vtcr_el2;
}
if (mmu_idx == ARMMMUIdx_Stage2_S) {
/*
- * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
- * those are not currently used by QEMU, so just return VSTCR_EL2.
+ * Secure stage 2 shares fields from VTCR_EL2. We merge those
+ * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
+ * value so the callers don't need to special case this.
+ *
+ * If a future architecture change defines bits in VSTCR_EL2 that
+ * overlap with these VTCR_EL2 fields we may need to revisit this.
*/
- return &env->cp15.vstcr_el2;
+ uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
+ v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
+ return v;
}
- return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
+ return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
/**
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index e71fc1f429..3261039d93 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -241,9 +241,9 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
if (arm_is_secure_below_el3(env)) {
/* Check if page table walk is to secure or non-secure PA space. */
if (*is_secure) {
- *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
+ *is_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
} else {
- *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
+ *is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
}
} else {
assert(!*is_secure);
@@ -315,20 +315,24 @@ static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
uint32_t *table, uint32_t address)
{
/* Note that we can only get here for an AArch32 PL0/PL1 lookup */
- TCR *tcr = regime_tcr(env, mmu_idx);
+ uint64_t tcr = regime_tcr(env, mmu_idx);
+ int maskshift = extract32(tcr, 0, 3);
+ uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
+ uint32_t base_mask;
- if (address & tcr->mask) {
- if (tcr->raw_tcr & TTBCR_PD1) {
+ if (address & mask) {
+ if (tcr & TTBCR_PD1) {
/* Translation table walk disabled for TTBR1 */
return false;
}
*table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
} else {
- if (tcr->raw_tcr & TTBCR_PD0) {
+ if (tcr & TTBCR_PD0) {
/* Translation table walk disabled for TTBR0 */
return false;
}
- *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
+ base_mask = ~((uint32_t)0x3fffu >> maskshift);
+ *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
}
*table |= (address >> 18) & 0x3ffc;
return true;
@@ -820,7 +824,7 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
ARMMMUIdx mmu_idx)
{
- uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+ uint64_t tcr = regime_tcr(env, mmu_idx);
uint32_t el = regime_el(env, mmu_idx);
int select, tsz;
bool epd, hpd;
@@ -994,7 +998,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
uint32_t attrs;
int32_t stride;
int addrsize, inputsize, outputsize;
- TCR *tcr = regime_tcr(env, mmu_idx);
+ uint64_t tcr = regime_tcr(env, mmu_idx);
int ap, ns, xn, pxn;
uint32_t el = regime_el(env, mmu_idx);
uint64_t descaddrmask;
@@ -1112,8 +1116,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
* For stage 2 translations the starting level is specified by the
* VTCR_EL2.SL0 field (whose interpretation depends on the page size)
*/
- uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
- uint32_t sl2 = extract64(tcr->raw_tcr, 33, 1);
+ uint32_t sl0 = extract32(tcr, 6, 2);
+ uint32_t sl2 = extract64(tcr, 33, 1);
uint32_t startlevel;
bool ok;
@@ -2337,9 +2341,9 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
ipa_secure = attrs->secure;
if (arm_is_secure_below_el3(env)) {
if (ipa_secure) {
- attrs->secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
+ attrs->secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
} else {
- attrs->secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
+ attrs->secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
}
} else {
assert(!ipa_secure);
@@ -2381,11 +2385,11 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
if (arm_is_secure_below_el3(env)) {
if (ipa_secure) {
attrs->secure =
- !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
+ !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW));
} else {
attrs->secure =
- !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
- || (env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW)));
+ !((env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))
+ || (env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)));
}
}
return 0;
@@ -2462,7 +2466,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
int r_el = regime_el(env, mmu_idx);
if (arm_el_is_aa64(env, r_el)) {
int pamax = arm_pamax(env_archcpu(env));
- uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
+ uint64_t tcr = env->cp15.tcr_el[r_el];
int addrtop, tbi;
tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
index 7d8a86b3c4..5a709eab56 100644
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tlb_helper.c
@@ -20,7 +20,7 @@ bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
return true;
}
if (arm_feature(env, ARM_FEATURE_LPAE)
- && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
+ && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
return true;
}
return false;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index b7b64f7358..163df8c615 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -3138,7 +3138,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
bool is_store = false;
bool is_extended = false;
bool is_unpriv = (idx == 2);
- bool iss_valid = !is_vector;
+ bool iss_valid;
bool post_index;
bool writeback;
int memidx;
@@ -3191,6 +3191,8 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
g_assert_not_reached();
}
+ iss_valid = !is_vector && !writeback;
+
if (rn == 31) {
gen_check_sp_alignment(s);
}
diff --git a/tests/qtest/bcm2835-dma-test.c b/tests/qtest/bcm2835-dma-test.c
new file mode 100644
index 0000000000..8293d822b9
--- /dev/null
+++ b/tests/qtest/bcm2835-dma-test.c
@@ -0,0 +1,118 @@
+/*
+ * QTest testcase for BCM283x DMA engine (on Raspberry Pi 3)
+ * and its interrupts coming to Interrupt Controller.
+ *
+ * Copyright (c) 2022 Auriga LLC
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest-single.h"
+
+/* Offsets in raspi3b platform: */
+#define RASPI3_DMA_BASE 0x3f007000
+#define RASPI3_IC_BASE 0x3f00b200
+
+/* Register/field definitions used by the test: */
+
+/* DMA engine registers: */
+#define BCM2708_DMA_CS 0
+#define BCM2708_DMA_ACTIVE (1 << 0)
+#define BCM2708_DMA_INT (1 << 2)
+
+#define BCM2708_DMA_ADDR 0x04
+
+#define BCM2708_DMA_INT_STATUS 0xfe0
+
+/* DMA Transfer Info fields: */
+#define BCM2708_DMA_INT_EN (1 << 0)
+#define BCM2708_DMA_D_INC (1 << 4)
+#define BCM2708_DMA_S_INC (1 << 8)
+
+/* Interrupt controller registers: */
+#define IRQ_PENDING_BASIC 0x00
+#define IRQ_GPU_PENDING1_AGGR (1 << 8)
+#define IRQ_PENDING_1 0x04
+#define IRQ_ENABLE_1 0x10
+
+/* Data for the test: */
+#define SCB_ADDR 256
+#define S_ADDR 32
+#define D_ADDR 64
+#define TXFR_LEN 32
+const uint32_t check_data = 0x12345678;
+
+static void bcm2835_dma_test_interrupt(int dma_c, int irq_line)
+{
+ uint64_t dma_base = RASPI3_DMA_BASE + dma_c * 0x100;
+ int gpu_irq_line = 16 + irq_line;
+
+ /* Check that interrupts are silent by default: */
+ writel(RASPI3_IC_BASE + IRQ_ENABLE_1, 1 << gpu_irq_line);
+ int isr = readl(dma_base + BCM2708_DMA_INT_STATUS);
+ g_assert_cmpint(isr, ==, 0);
+ uint32_t reg0 = readl(dma_base + BCM2708_DMA_CS);
+ g_assert_cmpint(reg0, ==, 0);
+ uint32_t ic_pending = readl(RASPI3_IC_BASE + IRQ_PENDING_BASIC);
+ g_assert_cmpint(ic_pending, ==, 0);
+ uint32_t gpu_pending1 = readl(RASPI3_IC_BASE + IRQ_PENDING_1);
+ g_assert_cmpint(gpu_pending1, ==, 0);
+
+ /* Prepare Control Block: */
+ writel(SCB_ADDR + 0, BCM2708_DMA_S_INC | BCM2708_DMA_D_INC |
+ BCM2708_DMA_INT_EN); /* transfer info */
+ writel(SCB_ADDR + 4, S_ADDR); /* source address */
+ writel(SCB_ADDR + 8, D_ADDR); /* destination address */
+ writel(SCB_ADDR + 12, TXFR_LEN); /* transfer length */
+ writel(dma_base + BCM2708_DMA_ADDR, SCB_ADDR);
+
+ writel(S_ADDR, check_data);
+ for (int word = S_ADDR + 4; word < S_ADDR + TXFR_LEN; word += 4) {
+ writel(word, ~check_data);
+ }
+ /* Perform the transfer: */
+ writel(dma_base + BCM2708_DMA_CS, BCM2708_DMA_ACTIVE);
+
+ /* Check that destination == source: */
+ uint32_t data = readl(D_ADDR);
+ g_assert_cmpint(data, ==, check_data);
+ for (int word = D_ADDR + 4; word < D_ADDR + TXFR_LEN; word += 4) {
+ data = readl(word);
+ g_assert_cmpint(data, ==, ~check_data);
+ }
+
+ /* Check that interrupt status is set both in DMA and IC controllers: */
+ isr = readl(RASPI3_DMA_BASE + BCM2708_DMA_INT_STATUS);
+ g_assert_cmpint(isr, ==, 1 << dma_c);
+
+ ic_pending = readl(RASPI3_IC_BASE + IRQ_PENDING_BASIC);
+ g_assert_cmpint(ic_pending, ==, IRQ_GPU_PENDING1_AGGR);
+
+ gpu_pending1 = readl(RASPI3_IC_BASE + IRQ_PENDING_1);
+ g_assert_cmpint(gpu_pending1, ==, 1 << gpu_irq_line);
+
+ /* Clean up, clear interrupt: */
+ writel(dma_base + BCM2708_DMA_CS, BCM2708_DMA_INT);
+}
+
+static void bcm2835_dma_test_interrupts(void)
+{
+ /* DMA engines 0--10 have separate IRQ lines; engines 11--14 share one: */
+ bcm2835_dma_test_interrupt(0, 0);
+ bcm2835_dma_test_interrupt(10, 10);
+ bcm2835_dma_test_interrupt(11, 11);
+ bcm2835_dma_test_interrupt(14, 11);
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("/bcm2835/dma/test_interrupts",
+ bcm2835_dma_test_interrupts);
+ qtest_start("-machine raspi3b");
+ ret = g_test_run();
+ qtest_end();
+ return ret;
+}
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index 31287a9173..3a474010e4 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -218,7 +218,8 @@ qtests_aarch64 = \
['arm-cpu-features',
'numa-test',
'boot-serial-test',
- 'migration-test']
+ 'migration-test',
+ 'bcm2835-dma-test']
qtests_s390x = \
(slirp.found() ? ['pxe-test', 'test-netfilter'] : []) + \
diff --git a/tests/qtest/npcm7xx_adc-test.c b/tests/qtest/npcm7xx_adc-test.c
index 3fa6d9ece0..8048044d28 100644
--- a/tests/qtest/npcm7xx_adc-test.c
+++ b/tests/qtest/npcm7xx_adc-test.c
@@ -50,7 +50,7 @@
#define CON_INT BIT(18)
#define CON_EN BIT(17)
#define CON_RST BIT(16)
-#define CON_CONV BIT(14)
+#define CON_CONV BIT(13)
#define CON_DIV(rv) extract32(rv, 1, 8)
#define FST_RDST BIT(1)