Diffstat (limited to 'target/arm/helper.c')
-rw-r--r--  target/arm/helper.c  334
1 file changed, 261 insertions(+), 73 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 7bf50fdd76..088956eecf 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -4511,70 +4511,73 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
#ifdef TARGET_AARCH64
-static uint64_t tlbi_aa64_range_get_length(CPUARMState *env,
- uint64_t value)
-{
- unsigned int page_shift;
- unsigned int page_size_granule;
- uint64_t num;
- uint64_t scale;
- uint64_t exponent;
+typedef struct {
+ uint64_t base;
uint64_t length;
+} TLBIRange;
+
+static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
+ uint64_t value)
+{
+ unsigned int page_size_granule, page_shift, num, scale, exponent;
+ /* Extract one bit to represent the va selector in use. */
+ uint64_t select = sextract64(value, 36, 1);
+ ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true);
+ TLBIRange ret = { };
- num = extract64(value, 39, 5);
- scale = extract64(value, 44, 2);
page_size_granule = extract64(value, 46, 2);
- if (page_size_granule == 0) {
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid page size granule %d\n",
+ /* The granule encoded in value must match the granule in use. */
+ if (page_size_granule != (param.using64k ? 3 : param.using16k ? 2 : 1)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
page_size_granule);
- return 0;
+ return ret;
}
page_shift = (page_size_granule - 1) * 2 + 12;
-
+ num = extract64(value, 39, 5);
+ scale = extract64(value, 44, 2);
exponent = (5 * scale) + 1;
- length = (num + 1) << (exponent + page_shift);
- return length;
-}
+ ret.length = (uint64_t)(num + 1) << (exponent + page_shift);
-static uint64_t tlbi_aa64_range_get_base(CPUARMState *env, uint64_t value,
- bool two_ranges)
-{
- /* TODO: ARMv8.7 FEAT_LPA2 */
- uint64_t pageaddr;
-
- if (two_ranges) {
- pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
+ if (param.select) {
+ ret.base = sextract64(value, 0, 37);
} else {
- pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+ ret.base = extract64(value, 0, 37);
+ }
+ if (param.ds) {
+ /*
+ * With DS=1, BaseADDR is always shifted 16 so that it is able
+ * to address all 52 va bits. The input address is perforce
+ * aligned on a 64k boundary regardless of translation granule.
+ */
+ page_shift = 16;
}
+ ret.base <<= page_shift;
- return pageaddr;
+ return ret;
}
static void do_rvae_write(CPUARMState *env, uint64_t value,
int idxmap, bool synced)
{
ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
- bool two_ranges = regime_has_2_ranges(one_idx);
- uint64_t baseaddr, length;
+ TLBIRange range;
int bits;
- baseaddr = tlbi_aa64_range_get_base(env, value, two_ranges);
- length = tlbi_aa64_range_get_length(env, value);
- bits = tlbbits_for_regime(env, one_idx, baseaddr);
+ range = tlbi_aa64_get_range(env, one_idx, value);
+ bits = tlbbits_for_regime(env, one_idx, range.base);
if (synced) {
tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
- baseaddr,
- length,
+ range.base,
+ range.length,
idxmap,
bits);
} else {
- tlb_flush_range_by_mmuidx(env_cpu(env), baseaddr,
- length, idxmap, bits);
+ tlb_flush_range_by_mmuidx(env_cpu(env), range.base,
+ range.length, idxmap, bits);
}
}
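
To make the range encoding concrete, here is a standalone sketch (not QEMU code; the helper is a stand-in and the payload value is made up for illustration) that decodes the NUM/SCALE/TG fields the same way tlbi_aa64_get_range does:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Minimal stand-in for QEMU's extract64() bitfield helper. */
    static uint64_t extract64(uint64_t v, int start, int len)
    {
        return (v >> start) & (~0ULL >> (64 - len));
    }

    int main(void)
    {
        /* Hypothetical range-TLBI payload: TG=1 (4KB), SCALE=2, NUM=7. */
        uint64_t value = (1ULL << 46) | (2ULL << 44) | (7ULL << 39);

        unsigned tg    = extract64(value, 46, 2);   /* page size granule */
        unsigned num   = extract64(value, 39, 5);
        unsigned scale = extract64(value, 44, 2);
        unsigned page_shift = (tg - 1) * 2 + 12;    /* 12, 14 or 16 */
        unsigned exponent   = 5 * scale + 1;

        /* Range covered: (NUM + 1) * 2^(5*SCALE+1) pages. */
        uint64_t length = (uint64_t)(num + 1) << (exponent + page_shift);

        /* Prints page_shift=12 length=0x4000000 (64 MiB). */
        printf("page_shift=%u length=0x%" PRIx64 "\n", page_shift, length);
        return 0;
    }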
@@ -6423,11 +6426,18 @@ static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMCPU *cpu = env_archcpu(env);
int i = ri->crm;
- /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
- * register reads and behaves as if values written are sign extended.
+ /*
* Bits [1:0] are RES0.
+ *
+ * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA)
+ * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if
+ * they contain the value written. It is CONSTRAINED UNPREDICTABLE
+ * whether the RESS bits are ignored when comparing an address.
+ *
+ * Therefore we are allowed to compare the entire register, which lets
+ * us avoid considering whether or not FEAT_LVA is actually enabled.
*/
- value = sextract64(value, 0, 49) & ~3ULL;
+ value &= ~3ULL;
raw_write(env, ri, value);
hw_watchpoint_update(cpu, i);
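
As a quick sanity check on the RESS reasoning above, a sketch (not part of the patch; the written value is arbitrary) showing that the old sign-extension and the new store-as-written behaviour can only disagree in bits [63:49]:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for QEMU's sextract64(): extract and sign-extend a field. */
    static int64_t sextract64(uint64_t v, int start, int len)
    {
        return ((int64_t)(v << (64 - len - start))) >> (64 - len);
    }

    int main(void)
    {
        uint64_t written = 0x00ffaaaabbbbccccULL;   /* arbitrary example VA */

        uint64_t pre  = (uint64_t)sextract64(written, 0, 49) & ~3ULL;
        uint64_t post = written & ~3ULL;

        /* The two forms agree on everything below the RESS bits. */
        assert(((pre ^ post) & ((1ULL << 49) - 1)) == 0);
        return 0;
    }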
@@ -6473,10 +6483,19 @@ void hw_breakpoint_update(ARMCPU *cpu, int n)
case 0: /* unlinked address match */
case 1: /* linked address match */
{
- /* Bits [63:49] are hardwired to the value of bit [48]; that is,
- * we behave as if the register was sign extended. Bits [1:0] are
- * RES0. The BAS field is used to allow setting breakpoints on 16
- * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
+ /*
+ * Bits [1:0] are RES0.
+ *
+ * It is IMPLEMENTATION DEFINED whether bits [63:49]
+ * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
+ * of the VA field ([48] or [52] for FEAT_LVA), or whether the
+ * value is read as written. It is CONSTRAINED UNPREDICTABLE
+ * whether the RESS bits are ignored when comparing an address.
+ * Therefore we are allowed to compare the entire register, which
+ * lets us avoid considering whether FEAT_LVA is actually enabled.
+ *
+ * The BAS field is used to allow setting breakpoints on 16-bit
+ * wide instructions; it is CONSTRAINED UNPREDICTABLE whether
* a bp will fire if the addresses covered by the bp and the addresses
* covered by the insn overlap but the insn doesn't start at the
* start of the bp address range. We choose to require the insn and
@@ -6489,7 +6508,7 @@ void hw_breakpoint_update(ARMCPU *cpu, int n)
* See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
*/
int bas = extract64(bcr, 5, 4);
- addr = sextract64(bvr, 0, 49) & ~3ULL;
+ addr = bvr & ~3ULL;
if (bas == 0) {
return;
}
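
For reference, a hedged sketch of the BAS byte-select semantics discussed above (an illustrative helper, simplified from what QEMU actually builds in hw_breakpoint_update):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * BAS is a 4-bit byte-address-select: bit n covers byte (BVR + n),
     * so a 16-bit insn at BVR+2 is matched by BAS=0b1100.
     */
    static bool bas_matches(unsigned bas, uint64_t bvr, uint64_t insn_addr)
    {
        uint64_t base = bvr & ~3ULL;    /* word-aligned compare address */
        if (insn_addr < base || insn_addr - base >= 4) {
            return false;
        }
        /* Require the insn to start on a byte selected by BAS. */
        return (bas >> (insn_addr - base)) & 1;
    }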
@@ -11065,13 +11084,18 @@ do_fault:
* false otherwise.
*/
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
- int inputsize, int stride)
+ int inputsize, int stride, int outputsize)
{
const int grainsize = stride + 3;
int startsizecheck;
- /* Negative levels are never allowed. */
- if (level < 0) {
+ /*
+ * Negative levels are usually not allowed...
+ * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
+ * begins with level -1. Note that previous feature tests will have
+ * eliminated this combination if it is not enabled.
+ */
+ if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
return false;
}
@@ -11081,22 +11105,19 @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
}
if (is_aa64) {
- CPUARMState *env = &cpu->env;
- unsigned int pamax = arm_pamax(cpu);
-
switch (stride) {
case 13: /* 64KB Pages. */
- if (level == 0 || (level == 1 && pamax <= 42)) {
+ if (level == 0 || (level == 1 && outputsize <= 42)) {
return false;
}
break;
case 11: /* 16KB Pages. */
- if (level == 0 || (level == 1 && pamax <= 40)) {
+ if (level == 0 || (level == 1 && outputsize <= 40)) {
return false;
}
break;
case 9: /* 4KB Pages. */
- if (level == 0 && pamax <= 42) {
+ if (level == 0 && outputsize <= 42) {
return false;
}
break;
@@ -11105,8 +11126,8 @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
}
/* Inputsize checks. */
- if (inputsize > pamax &&
- (arm_el_is_aa64(env, 1) || inputsize > 40)) {
+ if (inputsize > outputsize &&
+ (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
/* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
return false;
}
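
Condensed, the new lower bound on the walk's starting level looks like this (a sketch under the same assumptions as the code above: a 4KB granule means stride 9, and only FEAT_LPA2 walks with a 52-bit input space begin at level -1):

    #include <stdbool.h>

    static int min_start_level(int inputsize, int stride)
    {
        /* FEAT_LPA2: 4KB granule with a 52-bit IA space adds level -1. */
        return (inputsize == 52 && stride == 9) ? -1 : 0;
    }

    static bool start_level_ok(int level, int inputsize, int stride)
    {
        return level >= min_start_level(inputsize, stride);
    }

So start_level_ok(-1, 52, 9) holds, while start_level_ok(-1, 48, 9) and any 16KB or 64KB walk at level -1 do not.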
@@ -11152,6 +11173,31 @@ static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
}
#endif /* !CONFIG_USER_ONLY */
+/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
+static const uint8_t pamax_map[] = {
+ [0] = 32,
+ [1] = 36,
+ [2] = 40,
+ [3] = 42,
+ [4] = 44,
+ [5] = 48,
+ [6] = 52,
+};
+
+/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
+unsigned int arm_pamax(ARMCPU *cpu)
+{
+ unsigned int parange =
+ FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
+
+ /*
+ * id_aa64mmfr0 is a read-only register so values outside of the
+ * supported mappings can be considered an implementation error.
+ */
+ assert(parange < ARRAY_SIZE(pamax_map));
+ return pamax_map[parange];
+}
+
static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
{
if (regime_has_2_ranges(mmu_idx)) {
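
Because the PARANGE encoding is shared with TCR_ELx.{I}PS, the table can be exercised on its own; a minimal sketch (outside QEMU, duplicating the mapping above):

    #include <assert.h>

    static const unsigned char pamax_map[] = { 32, 36, 40, 42, 44, 48, 52 };

    int main(void)
    {
        /* e.g. ID_AA64MMFR0_EL1.PARANGE == 5 advertises 48-bit PAs. */
        unsigned parange = 5;

        assert(parange < sizeof(pamax_map));    /* mirrors arm_pamax() */
        assert(pamax_map[parange] == 48);
        return 0;
    }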
@@ -11190,8 +11236,9 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
ARMMMUIdx mmu_idx, bool data)
{
uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
- bool epd, hpd, using16k, using64k;
- int select, tsz, tbi, max_tsz;
+ bool epd, hpd, using16k, using64k, tsz_oob, ds;
+ int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
+ ARMCPU *cpu = env_archcpu(env);
if (!regime_has_2_ranges(mmu_idx)) {
select = 0;
@@ -11205,6 +11252,9 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
hpd = extract32(tcr, 24, 1);
}
epd = false;
+ sh = extract32(tcr, 12, 2);
+ ps = extract32(tcr, 16, 3);
+ ds = extract64(tcr, 32, 1);
} else {
/*
* Bit 55 is always between the two regions, and is canonical for
@@ -11214,6 +11264,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
if (!select) {
tsz = extract32(tcr, 0, 6);
epd = extract32(tcr, 7, 1);
+ sh = extract32(tcr, 12, 2);
using64k = extract32(tcr, 14, 1);
using16k = extract32(tcr, 15, 1);
hpd = extract64(tcr, 41, 1);
@@ -11223,18 +11274,61 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
using64k = tg == 3;
tsz = extract32(tcr, 16, 6);
epd = extract32(tcr, 23, 1);
+ sh = extract32(tcr, 28, 2);
hpd = extract64(tcr, 42, 1);
}
+ ps = extract64(tcr, 32, 3);
+ ds = extract64(tcr, 59, 1);
}
- if (cpu_isar_feature(aa64_st, env_archcpu(env))) {
+ if (cpu_isar_feature(aa64_st, cpu)) {
max_tsz = 48 - using64k;
} else {
max_tsz = 39;
}
- tsz = MIN(tsz, max_tsz);
- tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */
+ /*
+ * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
+ * adjust the effective value of DS, as documented.
+ */
+ min_tsz = 16;
+ if (using64k) {
+ if (cpu_isar_feature(aa64_lva, cpu)) {
+ min_tsz = 12;
+ }
+ ds = false;
+ } else if (ds) {
+ switch (mmu_idx) {
+ case ARMMMUIdx_Stage2:
+ case ARMMMUIdx_Stage2_S:
+ if (using16k) {
+ ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
+ } else {
+ ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
+ }
+ break;
+ default:
+ if (using16k) {
+ ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
+ } else {
+ ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
+ }
+ break;
+ }
+ if (ds) {
+ min_tsz = 12;
+ }
+ }
+
+ if (tsz > max_tsz) {
+ tsz = max_tsz;
+ tsz_oob = true;
+ } else if (tsz < min_tsz) {
+ tsz = min_tsz;
+ tsz_oob = true;
+ } else {
+ tsz_oob = false;
+ }
/* Present TBI as a composite with TBID. */
tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
@@ -11245,12 +11339,16 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
return (ARMVAParameters) {
.tsz = tsz,
+ .ps = ps,
+ .sh = sh,
.select = select,
.tbi = tbi,
.epd = epd,
.hpd = hpd,
.using16k = using16k,
.using64k = using64k,
+ .tsz_oob = tsz_oob,
+ .ds = ds,
};
}
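
The TxSZ clamping can be summarized standalone. A sketch, assuming ds has already been qualified against the per-granule FEAT_LPA2 checks in the switch above:

    #include <stdbool.h>

    typedef struct {
        int tsz;        /* clamped value actually used for the walk */
        bool tsz_oob;   /* the programmed value was out of bounds */
    } TszClamp;

    static TszClamp clamp_tsz(int tsz, bool using64k, bool have_st,
                              bool have_lva, bool ds)
    {
        int max_tsz = have_st ? 48 - using64k : 39;
        int min_tsz = 16;

        /* FEAT_LVA lowers the 64K floor; FEAT_LPA2 (via DS) the 4K/16K one. */
        if (using64k ? have_lva : ds) {
            min_tsz = 12;
        }

        TszClamp r = { tsz, false };
        if (tsz > max_tsz) {
            r.tsz = max_tsz;
            r.tsz_oob = true;
        } else if (tsz < min_tsz) {
            r.tsz = min_tsz;
            r.tsz_oob = true;
        }
        return r;
    }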
@@ -11361,7 +11459,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
target_ulong page_size;
uint32_t attrs;
int32_t stride;
- int addrsize, inputsize;
+ int addrsize, inputsize, outputsize;
TCR *tcr = regime_tcr(env, mmu_idx);
int ap, ns, xn, pxn;
uint32_t el = regime_el(env, mmu_idx);
@@ -11371,16 +11469,44 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
/* TODO: This code does not support shareability levels. */
if (aarch64) {
+ int ps;
+
param = aa64_va_parameters(env, address, mmu_idx,
access_type != MMU_INST_FETCH);
level = 0;
+
+ /*
+ * If TxSZ is programmed to a value larger than the maximum,
+ * or smaller than the effective minimum, it is IMPLEMENTATION
+ * DEFINED whether we behave as if the field were programmed
+ * within bounds, or if a level 0 Translation fault is generated.
+ *
+ * With FEAT_LVA, fault on less than minimum becomes required,
+ * so our choice is to always raise the fault.
+ */
+ if (param.tsz_oob) {
+ fault_type = ARMFault_Translation;
+ goto do_fault;
+ }
+
addrsize = 64 - 8 * param.tbi;
inputsize = 64 - param.tsz;
+
+ /*
+ * Bound PS by PARANGE to find the effective output address size.
+ * ID_AA64MMFR0 is a read-only register so values outside of the
+ * supported mappings can be considered an implementation error.
+ */
+ ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
+ ps = MIN(ps, param.ps);
+ assert(ps < ARRAY_SIZE(pamax_map));
+ outputsize = pamax_map[ps];
} else {
param = aa32_va_parameters(env, address, mmu_idx);
level = 1;
addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
inputsize = addrsize - param.tsz;
+ outputsize = 40;
}
/*
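
A worked example of the PS bound (illustrative numbers): a guest programming TCR_ELx.IPS for 52-bit PAs on a CPU whose PARANGE only advertises 48 bits gets an effective output size of 48:

    #include <assert.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned parange = 5;   /* ID_AA64MMFR0.PARANGE: up to 48-bit PA */
        unsigned ps      = 6;   /* TCR_ELx.IPS: requests 52-bit PA */

        unsigned eff = MIN(ps, parange);
        assert(eff == 5);       /* pamax_map[5] == 48, so outputsize = 48 */
        return 0;
    }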
@@ -11448,10 +11574,19 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
* VTCR_EL2.SL0 field (whose interpretation depends on the page size)
*/
uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
+ uint32_t sl2 = extract64(tcr->raw_tcr, 33, 1);
uint32_t startlevel;
bool ok;
- if (!aarch64 || stride == 9) {
+ /* SL2 is RES0 unless DS=1 & 4kb granule. */
+ if (param.ds && stride == 9 && sl2) {
+ if (sl0 != 0) {
+ level = 0;
+ fault_type = ARMFault_Translation;
+ goto do_fault;
+ }
+ startlevel = -1;
+ } else if (!aarch64 || stride == 9) {
/* AArch32 or 4KB pages */
startlevel = 2 - sl0;
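
A condensed sketch of the stage-2 start-level selection for the 4KB granule (hedged; the real code signals the fault via fault_type and goto, for which a -2 sentinel stands in here):

    #include <stdbool.h>

    static int s2_start_level_4k(unsigned sl0, unsigned sl2, bool ds)
    {
        if (ds && sl2) {            /* SL2 is RES0 unless DS=1, 4KB granule */
            return sl0 ? -2 : -1;   /* SL2=1 needs SL0=0; walk starts at -1 */
        }
        return 2 - sl0;             /* plain 4KB pages: level = 2 - SL0 */
    }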
@@ -11465,7 +11600,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
/* Check that the starting level is valid. */
ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
- inputsize, stride);
+ inputsize, stride, outputsize);
if (!ok) {
fault_type = ARMFault_Translation;
goto do_fault;
@@ -11473,24 +11608,49 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
level = startlevel;
}
- indexmask_grainsize = (1ULL << (stride + 3)) - 1;
- indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
+ indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
+ indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));
/* Now we can extract the actual base address from the TTBR */
descaddr = extract64(ttbr, 0, 48);
+
+ /*
+ * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
+ *
+ * Otherwise, if the base address is out of range, raise AddressSizeFault.
+ * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
+ * but we've just cleared the bits above 47, so simplify the test.
+ */
+ if (outputsize > 48) {
+ descaddr |= extract64(ttbr, 2, 4) << 48;
+ } else if (descaddr >> outputsize) {
+ level = 0;
+ fault_type = ARMFault_AddressSize;
+ goto do_fault;
+ }
+
/*
* We rely on this masking to clear the RES0 bits at the bottom of the TTBR
* and also to mask out CnP (bit 0) which could validly be non-zero.
*/
descaddr &= ~indexmask;
- /* The address field in the descriptor goes up to bit 39 for ARMv7
- * but up to bit 47 for ARMv8, but we use the descaddrmask
- * up to bit 39 for AArch32, because we don't need other bits in that case
- * to construct next descriptor address (anyway they should be all zeroes).
- */
- descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
- ~indexmask_grainsize;
+ /*
+ * For AArch32, the address field in the descriptor goes up to bit 39
+ * for both v7 and v8. However, for v8 the SBZ bits [47:40] must be 0
+ * or an AddressSize fault is raised. So for v8 we extract those SBZ
+ * bits as part of the address, which will be checked via outputsize.
+ * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
+ * the highest bits of a 52-bit output are placed elsewhere.
+ */
+ if (param.ds) {
+ descaddrmask = MAKE_64BIT_MASK(0, 50);
+ } else if (arm_feature(env, ARM_FEATURE_V8)) {
+ descaddrmask = MAKE_64BIT_MASK(0, 48);
+ } else {
+ descaddrmask = MAKE_64BIT_MASK(0, 40);
+ }
+ descaddrmask &= ~indexmask_grainsize;
/* Secure accesses start with the page table in secure memory and
* can be downgraded to non-secure at any step. Non-secure accesses
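
To illustrate the FEAT_LPA TTBR handling (a sketch with a made-up TTBR value; the real code additionally clears the low RES0/CnP bits via ~indexmask):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t extract64(uint64_t v, int start, int len)
    {
        return (v >> start) & (~0ULL >> (64 - len));
    }

    int main(void)
    {
        /* Hypothetical TTBR: PA bits [51:48] = 0xA carried in bits [5:2]. */
        uint64_t ttbr = (0xAULL << 2) | 0x123456000ULL;

        uint64_t descaddr = extract64(ttbr, 0, 48);
        descaddr |= extract64(ttbr, 2, 4) << 48;    /* outputsize > 48 case */

        assert(descaddr >> 48 == 0xA);
        return 0;
    }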
@@ -11515,8 +11675,26 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
/* Invalid, or the Reserved level 3 encoding */
goto do_fault;
}
+
descaddr = descriptor & descaddrmask;
+ /*
+ * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
+ * of descriptor. For FEAT_LPA2 and effective DS, bits [51:50] of
+ * descaddr are in [9:8]. Otherwise, if descaddr is out of range,
+ * raise AddressSizeFault.
+ */
+ if (outputsize > 48) {
+ if (param.ds) {
+ descaddr |= extract64(descriptor, 8, 2) << 50;
+ } else {
+ descaddr |= extract64(descriptor, 12, 4) << 48;
+ }
+ } else if (descaddr >> outputsize) {
+ fault_type = ARMFault_AddressSize;
+ goto do_fault;
+ }
+
if ((descriptor & 2) && (level < 3)) {
/* Table entry. The top five bits are attributes which may
* propagate down through lower levels of the table (and
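
Similarly for the descriptor itself, a sketch of the DS=1 placement (illustrative descriptor value; the low attribute bits are cleared here with an explicit mask, where the real code folds that into descaddrmask):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t extract64(uint64_t v, int start, int len)
    {
        return (v >> start) & (~0ULL >> (64 - len));
    }

    int main(void)
    {
        /* Hypothetical valid descriptor with PA bits [51:50] = 2 in [9:8]. */
        uint64_t descriptor = (0x2ULL << 8) | 0x40000000ULL | 3;

        uint64_t descaddr = descriptor & ((1ULL << 50) - 1) & ~0xfffULL;
        descaddr |= extract64(descriptor, 8, 2) << 50;  /* FEAT_LPA2, DS=1 */

        assert(descaddr >> 50 == 0x2);
        return 0;
    }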
@@ -11605,7 +11783,17 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
assert(attrindx <= 7);
cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
}
- cacheattrs->shareability = extract32(attrs, 6, 2);
+
+ /*
+ * For FEAT_LPA2 and effective DS, the SH field in the attributes
+ * was re-purposed for output address bits. The SH attribute in
+ * that case comes from TCR_ELx, which we extracted earlier.
+ */
+ if (param.ds) {
+ cacheattrs->shareability = param.sh;
+ } else {
+ cacheattrs->shareability = extract32(attrs, 6, 2);
+ }
*phys_ptr = descaddr;
*page_size_ptr = page_size;
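
The shareability selection reduces to a one-liner; a sketch (SH encodings per the VMSA: 0 Non-, 2 Outer-, 3 Inner-shareable; attrs bits [7:6] hold the descriptor's SH field as extracted by the code above):

    #include <stdbool.h>

    static unsigned effective_sh(bool ds, unsigned tcr_sh, unsigned attrs)
    {
        /* With DS, descriptor bits [9:8] carry PA bits, so SH comes from TCR. */
        return ds ? tcr_sh : (attrs >> 6) & 3;
    }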