author     Richard Henderson    2017-10-27 16:26:36 +0200
committer  Richard Henderson    2018-01-30 19:22:26 +0100
commit     8d6ae7fb3a9c4369979bcc7188966fa725eed033 (patch)
tree       a0302eab8423fafb4926b6bdf107a59709d226c1 /target/hppa/mem_helper.c
parent     target/hppa: Log unimplemented instructions (diff)
target/hppa: Implement I*TLBA and I*TLBP insns
The TLB can now be populated, but it cannot yet be cleared.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
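For readers new to the PA 1.1 MMU model this commit targets: the software-managed TLB is loaded in two steps, an I*TLBA insertion that supplies the physical page for a virtual address while leaving the entry unusable, followed by an I*TLBP insertion that supplies the access rights and marks the entry valid. The helpers added in the diff below implement exactly that ordering. A minimal standalone sketch of the two-step state machine (plain C; the struct layout and function names are simplified illustrations, not the QEMU types from the patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~(uint32_t)((1u << PAGE_SHIFT) - 1))

/* Simplified model of one software-TLB entry: address first, rights later. */
typedef struct {
    uint32_t va_b, va_e;   /* virtual page range covered */
    uint32_t pa;           /* physical page */
    uint32_t ar_type;      /* access rights (only one field modelled here) */
    int entry_valid;       /* set only once the protection word has arrived */
} tlb_ent;

/* Step 1: I*TLBA-like insert -- record the translation, leave it invalid. */
static void insert_addr(tlb_ent *e, uint32_t va, uint32_t pa)
{
    memset(e, 0, sizeof(*e));
    e->va_b = va & PAGE_MASK;
    e->va_e = e->va_b + (1u << PAGE_SHIFT) - 1;
    e->pa = pa & PAGE_MASK;
}

/* Step 2: I*TLBP-like insert -- add the rights and mark the entry valid. */
static void insert_prot(tlb_ent *e, uint32_t ar_type)
{
    e->ar_type = ar_type;
    e->entry_valid = 1;
}

int main(void)
{
    tlb_ent e;

    insert_addr(&e, 0x12345678, 0x00abc000);
    printf("after ITLBA: valid=%d\n", e.entry_valid);   /* 0: not usable yet */

    insert_prot(&e, 3);
    printf("after ITLBP: valid=%d pa=%#" PRIx32 "\n", e.entry_valid, e.pa);
    return 0;
}

The window between the two calls, where valid is still 0, corresponds to the state in which the real helper_itlbp() expects to find the entry; if the guest breaks the sequence, the QEMU helper below logs "ITLBP not following ITLBA".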
Diffstat (limited to 'target/hppa/mem_helper.c')
-rw-r--r--  target/hppa/mem_helper.c | 80
1 file changed, 78 insertions(+), 2 deletions(-)
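One detail of the new hppa_flush_tlb_ent() below is worth calling out before the diff: a TLB entry may cover a superpage, and the loop bound 1 << (2 * ent->page_size) walks every 4 KiB target page the entry spans, which matches PA-RISC's power-of-four page sizes. A quick standalone check of that arithmetic (a TARGET_PAGE_BITS of 12 is assumed here to mirror hppa's 4 KiB target pages):

#include <stdio.h>

#define TARGET_PAGE_BITS 12   /* assumed: hppa uses 4 KiB target pages */

int main(void)
{
    /* Number of TARGET_PAGE_SIZE pages spanned by a superpage of 4^s * 4 KiB,
     * i.e. the loop bound "1 << (2 * ent->page_size)" in hppa_flush_tlb_ent(). */
    for (unsigned s = 0; s < 4; ++s) {
        unsigned pages = 1u << (2 * s);
        printf("page_size=%u -> %u pages (%u KiB)\n",
               s, pages, pages << (TARGET_PAGE_BITS - 10));
    }
    return 0;
}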
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index 334ef98a32..59619e9ba8 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -42,13 +42,40 @@ static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
     for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
         hppa_tlb_entry *ent = &env->tlb[i];
-        if (ent->va_b <= addr && addr <= ent->va_e && ent->entry_valid) {
+        if (ent->va_b <= addr && addr <= ent->va_e) {
             return ent;
         }
     }
     return NULL;
 }
 
+static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
+{
+    CPUState *cs = CPU(hppa_env_get_cpu(env));
+    unsigned i, n = 1 << (2 * ent->page_size);
+    uint64_t addr = ent->va_b;
+
+    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
+        /* Do not flush MMU_PHYS_IDX. */
+        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
+    }
+
+    memset(ent, 0, sizeof(*ent));
+    ent->va_b = -1;
+}
+
+static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
+{
+    hppa_tlb_entry *ent;
+    uint32_t i = env->tlb_last;
+
+    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
+    ent = &env->tlb[i];
+
+    hppa_flush_tlb_ent(env, ent);
+    return ent;
+}
+
 int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                               int type, hwaddr *pphys, int *pprot)
 {
@@ -66,7 +93,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
     /* Find a valid tlb entry that matches the virtual address. */
     ent = hppa_find_tlb(env, addr);
-    if (ent == NULL) {
+    if (ent == NULL || !ent->entry_valid) {
         phys = 0;
         prot = 0;
         ret = (type & PAGE_EXEC ? EXCP_ITLB_MISS : EXCP_DTLB_MISS);
@@ -201,4 +228,53 @@ void tlb_fill(CPUState *cs, target_ulong addr, int size,
     tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                  prot, mmu_idx, TARGET_PAGE_SIZE);
 }
+
+/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
+void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+{
+    hppa_tlb_entry *empty = NULL;
+    int i;
+
+    /* Zap any old entries covering ADDR; notice empty entries on the way. */
+    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
+        hppa_tlb_entry *ent = &env->tlb[i];
+        if (!ent->entry_valid) {
+            empty = ent;
+        } else if (ent->va_b <= addr && addr <= ent->va_e) {
+            hppa_flush_tlb_ent(env, ent);
+            empty = ent;
+        }
+    }
+
+    /* If we didn't see an empty entry, evict one. */
+    if (empty == NULL) {
+        empty = hppa_alloc_tlb_ent(env);
+    }
+
+    /* Note that empty->entry_valid == 0 already. */
+    empty->va_b = addr & TARGET_PAGE_MASK;
+    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
+    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
+}
+
+/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
+void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+{
+    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
+
+    if (unlikely(ent == NULL || ent->entry_valid)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
+        return;
+    }
+
+    ent->access_id = extract32(reg, 1, 18);
+    ent->u = extract32(reg, 19, 1);
+    ent->ar_pl2 = extract32(reg, 20, 2);
+    ent->ar_pl1 = extract32(reg, 22, 2);
+    ent->ar_type = extract32(reg, 24, 3);
+    ent->b = extract32(reg, 27, 1);
+    ent->d = extract32(reg, 28, 1);
+    ent->t = extract32(reg, 29, 1);
+    ent->entry_valid = 1;
+}
#endif /* CONFIG_USER_ONLY */
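Finally, a note on eviction: when helper_itlba() finds no invalid entry to reuse, hppa_alloc_tlb_ent() recycles slots in round-robin order through the env->tlb_last cursor, wrapping back to slot 0 after the last entry. A standalone sketch of just that cursor logic (the table size and names are illustrative, not the real fields of CPUHPPAState):

#include <stdio.h>

#define TLB_SLOTS 16   /* illustrative size; the real bound is ARRAY_SIZE(env->tlb) */

static unsigned tlb_last;   /* next slot to recycle */

/* Pick the next victim slot in round-robin order, mirroring hppa_alloc_tlb_ent(). */
static unsigned next_victim(void)
{
    unsigned i = tlb_last;
    tlb_last = (i == TLB_SLOTS - 1 ? 0 : i + 1);
    return i;
}

int main(void)
{
    /* Walking past the end of the table wraps back to slot 0. */
    for (int n = 0; n < TLB_SLOTS + 3; ++n) {
        printf("%u ", next_victim());
    }
    printf("\n");   /* prints 0 .. 15, then 0 1 2 */
    return 0;
}

Running it prints slot indexes 0 through TLB_SLOTS - 1 and then wraps, which is the eviction order the new allocator imposes once the guest has filled every entry.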