Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cputlb.c  69
1 file changed, 61 insertions(+), 8 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index dfcd9ae168..685e0f2ee4 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1007,6 +1007,16 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
}
}
+static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
+{
+#if TCG_OVERSIZED_GUEST
+ return *(target_ulong *)((uintptr_t)entry + ofs);
+#else
+ /* ofs might correspond to .addr_write, so use atomic_read */
+ return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
+#endif
+}
+
/* Return true if ADDR is present in the victim tlb, and has been copied
back to the main tlb. */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
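The hunk above factors the comparator read into a helper; the hunk below
replaces the open-coded version in victim_tlb_hit with a call to it. A
minimal usage sketch (not part of the patch; vtlb is the CPUTLBEntry
pointer from the loop in victim_tlb_hit): the atomic_read path matters
because .addr_write may be updated concurrently by another thread, while
TCG_OVERSIZED_GUEST builds, where target_ulong is wider than the host can
load atomically, fall back to a plain read:

    /* Sketch only: read the .addr_write comparator of one victim entry. */
    size_t elt_ofs = offsetof(CPUTLBEntry, addr_write);
    target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs);
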
@@ -1017,14 +1027,7 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
assert_cpu_is_self(ENV_GET_CPU(env));
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
- target_ulong cmp;
-
- /* elt_ofs might correspond to .addr_write, so use atomic_read */
-#if TCG_OVERSIZED_GUEST
- cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
-#else
- cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
-#endif
+ target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs);
if (cmp == page) {
/* Found entry in victim tlb, swap tlb and iotlb. */
@@ -1108,6 +1111,56 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
}
}
+void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+ MMUAccessType access_type, int mmu_idx)
+{
+ CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+ uintptr_t tlb_addr, page;
+ size_t elt_ofs;
+
+ switch (access_type) {
+ case MMU_DATA_LOAD:
+ elt_ofs = offsetof(CPUTLBEntry, addr_read);
+ break;
+ case MMU_DATA_STORE:
+ elt_ofs = offsetof(CPUTLBEntry, addr_write);
+ break;
+ case MMU_INST_FETCH:
+ elt_ofs = offsetof(CPUTLBEntry, addr_code);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ page = addr & TARGET_PAGE_MASK;
+ tlb_addr = tlb_read_ofs(entry, elt_ofs);
+
+ if (!tlb_hit_page(tlb_addr, page)) {
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
+
+ if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
+ CPUState *cs = ENV_GET_CPU(env);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+
+ if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
+ /* Non-faulting page table read failed. */
+ return NULL;
+ }
+
+ /* TLB resize via tlb_fill may have moved the entry. */
+ entry = tlb_entry(env, mmu_idx, addr);
+ }
+ tlb_addr = tlb_read_ofs(entry, elt_ofs);
+ }
+
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ return NULL;
+ }
+
+ return (void *)((uintptr_t)addr + entry->addend);
+}
+
/* Probe for a read-modify-write atomic operation. Do not allow unaligned
* operations, or io operations to proceed. Return the host address. */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
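
The new tlb_vaddr_to_host entry point lets a helper probe for a direct host
pointer without raising a guest fault: per the code above, NULL means either
an I/O page or a non-faulting TLB fill that could not complete. A
hypothetical caller sketch, with invented names (try_fast_ldl, val),
assuming the usual softmmu helper context:

    /* Hypothetical caller, not part of the patch: fast-path a 4-byte load
     * when the page is resident RAM, else report that the slow path (a
     * faulting cpu_ldl_* access) is needed. */
    static bool try_fast_ldl(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uint32_t *val)
    {
        void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
        if (host == NULL) {
            return false;   /* I/O page, or non-faulting fill failed */
        }
        memcpy(val, host, sizeof(*val));   /* direct host RAM access */
        return true;
    }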