Diffstat (limited to 'accel/tcg/cputlb.c')
-rw-r--r-- | accel/tcg/cputlb.c | 51
1 file changed, 33 insertions(+), 18 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index e3ee4260bd..361078471b 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1095,16 +1095,16 @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
     env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
 }
 
-/* Add a new TLB entry. At most one entry for a given virtual address
+/*
+ * Add a new TLB entry. At most one entry for a given virtual address
  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
  * supplied size is only used by tlb_flush_page.
  *
  * Called from TCG-generated code, which is under an RCU read-side
  * critical section.
  */
-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
-                             hwaddr paddr, MemTxAttrs attrs, int prot,
-                             int mmu_idx, target_ulong size)
+void tlb_set_page_full(CPUState *cpu, int mmu_idx,
+                       target_ulong vaddr, CPUTLBEntryFull *full)
 {
     CPUArchState *env = cpu->env_ptr;
     CPUTLB *tlb = env_tlb(env);
@@ -1117,35 +1117,36 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     CPUTLBEntry *te, tn;
     hwaddr iotlb, xlat, sz, paddr_page;
     target_ulong vaddr_page;
-    int asidx = cpu_asidx_from_attrs(cpu, attrs);
-    int wp_flags;
+    int asidx, wp_flags, prot;
     bool is_ram, is_romd;
 
     assert_cpu_is_self(cpu);
 
-    if (size <= TARGET_PAGE_SIZE) {
+    if (full->lg_page_size <= TARGET_PAGE_BITS) {
         sz = TARGET_PAGE_SIZE;
     } else {
-        tlb_add_large_page(env, mmu_idx, vaddr, size);
-        sz = size;
+        sz = (hwaddr)1 << full->lg_page_size;
+        tlb_add_large_page(env, mmu_idx, vaddr, sz);
     }
     vaddr_page = vaddr & TARGET_PAGE_MASK;
-    paddr_page = paddr & TARGET_PAGE_MASK;
+    paddr_page = full->phys_addr & TARGET_PAGE_MASK;
 
+    prot = full->prot;
+    asidx = cpu_asidx_from_attrs(cpu, full->attrs);
     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
-                                                &xlat, &sz, attrs, &prot);
+                                                &xlat, &sz, full->attrs, &prot);
     assert(sz >= TARGET_PAGE_SIZE);
 
     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
               " prot=%x idx=%d\n",
-              vaddr, paddr, prot, mmu_idx);
+              vaddr, full->phys_addr, prot, mmu_idx);
 
     address = vaddr_page;
-    if (size < TARGET_PAGE_SIZE) {
+    if (full->lg_page_size < TARGET_PAGE_BITS) {
         /* Repeat the MMU check and TLB fill on every access. */
         address |= TLB_INVALID_MASK;
     }
-    if (attrs.byte_swap) {
+    if (full->attrs.byte_swap) {
         address |= TLB_BSWAP;
     }
 
@@ -1236,8 +1237,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
      * subtract here is that of the page base, and not the same as the
      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
      */
+    desc->fulltlb[index] = *full;
     desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
-    desc->fulltlb[index].attrs = attrs;
+    desc->fulltlb[index].phys_addr = paddr_page;
+    desc->fulltlb[index].prot = prot;
 
     /* Now calculate the new entry */
     tn.addend = addend - vaddr_page;
@@ -1272,9 +1275,21 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     qemu_spin_unlock(&tlb->c.lock);
 }
 
-/* Add a new TLB entry, but without specifying the memory
- * transaction attributes to be used.
- */
+void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+                             hwaddr paddr, MemTxAttrs attrs, int prot,
+                             int mmu_idx, target_ulong size)
+{
+    CPUTLBEntryFull full = {
+        .phys_addr = paddr,
+        .attrs = attrs,
+        .prot = prot,
+        .lg_page_size = ctz64(size)
+    };
+
+    assert(is_power_of_2(size));
+    tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
+}
+
 void tlb_set_page(CPUState *cpu, target_ulong vaddr, hwaddr paddr,
                   int prot, int mmu_idx, target_ulong size)
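
For context, a minimal sketch of how a caller might use the new hook after this change. This is not part of the commit: the helper name and the flat read/write/execute mapping are invented for illustration, while the CPUTLBEntryFull field names and the tlb_set_page_full() signature come from the diff above.

/*
 * Hypothetical caller sketch: a target MMU fill path installing one
 * page via tlb_set_page_full() instead of tlb_set_page_with_attrs().
 */
#include "qemu/osdep.h"
#include "exec/exec-all.h"

static void example_install_page(CPUState *cpu, target_ulong vaddr,
                                 hwaddr paddr, int mmu_idx)
{
    CPUTLBEntryFull full = {
        /* Physical address of the page; tlb_set_page_full() masks
           the sub-page bits off itself. */
        .phys_addr = paddr,
        /* No special memory transaction attributes. */
        .attrs = MEMTXATTRS_UNSPECIFIED,
        .prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        /* log2 of the page size: TARGET_PAGE_BITS for a normal
           small page; larger values take the large-page path. */
        .lg_page_size = TARGET_PAGE_BITS,
    };

    tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
}

Note the design choice visible in the new wrapper: CPUTLBEntryFull records the log2 of the page size rather than a byte count, so tlb_set_page_with_attrs() converts with ctz64() and asserts the size is a power of two. An lg_page_size below TARGET_PAGE_BITS marks a sub-page mapping that is re-checked on every access via TLB_INVALID_MASK, while values above it route through tlb_add_large_page().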