author:    Stefan Hajnoczi, 2022-09-06 13:31:43 +0200
committer: Stefan Hajnoczi, 2022-09-06 13:31:43 +0200
commit:    b34b42f1b6a33c455dccce6ceb49962dddbb7a8a (patch)
tree:      68143a8c84528fdd3caf8b5206a5df9427ae7263 /accel/tcg/cpu-exec.c
parent:    Merge tag 'pull-or1k-20220904' of https://github.com/stffrdhrn/qemu into staging (diff)
parent:    target/riscv: Make translator stop before the end of a page (diff)
Merge tag 'pull-tcg-20220906' of https://gitlab.com/rth7680/qemu into staging
Respect PROT_EXEC in user-only mode.
Fix s390x, i386 and riscv for translations crossing a page.
# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmMW8TcdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV8qfwf9EYjXywES/UYzfeJC
# 7irryE3iYddWP+ix3Q4WKaTc61plwP5MMCmeq4PjRo1IBAL5dTtUE1+AFXkEvm4L
# EckSiT5D5d/wYOfhWSWxjblmMk7GUXRRgKzkF1ir3soIftQgXdb43PwAswuOca/v
# dX7wXBJOoWmGWqXNNlQmGIl7c4uQTkOM6iTTLlm4Qg7SJC4MA6EiSZmXlvAs80lN
# TCbBV5P89qseHwzhJUTMZEO+ZMAuTSjFSd/RqBexVa4ty5UJxxgBk21A8JtQPUhr
# Y/Ezb0yhOcwrdjJ8REc267BZbdNgbaVNlUd7c9GKbv8bQUh0AoM9gnjGdoID88x9
# q0f+Pw==
# =HmJB
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 06 Sep 2022 03:05:27 EDT
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F
* tag 'pull-tcg-20220906' of https://gitlab.com/rth7680/qemu:
target/riscv: Make translator stop before the end of a page
target/riscv: Add MAX_INSN_LEN and insn_len
target/i386: Make translator stop before the end of a page
target/s390x: Make translator stop before the end of a page
accel/tcg: Add fast path for translator_ld*
accel/tcg: Add pc and host_pc params to gen_intermediate_code
accel/tcg: Remove translator_ldsw
accel/tcg: Document the faulting lookup in tb_lookup_cmp
accel/tcg: Use probe_access_internal for softmmu get_page_addr_code_hostp
accel/tcg: Move qemu_ram_addr_from_host_nofail to physmem.c
accel/tcg: Make tb_htable_lookup static
accel/tcg: Unlock mmap_lock after longjmp
accel/tcg: Properly implement get_page_addr_code for user-only
accel/tcg: Introduce is_same_page()
tests/tcg/i386: Move smc_code2 to an executable section
linux-user: Clear translations on mprotect()
linux-user: Honor PT_GNU_STACK
linux-user/x86_64: Allocate vsyscall page as a commpage
linux-user/hppa: Allocate page zero as a commpage
linux-user/arm: Mark the commpage executable
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
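
Several of the patches above ("Make translator stop before the end of a page" for riscv, i386 and s390x) share one idea: a translation block must not decode an instruction whose bytes spill onto the next guest page, because that page may be non-executable or may change independently. The standalone C sketch below is illustrative only and is not QEMU code; PAGE_SIZE, is_same_page() and translate_block() here are simplified stand-ins, not the real MAX_INSN_LEN/is_same_page() machinery added by the pull.

/*
 * Illustrative sketch only -- not QEMU code.  PAGE_SIZE, is_same_page()
 * and translate_block() are simplified stand-ins showing how a decoder
 * can end a translation block before an instruction crosses a page.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* True if both guest addresses fall on the same page. */
static bool is_same_page(uint64_t a, uint64_t b)
{
    return (a / PAGE_SIZE) == (b / PAGE_SIZE);
}

/* Decode fixed-length instructions from pc, stopping at a page boundary. */
static unsigned translate_block(uint64_t pc, unsigned max_insns,
                                unsigned insn_len)
{
    unsigned count = 0;

    while (count < max_insns) {
        uint64_t last_byte = pc + insn_len - 1;

        /*
         * Stop before an instruction that would cross into the next
         * page; like the real translators, still emit at least one.
         */
        if (count > 0 && !is_same_page(pc, last_byte)) {
            break;
        }
        pc += insn_len;
        count++;
    }
    return count;
}

int main(void)
{
    /* Start 6 bytes below a page boundary, with 4-byte instructions. */
    printf("translated %u insns\n",
           translate_block(PAGE_SIZE - 6, 100, 4));
    return 0;
}

With the start address six bytes below a page boundary, only one 4-byte instruction fits; the block ends there and the next block begins on the new page, where executability is checked again.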
Diffstat (limited to 'accel/tcg/cpu-exec.c')
-rw-r--r-- | accel/tcg/cpu-exec.c | 143 |
1 file changed, 76 insertions, 67 deletions
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index a565a3f8ec..5f43b9769a 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -170,6 +170,76 @@ uint32_t curr_cflags(CPUState *cpu)
     return cflags;
 }
 
+struct tb_desc {
+    target_ulong pc;
+    target_ulong cs_base;
+    CPUArchState *env;
+    tb_page_addr_t phys_page1;
+    uint32_t flags;
+    uint32_t cflags;
+    uint32_t trace_vcpu_dstate;
+};
+
+static bool tb_lookup_cmp(const void *p, const void *d)
+{
+    const TranslationBlock *tb = p;
+    const struct tb_desc *desc = d;
+
+    if (tb->pc == desc->pc &&
+        tb->page_addr[0] == desc->phys_page1 &&
+        tb->cs_base == desc->cs_base &&
+        tb->flags == desc->flags &&
+        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
+        tb_cflags(tb) == desc->cflags) {
+        /* check next page if needed */
+        if (tb->page_addr[1] == -1) {
+            return true;
+        } else {
+            tb_page_addr_t phys_page2;
+            target_ulong virt_page2;
+
+            /*
+             * We know that the first page matched, and an otherwise valid TB
+             * encountered an incomplete instruction at the end of that page,
+             * therefore we know that generating a new TB from the current PC
+             * must also require reading from the next page -- even if the
+             * second pages do not match, and therefore the resulting insn
+             * is different for the new TB. Therefore any exception raised
+             * here by the faulting lookup is not premature.
+             */
+            virt_page2 = TARGET_PAGE_ALIGN(desc->pc);
+            phys_page2 = get_page_addr_code(desc->env, virt_page2);
+            if (tb->page_addr[1] == phys_page2) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
+                                          target_ulong cs_base, uint32_t flags,
+                                          uint32_t cflags)
+{
+    tb_page_addr_t phys_pc;
+    struct tb_desc desc;
+    uint32_t h;
+
+    desc.env = cpu->env_ptr;
+    desc.cs_base = cs_base;
+    desc.flags = flags;
+    desc.cflags = cflags;
+    desc.trace_vcpu_dstate = *cpu->trace_dstate;
+    desc.pc = pc;
+    phys_pc = get_page_addr_code(desc.env, pc);
+    if (phys_pc == -1) {
+        return NULL;
+    }
+    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
+    h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
+    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
+}
+
 /* Might cause an exception, so have a longjmp destination ready */
 static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
                                           target_ulong cs_base,
@@ -462,13 +532,11 @@ void cpu_exec_step_atomic(CPUState *cpu)
         cpu_tb_exec(cpu, tb, &tb_exit);
         cpu_exec_exit(cpu);
     } else {
-        /*
-         * The mmap_lock is dropped by tb_gen_code if it runs out of
-         * memory.
-         */
 #ifndef CONFIG_SOFTMMU
         clear_helper_retaddr();
-        tcg_debug_assert(!have_mmap_lock());
+        if (have_mmap_lock()) {
+            mmap_unlock();
+        }
 #endif
         if (qemu_mutex_iothread_locked()) {
             qemu_mutex_unlock_iothread();
@@ -487,67 +555,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
     end_exclusive();
 }
 
-struct tb_desc {
-    target_ulong pc;
-    target_ulong cs_base;
-    CPUArchState *env;
-    tb_page_addr_t phys_page1;
-    uint32_t flags;
-    uint32_t cflags;
-    uint32_t trace_vcpu_dstate;
-};
-
-static bool tb_lookup_cmp(const void *p, const void *d)
-{
-    const TranslationBlock *tb = p;
-    const struct tb_desc *desc = d;
-
-    if (tb->pc == desc->pc &&
-        tb->page_addr[0] == desc->phys_page1 &&
-        tb->cs_base == desc->cs_base &&
-        tb->flags == desc->flags &&
-        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
-        tb_cflags(tb) == desc->cflags) {
-        /* check next page if needed */
-        if (tb->page_addr[1] == -1) {
-            return true;
-        } else {
-            tb_page_addr_t phys_page2;
-            target_ulong virt_page2;
-
-            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
-            phys_page2 = get_page_addr_code(desc->env, virt_page2);
-            if (tb->page_addr[1] == phys_page2) {
-                return true;
-            }
-        }
-    }
-    return false;
-}
-
-TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
-                                   target_ulong cs_base, uint32_t flags,
-                                   uint32_t cflags)
-{
-    tb_page_addr_t phys_pc;
-    struct tb_desc desc;
-    uint32_t h;
-
-    desc.env = cpu->env_ptr;
-    desc.cs_base = cs_base;
-    desc.flags = flags;
-    desc.cflags = cflags;
-    desc.trace_vcpu_dstate = *cpu->trace_dstate;
-    desc.pc = pc;
-    phys_pc = get_page_addr_code(desc.env, pc);
-    if (phys_pc == -1) {
-        return NULL;
-    }
-    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
-    h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
-    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
-}
-
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
 {
     if (TCG_TARGET_HAS_direct_jump) {
@@ -936,7 +943,9 @@ int cpu_exec(CPUState *cpu)
 
 #ifndef CONFIG_SOFTMMU
         clear_helper_retaddr();
-        tcg_debug_assert(!have_mmap_lock());
+        if (have_mmap_lock()) {
+            mmap_unlock();
+        }
 #endif
         if (qemu_mutex_iothread_locked()) {
             qemu_mutex_unlock_iothread();
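
The two hunks that replace tcg_debug_assert(!have_mmap_lock()) with a conditional mmap_unlock() handle the error path reached via siglongjmp: with the faulting code lookup introduced above, the jump out of translation can now happen while mmap_lock is still held, so the exit path must release it rather than assert it is free. The sketch below is illustrative only, not QEMU code; fake_mmap_lock(), fake_have_mmap_lock() and translate_block() are invented stand-ins that mimic the pattern with plain setjmp/longjmp.

/*
 * Illustrative sketch only -- not QEMU code.  The fake_* helpers and
 * translate_block() are stand-ins showing why the error path releases
 * the lock conditionally instead of asserting that it is not held.
 */
#include <setjmp.h>
#include <stdbool.h>
#include <stdio.h>

static bool lock_held;
static jmp_buf exit_env;

static void fake_mmap_lock(void)      { lock_held = true; }
static void fake_mmap_unlock(void)    { lock_held = false; }
static bool fake_have_mmap_lock(void) { return lock_held; }

/* A fault during code lookup/translation longjmps out with the lock held. */
static void translate_block(void)
{
    fake_mmap_lock();
    longjmp(exit_env, 1);   /* stands in for the guest-fault exit */
}

int main(void)
{
    if (setjmp(exit_env) == 0) {
        translate_block();
    } else {
        /* Error exit: drop the lock only if this path still holds it. */
        if (fake_have_mmap_lock()) {
            fake_mmap_unlock();
        }
    }
    printf("lock held after recovery: %s\n", lock_held ? "yes" : "no");
    return 0;
}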