author     Peter Maydell    2021-01-07 21:34:05 +0100
committer  Peter Maydell    2021-01-07 21:34:05 +0100
commit     e79de63ab1bd1f6550e7b915e433bec1ad1a870a (patch)
tree       ad29060323ecea1b9a0f60d5b08984f310b30e44 /accel/tcg/cpu-exec.c
parent     Merge remote-tracking branch 'remotes/stsquad/tags/pull-testing-060121-4' int... (diff)
parent     tcg: Constify TCGLabelQemuLdst.raddr (diff)
Merge remote-tracking branch 'remotes/rth-gitlab/tags/pull-tcg-20210107' into staging
Build fix for ppc64 centos7.
Reduce the use of scratch registers for tcg/i386.
Use _aligned_malloc for Win32.
Enable split w^x code gen buffers.

# gpg: Signature made Thu 07 Jan 2021 20:06:38 GMT
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth-gitlab/tags/pull-tcg-20210107: (47 commits)
  tcg: Constify TCGLabelQemuLdst.raddr
  tcg: Constify tcg_code_gen_epilogue
  tcg: Remove TCG_TARGET_SUPPORT_MIRROR
  tcg/arm: Support split-wx code generation
  tcg/mips: Support split-wx code generation
  tcg/mips: Do not assert on relocation overflow
  accel/tcg: Add mips support to alloc_code_gen_buffer_splitwx_memfd
  tcg/riscv: Support split-wx code generation
  tcg/riscv: Remove branch-over-branch fallback
  tcg/riscv: Fix branch range checks
  tcg/s390: Support split-wx code generation
  tcg/s390: Use tcg_tbrel_diff
  tcg/sparc: Support split-wx code generation
  tcg/sparc: Use tcg_tbrel_diff
  tcg/ppc: Support split-wx code generation
  tcg/ppc: Use tcg_out_mem_long to reset TCG_REG_TB
  tcg/ppc: Use tcg_tbrel_diff
  tcg: Introduce tcg_tbrel_diff
  tcg/tci: Push const down through bytecode reading
  disas: Push const down through host disassembly
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
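The headline feature of this pull is the split w^x code generation buffer: the translation cache is mapped twice, as a read-write view for the translator and a read-execute view for the generated code, so no page is ever writable and executable at once. The snippet below is an illustrative sketch of that idea only, not QEMU's alloc_code_gen_buffer_splitwx_memfd(); the buffer size and variable names are invented for the example. It shows the Linux memfd_create() double-mapping trick and the constant rx/rw offset that the diff below handles as tcg_splitwx_diff.

    /*
     * Sketch only (not the QEMU implementation): build a split-wx code
     * buffer by mapping one anonymous memfd file twice, once RW for the
     * translator and once RX for execution.
     */
    #define _GNU_SOURCE
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t size = 1 << 20;                      /* 1 MiB buffer, arbitrary */

        int fd = memfd_create("codegen", 0);
        if (fd < 0 || ftruncate(fd, size) < 0) {
            perror("memfd");
            return 1;
        }

        /* Writable view: the translator emits instructions here. */
        void *rw = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        /* Executable view: the CPU jumps into this alias of the same pages. */
        void *rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
        if (rw == MAP_FAILED || rx == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        close(fd);

        /* Constant difference between the two views (cf. tcg_splitwx_diff). */
        ptrdiff_t splitwx_diff = (uintptr_t)rx - (uintptr_t)rw;
        printf("rw=%p rx=%p diff=%td\n", rw, rx, splitwx_diff);

        /* Converting an executable-view pointer back to its writable alias. */
        void *back_to_rw = (void *)((uintptr_t)rx - splitwx_diff);
        return back_to_rw == rw ? 0 : 1;
    }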
Diffstat (limited to 'accel/tcg/cpu-exec.c')
-rw-r--r--  accel/tcg/cpu-exec.c  41
1 file changed, 25 insertions(+), 16 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index fa325bb3d8..e0df9b6a1d 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -154,14 +154,13 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
* TCG is not considered a security-sensitive part of QEMU so this does not
* affect the impact of CFI in environment with high security requirements
*/
-QEMU_DISABLE_CFI
-static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
+static inline TranslationBlock * QEMU_DISABLE_CFI
+cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
CPUArchState *env = cpu->env_ptr;
uintptr_t ret;
TranslationBlock *last_tb;
- int tb_exit;
- uint8_t *tb_ptr = itb->tc.ptr;
+ const void *tb_ptr = itb->tc.ptr;
qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
"Trace %d: %p ["
@@ -188,11 +187,20 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
ret = tcg_qemu_tb_exec(env, tb_ptr);
cpu->can_do_io = 1;
- last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
- tb_exit = ret & TB_EXIT_MASK;
- trace_exec_tb_exit(last_tb, tb_exit);
+ /*
+ * TODO: Delay swapping back to the read-write region of the TB
+ * until we actually need to modify the TB. The read-only copy,
+ * coming from the rx region, shares the same host TLB entry as
+ * the code that executed the exit_tb opcode that arrived here.
+ * If we insist on touching both the RX and the RW pages, we
+ * double the host TLB pressure.
+ */
+ last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
+ *tb_exit = ret & TB_EXIT_MASK;
+
+ trace_exec_tb_exit(last_tb, *tb_exit);
- if (tb_exit > TB_EXIT_IDX1) {
+ if (*tb_exit > TB_EXIT_IDX1) {
/* We didn't start executing this TB (eg because the instruction
* counter hit zero); we must restore the guest PC to the address
* of the start of the TB.
@@ -210,7 +218,7 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
cc->set_pc(cpu, last_tb->pc);
}
}
- return ret;
+ return last_tb;
}
#ifndef CONFIG_USER_ONLY
@@ -221,6 +229,7 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
{
TranslationBlock *tb;
uint32_t cflags = curr_cflags() | CF_NOCACHE;
+ int tb_exit;
if (ignore_icount) {
cflags &= ~CF_USE_ICOUNT;
@@ -238,7 +247,7 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
/* execute the generated code */
trace_exec_tb_nocache(tb, tb->pc);
- cpu_tb_exec(cpu, tb);
+ cpu_tb_exec(cpu, tb, &tb_exit);
mmap_lock();
tb_phys_invalidate(tb, -1);
@@ -272,6 +281,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
uint32_t flags;
uint32_t cflags = 1;
uint32_t cf_mask = cflags & CF_HASH_MASK;
+ int tb_exit;
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
start_exclusive();
@@ -288,7 +298,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
cpu_exec_enter(cpu);
/* execute the generated code */
trace_exec_tb(tb, pc);
- cpu_tb_exec(cpu, tb);
+ cpu_tb_exec(cpu, tb, &tb_exit);
cpu_exec_exit(cpu);
} else {
/*
@@ -382,7 +392,9 @@ void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
if (TCG_TARGET_HAS_direct_jump) {
uintptr_t offset = tb->jmp_target_arg[n];
uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
- tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
+ uintptr_t jmp_rx = tc_ptr + offset;
+ uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
+ tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr);
} else {
tb->jmp_target_arg[n] = addr;
}
@@ -682,13 +694,10 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
TranslationBlock **last_tb, int *tb_exit)
{
- uintptr_t ret;
int32_t insns_left;
trace_exec_tb(tb, tb->pc);
- ret = cpu_tb_exec(cpu, tb);
- tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
- *tb_exit = ret & TB_EXIT_MASK;
+ tb = cpu_tb_exec(cpu, tb, tb_exit);
if (*tb_exit != TB_EXIT_REQUESTED) {
*last_tb = tb;
return;
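The tcg_splitwx_to_rw() helper used in cpu_tb_exec() above is not part of this hunk; judging from the "jmp_rw = jmp_rx - tcg_splitwx_diff" arithmetic in tb_set_jmp_target(), it presumably reduces to the same pointer adjustment. A minimal sketch under that assumption (hypothetical name splitwx_to_rw_sketch, not QEMU's actual definition; the type of tcg_splitwx_diff is assumed):

    #include <stddef.h>
    #include <stdint.h>

    extern intptr_t tcg_splitwx_diff;   /* rx view minus rw view; assumed type */

    /* Sketch only: map a pointer into the read-execute (rx) alias of the
     * code buffer back to its read-write (rw) alias, mirroring
     * "jmp_rw = jmp_rx - tcg_splitwx_diff" from the diff above. */
    static inline void *splitwx_to_rw_sketch(const void *rx)
    {
        return (void *)((uintptr_t)rx - tcg_splitwx_diff);
    }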