author    Richard Henderson    2022-09-29 19:51:21 +0200
committer Richard Henderson    2022-10-04 21:13:12 +0200
commit    1d41a79b3c7573c941723455149cfadfe7c2ab37 (patch)
tree      6eec27ed1eb95a4841e286156fdf2e7f48e533cc /accel
parent    accel/tcg: Do not align tb->page_addr[0] (diff)
accel/tcg: Inline tb_flush_jmp_cache
This function has two users, who use it incompatibly.
In tlb_flush_page_by_mmuidx_async_0, when flushing a
single page, we need to flush exactly two pages.
In tlb_flush_range_by_mmuidx_async_0, when flushing a
range of pages, we need to flush N+1 pages.

This avoids double-flushing of jmp cache pages in a range.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
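For a concrete illustration, take a flush of N = 4 contiguous pages: the old per-page helper cleared the page and its predecessor on every iteration, 2 × 4 = 8 jmp-cache clears with each interior page cleared twice, whereas the inlined loop starts one page early and clears N + 1 = 5 distinct pages, each exactly once.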
Diffstat (limited to 'accel')
-rw-r--r--   accel/tcg/cputlb.c   25
1 file changed, 14 insertions, 11 deletions
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index a0db2d32a8..c7909fb619 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -107,14 +107,6 @@ static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
     }
 }
 
-static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
-{
-    /* Discard jump cache entries for any tb which might potentially
-       overlap the flushed page. */
-    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
-    tb_jmp_cache_clear_page(cpu, addr);
-}
-
 /**
  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
  * @desc: The CPUTLBDesc portion of the TLB
@@ -541,7 +533,12 @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
     }
     qemu_spin_unlock(&env_tlb(env)->c.lock);
 
-    tb_flush_jmp_cache(cpu, addr);
+    /*
+     * Discard jump cache entries for any tb which might potentially
+     * overlap the flushed page, which includes the previous.
+     */
+    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
+    tb_jmp_cache_clear_page(cpu, addr);
 }
 
 /**
@@ -792,8 +789,14 @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
         return;
     }
 
-    for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
-        tb_flush_jmp_cache(cpu, d.addr + i);
+    /*
+     * Discard jump cache entries for any tb which might potentially
+     * overlap the flushed pages, which includes the previous.
+     */
+    d.addr -= TARGET_PAGE_SIZE;
+    for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
+        tb_jmp_cache_clear_page(cpu, d.addr);
+        d.addr += TARGET_PAGE_SIZE;
     }
 }
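To see the difference in behaviour outside of QEMU, the stand-alone sketch below (not part of the commit; clear_page() is a hypothetical stand-in for tb_jmp_cache_clear_page(), and a 4 KiB page size is assumed) prints the pages that the old and new range loops would each clear:

#include <stdio.h>

#define TARGET_PAGE_SIZE 0x1000UL   /* assumed 4 KiB page size */

/* Hypothetical stand-in for tb_jmp_cache_clear_page(). */
static void clear_page(unsigned long page_addr)
{
    printf("clear jmp cache page 0x%lx\n", page_addr);
}

int main(void)
{
    unsigned long addr = 0x4000;                /* start of flushed range */
    unsigned long len = 4 * TARGET_PAGE_SIZE;   /* N = 4 pages */

    /* Old loop: one tb_flush_jmp_cache() call per page, each of which
     * cleared the page and its predecessor, so interior pages were
     * cleared twice (2 * N calls in total). */
    for (unsigned long i = 0; i < len; i += TARGET_PAGE_SIZE) {
        clear_page(addr + i - TARGET_PAGE_SIZE);
        clear_page(addr + i);
    }

    /* New loop: start one page early and clear N + 1 distinct pages,
     * each exactly once. */
    unsigned long p = addr - TARGET_PAGE_SIZE;
    for (unsigned long i = 0, n = len / TARGET_PAGE_SIZE + 1; i < n; i++) {
        clear_page(p);
        p += TARGET_PAGE_SIZE;
    }
    return 0;
}

Running it shows eight clears (three pages repeated) for the old loop and five distinct clears for the new one, matching the N + 1 count described in the commit message.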