author     Richard Henderson   2022-08-15 22:13:05 +0200
committer  Richard Henderson   2022-10-04 21:13:12 +0200
commit     a976a99a29755e8c7a275ac269db8a0a20d79e95 (patch)
tree       27277657c7c72c3e03b61ecd9011ba28ac4571da /accel
parent     accel/tcg: Inline tb_flush_jmp_cache (diff)
include/hw/core: Create struct CPUJumpCache
Wrap the bare TranslationBlock pointer into a structure.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
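For orientation, a minimal self-contained sketch of what the wrapping changes at the lookup sites. The hash function, the plain (non-atomic) accesses and the CPUState layout below are illustrative stand-ins only; the real code uses tb_jmp_cache_hash_func() and the qatomic_* helpers shown in the diff that follows.

/* Sketch, not part of the patch: stand-ins for QEMU types and helpers. */
#include <stdint.h>

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

typedef struct TranslationBlock TranslationBlock;   /* opaque here */

/* New: the bare pointer array is wrapped in a struct... */
typedef struct CPUJumpCache {
    struct {
        TranslationBlock *tb;
    } array[TB_JMP_CACHE_SIZE];
} CPUJumpCache;

/* ...and the CPU now carries a pointer to that struct instead of
 * embedding a bare array of TranslationBlock pointers. */
typedef struct CPUState {
    CPUJumpCache *tb_jmp_cache;
} CPUState;

/* Illustrative hash; the real tb_jmp_cache_hash_func() lives in tb-hash.h. */
static inline uint32_t jc_hash(uint64_t pc)
{
    return (uint32_t)pc & (TB_JMP_CACHE_SIZE - 1);
}

/* Lookup sites change from cpu->tb_jmp_cache[h]
 * to cpu->tb_jmp_cache->array[h].tb. */
static inline TranslationBlock *jc_lookup(CPUState *cpu, uint64_t pc)
{
    return cpu->tb_jmp_cache->array[jc_hash(pc)].tb;
}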
Diffstat (limited to 'accel')
-rw-r--r--  accel/stubs/tcg-stub.c    |  4
-rw-r--r--  accel/tcg/cpu-exec.c      | 10
-rw-r--r--  accel/tcg/cputlb.c        |  9
-rw-r--r--  accel/tcg/tb-hash.h       |  1
-rw-r--r--  accel/tcg/tb-jmp-cache.h  | 24
-rw-r--r--  accel/tcg/translate-all.c | 28
6 files changed, 66 insertions(+), 10 deletions(-)
diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c
index 6ce8a34228..c1b05767c0 100644
--- a/accel/stubs/tcg-stub.c
+++ b/accel/stubs/tcg-stub.c
@@ -21,6 +21,10 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
}
+void tcg_flush_jmp_cache(CPUState *cpu)
+{
+}
+
int probe_access_flags(CPUArchState *env, target_ulong addr,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index dd58a144a8..2d7e610ee2 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -42,6 +42,7 @@
#include "sysemu/replay.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto.h"
+#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"
@@ -252,7 +253,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
tcg_debug_assert(!(cflags & CF_INVALID));
hash = tb_jmp_cache_hash_func(pc);
- tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
+ tb = qatomic_rcu_read(&cpu->tb_jmp_cache->array[hash].tb);
if (likely(tb &&
tb->pc == pc &&
@@ -266,7 +267,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
if (tb == NULL) {
return NULL;
}
- qatomic_set(&cpu->tb_jmp_cache[hash], tb);
+ qatomic_set(&cpu->tb_jmp_cache->array[hash].tb, tb);
return tb;
}
@@ -987,6 +988,8 @@ int cpu_exec(CPUState *cpu)
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
+ uint32_t h;
+
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock();
@@ -994,7 +997,8 @@ int cpu_exec(CPUState *cpu)
* We add the TB in the virtual pc hash table
* for the fast lookup
*/
- qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
+ h = tb_jmp_cache_hash_func(pc);
+ qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
}
#ifndef CONFIG_USER_ONLY
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index c7909fb619..6f1c00682b 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -100,10 +100,11 @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
- unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
+ int i, i0 = tb_jmp_cache_hash_page(page_addr);
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
- qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
+ qatomic_set(&jc->array[i0 + i].tb, NULL);
}
}
@@ -356,7 +357,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
qemu_spin_unlock(&env_tlb(env)->c.lock);
- cpu_tb_jmp_cache_clear(cpu);
+ tcg_flush_jmp_cache(cpu);
if (to_clean == ALL_MMUIDX_BITS) {
qatomic_set(&env_tlb(env)->c.full_flush_count,
@@ -785,7 +786,7 @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
* longer to clear each entry individually than it will to clear it all.
*/
if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
- cpu_tb_jmp_cache_clear(cpu);
+ tcg_flush_jmp_cache(cpu);
return;
}
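To put a number on the threshold in the hunk above: with TB_JMP_CACHE_BITS = 12 the cache holds 4096 entries, so with (for example) 4 KiB target pages the cutoff is 4096 × 4 KiB = 16 MiB; flush ranges at least that large are handled by dropping the whole jump cache rather than clearing it page by page. TARGET_PAGE_SIZE varies by target, so the 16 MiB figure is only illustrative.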
diff --git a/accel/tcg/tb-hash.h b/accel/tcg/tb-hash.h
index 0a273d9605..83dc610e4c 100644
--- a/accel/tcg/tb-hash.h
+++ b/accel/tcg/tb-hash.h
@@ -23,6 +23,7 @@
#include "exec/cpu-defs.h"
#include "exec/exec-all.h"
#include "qemu/xxhash.h"
+#include "tb-jmp-cache.h"
#ifdef CONFIG_SOFTMMU
diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
new file mode 100644
index 0000000000..2d8fbb1bfe
--- /dev/null
+++ b/accel/tcg/tb-jmp-cache.h
@@ -0,0 +1,24 @@
+/*
+ * The per-CPU TranslationBlock jump cache.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef ACCEL_TCG_TB_JMP_CACHE_H
+#define ACCEL_TCG_TB_JMP_CACHE_H
+
+#define TB_JMP_CACHE_BITS 12
+#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
+
+/*
+ * Accessed in parallel; all accesses to 'tb' must be atomic.
+ */
+struct CPUJumpCache {
+ struct {
+ TranslationBlock *tb;
+ } array[TB_JMP_CACHE_SIZE];
+};
+
+#endif /* ACCEL_TCG_TB_JMP_CACHE_H */
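The comment in the new header requires every access to the 'tb' field to be atomic, because lookup in cpu_exec(), installation of newly generated TBs and invalidation can all touch the cache concurrently; the patch enforces this with qatomic_rcu_read() and qatomic_set(). Below is a stand-alone sketch of the same discipline using C11 atomics purely as a stand-in (an assumption for illustration; QEMU's qatomic_* wrappers are not plain C11 atomics).

/* Sketch, not part of the patch: C11 atomics as a stand-in for qatomic_*. */
#include <stdatomic.h>

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

typedef struct TranslationBlock TranslationBlock;

/* Same shape as the new header, with the pointer made explicitly atomic. */
struct JumpCacheSketch {
    struct {
        TranslationBlock *_Atomic tb;
    } array[TB_JMP_CACHE_SIZE];
};

/* Reader side (cf. qatomic_rcu_read() in tb_lookup()). */
static TranslationBlock *jc_read(struct JumpCacheSketch *jc, unsigned hash)
{
    return atomic_load_explicit(&jc->array[hash].tb, memory_order_consume);
}

/* Writer side: install a new tb or clear a stale entry
 * (cf. qatomic_set() in cpu_exec() and do_tb_phys_invalidate()). */
static void jc_write(struct JumpCacheSketch *jc, unsigned hash,
                     TranslationBlock *tb)
{
    atomic_store_explicit(&jc->array[hash].tb, tb, memory_order_relaxed);
}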
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 3a63113c41..63ecc15236 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -58,6 +58,7 @@
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "hw/core/tcg-cpu-ops.h"
+#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"
@@ -967,7 +968,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
}
CPU_FOREACH(cpu) {
- cpu_tb_jmp_cache_clear(cpu);
+ tcg_flush_jmp_cache(cpu);
}
qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
@@ -1187,8 +1188,9 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
/* remove the TB from the hash list */
h = tb_jmp_cache_hash_func(tb->pc);
CPU_FOREACH(cpu) {
- if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
- qatomic_set(&cpu->tb_jmp_cache[h], NULL);
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
+ if (qatomic_read(&jc->array[h].tb) == tb) {
+ qatomic_set(&jc->array[h].tb, NULL);
}
}
@@ -2443,6 +2445,26 @@ int page_unprotect(target_ulong address, uintptr_t pc)
}
#endif /* CONFIG_USER_ONLY */
+/*
+ * Called by generic code at e.g. cpu reset after cpu creation,
+ * therefore we must be prepared to allocate the jump cache.
+ */
+void tcg_flush_jmp_cache(CPUState *cpu)
+{
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
+
+ if (likely(jc)) {
+ for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
+ qatomic_set(&jc->array[i].tb, NULL);
+ }
+ } else {
+ /* This should happen once during realize, and thus never race. */
+ jc = g_new0(CPUJumpCache, 1);
+ jc = qatomic_xchg(&cpu->tb_jmp_cache, jc);
+ assert(jc == NULL);
+ }
+}
+
/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
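The else branch of the new tcg_flush_jmp_cache() above allocates the cache on the first flush, and the qatomic_xchg()/assert() pair encodes the expectation that this happens exactly once, during realize, with no racing caller. A self-contained sketch of that allocate-on-first-flush idiom, with C11 atomics and calloc() standing in for qatomic_xchg() and g_new0() (illustrative only, not the patch's code):

/* Sketch, not part of the patch. */
#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

struct Cache { void *entry[16]; };        /* stand-in for CPUJumpCache */

static struct Cache *_Atomic cache_slot;  /* stand-in for cpu->tb_jmp_cache */

static void flush_or_allocate(void)
{
    struct Cache *c = atomic_load_explicit(&cache_slot, memory_order_relaxed);

    if (c) {
        /* Normal case: the cache exists, just clear its entries. */
        for (int i = 0; i < 16; i++) {
            c->entry[i] = NULL;
        }
    } else {
        /* First call, expected once during realize: allocate and publish,
         * then assert that no concurrent caller published one before us. */
        c = calloc(1, sizeof(*c));
        c = atomic_exchange(&cache_slot, c);
        assert(c == NULL);
    }
}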