author     Sergey Fedorov    2016-07-15 19:58:44 +0200
committer  Paolo Bonzini     2016-09-13 19:08:42 +0200
commit     118b07308a8cedc16ef63d7ab243a95f1701db40 (patch)
tree       ac1e17b27cab6c16018a3409a81d8e31c0472c18
parent     tcg: Prepare safe tb_jmp_cache lookup out of tb_lock (diff)
tcg: Prepare safe access to tb_flushed out of tb_lock
Ensure atomicity and ordering of CPU's 'tb_flushed' access for future
translation block lookup out of 'tb_lock'.

This field can only be touched from another thread by tb_flush() in user
mode emulation. So the only accesses that need to be sequentially atomic
are:
 * the single write in tb_flush();
 * reads/writes done outside of 'tb_lock'.

In the future, before MTTCG can be enabled in system mode, tb_flush()
must be made safe, and this field then becomes unnecessary.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20160715175852.30749-5-sergey.fedorov@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
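For readers outside the QEMU tree, the access pattern described above can be sketched in isolation. The snippet below is only an illustration, not QEMU code: the names fake_cpu, fake_tb_flush() and may_chain_tbs() are made up for this example, and C11 atomics stand in for QEMU's atomic_mb_set() and the accesses otherwise done under tb_lock().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_cpu {
    atomic_bool tb_flushed;   /* set by the flushing thread */
};

/* Stands in for the write done in tb_flush(): publish "a flush happened"
 * with a single sequentially consistent store. */
static void fake_tb_flush(struct fake_cpu *cpu)
{
    /* ... translation caches would be invalidated here ... */
    atomic_store(&cpu->tb_flushed, true);
}

/* Stands in for the check in tb_find_fast(): if a flush happened, consume
 * the flag and refuse to chain, since the previous TB may be gone. */
static bool may_chain_tbs(struct fake_cpu *cpu)
{
    if (atomic_load(&cpu->tb_flushed)) {
        atomic_store(&cpu->tb_flushed, false);
        return false;
    }
    return true;
}

int main(void)
{
    struct fake_cpu cpu;

    atomic_init(&cpu.tb_flushed, false);
    fake_tb_flush(&cpu);
    printf("chain right after a flush: %s\n", may_chain_tbs(&cpu) ? "yes" : "no");
    printf("chain on the next lookup:  %s\n", may_chain_tbs(&cpu) ? "yes" : "no");
    return 0;
}

In this sketch both sides use sequentially consistent operations for simplicity; in the patch itself the read/clear in tb_find_fast() still happens under tb_lock, so only the tb_flush() side needs the memory barrier, as the commit message states.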
-rw-r--r--  cpu-exec.c       16
-rw-r--r--  translate-all.c   4
2 files changed, 9 insertions, 11 deletions
diff --git a/cpu-exec.c b/cpu-exec.c
index 32b58edb31..877ff8ed70 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -338,13 +338,6 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
                  tb->flags != flags)) {
         tb = tb_find_slow(cpu, pc, cs_base, flags);
     }
-    if (cpu->tb_flushed) {
-        /* Ensure that no TB jump will be modified as the
-         * translation buffer has been flushed.
-         */
-        last_tb = NULL;
-        cpu->tb_flushed = false;
-    }
 #ifndef CONFIG_USER_ONLY
     /* We don't take care of direct jumps when address mapping changes in
      * system emulation. So it's not safe to make a direct jump to a TB
@@ -356,7 +349,12 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
 #endif
     /* See if we can patch the calling TB. */
     if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
-        tb_add_jump(last_tb, tb_exit, tb);
+        /* Check if translation buffer has been flushed */
+        if (cpu->tb_flushed) {
+            cpu->tb_flushed = false;
+        } else {
+            tb_add_jump(last_tb, tb_exit, tb);
+        }
     }
     tb_unlock();
     return tb;
@@ -617,7 +615,7 @@ int cpu_exec(CPUState *cpu)
                 break;
             }
 
-            cpu->tb_flushed = false; /* reset before first TB lookup */
+            atomic_mb_set(&cpu->tb_flushed, false); /* reset before first TB lookup */
             for(;;) {
                 cpu_handle_interrupt(cpu, &last_tb);
                 tb = tb_find_fast(cpu, last_tb, tb_exit);
diff --git a/translate-all.c b/translate-all.c
index 77ae59d7e9..e753a50640 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -848,7 +848,6 @@ void tb_flush(CPUState *cpu)
         > tcg_ctx.code_gen_buffer_size) {
         cpu_abort(cpu, "Internal error: code buffer overflow\n");
     }
-    tcg_ctx.tb_ctx.nb_tbs = 0;
 
     CPU_FOREACH(cpu) {
         int i;
@@ -856,9 +855,10 @@ void tb_flush(CPUState *cpu)
         for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
             atomic_set(&cpu->tb_jmp_cache[i], NULL);
         }
-        cpu->tb_flushed = true;
+        atomic_mb_set(&cpu->tb_flushed, true);
     }
 
+    tcg_ctx.tb_ctx.nb_tbs = 0;
     qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
     page_flush_tb();