diff options
| author | Emilio G. Cota | 2017-08-01 21:11:12 +0200 |
|---|---|---|
| committer | Richard Henderson | 2018-06-15 19:42:55 +0200 |
| commit | 128ed2278c4e6ad063f101c5dda7999b43f2d8a3 (patch) | |
| tree | aaca67bc2efb756b33cecc236da40344119e5f81 /tcg | |
| parent | tcg: track TBs with per-region BST's (diff) | |
| download | qemu-128ed2278c4e6ad063f101c5dda7999b43f2d8a3.tar.gz qemu-128ed2278c4e6ad063f101c5dda7999b43f2d8a3.tar.xz qemu-128ed2278c4e6ad063f101c5dda7999b43f2d8a3.zip | |
tcg: move tb_ctx.tb_phys_invalidate_count to tcg_ctx
Thereby making it per-TCGContext. Once we remove tb_lock, this will
avoid an atomic increment every time a TB is invalidated.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'tcg')
| -rw-r--r-- | tcg/tcg.c | 14 | ||||
| -rw-r--r-- | tcg/tcg.h | 3 |
2 files changed, 17 insertions, 0 deletions
@@ -791,6 +791,20 @@ size_t tcg_code_capacity(void)
     return capacity;
 }
 
+size_t tcg_tb_phys_invalidate_count(void)
+{
+    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
+    unsigned int i;
+    size_t total = 0;
+
+    for (i = 0; i < n_ctxs; i++) {
+        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
+
+        total += atomic_read(&s->tb_phys_invalidate_count);
+    }
+    return total;
+}
+
 /* pool based memory allocation */
 void *tcg_malloc_internal(TCGContext *s, int size)
 {
@@ -695,6 +695,8 @@ struct TCGContext {
     /* Threshold to flush the translated code buffer. */
     void *code_gen_highwater;
 
+    size_t tb_phys_invalidate_count;
+
     /* Track which vCPU triggers events */
     CPUState *cpu;                      /* *_trans */
@@ -868,6 +870,7 @@ size_t tcg_code_capacity(void);
 
 void tcg_tb_insert(TranslationBlock *tb);
 void tcg_tb_remove(TranslationBlock *tb);
+size_t tcg_tb_phys_invalidate_count(void);
 TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
 void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
 size_t tcg_nb_tbs(void);
