Diffstat (limited to 'tcg/tcg.c')
-rw-r--r--  tcg/tcg.c  651
1 file changed, 49 insertions(+), 602 deletions(-)
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 0dc271aac9..ca482c2301 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -43,11 +43,6 @@
#define NO_CPU_IO_DEFS
#include "exec/exec-all.h"
-
-#if !defined(CONFIG_USER_ONLY)
-#include "hw/boards.h"
-#endif
-
#include "tcg/tcg-op.h"
#if UINTPTR_MAX == UINT32_MAX
@@ -63,6 +58,7 @@
#include "elf.h"
#include "exec/log.h"
+#include "tcg-internal.h"
/* Forward declarations for functions declared in tcg-target.c.inc and
used here. */
@@ -153,10 +149,12 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
static int tcg_out_ldst_finalize(TCGContext *s);
#endif
-#define TCG_HIGHWATER 1024
+TCGContext tcg_init_ctx;
+__thread TCGContext *tcg_ctx;
-static TCGContext **tcg_ctxs;
-static unsigned int n_tcg_ctxs;
+TCGContext **tcg_ctxs;
+unsigned int tcg_cur_ctxs;
+unsigned int tcg_max_ctxs;
TCGv_env cpu_env = 0;
const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;
@@ -165,42 +163,6 @@ uintptr_t tcg_splitwx_diff;
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif
-struct tcg_region_tree {
- QemuMutex lock;
- GTree *tree;
- /* padding to avoid false sharing is computed at run-time */
-};
-
-/*
- * We divide code_gen_buffer into equally-sized "regions" that TCG threads
- * dynamically allocate from as demand dictates. Given appropriate region
- * sizing, this minimizes flushes even when some TCG threads generate a lot
- * more code than others.
- */
-struct tcg_region_state {
- QemuMutex lock;
-
- /* fields set at init time */
- void *start;
- void *start_aligned;
- void *end;
- size_t n;
- size_t size; /* size of one region */
- size_t stride; /* .size + guard size */
-
- /* fields protected by the lock */
- size_t current; /* current region index */
- size_t agg_size_full; /* aggregate size of full regions */
-};
-
-static struct tcg_region_state region;
-/*
- * This is an array of struct tcg_region_tree's, with padding.
- * We use void * to simplify the computation of region_trees[i]; each
- * struct is found every tree_size bytes.
- */
-static void *region_trees;
-static size_t tree_size;
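
The two declarations above implement a padded flat array: each struct tcg_region_tree is rounded up to a d-cache line so the per-region locks never share a line, and element i lives at region_trees + i * tree_size. A minimal standalone sketch of the same indexing arithmetic (hypothetical 64-byte line size; a plain int stands in for the mutex and tree):

    #include <stdio.h>
    #include <stdlib.h>

    #define ROUND_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    struct region_tree { int lock; };  /* stand-in for QemuMutex + GTree * */

    int main(void)
    {
        size_t line = 64;  /* hypothetical qemu_dcache_linesize */
        size_t tree_size = ROUND_UP(sizeof(struct region_tree), line);
        size_t n = 8;
        char *trees = aligned_alloc(line, n * tree_size);

        for (size_t i = 0; i < n; i++) {
            /* same computation as region_trees + i * tree_size */
            struct region_tree *rt = (struct region_tree *)(trees + i * tree_size);
            printf("region %zu: tree at %p\n", i, (void *)rt);
        }
        free(trees);
        return 0;
    }
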
static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
@@ -457,456 +419,6 @@ static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target.c.inc"
-/* compare a pointer @ptr and a tb_tc @s */
-static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
-{
- if (ptr >= s->ptr + s->size) {
- return 1;
- } else if (ptr < s->ptr) {
- return -1;
- }
- return 0;
-}
-
-static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
-{
- const struct tb_tc *a = ap;
- const struct tb_tc *b = bp;
-
- /*
- * When both sizes are set, we know this isn't a lookup.
- * This is the most likely case: every TB must be inserted; lookups
- * are a lot less frequent.
- */
- if (likely(a->size && b->size)) {
- if (a->ptr > b->ptr) {
- return 1;
- } else if (a->ptr < b->ptr) {
- return -1;
- }
- /* a->ptr == b->ptr should happen only on deletions */
- g_assert(a->size == b->size);
- return 0;
- }
- /*
- * In all lookups, one of the two .size fields is set to 0.
- * From the glib sources we see that @ap is always the lookup key. However
- * the docs provide no guarantee, so we just mark this case as likely.
- */
- if (likely(a->size == 0)) {
- return ptr_cmp_tb_tc(a->ptr, b);
- }
- return ptr_cmp_tb_tc(b->ptr, a);
-}
-
-static void tcg_region_trees_init(void)
-{
- size_t i;
-
- tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
- region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
- for (i = 0; i < region.n; i++) {
- struct tcg_region_tree *rt = region_trees + i * tree_size;
-
- qemu_mutex_init(&rt->lock);
- rt->tree = g_tree_new(tb_tc_cmp);
- }
-}
-
-static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
-{
- size_t region_idx;
-
- /*
- * Like tcg_splitwx_to_rw, with no assert. The pc may come from
- * a signal handler over which the caller has no control.
- */
- if (!in_code_gen_buffer(p)) {
- p -= tcg_splitwx_diff;
- if (!in_code_gen_buffer(p)) {
- return NULL;
- }
- }
-
- if (p < region.start_aligned) {
- region_idx = 0;
- } else {
- ptrdiff_t offset = p - region.start_aligned;
-
- if (offset > region.stride * (region.n - 1)) {
- region_idx = region.n - 1;
- } else {
- region_idx = offset / region.stride;
- }
- }
- return region_trees + region_idx * tree_size;
-}
-
-void tcg_tb_insert(TranslationBlock *tb)
-{
- struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
-
- g_assert(rt != NULL);
- qemu_mutex_lock(&rt->lock);
- g_tree_insert(rt->tree, &tb->tc, tb);
- qemu_mutex_unlock(&rt->lock);
-}
-
-void tcg_tb_remove(TranslationBlock *tb)
-{
- struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
-
- g_assert(rt != NULL);
- qemu_mutex_lock(&rt->lock);
- g_tree_remove(rt->tree, &tb->tc);
- qemu_mutex_unlock(&rt->lock);
-}
-
-/*
- * Find the TB 'tb' such that
- * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
- * Return NULL if not found.
- */
-TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
-{
- struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
- TranslationBlock *tb;
- struct tb_tc s = { .ptr = (void *)tc_ptr };
-
- if (rt == NULL) {
- return NULL;
- }
-
- qemu_mutex_lock(&rt->lock);
- tb = g_tree_lookup(rt->tree, &s);
- qemu_mutex_unlock(&rt->lock);
- return tb;
-}
-
-static void tcg_region_tree_lock_all(void)
-{
- size_t i;
-
- for (i = 0; i < region.n; i++) {
- struct tcg_region_tree *rt = region_trees + i * tree_size;
-
- qemu_mutex_lock(&rt->lock);
- }
-}
-
-static void tcg_region_tree_unlock_all(void)
-{
- size_t i;
-
- for (i = 0; i < region.n; i++) {
- struct tcg_region_tree *rt = region_trees + i * tree_size;
-
- qemu_mutex_unlock(&rt->lock);
- }
-}
-
-void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
-{
- size_t i;
-
- tcg_region_tree_lock_all();
- for (i = 0; i < region.n; i++) {
- struct tcg_region_tree *rt = region_trees + i * tree_size;
-
- g_tree_foreach(rt->tree, func, user_data);
- }
- tcg_region_tree_unlock_all();
-}
-
-size_t tcg_nb_tbs(void)
-{
- size_t nb_tbs = 0;
- size_t i;
-
- tcg_region_tree_lock_all();
- for (i = 0; i < region.n; i++) {
- struct tcg_region_tree *rt = region_trees + i * tree_size;
-
- nb_tbs += g_tree_nnodes(rt->tree);
- }
- tcg_region_tree_unlock_all();
- return nb_tbs;
-}
-
-static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data)
-{
- TranslationBlock *tb = v;
-
- tb_destroy(tb);
- return FALSE;
-}
-
-static void tcg_region_tree_reset_all(void)
-{
- size_t i;
-
- tcg_region_tree_lock_all();
- for (i = 0; i < region.n; i++) {
- struct tcg_region_tree *rt = region_trees + i * tree_size;
-
- g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
- /* Increment the refcount first so that destroy acts as a reset */
- g_tree_ref(rt->tree);
- g_tree_destroy(rt->tree);
- }
- tcg_region_tree_unlock_all();
-}
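
The ref-then-destroy pair above is a GLib idiom worth spelling out: GTree gained g_tree_remove_all only in GLib 2.70, but g_tree_destroy removes every node and then drops one reference, so taking an extra reference first turns destroy into a reset. A self-contained illustration (assumes GLib):

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        GTree *t = g_tree_new((GCompareFunc)g_strcmp0);

        g_tree_insert(t, (gpointer)"one", GINT_TO_POINTER(1));
        g_tree_insert(t, (gpointer)"two", GINT_TO_POINTER(2));
        printf("before reset: %d nodes\n", g_tree_nnodes(t));

        g_tree_ref(t);      /* hold an extra reference... */
        g_tree_destroy(t);  /* ...so this empties the tree but keeps it alive */
        printf("after reset:  %d nodes\n", g_tree_nnodes(t));

        g_tree_unref(t);    /* now actually free it */
        return 0;
    }
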
-
-static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
-{
- void *start, *end;
-
- start = region.start_aligned + curr_region * region.stride;
- end = start + region.size;
-
- if (curr_region == 0) {
- start = region.start;
- }
- if (curr_region == region.n - 1) {
- end = region.end;
- }
-
- *pstart = start;
- *pend = end;
-}
-
-static void tcg_region_assign(TCGContext *s, size_t curr_region)
-{
- void *start, *end;
-
- tcg_region_bounds(curr_region, &start, &end);
-
- s->code_gen_buffer = start;
- s->code_gen_ptr = start;
- s->code_gen_buffer_size = end - start;
- s->code_gen_highwater = end - TCG_HIGHWATER;
-}
-
-static bool tcg_region_alloc__locked(TCGContext *s)
-{
- if (region.current == region.n) {
- return true;
- }
- tcg_region_assign(s, region.current);
- region.current++;
- return false;
-}
-
-/*
- * Request a new region once the one in use has filled up.
- * Returns true on error.
- */
-static bool tcg_region_alloc(TCGContext *s)
-{
- bool err;
- /* read the region size now; alloc__locked will overwrite it on success */
- size_t size_full = s->code_gen_buffer_size;
-
- qemu_mutex_lock(&region.lock);
- err = tcg_region_alloc__locked(s);
- if (!err) {
- region.agg_size_full += size_full - TCG_HIGHWATER;
- }
- qemu_mutex_unlock(&region.lock);
- return err;
-}
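
Note the size_full - TCG_HIGHWATER accounting above: code generation bails out once code_gen_ptr crosses code_gen_highwater, which tcg_region_assign places TCG_HIGHWATER bytes before the region's end, so a region that has "filled up" contributed at most its size minus that slack. This keeps agg_size_full consistent with the per-context byte counts that tcg_code_size() adds on top of it.
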
-
-/*
- * Perform a context's first region allocation.
- * This function does _not_ increment region.agg_size_full.
- */
-static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
-{
- return tcg_region_alloc__locked(s);
-}
-
-/* Call from a safe-work context */
-void tcg_region_reset_all(void)
-{
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
- unsigned int i;
-
- qemu_mutex_lock(&region.lock);
- region.current = 0;
- region.agg_size_full = 0;
-
- for (i = 0; i < n_ctxs; i++) {
- TCGContext *s = qatomic_read(&tcg_ctxs[i]);
- bool err = tcg_region_initial_alloc__locked(s);
-
- g_assert(!err);
- }
- qemu_mutex_unlock(&region.lock);
-
- tcg_region_tree_reset_all();
-}
-
-#ifdef CONFIG_USER_ONLY
-static size_t tcg_n_regions(void)
-{
- return 1;
-}
-#else
-/*
- * It is likely that some vCPUs will translate more code than others, so we
- * first try to set more regions than max_cpus, with those regions being of
- * reasonable size. If that's not possible we make do by evenly dividing
- * the code_gen_buffer among the vCPUs.
- */
-static size_t tcg_n_regions(void)
-{
- size_t i;
-
- /* Use a single region if all we have is one vCPU thread */
-#if !defined(CONFIG_USER_ONLY)
- MachineState *ms = MACHINE(qdev_get_machine());
- unsigned int max_cpus = ms->smp.max_cpus;
-#endif
- if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
- return 1;
- }
-
- /* Try to have more regions than max_cpus, with each region being >= 2 MB */
- for (i = 8; i > 0; i--) {
- size_t regions_per_thread = i;
- size_t region_size;
-
- region_size = tcg_init_ctx.code_gen_buffer_size;
- region_size /= max_cpus * regions_per_thread;
-
- if (region_size >= 2 * 1024u * 1024) {
- return max_cpus * regions_per_thread;
- }
- }
- /* If we can't, then just allocate one region per vCPU thread */
- return max_cpus;
-}
-#endif
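
To make the loop concrete with hypothetical numbers: with a 256 MiB code_gen_buffer and max_cpus = 8, the first iteration (i = 8) yields 256 MiB / 64 = 4 MiB per region, which clears the 2 MiB floor, so 64 regions are used. A 64 MiB buffer only succeeds at i = 4 (64 MiB / 32 = 2 MiB) and returns 32 regions, and anything under 16 MiB falls through to one region per vCPU thread.
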
-
-/*
- * Initializes region partitioning.
- *
- * Called at init time from the parent thread (i.e. the one calling
- * tcg_context_init), after the target's TCG globals have been set.
- *
- * Region partitioning works by splitting code_gen_buffer into separate regions,
- * and then assigning regions to TCG threads so that the threads can translate
- * code in parallel without synchronization.
- *
- * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
- * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
- * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
- * must have been parsed before calling this function, since it calls
- * qemu_tcg_mttcg_enabled().
- *
- * In user-mode we use a single region. Having multiple regions in user-mode
- * is not supported, because the number of vCPU threads (recall that each thread
- * spawned by the guest corresponds to a vCPU thread) is only bounded by the
- * OS, and usually this number is huge (tens of thousands is not uncommon).
- * Thus, given this large bound on the number of vCPU threads and the fact
- * that code_gen_buffer is allocated at compile-time, we cannot guarantee
- * the availability of at least one region per vCPU thread.
- *
- * However, this user-mode limitation is unlikely to be a significant problem
- * in practice. Multi-threaded guests share most if not all of their translated
- * code, which makes parallel code generation less appealing than in softmmu.
- */
-void tcg_region_init(void)
-{
- void *buf = tcg_init_ctx.code_gen_buffer;
- void *aligned;
- size_t size = tcg_init_ctx.code_gen_buffer_size;
- size_t page_size = qemu_real_host_page_size;
- size_t region_size;
- size_t n_regions;
- size_t i;
-
- n_regions = tcg_n_regions();
-
- /* The first region will be 'aligned - buf' bytes larger than the others */
- aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
- g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
- /*
- * Make region_size a multiple of page_size, using aligned as the start.
- * As a result of this we might end up with a few extra pages at the end of
- * the buffer; we will assign those to the last region.
- */
- region_size = (size - (aligned - buf)) / n_regions;
- region_size = QEMU_ALIGN_DOWN(region_size, page_size);
-
- /* A region must have at least 2 pages; one code, one guard */
- g_assert(region_size >= 2 * page_size);
-
- /* init the region struct */
- qemu_mutex_init(&region.lock);
- region.n = n_regions;
- region.size = region_size - page_size;
- region.stride = region_size;
- region.start = buf;
- region.start_aligned = aligned;
- /* page-align the end, since its last page will be a guard page */
- region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
- /* account for that last guard page */
- region.end -= page_size;
-
- /*
- * Set guard pages in the rw buffer, as that's the one into which
- * buffer overruns could occur. Do not set guard pages in the rx
- * buffer -- let that one use hugepages throughout.
- */
- for (i = 0; i < region.n; i++) {
- void *start, *end;
-
- tcg_region_bounds(i, &start, &end);
-
- /*
- * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
- * rejects a permission change from RWX -> NONE. Guard pages are
- * nice for bug detection but are not essential; ignore any failure.
- */
- (void)qemu_mprotect_none(end, page_size);
- }
-
- tcg_region_trees_init();
-
- /* In user-mode we support only one ctx, so do the initial allocation now */
-#ifdef CONFIG_USER_ONLY
- {
- bool err = tcg_region_initial_alloc__locked(tcg_ctx);
-
- g_assert(!err);
- }
-#endif
-}
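
A standalone sketch of the partition arithmetic above, with hypothetical numbers: a 1 MiB buffer that starts 0x100 into a 4 KiB page, split into 4 regions. Region 0 absorbs the unaligned head, the last region absorbs the leftover pages, and each region donates its final page as a guard:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))
    #define ALIGN_DOWN(x, a) ((x) & ~((uintptr_t)(a) - 1))

    int main(void)
    {
        uintptr_t buf = 0x10000100;  /* hypothetical, page-misaligned */
        size_t size = 1024 * 1024, page = 4096, n = 4;

        uintptr_t aligned = ALIGN_UP(buf, page);
        size_t stride = ALIGN_DOWN((size - (aligned - buf)) / n, page);
        uintptr_t end = ALIGN_DOWN(buf + size, page) - page;  /* last guard */

        for (size_t i = 0; i < n; i++) {
            uintptr_t start = aligned + i * stride;
            uintptr_t stop = start + stride - page;  /* guard page follows */
            if (i == 0) {
                start = buf;
            }
            if (i == n - 1) {
                stop = end;
            }
            printf("region %zu: [%#" PRIxPTR ", %#" PRIxPTR ")\n", i, start, stop);
        }
        return 0;
    }
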
-
-#ifdef CONFIG_DEBUG_TCG
-const void *tcg_splitwx_to_rx(void *rw)
-{
- /* Pass NULL pointers unchanged. */
- if (rw) {
- g_assert(in_code_gen_buffer(rw));
- rw += tcg_splitwx_diff;
- }
- return rw;
-}
-
-void *tcg_splitwx_to_rw(const void *rx)
-{
- /* Pass NULL pointers unchanged. */
- if (rx) {
- rx -= tcg_splitwx_diff;
- /* Assert that we end with a pointer in the rw region. */
- g_assert(in_code_gen_buffer(rx));
- }
- return (void *)rx;
-}
-#endif /* CONFIG_DEBUG_TCG */
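
For context on the two helpers above: split-wx maps the same buffer twice, once writable (rw) and once executable (rx), and tcg_splitwx_diff is the constant rx - rw, so both conversions are pure pointer arithmetic. A hedged, Linux-only sketch of such a dual mapping (assumes memfd_create; QEMU's actual buffer setup lives elsewhere):

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <unistd.h>
    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t size = 4096;
        int fd = memfd_create("jit-sketch", 0);

        if (fd < 0 || ftruncate(fd, size) < 0) {
            return 1;
        }
        void *rw = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        void *rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
        ptrdiff_t diff = (char *)rx - (char *)rw;  /* tcg_splitwx_diff analogue */

        printf("rw=%p rx=%p diff=%td\n", rw, rx, diff);
        return 0;
    }
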
-
static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
@@ -939,10 +451,8 @@ void tcg_register_thread(void)
#else
void tcg_register_thread(void)
{
- MachineState *ms = MACHINE(qdev_get_machine());
TCGContext *s = g_malloc(sizeof(*s));
unsigned int i, n;
- bool err;
*s = tcg_init_ctx;
@@ -956,79 +466,19 @@ void tcg_register_thread(void)
}
/* Claim an entry in tcg_ctxs */
- n = qatomic_fetch_inc(&n_tcg_ctxs);
- g_assert(n < ms->smp.max_cpus);
+ n = qatomic_fetch_inc(&tcg_cur_ctxs);
+ g_assert(n < tcg_max_ctxs);
qatomic_set(&tcg_ctxs[n], s);
if (n > 0) {
alloc_tcg_plugin_context(s);
+ tcg_region_initial_alloc(s);
}
tcg_ctx = s;
- qemu_mutex_lock(&region.lock);
- err = tcg_region_initial_alloc__locked(tcg_ctx);
- g_assert(!err);
- qemu_mutex_unlock(&region.lock);
}
#endif /* !CONFIG_USER_ONLY */
-/*
- * Returns the size (in bytes) of all translated code (i.e. from all regions)
- * currently in the cache.
- * See also: tcg_code_capacity()
- * Do not confuse with tcg_current_code_size(); that one applies to a single
- * TCG context.
- */
-size_t tcg_code_size(void)
-{
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
- unsigned int i;
- size_t total;
-
- qemu_mutex_lock(&region.lock);
- total = region.agg_size_full;
- for (i = 0; i < n_ctxs; i++) {
- const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
- size_t size;
-
- size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
- g_assert(size <= s->code_gen_buffer_size);
- total += size;
- }
- qemu_mutex_unlock(&region.lock);
- return total;
-}
-
-/*
- * Returns the code capacity (in bytes) of the entire cache, i.e. including all
- * regions.
- * See also: tcg_code_size()
- */
-size_t tcg_code_capacity(void)
-{
- size_t guard_size, capacity;
-
- /* no need for synchronization; these variables are set at init time */
- guard_size = region.stride - region.size;
- capacity = region.end + guard_size - region.start;
- capacity -= region.n * (guard_size + TCG_HIGHWATER);
- return capacity;
-}
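
The per-region deduction covers both the guard page and the high-water slack. With hypothetical numbers: 16 regions, 4 KiB pages and TCG_HIGHWATER = 1024, a 32 MiB buffer reports 32 MiB - 16 * (4096 + 1024) bytes, i.e. 80 KiB less than the raw mapping.
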
-
-size_t tcg_tb_phys_invalidate_count(void)
-{
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
- unsigned int i;
- size_t total = 0;
-
- for (i = 0; i < n_ctxs; i++) {
- const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
-
- total += qatomic_read(&s->tb_phys_invalidate_count);
- }
- return total;
-}
-
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
@@ -1101,8 +551,9 @@ static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
TCGReg reg, const char *name);
-void tcg_context_init(TCGContext *s)
+static void tcg_context_init(unsigned max_cpus)
{
+ TCGContext *s = &tcg_init_ctx;
int op, total_args, n, i;
TCGOpDef *def;
TCGArgConstraint *args_ct;
@@ -1167,11 +618,11 @@ void tcg_context_init(TCGContext *s)
*/
#ifdef CONFIG_USER_ONLY
tcg_ctxs = &tcg_ctx;
- n_tcg_ctxs = 1;
+ tcg_cur_ctxs = 1;
+ tcg_max_ctxs = 1;
#else
- MachineState *ms = MACHINE(qdev_get_machine());
- unsigned int max_cpus = ms->smp.max_cpus;
- tcg_ctxs = g_new(TCGContext *, max_cpus);
+ tcg_max_ctxs = max_cpus;
+ tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif
tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
@@ -1179,6 +630,12 @@ void tcg_context_init(TCGContext *s)
cpu_env = temp_tcgv_ptr(ts);
}
+void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
+{
+ tcg_context_init(max_cpus);
+ tcg_region_init(tb_size, splitwx, max_cpus);
+}
+
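
This new entry point is the shape of the whole patch: the MACHINE(qdev_get_machine()) lookups are gone from this file, the caller now passes tb_size, the split-wx setting and max_cpus explicitly, and the region code removed here evidently lives on behind tcg_region_init and tcg_region_initial_alloc, which tcg.c now merely calls.
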
/*
* Allocate TBs right before their corresponding translated code, making
* sure that TBs and code are on different cache lines.
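
tcg_tb_alloc (its body is outside this hunk) realizes the comment above by rounding the bump pointer up to an instruction-cache line once for the TB header and once more for the code that follows it. A standalone sketch of that pattern (hypothetical 64-byte line; a plain struct stands in for TranslationBlock):

    #include <stdio.h>
    #include <stdint.h>

    #define ROUND_UP(p, a) (((p) + (a) - 1) & ~((uintptr_t)(a) - 1))

    struct tb { char meta[40]; };  /* stand-in for TranslationBlock */

    int main(void)
    {
        uintptr_t line = 64;  /* hypothetical qemu_icache_linesize */
        static unsigned char buffer[4096];

        struct tb *tb = (struct tb *)ROUND_UP((uintptr_t)buffer, line);
        void *code = (void *)ROUND_UP((uintptr_t)(tb + 1), line);

        printf("tb at %p, code at %p (separate lines)\n", (void *)tb, code);
        return 0;
    }
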
@@ -1206,32 +663,16 @@ TranslationBlock *tcg_tb_alloc(TCGContext *s)
void tcg_prologue_init(TCGContext *s)
{
- size_t prologue_size, total_size;
- void *buf0, *buf1;
-
- /* Put the prologue at the beginning of code_gen_buffer. */
- buf0 = s->code_gen_buffer;
- total_size = s->code_gen_buffer_size;
- s->code_ptr = buf0;
- s->code_buf = buf0;
- s->data_gen_ptr = NULL;
+ size_t prologue_size;
- /*
- * The region trees are not yet configured, but tcg_splitwx_to_rx
- * needs the bounds for an assert.
- */
- region.start = buf0;
- region.end = buf0 + total_size;
+ s->code_ptr = s->code_gen_ptr;
+ s->code_buf = s->code_gen_ptr;
+ s->data_gen_ptr = NULL;
#ifndef CONFIG_TCG_INTERPRETER
- tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(buf0);
+ tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif
- /* Compute a high-water mark, at which we voluntarily flush the buffer
- and start over. The size here is arbitrary, significantly larger
- than we expect the code generation for any one opcode to require. */
- s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);
-
#ifdef TCG_TARGET_NEED_POOL_LABELS
s->pool_labels = NULL;
#endif
@@ -1248,32 +689,25 @@ void tcg_prologue_init(TCGContext *s)
}
#endif
- buf1 = s->code_ptr;
+ prologue_size = tcg_current_code_size(s);
+
#ifndef CONFIG_TCG_INTERPRETER
- flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(buf0), (uintptr_t)buf0,
- tcg_ptr_byte_diff(buf1, buf0));
+ flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
+ (uintptr_t)s->code_buf, prologue_size);
#endif
- /* Deduct the prologue from the buffer. */
- prologue_size = tcg_current_code_size(s);
- s->code_gen_ptr = buf1;
- s->code_gen_buffer = buf1;
- s->code_buf = buf1;
- total_size -= prologue_size;
- s->code_gen_buffer_size = total_size;
-
- tcg_register_jit(tcg_splitwx_to_rx(s->code_gen_buffer), total_size);
+ tcg_region_prologue_set(s);
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
FILE *logfile = qemu_log_lock();
qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
if (s->data_gen_ptr) {
- size_t code_size = s->data_gen_ptr - buf0;
+ size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
size_t data_size = prologue_size - code_size;
size_t i;
- log_disas(buf0, code_size);
+ log_disas(s->code_gen_ptr, code_size);
for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
if (sizeof(tcg_target_ulong) == 8) {
@@ -1287,7 +721,7 @@ void tcg_prologue_init(TCGContext *s)
}
}
} else {
- log_disas(buf0, prologue_size);
+ log_disas(s->code_gen_ptr, prologue_size);
}
qemu_log("\n");
qemu_log_flush();
@@ -2649,6 +2083,19 @@ void tcg_op_remove(TCGContext *s, TCGOp *op)
#endif
}
+void tcg_remove_ops_after(TCGOp *op)
+{
+ TCGContext *s = tcg_ctx;
+
+ while (true) {
+ TCGOp *last = tcg_last_op();
+ if (last == op) {
+ return;
+ }
+ tcg_op_remove(s, last);
+ }
+}
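
A hedged usage sketch for the new helper (illustrative front-end code, not part of this patch; emit_speculative_ops and must_back_out are hypothetical):

    TCGOp *checkpoint = tcg_last_op();  /* remember the tail of the op list */

    emit_speculative_ops();             /* hypothetical helper */
    if (must_back_out) {
        /* pop everything emitted after the checkpoint */
        tcg_remove_ops_after(checkpoint);
    }
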
+
static TCGOp *tcg_op_alloc(TCGOpcode opc)
{
TCGContext *s = tcg_ctx;
@@ -4480,7 +3927,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
static inline
void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
{
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
+ unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
unsigned int i;
for (i = 0; i < n_ctxs; i++) {
@@ -4543,7 +3990,7 @@ void tcg_dump_op_count(void)
int64_t tcg_cpu_exec_time(void)
{
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
+ unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
unsigned int i;
int64_t ret = 0;